logstash-filter-metrics 0.1.0

Initial release listing: the full contents of the logstash-filter-metrics 0.1.0 gem are shown below, file by file.
checksums.yaml ADDED
@@ -0,0 +1,15 @@
1
+ ---
2
+ !binary "U0hBMQ==":
3
+ metadata.gz: !binary |-
4
+ ZDcwOTM5NjE4ZGNmOGQyMDQ4NTVkMDA2YjAwMzI0Y2NiMTBkZWViZA==
5
+ data.tar.gz: !binary |-
6
+ ZDIyYmI3NzYyNmQwOGU3NGEzNmNkZmI0MTY5YzFlN2IxNjY3NDUxZQ==
7
+ SHA512:
8
+ metadata.gz: !binary |-
9
+ NGEzNWU1MThmNzA0OGQ0YWM1NDg1ODQ3M2YwNjE0MjcxYzY4Yjk4ZTYxZWZm
10
+ MzhmMzFlMzA2ZWEwZmViYWQ4YWIzY2FkM2FkYmY3Y2I5YWIyYWExOTAwMWIz
11
+ NzQ5ZGIwMDJlOWUzYThjZGI1MjFhMmQ0NDlhYWFiY2UxMzkwYTA=
12
+ data.tar.gz: !binary |-
13
+ MjQ3YzQ4NDM0NzA4NTA4MWRiYTY2NWQ4YzAxMjZkMmNlZjJkZjdhMjQ5YWY0
14
+ ZWU0NTI3MWMxOTcxYmZhMjNmYmQ3Y2ViOGI3ODg3MDc2NjQ5YjdjZjI5Nzk4
15
+ ODJlYTQyZjU5OWFkYWRkMWViYWJlMjI0YzE2YmQ1OTJlZGZkYWE=
data/.gitignore ADDED
@@ -0,0 +1,4 @@
1
+ *.gem
2
+ Gemfile.lock
3
+ .bundle
4
+ vendor
data/Gemfile ADDED
@@ -0,0 +1,3 @@
1
# Development/release dependencies for this plugin.
# BUGFIX: use TLS — fetching gems over plain http is vulnerable to
# man-in-the-middle tampering; rubygems.org serves https.
source 'https://rubygems.org'
gem 'rake'
gem 'gem_publisher'
data/Rakefile ADDED
@@ -0,0 +1,6 @@
1
# Vendor-file descriptors; populated per plugin and consumed by the
# rakelib "vendor" task.
@files = []

# With no task given, just show the list of available tasks.
task :default do
  system("rake -T")
end
data/lib/logstash/filters/metrics.rb ADDED
@@ -0,0 +1,241 @@
1
+ # encoding: utf-8
2
+ require "securerandom"
3
+ require "logstash/filters/base"
4
+ require "logstash/namespace"
5
+
6
+ # The metrics filter is useful for aggregating metrics.
7
+ #
8
+ # For example, if you have a field 'response' that is
9
+ # a http response code, and you want to count each
10
+ # kind of response, you can do this:
11
+ #
12
+ # filter {
13
+ # metrics {
14
+ # meter => [ "http.%{response}" ]
15
+ # add_tag => "metric"
16
+ # }
17
+ # }
18
+ #
19
+ # Metrics are flushed every 5 seconds by default or according to
20
+ # 'flush_interval'. Metrics appear as
21
+ # new events in the event stream and go through any filters
22
+ # that occur after as well as outputs.
23
+ #
24
+ # In general, you will want to add a tag to your metrics and have an output
25
+ # explicitly look for that tag.
26
+ #
27
+ # The event that is flushed will include every 'meter' and 'timer'
28
+ # metric in the following way:
29
+ #
30
+ # #### 'meter' values
31
+ #
32
+ # For a `meter => "something"` you will receive the following fields:
33
+ #
34
+ # * "thing.count" - the total count of events
35
+ # * "thing.rate_1m" - the 1-minute rate (sliding)
36
+ # * "thing.rate_5m" - the 5-minute rate (sliding)
37
+ # * "thing.rate_15m" - the 15-minute rate (sliding)
38
+ #
39
+ # #### 'timer' values
40
+ #
41
+ # For a `timer => [ "thing", "%{duration}" ]` you will receive the following fields:
42
+ #
43
+ # * "thing.count" - the total count of events
44
+ # * "thing.rate_1m" - the 1-minute rate of events (sliding)
45
+ # * "thing.rate_5m" - the 5-minute rate of events (sliding)
46
+ # * "thing.rate_15m" - the 15-minute rate of events (sliding)
47
+ # * "thing.min" - the minimum value seen for this metric
48
+ # * "thing.max" - the maximum value seen for this metric
49
+ # * "thing.stddev" - the standard deviation for this metric
50
+ # * "thing.mean" - the mean for this metric
51
+ # * "thing.pXX" - the XXth percentile for this metric (see `percentiles`)
52
+ #
53
+ # #### Example: computing event rate
54
+ #
55
+ # For a simple example, let's track how many events per second are running
56
+ # through logstash:
57
+ #
58
+ # input {
59
+ # generator {
60
+ # type => "generated"
61
+ # }
62
+ # }
63
+ #
64
+ # filter {
65
+ # if [type] == "generated" {
66
+ # metrics {
67
+ # meter => "events"
68
+ # add_tag => "metric"
69
+ # }
70
+ # }
71
+ # }
72
+ #
73
+ # output {
74
+ # # only emit events with the 'metric' tag
75
+ # if "metric" in [tags] {
76
+ # stdout {
77
+ # codec => line {
78
+ # format => "rate: %{events.rate_1m}"
79
+ # }
80
+ # }
81
+ # }
82
+ # }
83
+ #
84
+ # Running the above:
85
+ #
86
+ # % bin/logstash -f example.conf
87
+ # rate: 23721.983566819246
88
+ # rate: 24811.395722536377
89
+ # rate: 25875.892745934525
90
+ # rate: 26836.42375967113
91
+ #
92
+ # We see the output includes our 'events' 1-minute rate.
93
+ #
94
+ # In the real world, you would emit this to graphite or another metrics store,
95
+ # like so:
96
+ #
97
+ # output {
98
+ # graphite {
99
+ # metrics => [ "events.rate_1m", "%{events.rate_1m}" ]
100
+ # }
101
+ # }
102
# The metrics filter: aggregates named meter/timer metrics across events and
# periodically emits them as a new event. See the header comments in this
# file for configuration examples.
class LogStash::Filters::Metrics < LogStash::Filters::Base
  config_name "metrics"
  milestone 1

  # syntax: `meter => [ "name of metric", "name of metric" ]`
  config :meter, :validate => :array, :default => []

  # syntax: `timer => [ "name of metric", "%{time_value}" ]`
  config :timer, :validate => :hash, :default => {}

  # Don't track events that have @timestamp older than some number of seconds.
  #
  # This is useful if you want to only include events that are near real-time
  # in your metrics. For example, to only count events within 10 seconds of
  # real-time:
  #
  #     filter {
  #       metrics {
  #         meter => [ "hits" ]
  #         ignore_older_than => 10
  #       }
  #     }
  config :ignore_older_than, :validate => :number, :default => 0

  # The flush interval, when the metrics event is created. Must be a multiple of 5s.
  config :flush_interval, :validate => :number, :default => 5

  # The clear interval, when all counters are reset.
  #
  # If set to -1, the default value, the metrics will never be cleared.
  # Otherwise, should be a multiple of 5s.
  config :clear_interval, :validate => :number, :default => -1

  # The rates that should be measured, in minutes.
  # Possible values are 1, 5, and 15.
  config :rates, :validate => :array, :default => [1, 5, 15]

  # The percentiles that should be measured
  config :percentiles, :validate => :array, :default => [1, 5, 10, 90, 95, 99, 100]

  # Set up the metric registries and elapsed-time counters; validates `rates`.
  def register
    require "metriks"
    require "socket"
    require "atomic"
    require "thread_safe"

    # Seconds elapsed since the last flush/clear. flush() bumps these by 5
    # because logstash invokes flush every 5 seconds.
    @last_flush = Atomic.new(0)
    @last_clear = Atomic.new(0)

    # Metriks keeps a process-global registry, so every metric key gets a
    # random prefix to keep separate filter instances independent.
    @random_key_prefix = SecureRandom.hex

    unless (@rates - [1, 5, 15]).empty?
      raise LogStash::ConfigurationError, "Invalid rates configuration. possible rates are 1, 5, 15. Rates: #{rates}."
    end

    @metric_meters = ThreadSafe::Cache.new { |cache, key| cache[key] = Metriks.meter(metric_key(key)) }
    @metric_timers = ThreadSafe::Cache.new { |cache, key| cache[key] = Metriks.timer(metric_key(key)) }
  end # def register

  # Record the event into every configured meter and timer.
  def filter(event)
    return unless filter?(event)

    # TODO(piavlo): This should probably be moved to base filter class.
    if @ignore_older_than > 0 && Time.now - event.timestamp.time > @ignore_older_than
      @logger.debug("Skipping metriks for old event", :event => event)
      return
    end

    @meter.each { |name| @metric_meters[event.sprintf(name)].mark }

    @timer.each do |name, value|
      @metric_timers[event.sprintf(name)].update(event.sprintf(value).to_f)
    end
  end # def filter

  # Called by logstash every 5 seconds; emits a metrics event once
  # flush_interval has elapsed and at least one metric was recorded.
  def flush
    # Each invocation represents 5 more seconds of elapsed time.
    @last_flush.update { |v| v + 5 }
    @last_clear.update { |v| v + 5 }

    # Do nothing if there's nothing to do ;)
    return unless should_flush?

    event = LogStash::Event.new
    event["message"] = Socket.gethostname

    @metric_meters.each_pair do |name, metric|
      flush_rates event, name, metric
      metric.clear if should_clear?
    end

    @metric_timers.each_pair do |name, metric|
      flush_rates event, name, metric
      # These 4 values are not sliding, so they probably are not useful.
      event["#{name}.min"] = metric.min
      event["#{name}.max"] = metric.max
      # timer's stddev currently returns variance, fix it.
      event["#{name}.stddev"] = metric.stddev ** 0.5
      event["#{name}.mean"] = metric.mean

      @percentiles.each do |percentile|
        event["#{name}.p#{percentile}"] = metric.snapshot.value(percentile / 100.0)
      end
      metric.clear if should_clear?
    end

    # Reset counter since metrics were flushed
    @last_flush.value = 0

    if should_clear?
      # Reset counter since metrics were cleared
      @last_clear.value = 0
      @metric_meters.clear
      @metric_timers.clear
    end

    filter_matched(event)
    [event]
  end

  private

  # Write count plus the configured sliding rates for one metric onto event.
  def flush_rates(event, name, metric)
    event["#{name}.count"] = metric.count
    event["#{name}.rate_1m"] = metric.one_minute_rate if @rates.include? 1
    event["#{name}.rate_5m"] = metric.five_minute_rate if @rates.include? 5
    event["#{name}.rate_15m"] = metric.fifteen_minute_rate if @rates.include? 15
  end

  # Namespace a metric key with this instance's random prefix.
  def metric_key(key)
    "#{@random_key_prefix}_#{key}"
  end

  # True when flush_interval has elapsed and any metric exists.
  def should_flush?
    @last_flush.value >= @flush_interval && (!@metric_meters.empty? || !@metric_timers.empty?)
  end

  # True when clearing is enabled and clear_interval has elapsed.
  def should_clear?
    @clear_interval > 0 && @last_clear.value >= @clear_interval
  end
end # class LogStash::Filters::Metrics
data/logstash-filter-metrics.gemspec ADDED
@@ -0,0 +1,26 @@
1
Gem::Specification.new do |s|

  s.name            = 'logstash-filter-metrics'
  s.version         = '0.1.0'
  s.licenses        = ['Apache License (2.0)']
  s.summary         = "The metrics filter is useful for aggregating metrics."
  s.description     = "The metrics filter is useful for aggregating metrics."
  s.authors         = ["Elasticsearch"]
  s.email           = 'richard.pijnenburg@elasticsearch.com'
  s.homepage        = "http://logstash.net/"
  s.require_paths   = ["lib"]

  # Files
  # BUGFIX: the original used split($\); $\ (output record separator) defaults
  # to nil, so that split on arbitrary whitespace and would break filenames
  # containing spaces. `git ls-files` emits newline-separated paths.
  s.files = `git ls-files`.split("\n")

  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "group" => "filter" }

  # Gem dependencies
  s.add_runtime_dependency 'logstash', '>= 1.4.0', '< 2.0.0'

end
data/rakelib/publish.rake ADDED
@@ -0,0 +1,9 @@
1
require "gem_publisher"

desc "Publish gem to RubyGems.org"
task :publish_gem do |t|
  # Pick up the single gemspec sitting in the project root (one dir up).
  gemspec_path = Dir.glob(File.expand_path('../*.gemspec', File.dirname(__FILE__))).first
  published = GemPublisher.publish_if_updated(gemspec_path, :rubygems)
  puts "Published #{published}" if published
end
data/rakelib/vendor.rake ADDED
@@ -0,0 +1,169 @@
1
+ require "net/http"
2
+ require "uri"
3
+ require "digest/sha1"
4
+
5
# Build a path beneath the vendor/ directory from the given components.
def vendor(*parts)
  File.join("vendor", *parts)
end
8
+
9
# Rake directory task: create vendor/ when a vendored file needs it.
directory "vendor/" => ["vendor"] do |task, args|
  mkdir task.name
end
12
+
13
# Download url into output and verify its SHA1 checksum, aborting on mismatch.
def fetch(url, sha1, output)

  puts "Downloading #{url}"
  actual_sha1 = download(url, output)

  unless actual_sha1 == sha1
    fail "SHA1 does not match (expected '#{sha1}' but got '#{actual_sha1}')"
  end
end # def fetch
22
+
23
# Fetch url into vendor/, skipping the download when an existing copy already
# matches the expected SHA1 digest. Returns the vendor-relative output path.
def file_fetch(url, sha1)
  filename = File.basename( URI(url).path )
  # BUGFIX: this literal was corrupted to "vendor/#(unknown)" — the string
  # interpolation of the computed filename was destroyed.
  output = "vendor/#{filename}"
  task output => [ "vendor/" ] do
    begin
      actual_sha1 = file_sha1(output)
      if actual_sha1 != sha1
        fetch(url, sha1, output)
      end
    rescue Errno::ENOENT
      # No cached copy yet; download it.
      fetch(url, sha1, output)
    end
  end.invoke

  return output
end
39
+
40
# Compute the SHA1 hex digest of the file at path, reading 16 KiB at a time.
def file_sha1(path)
  digest = Digest::SHA1.new
  fd = File.new(path, "r")
  loop do
    begin
      digest << fd.sysread(16384)
    rescue EOFError
      break
    end
  end
  digest.hexdigest
ensure
  # Always release the file handle, even if sysread raised.
  fd.close if fd
end
54
+
55
# Download url to output (via a ".tmp" sibling, renamed on success), showing
# percentage progress on a TTY. Returns the SHA1 hex digest of the bytes
# received. Raises on HTTP failure; cleans up the temp file on any error.
def download(url, output)
  uri = URI(url)
  digest = Digest::SHA1.new
  tmp = "#{output}.tmp"
  Net::HTTP.start(uri.host, uri.port, :use_ssl => (uri.scheme == "https")) do |http|
    request = Net::HTTP::Get.new(uri.path)
    http.request(request) do |response|
      # BUGFIX: the original failed *when* the code was a success code, and
      # compared the String response.code against Integers, so the branch
      # never fired at all and HTTP errors were silently written to disk.
      fail "HTTP fetch failed for #{url}. #{response}" unless [200, 301].include?(response.code.to_i)
      size = (response["content-length"].to_i || -1).to_f
      count = 0
      # BUGFIX: binary mode — text mode would newline-translate (and corrupt)
      # downloaded archives on Windows.
      File.open(tmp, "wb") do |fd|
        response.read_body do |chunk|
          fd.write(chunk)
          digest << chunk
          if size > 0 && $stdout.tty?
            count += chunk.bytesize
            $stdout.write(sprintf("\r%0.2f%%", count/size * 100))
          end
        end
      end
      $stdout.write("\r  \r") if $stdout.tty?
    end
  end

  File.rename(tmp, output)

  return digest.hexdigest
rescue SocketError => e
  puts "Failure while downloading #{url}: #{e}"
  raise
ensure
  File.unlink(tmp) if File.exist?(tmp)
end # def download
88
+
89
# Extract a gzipped tarball. The block maps each tar entry to its output path
# (or nil to skip the entry). The tarball itself is deleted afterwards.
def untar(tarball, &block)
  require "archive/tar/minitar"
  tgz = Zlib::GzipReader.new(File.open(tarball))
  # Pull out typesdb
  tar = Archive::Tar::Minitar::Input.open(tgz)
  tar.each do |entry|
    path = block.call(entry)
    next if path.nil?
    parent = File.dirname(path)

    mkdir_p parent unless File.directory?(parent)

    if entry.directory?
      mkdir path unless File.directory?(path)
    else
      # minitar does not expose header fields publicly; peek at its ivars.
      # TODO(sissel): Submit a patch to archive-tar-minitar upstream to
      # expose headers in the entry.
      entry_mode = entry.instance_eval { @mode } & 0777
      if File.exist?(path)
        stat = File.stat(path)
        entry_size = entry.instance_eval { @size }
        # Skip this file if the output file is the same size and mode.
        next if stat.size == entry_size && (stat.mode & 0777) == entry_mode
      end
      puts "Extracting #{entry.full_name} from #{tarball} #{entry_mode.to_s(8)}"
      File.open(path, "w") do |fd|
        # eof? check lets us skip empty files. Necessary because the API provided by
        # Archive::Tar::Minitar::Reader::EntryStream only mostly acts like an
        # IO object. Something about empty files in this EntryStream causes
        # IO.copy_stream to throw "can't convert nil into String" on JRuby
        # TODO(sissel): File a bug about this.
        fd.write(entry.read(16384)) while !entry.eof?
      end
      File.chmod(entry_mode, path)
    end
  end
  tar.close
  File.unlink(tarball) if File.file?(tarball)
end # def untar
133
+
134
# Decompress a .gz file: writes the uncompressed data next to it (same name
# minus ".gz") and removes the compressed original. On failure the partial
# output is deleted and the error re-raised.
def ungz(file)

  outpath = file.gsub('.gz', '')
  reader = Zlib::GzipReader.new(File.open(file))
  begin
    File.open(outpath, "w") { |out| IO.copy_stream(reader, out) }
    File.unlink(file)
  rescue
    File.unlink(outpath) if File.file?(outpath)
    raise
  end
  reader.close
end
149
+
150
desc "Process any vendor files required for this plugin"
task "vendor" do |task, args|

  @files.each do |file|
    download = file_fetch(file['url'], file['sha1'])
    # BUGFIX: dots escaped — /.tar.gz/ previously matched any two characters
    # around "tar"/"gz" (e.g. "footargz"), not just the literal extension.
    if download =~ /\.tar\.gz/
      prefix = download.gsub('.tar.gz', '').gsub('vendor/', '')
      untar(download) do |entry|
        if !file['files'].nil?
          next unless file['files'].include?(entry.full_name.gsub(prefix, ''))
          out = entry.full_name.split("/").last
        end
        # NOTE(review): when file['files'] is nil, `out` is never assigned and
        # File.join raises TypeError — confirm every @files entry lists 'files'.
        File.join('vendor', out)
      end
    elsif download =~ /\.gz/
      ungz(download)
    end
  end

end
data/spec/filters/metrics_spec.rb ADDED
@@ -0,0 +1,233 @@
1
+ require "spec_helper"
2
+ require "logstash/filters/metrics"
3
+
4
describe LogStash::Filters::Metrics do

  context "with basic meter config" do
    context "when no events were received" do
      it "should not flush" do
        filter = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"]})
        filter.register

        # No events recorded yet, so flush must emit nothing.
        insist { filter.flush }.nil?
      end
    end

    context "when events are received" do
      context "on the first flush" do
        subject {
          filter = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"]})
          filter.register
          [200, 200, 404].each do |code|
            filter.filter LogStash::Event.new({"response" => code})
          end
          filter.flush
        }

        it "should flush counts" do
          insist { subject.length } == 1
          insist { subject.first["http.200.count"] } == 2
          insist { subject.first["http.404.count"] } == 1
        end

        it "should include rates and percentiles" do
          %w[http.200.rate_1m http.200.rate_5m http.200.rate_15m
             http.404.rate_1m http.404.rate_5m http.404.rate_15m].each do |metric|
            insist { subject.first }.include? metric
          end
        end
      end

      context "on the second flush" do
        it "should not reset counts" do
          filter = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"]})
          filter.register
          [200, 200, 404].each do |code|
            filter.filter LogStash::Event.new({"response" => code})
          end

          filter.flush
          events = filter.flush
          insist { events.length } == 1
          insist { events.first["http.200.count"] } == 2
          insist { events.first["http.404.count"] } == 1
        end
      end
    end

    context "when custom rates and percentiles are selected" do
      context "on the first flush" do
        subject {
          filter = LogStash::Filters::Metrics.new({
            "meter" => ["http.%{response}"],
            "rates" => [1]
          })
          filter.register
          [200, 200, 404].each do |code|
            filter.filter LogStash::Event.new({"response" => code})
          end
          filter.flush
        }

        it "should include only the requested rates" do
          rate_fields = subject.first.to_hash.keys.select { |field| field.start_with?("http.200.rate") }
          insist { rate_fields.length } == 1
          insist { rate_fields }.include? "http.200.rate_1m"
        end
      end
    end
  end

  context "with multiple instances" do
    it "counts should be independent" do
      filter_tag1 = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"], "tags" => ["tag1"]})
      filter_tag2 = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"], "tags" => ["tag2"]})
      event_tag1  = LogStash::Event.new({"response" => 200, "tags" => [ "tag1" ]})
      event_tag2  = LogStash::Event.new({"response" => 200, "tags" => [ "tag2" ]})
      event2_tag2 = LogStash::Event.new({"response" => 200, "tags" => [ "tag2" ]})
      filter_tag1.register
      filter_tag2.register

      [event_tag1, event_tag2, event2_tag2].each do |event|
        filter_tag1.filter event
        filter_tag2.filter event
      end

      # Each filter only matched events carrying its own tag.
      insist { filter_tag1.flush.first["http.200.count"] } == 1
      insist { filter_tag2.flush.first["http.200.count"] } == 2
    end
  end

  context "with timer config" do
    context "on the first flush" do
      subject {
        filter = LogStash::Filters::Metrics.new({"timer" => ["http.request_time", "%{request_time}"]})
        filter.register
        [10, 20, 30].each do |duration|
          filter.filter LogStash::Event.new({"request_time" => duration})
        end
        filter.flush
      }

      it "should flush counts" do
        insist { subject.length } == 1
        insist { subject.first["http.request_time.count"] } == 3
      end

      it "should include rates and percentiles keys" do
        %w[rate_1m rate_5m rate_15m p1 p5 p10 p90 p95 p99].each do |metric|
          insist { subject.first }.include? "http.request_time.#{metric}"
        end
      end

      it "should include min value" do
        insist { subject.first['http.request_time.min'] } == 10.0
      end

      it "should include mean value" do
        insist { subject.first['http.request_time.mean'] } == 20.0
      end

      it "should include stddev value" do
        insist { subject.first['http.request_time.stddev'] } == Math.sqrt(10.0)
      end

      it "should include max value" do
        insist { subject.first['http.request_time.max'] } == 30.0
      end

      it "should include percentile value" do
        insist { subject.first['http.request_time.p99'] } == 30.0
      end
    end
  end

  context "when custom rates and percentiles are selected" do
    context "on the first flush" do
      subject {
        filter = LogStash::Filters::Metrics.new({
          "timer" => ["http.request_time", "request_time"],
          "rates" => [1],
          "percentiles" => [1, 2]
        })
        filter.register
        filter.filter LogStash::Event.new({"request_time" => 1})
        filter.flush
      }

      it "should flush counts" do
        insist { subject.length } == 1
        insist { subject.first["http.request_time.count"] } == 1
      end

      it "should include only the requested rates" do
        rate_fields = subject.first.to_hash.keys.select { |field| field.start_with?("http.request_time.rate") }
        insist { rate_fields.length } == 1
        insist { rate_fields }.include? "http.request_time.rate_1m"
      end

      it "should include only the requested percentiles" do
        percentile_fields = subject.first.to_hash.keys.select { |field| field.start_with?("http.request_time.p") }
        insist { percentile_fields.length } == 2
        insist { percentile_fields }.include? "http.request_time.p1"
        insist { percentile_fields }.include? "http.request_time.p2"
      end
    end
  end


  context "when a custom flush_interval is set" do
    it "should flush only when required" do
      filter = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"], "flush_interval" => 15})
      filter.register
      filter.filter LogStash::Event.new({"response" => 200})

      insist { filter.flush }.nil?          # 5s
      insist { filter.flush }.nil?          # 10s
      insist { filter.flush.length } == 1   # 15s
      insist { filter.flush }.nil?          # 20s
      insist { filter.flush }.nil?          # 25s
      insist { filter.flush.length } == 1   # 30s
    end
  end

  context "when a custom clear_interval is set" do
    it "should clear the metrics after interval has passed" do
      filter = LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"], "clear_interval" => 15})
      filter.register
      filter.filter LogStash::Event.new({"response" => 200})

      insist { filter.flush.first["http.200.count"] } == 1   # 5s
      insist { filter.flush.first["http.200.count"] } == 1   # 10s
      insist { filter.flush.first["http.200.count"] } == 1   # 15s
      insist { filter.flush }.nil?                           # 20s
    end
  end

  context "when invalid rates are set" do
    subject { LogStash::Filters::Metrics.new({"meter" => ["http.%{response}"], "rates" => [90]}) }

    it "should raise an error" do
      insist { subject.register }.raises(LogStash::ConfigurationError)
    end
  end
end
metadata ADDED
@@ -0,0 +1,74 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: logstash-filter-metrics
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - Elasticsearch
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2014-11-02 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: logstash
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - ! '>='
18
+ - !ruby/object:Gem::Version
19
+ version: 1.4.0
20
+ - - <
21
+ - !ruby/object:Gem::Version
22
+ version: 2.0.0
23
+ type: :runtime
24
+ prerelease: false
25
+ version_requirements: !ruby/object:Gem::Requirement
26
+ requirements:
27
+ - - ! '>='
28
+ - !ruby/object:Gem::Version
29
+ version: 1.4.0
30
+ - - <
31
+ - !ruby/object:Gem::Version
32
+ version: 2.0.0
33
+ description: The metrics filter is useful for aggregating metrics.
34
+ email: richard.pijnenburg@elasticsearch.com
35
+ executables: []
36
+ extensions: []
37
+ extra_rdoc_files: []
38
+ files:
39
+ - .gitignore
40
+ - Gemfile
41
+ - Rakefile
42
+ - lib/logstash/filters/metrics.rb
43
+ - logstash-filter-metrics.gemspec
44
+ - rakelib/publish.rake
45
+ - rakelib/vendor.rake
46
+ - spec/filters/metrics_spec.rb
47
+ homepage: http://logstash.net/
48
+ licenses:
49
+ - Apache License (2.0)
50
+ metadata:
51
+ logstash_plugin: 'true'
52
+ group: filter
53
+ post_install_message:
54
+ rdoc_options: []
55
+ require_paths:
56
+ - lib
57
+ required_ruby_version: !ruby/object:Gem::Requirement
58
+ requirements:
59
+ - - ! '>='
60
+ - !ruby/object:Gem::Version
61
+ version: '0'
62
+ required_rubygems_version: !ruby/object:Gem::Requirement
63
+ requirements:
64
+ - - ! '>='
65
+ - !ruby/object:Gem::Version
66
+ version: '0'
67
+ requirements: []
68
+ rubyforge_project:
69
+ rubygems_version: 2.4.1
70
+ signing_key:
71
+ specification_version: 4
72
+ summary: The metrics filter is useful for aggregating metrics.
73
+ test_files:
74
+ - spec/filters/metrics_spec.rb