logstash-filter-throttle 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,15 @@
1
+ ---
2
+ !binary "U0hBMQ==":
3
+ metadata.gz: !binary |-
4
+ MDViZWUyMDc0NzZhYzQ1MDgxNzRmNDQ4MmU0MzJjYzA4NzMwMzQwMQ==
5
+ data.tar.gz: !binary |-
6
+ ZmQzZTQwYThjODVhZmMzZTVjODI1NWU1ZDc2ZWIzY2ZmOTcxZjc5Zg==
7
+ SHA512:
8
+ metadata.gz: !binary |-
9
+ NjQ0ZDBmNjc0ZjcyODFhOTFlZDMxNThkODE4NzM1MTM1ZWZhMDc4NWQ4NzVj
10
+ ODg2NjQ0OGQyMzQwNWE0YjQxZDIwNjNmMTY3N2NiZDc4Y2IyNDM1ODEzYjhh
11
+ ZTU4NDZhYjk2NmJmNDBkNTdhNzIxM2Y3OWUzYzVhYzRmZWJjNWQ=
12
+ data.tar.gz: !binary |-
13
+ ZjVmNTE1YTkxMjEwYTlmOGEyYzBjNTE2MDhkOGU1N2UzNzU3MmI2MWQ0NTVh
14
+ ZmI0MGNjYzgzZjk2MDVkYWZjZTg1MWNjODZlYzI3MDA5YTJjMjE3OGUyNzQ4
15
+ YzczMjk5Y2EzOGU1ZTM0ZDhjMDI0NjVlMTdkYjU5NzI0ZmFkMzg=
data/.gitignore ADDED
@@ -0,0 +1,4 @@
1
+ *.gem
2
+ Gemfile.lock
3
+ .bundle
4
+ vendor
data/Gemfile ADDED
@@ -0,0 +1,4 @@
# Gems needed to build, test, and publish this plugin.
# SECURITY: use HTTPS so gem downloads cannot be tampered with in transit.
source 'https://rubygems.org'

gem 'rake'                # build/task runner
gem 'gem_publisher'       # used by rakelib/publish.rake to push to RubyGems
gem 'archive-tar-minitar' # used by rakelib/vendor.rake to unpack tarballs
data/Rakefile ADDED
@@ -0,0 +1,6 @@
# Vendor-file descriptors consumed by rakelib/vendor.rake; plugins append
# entries ({'url' => ..., 'sha1' => ...}) as needed.
@files = []

# With no task given, just list the available tasks.
task(:default) { system("rake -T") }
@@ -0,0 +1,261 @@
require "logstash/filters/base"
require "logstash/namespace"

# The throttle filter is for throttling the number of events received. The filter
# is configured with a lower bound, the before_count, and upper bound, the after_count,
# and a period of time. All events passing through the filter will be counted based on
# a key. As long as the count is less than the before_count or greater than the
# after_count, the event will be "throttled" which means the filter will be considered
# successful and any tags or fields will be added.
#
# For example, if you wanted to throttle events so you only receive an event after 2
# occurrences and you get no more than 3 in 10 minutes, you would use the
# configuration:
#     period => 600
#     before_count => 3
#     after_count => 5
#
# Which would result in:
#     event 1 - throttled (successful filter, period start)
#     event 2 - throttled (successful filter)
#     event 3 - not throttled
#     event 4 - not throttled
#     event 5 - not throttled
#     event 6 - throttled (successful filter)
#     event 7 - throttled (successful filter)
#     event x - throttled (successful filter)
#     period end
#     event 1 - throttled (successful filter, period start)
#     event 2 - throttled (successful filter)
#     event 3 - not throttled
#     event 4 - not throttled
#     event 5 - not throttled
#     event 6 - throttled (successful filter)
#     ...
#
# Another example is if you wanted to throttle events so you only receive 1 event per
# hour, you would use the configuration:
#     period => 3600
#     before_count => -1
#     after_count => 1
#
# Which would result in:
#     event 1 - not throttled (period start)
#     event 2 - throttled (successful filter)
#     event 3 - throttled (successful filter)
#     event 4 - throttled (successful filter)
#     event x - throttled (successful filter)
#     period end
#     event 1 - not throttled (period start)
#     event 2 - throttled (successful filter)
#     event 3 - throttled (successful filter)
#     event 4 - throttled (successful filter)
#     ...
#
# A common use case would be to use the throttle filter to throttle events before 3 and
# after 5 while using multiple fields for the key and then use the drop filter to remove
# throttled events. This configuration might appear as:
#
#     filter {
#       throttle {
#         before_count => 3
#         after_count => 5
#         period => 3600
#         key => "%{host}%{message}"
#         add_tag => "throttled"
#       }
#       if "throttled" in [tags] {
#         drop { }
#       }
#     }
#
# Another case would be to store all events, but only email non-throttled
# events so the op's inbox isn't flooded with emails in the event of a system error.
# This configuration might appear as:
#
#     filter {
#       throttle {
#         before_count => 3
#         after_count => 5
#         period => 3600
#         key => "%{message}"
#         add_tag => "throttled"
#       }
#     }
#     output {
#       if "throttled" not in [tags] {
#         email {
#           from => "logstash@mycompany.com"
#           subject => "Production System Alert"
#           to => "ops@mycompany.com"
#           via => "sendmail"
#           body => "Alert on %{host} from path %{path}:\n\n%{message}"
#           options => { "location" => "/usr/sbin/sendmail" }
#         }
#       }
#       elasticsearch_http {
#         host => "localhost"
#         port => "19200"
#       }
#     }
#
# The event counts are cleared after the configured period elapses since the
# first instance of the event. That is, all the counts don't reset at the same
# time but rather the throttle period is per unique key value.
#
# Mike Pilone (@mikepilone)
#
class LogStash::Filters::Throttle < LogStash::Filters::Base

  # The name to use in configuration files.
  config_name "throttle"

  # New plugins should start life at milestone 1.
  milestone 1

  # The key used to identify events. Events with the same key will be throttled
  # as a group. Field substitutions are allowed, so you can combine multiple
  # fields.
  config :key, :validate => :string, :required => true

  # Events less than this count will be throttled. Setting this value to -1, the
  # default, will cause no messages to be throttled based on the lower bound.
  config :before_count, :validate => :number, :default => -1, :required => false

  # Events greater than this count will be throttled. Setting this value to -1, the
  # default, will cause no messages to be throttled based on the upper bound.
  config :after_count, :validate => :number, :default => -1, :required => false

  # The period in seconds after the first occurrence of an event until the count is
  # reset for the event. This period is tracked per unique key value. Field
  # substitutions are allowed in this value. They will be evaluated when the _first_
  # event for a given key is seen. This allows you to specify that certain kinds
  # of events throttle for a specific period.
  config :period, :validate => :string, :default => "3600", :required => false

  # The maximum number of counters to store before the oldest counter is purged. Setting
  # this value to -1 will place no constraint on the number of counters and they will
  # only be purged after expiration. This configuration value should only be used as a
  # memory control mechanism and can cause early counter expiration if the value is
  # reached. It is recommended to leave the default value and ensure that your key is
  # selected such that it limits the number of counters required (i.e. don't use UUID
  # as the key!)
  config :max_counters, :validate => :number, :default => 100000, :required => false

  # Performs initialization of the filter.
  public
  def register
    # The per-key counters are mutated without locking, so this filter must
    # not be executed by multiple worker threads concurrently.
    @threadsafe = false

    # Map of key => { :count => Integer, :expiration => Time } for every event
    # group currently inside its throttle period.
    @event_counters = Hash.new
    # Earliest expiration among all counters; nil forces a sweep on next event.
    @next_expiration = nil
  end # def register

  # Filters the event. The filter is successful (filter_matched) if the event
  # should be throttled.
  public
  def filter(event)
    # Return nothing unless there's an actual filter event
    return unless filter?(event)

    now = Time.now
    key = event.sprintf(@key)

    # Purge counters if too large to prevent OOM.
    if @max_counters != -1 && @event_counters.size > @max_counters
      purge_oldest_event_counter
    end

    # Expire existing counters if needed.
    if @next_expiration.nil? || now >= @next_expiration
      expire_event_counters(now)
    end

    @logger.debug? and @logger.debug(
      "filters/#{self.class.name}: next expiration",
      { "next_expiration" => @next_expiration })

    # Create a new counter for this event if this is the first occurrence.
    unless @event_counters.key?(key)
      period = event.sprintf(@period).to_i
      # Guard against an empty/non-numeric substitution ("".to_i == 0).
      period = 3600 if period == 0
      expiration = now + period
      @event_counters[key] = { :count => 0, :expiration => expiration }

      @logger.debug? and @logger.debug("filters/#{self.class.name}: new event",
        { :key => key, :expiration => expiration })
    end

    # Fetch and increment the counter for this event.
    counter = @event_counters[key]
    counter[:count] = counter[:count] + 1

    @logger.debug? and @logger.debug("filters/#{self.class.name}: current count",
      { :key => key, :count => counter[:count] })

    # Throttle if count is below the lower bound or above the upper bound.
    if (@before_count != -1 && counter[:count] < @before_count) ||
       (@after_count != -1 && counter[:count] > @after_count)
      @logger.debug? and @logger.debug(
        "filters/#{self.class.name}: throttling event", { :key => key })

      filter_matched(event)
    end
  end # def filter

  # Expires any counters where the period has elapsed. Sets the next expiration
  # time for when this method should be called again.
  private
  def expire_event_counters(now)
    @next_expiration = nil

    @event_counters.delete_if do |key, counter|
      expiration = counter[:expiration]
      expired = expiration <= now

      if expired
        @logger.debug? and @logger.debug(
          "filters/#{self.class.name}: deleting expired counter",
          { :key => key })
      elsif @next_expiration.nil? || (expiration < @next_expiration)
        # Track the soonest remaining expiration so filter() knows when to
        # sweep again.
        @next_expiration = expiration
      end

      expired
    end
  end # def expire_event_counters

  # Purges the oldest event counter. This operation is for memory control only
  # and can cause early period expiration and thrashing if invoked.
  private
  def purge_oldest_event_counter
    # Return unless we have something to purge.
    return if @event_counters.empty?

    # The "oldest" counter is the one closest to expiring.
    oldest_key, oldest_counter =
      @event_counters.min_by { |_key, counter| counter[:expiration] }

    @logger.warn? and @logger.warn(
      "filters/#{self.class.name}: Purging oldest counter because max_counters " +
      "exceeded. Use a better key to prevent too many unique event counters.",
      { :key => oldest_key, :expiration => oldest_counter[:expiration] })

    @event_counters.delete(oldest_key)
  end # def purge_oldest_event_counter
end # class LogStash::Filters::Throttle
@@ -0,0 +1,26 @@
Gem::Specification.new do |s|
  # Gem identity.
  s.name     = 'logstash-filter-throttle'
  s.version  = '0.1.0'
  s.licenses = ['Apache License (2.0)']

  # Description and contact details.
  s.summary     = "The throttle filter is for throttling the number of events received."
  s.description = "The throttle filter is for throttling the number of events received."
  s.authors     = ["Elasticsearch"]
  s.email       = 'richard.pijnenburg@elasticsearch.com'
  s.homepage    = "http://logstash.net/"

  s.require_paths = ["lib"]

  # Files: everything tracked by git is shipped in the gem.
  s.files = `git ls-files`.split($\)

  # Tests
  s.test_files = s.files.grep(%r{^(test|spec|features)/})

  # Special flag to let us know this is actually a logstash plugin
  s.metadata = { "logstash_plugin" => "true", "group" => "filter" }

  # Gem dependencies
  s.add_runtime_dependency 'logstash', '>= 1.4.0', '< 2.0.0'
end
+
@@ -0,0 +1,9 @@
require "gem_publisher"

desc "Publish gem to RubyGems.org"
task :publish_gem do |t|
  # Locate the single gemspec one directory above this rakelib/ file.
  gemspec_pattern = File.expand_path('../*.gemspec', File.dirname(__FILE__))
  gem_file = Dir.glob(gemspec_pattern).first
  # publish_if_updated returns nil when the version is already on RubyGems.
  published = GemPublisher.publish_if_updated(gem_file, :rubygems)
  puts "Published #{published}" if published
end
@@ -0,0 +1,169 @@
require "net/http"
require "uri"
require "digest/sha1"
require "zlib" # Zlib::GzipReader is used by untar/ungz below

# Returns a path inside the vendor/ directory.
def vendor(*args)
  return File.join("vendor", *args)
end

directory "vendor/" => ["vendor"] do |task, args|
  mkdir task.name
end

# Downloads url to output and fails if the downloaded content does not match
# the expected sha1 checksum.
def fetch(url, sha1, output)
  puts "Downloading #{url}"
  actual_sha1 = download(url, output)

  if actual_sha1 != sha1
    fail "SHA1 does not match (expected '#{sha1}' but got '#{actual_sha1}')"
  end
end # def fetch

# Fetches url into vendor/, skipping the download when a file with the
# expected sha1 already exists. Returns the output path.
def file_fetch(url, sha1)
  filename = File.basename( URI(url).path )
  # NOTE(review): this interpolation was garbled in the vendored copy;
  # reconstructed as the conventional vendor path for the downloaded file.
  output = "vendor/#{filename}"
  task output => [ "vendor/" ] do
    begin
      actual_sha1 = file_sha1(output)
      if actual_sha1 != sha1
        fetch(url, sha1, output)
      end
    rescue Errno::ENOENT
      # File does not exist yet; download it.
      fetch(url, sha1, output)
    end
  end.invoke

  return output
end

# Computes the SHA1 hex digest of the file at path.
def file_sha1(path)
  digest = Digest::SHA1.new
  # Binary mode: digest raw bytes, not newline-translated text.
  fd = File.new(path, "rb")
  while true
    begin
      digest << fd.sysread(16384)
    rescue EOFError
      break
    end
  end
  return digest.hexdigest
ensure
  fd.close if fd
end

# Downloads url to output (via a .tmp file renamed into place on success) and
# returns the SHA1 hex digest of the downloaded bytes. Raises on HTTP failure.
def download(url, output)
  uri = URI(url)
  digest = Digest::SHA1.new
  tmp = "#{output}.tmp"
  Net::HTTP.start(uri.host, uri.port, :use_ssl => (uri.scheme == "https")) do |http|
    request = Net::HTTP::Get.new(uri.path)
    http.request(request) do |response|
      # BUGFIX: response.code is a String and the original condition was
      # inverted ("fail if success"), so HTTP errors were never detected.
      # Fail on any status that is not OK or a redirect.
      fail "HTTP fetch failed for #{url}. #{response}" unless [200, 301].include?(response.code.to_i)
      # content-length may be absent; fall back to -1 to disable the progress bar.
      size = (response["content-length"] || -1).to_i.to_f
      count = 0
      # Binary mode so tarballs are not corrupted by newline translation.
      File.open(tmp, "wb") do |fd|
        response.read_body do |chunk|
          fd.write(chunk)
          digest << chunk
          if size > 0 && $stdout.tty?
            count += chunk.bytesize
            $stdout.write(sprintf("\r%0.2f%%", count/size * 100))
          end
        end
      end
      $stdout.write("\r \r") if $stdout.tty?
    end
  end

  File.rename(tmp, output)

  return digest.hexdigest
rescue SocketError => e
  puts "Failure while downloading #{url}: #{e}"
  raise
ensure
  File.unlink(tmp) if File.exist?(tmp)
end # def download

# Extracts entries from a gzipped tarball. The block maps each entry to an
# output path, or nil to skip the entry. The tarball is deleted afterwards.
def untar(tarball, &block)
  require "archive/tar/minitar"
  tgz = Zlib::GzipReader.new(File.open(tarball))
  # Pull out typesdb
  tar = Archive::Tar::Minitar::Input.open(tgz)
  tar.each do |entry|
    path = block.call(entry)
    next if path.nil?
    parent = File.dirname(path)

    mkdir_p parent unless File.directory?(parent)

    # Skip this file if the output file is the same size
    if entry.directory?
      mkdir path unless File.directory?(path)
    else
      entry_mode = entry.instance_eval { @mode } & 0777
      if File.exist?(path) # File.exists? is deprecated
        stat = File.stat(path)
        # TODO(sissel): Submit a patch to archive-tar-minitar upstream to
        # expose headers in the entry.
        entry_size = entry.instance_eval { @size }
        # If file sizes are same, skip writing.
        next if stat.size == entry_size && (stat.mode & 0777) == entry_mode
      end
      puts "Extracting #{entry.full_name} from #{tarball} #{entry_mode.to_s(8)}"
      # Binary mode so extracted files are byte-identical to the archive.
      File.open(path, "wb") do |fd|
        # eof? check lets us skip empty files. Necessary because the API provided by
        # Archive::Tar::Minitar::Reader::EntryStream only mostly acts like an
        # IO object. Something about empty files in this EntryStream causes
        # IO.copy_stream to throw "can't convert nil into String" on JRuby
        # TODO(sissel): File a bug about this.
        while !entry.eof?
          chunk = entry.read(16384)
          fd.write(chunk)
        end
        #IO.copy_stream(entry, fd)
      end
      File.chmod(entry_mode, path)
    end
  end
  tar.close
  File.unlink(tarball) if File.file?(tarball)
end # def untar

# Decompresses a .gz file next to the original (dropping the .gz suffix) and
# removes the compressed source on success.
def ungz(file)
  outpath = file.gsub('.gz', '')
  tgz = Zlib::GzipReader.new(File.open(file))
  begin
    File.open(outpath, "wb") do |out|
      IO::copy_stream(tgz, out)
    end
    File.unlink(file)
  rescue
    # Don't leave a half-written output file behind.
    File.unlink(outpath) if File.file?(outpath)
    raise
  end
  tgz.close
end

desc "Process any vendor files required for this plugin"
task "vendor" do |task, args|
  @files.each do |file|
    download = file_fetch(file['url'], file['sha1'])
    # Escaped dots: the original /.tar.gz/ matched any character before "tar".
    if download =~ /\.tar\.gz/
      prefix = download.gsub('.tar.gz', '').gsub('vendor/', '')
      untar(download) do |entry|
        if !file['files'].nil?
          next unless file['files'].include?(entry.full_name.gsub(prefix, ''))
          out = entry.full_name.split("/").last
        end
        File.join('vendor', out)
      end
    elsif download =~ /\.gz/
      ungz(download)
    end
  end
end
@@ -0,0 +1,196 @@
require "spec_helper"
require "logstash/filters/throttle"

describe LogStash::Filters::Throttle do

  # With no before_count, a single occurrence stays under after_count and
  # must pass through untagged.
  describe "no before_count" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          after_count => 2
          key => "%{host}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    event = { "host" => "server1" }

    sample event do
      insist { subject["tags"] } == nil
    end
  end

  # The first occurrence is below before_count, so it gets tagged.
  describe "before_count throttled" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          before_count => 2
          after_count => 3
          key => "%{host}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    event = { "host" => "server1" }

    sample event do
      insist { subject["tags"] } == nil
    end
  end

  # Once the count reaches before_count, events stop being throttled.
  describe "before_count exceeded" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          before_count => 2
          after_count => 3
          key => "%{host}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    events = Array.new(2) { { "host" => "server1" } }

    sample events do
      insist { subject[0]["tags"] } == [ "throttled" ]
      insist { subject[1]["tags"] } == nil
    end
  end

  # Events past after_count are throttled again.
  describe "after_count exceeded" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          before_count => 2
          after_count => 3
          key => "%{host}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    events = Array.new(4) { { "host" => "server1" } }

    sample events do
      insist { subject[0]["tags"] } == [ "throttled" ]
      insist { subject[1]["tags"] } == nil
      insist { subject[2]["tags"] } == nil
      insist { subject[3]["tags"] } == [ "throttled" ]
    end
  end

  # Counts are tracked per key, so distinct hosts never reach after_count.
  describe "different keys" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          after_count => 2
          key => "%{host}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    events = %w[server1 server2 server3 server4].map do |host|
      { "host" => host }
    end

    sample events do
      subject.each do |s|
        insist { s["tags"] } == nil
      end
    end
  end

  # Multiple fields can be combined into one key; each combination counts
  # independently.
  describe "composite key" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          after_count => 1
          key => "%{host}%{message}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    events = [
      ["server1", "foo"],
      ["server1", "bar"],
      ["server2", "foo"],
      ["server2", "bar"]
    ].map do |host, message|
      { "host" => host, "message" => message }
    end

    sample events do
      subject.each do |s|
        insist { s["tags"] } == nil
      end
    end
  end

  # When max_counters is exceeded, the oldest counter is purged, so a repeat
  # of the purged key starts a fresh (untagged) count.
  describe "max_counter exceeded" do
    config <<-CONFIG
      filter {
        throttle {
          period => 60
          after_count => 1
          max_counters => 2
          key => "%{message}"
          add_tag => [ "throttled" ]
        }
      }
    CONFIG

    events = %w[foo bar poo foo].map do |message|
      { "message" => message }
    end

    sample events do
      insist { subject[3]["tags"] } == nil
    end
  end

end # LogStash::Filters::Throttle
metadata ADDED
@@ -0,0 +1,74 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: logstash-filter-throttle
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - Elasticsearch
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2014-11-02 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: logstash
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - ! '>='
18
+ - !ruby/object:Gem::Version
19
+ version: 1.4.0
20
+ - - <
21
+ - !ruby/object:Gem::Version
22
+ version: 2.0.0
23
+ type: :runtime
24
+ prerelease: false
25
+ version_requirements: !ruby/object:Gem::Requirement
26
+ requirements:
27
+ - - ! '>='
28
+ - !ruby/object:Gem::Version
29
+ version: 1.4.0
30
+ - - <
31
+ - !ruby/object:Gem::Version
32
+ version: 2.0.0
33
+ description: The throttle filter is for throttling the number of events received.
34
+ email: richard.pijnenburg@elasticsearch.com
35
+ executables: []
36
+ extensions: []
37
+ extra_rdoc_files: []
38
+ files:
39
+ - .gitignore
40
+ - Gemfile
41
+ - Rakefile
42
+ - lib/logstash/filters/throttle.rb
43
+ - logstash-filter-throttle.gemspec
44
+ - rakelib/publish.rake
45
+ - rakelib/vendor.rake
46
+ - spec/filters/throttle_spec.rb
47
+ homepage: http://logstash.net/
48
+ licenses:
49
+ - Apache License (2.0)
50
+ metadata:
51
+ logstash_plugin: 'true'
52
+ group: filter
53
+ post_install_message:
54
+ rdoc_options: []
55
+ require_paths:
56
+ - lib
57
+ required_ruby_version: !ruby/object:Gem::Requirement
58
+ requirements:
59
+ - - ! '>='
60
+ - !ruby/object:Gem::Version
61
+ version: '0'
62
+ required_rubygems_version: !ruby/object:Gem::Requirement
63
+ requirements:
64
+ - - ! '>='
65
+ - !ruby/object:Gem::Version
66
+ version: '0'
67
+ requirements: []
68
+ rubyforge_project:
69
+ rubygems_version: 2.4.1
70
+ signing_key:
71
+ specification_version: 4
72
+ summary: The throttle filter is for throttling the number of events received.
73
+ test_files:
74
+ - spec/filters/throttle_spec.rb