fluent-plugin-netflow-enchanced 1.0.0.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +24 -0
- data/.travis.yml +18 -0
- data/Gemfile +3 -0
- data/README.md +180 -0
- data/Rakefile +14 -0
- data/VERSION +1 -0
- data/example/fluentd.conf +9 -0
- data/fluent-plugin-netflow.gemspec +24 -0
- data/lib/fluent/plugin/in_netflow.rb +80 -0
- data/lib/fluent/plugin/netflow_fields.yaml +302 -0
- data/lib/fluent/plugin/netflow_records.rb +160 -0
- data/lib/fluent/plugin/parser_netflow.rb +403 -0
- data/lib/fluent/plugin/vash.rb +75 -0
- data/test/dump/netflow.v5.dump +0 -0
- data/test/dump/netflow.v9.dump +0 -0
- data/test/dump/netflow.v9.flowStartMilliseconds.dump +0 -0
- data/test/dump/netflow.v9.mpls-data.dump +0 -0
- data/test/dump/netflow.v9.mpls-template.dump +0 -0
- data/test/dump/netflow.v9.sampler.dump +0 -0
- data/test/dump/netflow.v9.sampler_template.dump +0 -0
- data/test/dump/netflow.v9.template.as2.dump +0 -0
- data/test/dump/netflow.v9.template.dump +0 -0
- data/test/dump/netflow.v9.template.flowStartMilliseconds.dump +0 -0
- data/test/helper.rb +26 -0
- data/test/test_in_netflow.rb +34 -0
- data/test/test_parser_netflow.rb +380 -0
- data/test/test_parser_netflow9.rb +223 -0
- metadata +132 -0
data/lib/fluent/plugin/vash.rb
ADDED
@@ -0,0 +1,75 @@
module Fluent
  module Plugin
    class NetflowParser < Parser
      # https://gist.github.com/joshaven/184837
      class Vash < Hash
        def initialize(constructor = {})
          @register ||= {}
          if constructor.is_a?(Hash)
            super()
            merge(constructor)
          else
            super(constructor)
          end
        end

        alias_method :regular_writer, :[]= unless method_defined?(:regular_writer)
        alias_method :regular_reader, :[] unless method_defined?(:regular_reader)

        def [](key)
          sterilize(key)
          clear(key) if expired?(key)
          regular_reader(key)
        end

        def []=(key, *args)
          if args.length == 2
            value, ttl = args[1], args[0]
          elsif args.length == 1
            value, ttl = args[0], 60
          else
            raise ArgumentError, "Wrong number of arguments, expected 2 or 3, received: #{args.length+1}\n"+
                                 "Example Usage: volatile_hash[:key]=value OR volatile_hash[:key, ttl]=value"
          end
          sterilize(key)
          ttl(key, ttl)
          regular_writer(key, value)
        end

        def merge(hsh)
          hsh.map {|key,value| self[sterile(key)] = hsh[key]}
          self
        end

        def cleanup!
          now = Time.now.to_i
          @register.map {|k,v| clear(k) if v < now}
        end

        def clear(key)
          sterilize(key)
          @register.delete key
          self.delete key
        end

        private

        def expired?(key)
          Time.now.to_i > @register[key].to_i
        end

        def ttl(key, secs=60)
          @register[key] = Time.now.to_i + secs.to_i
        end

        def sterile(key)
          String === key ? key.chomp('!').chomp('=') : key.to_s.chomp('!').chomp('=').to_sym
        end

        def sterilize(key)
          key = sterile(key)
        end
      end
    end
  end
end
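The Vash class above implements a hash whose entries expire after a per-key TTL: writes go through `[]=`, which accepts an optional TTL as an extra index argument, reads clear a key once its deadline has passed, and `cleanup!` sweeps everything that has expired. A minimal usage sketch follows (illustrative keys and values only; it assumes the plugin files are on the load path, as test/helper.rb arranges):

```ruby
# Illustrative sketch of the Vash ("volatile hash") semantics defined above.
# Assumes the gem's lib directory is on the load path; the keys and values
# here are made up for demonstration.
require 'fluent/plugin/parser_netflow'
require 'fluent/plugin/vash'   # harmless if parser_netflow already pulls it in

cache = Fluent::Plugin::NetflowParser::Vash.new

cache[:flow] = "payload"                # implicit 60-second TTL
cache[:template_257, 300] = "template"  # explicit TTL passed as the extra index argument

cache[:flow]    # => "payload" until the TTL elapses; the key is cleared on the first read afterwards
cache.cleanup!  # proactively drops every entry whose TTL has already passed
```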
data/test/dump/*.dump (10 files)
ADDED
Binary files; no textual diff is shown for the NetFlow v5/v9 packet captures listed above.
data/test/helper.rb
ADDED
@@ -0,0 +1,26 @@
require 'rubygems'
require 'bundler'
begin
  Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'test/unit'

$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
require 'fluent/test'

# $log = Fluent::Log.new(Fluent::Test::DummyLogDevice.new, Fluent::Log::LEVEL_INFO)

require 'fluent/plugin/parser_netflow'
require 'fluent/plugin/in_netflow'

def unused_port
  s = TCPServer.open(0)
  port = s.addr[1]
  s.close
  port
end
data/test/test_in_netflow.rb
ADDED
@@ -0,0 +1,34 @@
require 'helper'
require 'fluent/test/driver/input'

class NetflowInputTest < Test::Unit::TestCase
  def setup
    Fluent::Test.setup
  end

  PORT = unused_port
  CONFIG = %[
    port #{PORT}
    bind 127.0.0.1
    tag test.netflow
  ]

  def create_driver(conf=CONFIG)
    Fluent::Test::Driver::Input.new(Fluent::Plugin::NetflowInput).configure(conf)
  end

  def test_configure
    d = create_driver
    assert_equal PORT, d.instance.port
    assert_equal '127.0.0.1', d.instance.bind
    assert_equal 'test.netflow', d.instance.tag
    assert_equal :udp, d.instance.protocol_type
    assert_equal 2048, d.instance.max_bytes

    assert_raise Fluent::ConfigError do
      d = create_driver CONFIG + %[
        protocol_type tcp
      ]
    end
  end
end
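Beyond the driver-based unit test above, the captured packets under data/test/dump/ can be replayed against a running in_netflow listener for a quick end-to-end check. A rough sketch using only the Ruby standard library; the host and port below are placeholders for whatever your source configuration actually binds to (the plugin listens on UDP by default, per the test above):

```ruby
# Rough sketch: replay a captured NetFlow v5 export at a running in_netflow
# listener. '127.0.0.1' and 2055 are placeholders; use the address and port
# your fluentd source is configured with.
require 'socket'

payload = File.binread('test/dump/netflow.v5.dump')  # 72-byte v5 packet shipped with this package

sock = UDPSocket.new
sock.send(payload, 0, '127.0.0.1', 2055)
sock.close
```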
data/test/test_parser_netflow.rb
ADDED
@@ -0,0 +1,380 @@
require 'helper'
require 'fluent/test/driver/parser'

class NetflowParserTest < Test::Unit::TestCase
  def setup
    Fluent::Test.setup
  end

  def create_parser(conf={})
    parser = Fluent::Plugin::NetflowParser.new
    parser.configure(Fluent::Config::Element.new('ROOT', '', conf, []))
    parser
  end

  test 'configure' do
    assert_nothing_raised do
      parser = create_parser
    end
  end

  test 'parse v5 binary data, dumped by netflow-generator' do
    # generated by https://github.com/mshindo/NetFlow-Generator
    parser = create_parser
    raw_data = File.binread(File.join(__dir__, "dump/netflow.v5.dump"))
    bytes_for_1record = 72
    assert_equal bytes_for_1record, raw_data.size
    parsed = []
    parser.call(raw_data) do |time, data|
      parsed << [time, data]
    end
    assert_equal 1, parsed.size
    assert_equal Time.parse('2016-02-29 11:14:00 -0800').to_i, parsed.first[0]
    expected_record = {
      # header
      "version" => 5,
      "uptime" => 1785097000,
      "flow_records" => 1,
      "flow_seq_num" => 1,
      "engine_type" => 1,
      "engine_id" => 1,
      "sampling_algorithm" => 0,
      "sampling_interval" => 0,

      # record
      "ipv4_src_addr" => "10.0.0.11",
      "ipv4_dst_addr" => "20.0.0.187",
      "ipv4_next_hop" => "30.0.0.254",
      "input_snmp" => 1,
      "output_snmp" => 2,
      "in_pkts" => 173,
      "in_bytes" => 4581,
      "first_switched" => "2016-02-29T19:13:59.215Z",
      "last_switched" => "2016-02-29T19:14:00.090Z",
      "l4_src_port" => 1001,
      "l4_dst_port" => 3001,
      "tcp_flags" => 27,
      "protocol" => 6,
      "src_tos" => 0,
      "src_as" => 101,
      "dst_as" => 201,
      "src_mask" => 24,
      "dst_mask" => 24,
    }
    assert_equal expected_record, parsed.first[1]
  end

  DEFAULT_UPTIME = 1048383625 # == (((12 * 24 + 3) * 60 + 13) * 60 + 3) * 1000 + 625
                              # 12days 3hours 13minutes 3seconds 625 milliseconds

  DEFAULT_TIME = Time.parse('2016-02-29 11:14:00 -0800').to_i
  DEFAULT_NSEC = rand(1_000_000_000)

  def msec_from_boot_to_time_by_rational(msec, uptime: DEFAULT_UPTIME, sec: DEFAULT_TIME, nsec: DEFAULT_NSEC)
    current_time = Rational(sec) + Rational(nsec, 1_000_000_000)
    diff_msec = uptime - msec
    target_time = current_time - Rational(diff_msec, 1_000)
    Time.at(target_time)
  end

  def msec_from_boot_to_time(msec, uptime: DEFAULT_UPTIME, sec: DEFAULT_TIME, nsec: DEFAULT_NSEC)
    millis = uptime - msec
    seconds = sec - (millis / 1000)
    micros = (nsec / 1000) - ((millis % 1000) * 1000)
    if micros < 0
      seconds -= 1
      micros += 1000000
    end
    Time.at(seconds, micros)
  end

  def format_for_switched(time)
    time.utc.strftime("%Y-%m-%dT%H:%M:%S.%3NZ")
  end

  test 'converting msec from boottime to time works correctly' do
    assert_equal msec_from_boot_to_time(300).to_i, msec_from_boot_to_time_by_rational(300).to_i
    assert_equal msec_from_boot_to_time(300).usec, msec_from_boot_to_time_by_rational(300).usec
  end

  test 'check performance degradation about stringifying *_switched times' do
    parser = create_parser({"switched_times_from_uptime" => true})
    data = v5_data(
      version: 5,
      flow_records: 50,
      uptime: DEFAULT_UPTIME,
      unix_sec: DEFAULT_TIME,
      unix_nsec: DEFAULT_NSEC,
      flow_seq_num: 1,
      engine_type: 1,
      engine_id: 1,
      sampling_algorithm: 0,
      sampling_interval: 0,
      records: [
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
        v5_record(), v5_record(), v5_record(), v5_record(), v5_record(),
      ]
    )

    bench_data = data.to_binary_s # 50 records

    # configure to leave uptime-based value as-is
    count = 0
    GC.start
    t1 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        # do nothing
        count += 1
      end
    end
    t2 = Time.now
    uptime_based_switched = t2 - t1

    assert{ count == 50000 }

    # make time conversion to use Rational
    count = 0
    GC.start
    t3 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        record["first_switched"] = format_for_switched(msec_from_boot_to_time_by_rational(record["first_switched"]))
        record["last_switched"] = format_for_switched(msec_from_boot_to_time_by_rational(record["last_switched"]))
        count += 1
      end
    end
    t4 = Time.now
    using_rational = t4 - t3

    assert{ count == 50000 }

    # skip time formatting
    count = 0
    GC.start
    t5 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        record["first_switched"] = msec_from_boot_to_time(record["first_switched"])
        record["last_switched"] = msec_from_boot_to_time(record["last_switched"])
        count += 1
      end
    end
    t6 = Time.now
    skip_time_formatting = t6 - t5

    assert{ count == 50000 }

    # with full time conversion (default)
    parser = create_parser
    count = 0
    GC.start
    t7 = Time.now
    1000.times do
      parser.call(bench_data) do |time, record|
        count += 1
      end
    end
    t8 = Time.now
    default_formatting = t8 - t7

    assert{ count == 50000 }

    assert{ using_rational > default_formatting }
    assert{ default_formatting > skip_time_formatting }
    assert{ skip_time_formatting > uptime_based_switched }
  end

  test 'parse v5 binary data contains 1 record, generated from definition' do
    parser = create_parser
    parsed = []

    time1 = DEFAULT_TIME
    data1 = v5_data(
      version: 5,
      flow_records: 1,
      uptime: DEFAULT_UPTIME,
      unix_sec: DEFAULT_TIME,
      unix_nsec: DEFAULT_NSEC,
      flow_seq_num: 1,
      engine_type: 1,
      engine_id: 1,
      sampling_algorithm: 0,
      sampling_interval: 0,
      records: [
        v5_record,
      ]
    )

    parser.call(data1.to_binary_s) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_equal time1, parsed.first[0]

    event = parsed.first[1]

    assert_equal 5, event["version"]
    assert_equal 1, event["flow_records"]
    assert_equal 1, event["flow_seq_num"]
    assert_equal 1, event["engine_type"]
    assert_equal 1, event["engine_id"]
    assert_equal 0, event["sampling_algorithm"]
    assert_equal 0, event["sampling_interval"]

    assert_equal "10.0.1.122", event["ipv4_src_addr"]
    assert_equal "192.168.0.3", event["ipv4_dst_addr"]
    assert_equal "10.0.0.3", event["ipv4_next_hop"]
    assert_equal 1, event["input_snmp"]
    assert_equal 2, event["output_snmp"]
    assert_equal 156, event["in_pkts"]
    assert_equal 1024, event["in_bytes"]
    assert_equal format_for_switched(msec_from_boot_to_time(DEFAULT_UPTIME - 13000)), event["first_switched"]
    assert_equal format_for_switched(msec_from_boot_to_time(DEFAULT_UPTIME - 12950)), event["last_switched"]
    assert_equal 1048, event["l4_src_port"]
    assert_equal 80, event["l4_dst_port"]
    assert_equal 27, event["tcp_flags"]
    assert_equal 6, event["protocol"]
    assert_equal 0, event["src_tos"]
    assert_equal 101, event["src_as"]
    assert_equal 201, event["dst_as"]
    assert_equal 24, event["src_mask"]
    assert_equal 24, event["dst_mask"]
  end

  test 'parse v5 binary data contains 1 record, generated from definition, leaving switched times as using uptime' do
    parser = create_parser({"switched_times_from_uptime" => true})
    parsed = []

    time1 = DEFAULT_TIME
    data1 = v5_data(
      version: 5,
      flow_records: 1,
      uptime: DEFAULT_UPTIME,
      unix_sec: DEFAULT_TIME,
      unix_nsec: DEFAULT_NSEC,
      flow_seq_num: 1,
      engine_type: 1,
      engine_id: 1,
      sampling_algorithm: 0,
      sampling_interval: 0,
      records: [
        v5_record,
      ]
    )

    parser.call(data1.to_binary_s) do |time, record|
      parsed << [time, record]
    end

    assert_equal 1, parsed.size
    assert_equal time1, parsed.first[0]

    event = parsed.first[1]

    assert_equal 5, event["version"]
    assert_equal 1, event["flow_records"]
    assert_equal 1, event["flow_seq_num"]
    assert_equal 1, event["engine_type"]
    assert_equal 1, event["engine_id"]
    assert_equal 0, event["sampling_algorithm"]
    assert_equal 0, event["sampling_interval"]

    assert_equal "10.0.1.122", event["ipv4_src_addr"]
    assert_equal "192.168.0.3", event["ipv4_dst_addr"]
    assert_equal "10.0.0.3", event["ipv4_next_hop"]
    assert_equal 1, event["input_snmp"]
    assert_equal 2, event["output_snmp"]
    assert_equal 156, event["in_pkts"]
    assert_equal 1024, event["in_bytes"]
    assert_equal (DEFAULT_UPTIME - 13000), event["first_switched"]
    assert_equal (DEFAULT_UPTIME - 12950), event["last_switched"]
    assert_equal 1048, event["l4_src_port"]
    assert_equal 80, event["l4_dst_port"]
    assert_equal 27, event["tcp_flags"]
    assert_equal 6, event["protocol"]
    assert_equal 0, event["src_tos"]
    assert_equal 101, event["src_as"]
    assert_equal 201, event["dst_as"]
    assert_equal 24, event["src_mask"]
    assert_equal 24, event["dst_mask"]
  end

  require 'fluent/plugin/netflow_records'
  def ipv4addr(v)
    addr = Fluent::Plugin::NetflowParser::IP4Addr.new
    addr.set(v)
    addr
  end

  def ipv6addr(v)
    addr = Fluent::Plugin::NetflowParser::IP6Addr.new
    addr.set(v)
    addr
  end

  def macaddr(v)
    addr = Fluent::Plugin::NetflowParser::MacAddr.new
    addr.set(v)
    addr
  end

  def mplslabel(v)
    label = Fluent::Plugin::NetflowParser::MplsLabel.new
    label.set(v)
    label
  end

  def v5_record(hash={})
    {
      ipv4_src_addr: "10.0.1.122",
      ipv4_dst_addr: "192.168.0.3",
      ipv4_next_hop: "10.0.0.3",
      input_snmp: 1,
      output_snmp: 2,
      in_pkts: 156,
      in_bytes: 1024,
      first_switched: DEFAULT_UPTIME - 13000, # 13seconds ago
      last_switched: DEFAULT_UPTIME - 12950,  # 50msec later after first switched
      l4_src_port: 1048,
      l4_dst_port: 80,
      tcp_flags: 27,
      protocol: 6,
      src_tos: 0,
      src_as: 101,
      dst_as: 201,
      src_mask: 24,
      dst_mask: 24,
    }.merge(hash)
  end

  def v5_data(hash={})
    hash = hash.dup
    hash[:records] = (hash[:records] || []).map{|r|
      r = r.dup
      [:ipv4_src_addr, :ipv4_dst_addr, :ipv4_next_hop].each do |key|
        r[key] = ipv4addr(r[key]) if r[key]
      end
      r
    }
    Fluent::Plugin::NetflowParser::Netflow5PDU.new(hash)
  end

  def v9_template(hash)
  end

  def v9_option(hash)
  end

  def v9_data(hash)
  end
end
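The *_switched assertions above reduce to simple arithmetic: a NetFlow v5 header carries the exporter's uptime in milliseconds plus the export wall-clock time, and each record's first/last_switched values are uptime-based, so the absolute timestamp is the export time minus (uptime - switched). A standalone sketch of that calculation using the same constants as the tests (the sub-second part carried by DEFAULT_NSEC is omitted for brevity):

```ruby
# Standalone sketch of the uptime-to-wallclock conversion that
# msec_from_boot_to_time performs above, using the tests' constants
# (fractional nanoseconds omitted for brevity).
require 'time'

uptime_msec    = 1_048_383_625                                 # DEFAULT_UPTIME: 12d 3h 13m 3.625s since boot
export_sec     = Time.parse('2016-02-29 11:14:00 -0800').to_i  # DEFAULT_TIME, the header's unix_sec
first_switched = uptime_msec - 13_000                          # v5_record: flow first seen 13 s before export

elapsed_msec = uptime_msec - first_switched                    # => 13000
absolute     = Time.at(export_sec - elapsed_msec / 1000.0)

puts absolute.utc.iso8601  # => "2016-02-29T19:13:47Z", 13 seconds before the export timestamp
```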