fluent-plugin-netflowipfix 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/Gemfile +3 -0
- data/LICENSE +202 -0
- data/README.md +83 -0
- data/Rakefile +13 -0
- data/fluent-plugin-netflowipfix.gemspec +28 -0
- data/lib/.DS_Store +0 -0
- data/lib/fluent/.DS_Store +0 -0
- data/lib/fluent/plugin/in_netflowipfix.rb +161 -0
- data/lib/fluent/plugin/ipfix_fields.yaml +1155 -0
- data/lib/fluent/plugin/netflow_fields.yaml +346 -0
- data/lib/fluent/plugin/netflowipfix_records.rb +243 -0
- data/lib/fluent/plugin/parser_netflow_v5.rb +129 -0
- data/lib/fluent/plugin/parser_netflow_v9.rb +346 -0
- data/lib/fluent/plugin/vash.rb +76 -0
- data/test/helper.rb +8 -0
- data/test/plugin/test_in_netflowipfix.rb +18 -0
- metadata +125 -0
@@ -0,0 +1,129 @@
|
|
1
|
+
#
|
2
|
+
# Copyright 2018 Yves Desharnais
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
|
16
|
+
require "fluent/plugin/parser"
|
17
|
+
require "bindata"
|
18
|
+
require 'yaml'
|
19
|
+
|
20
|
+
module Fluent
|
21
|
+
module Plugin
|
22
|
+
class NetflowipfixInput < Fluent::Plugin::Input
|
23
|
+
|
24
|
+
|
25
|
+
# Shared helpers for every NetFlow/IPFIX parser variant in this plugin:
# IPv4 address rendering, conversion of "milliseconds since device boot"
# values into absolute times, and ISO8601 timestamp formatting.
class ParserNetflowBase

  private

  # Render a 32-bit unsigned integer as dotted-quad IPv4 notation.
  def ipv4_addr_to_string(uint32)
    "#{(uint32 & 0xff000000) >> 24}.#{(uint32 & 0x00ff0000) >> 16}.#{(uint32 & 0x0000ff00) >> 8}.#{uint32 & 0x000000ff}"
  end

  # Translate an event time expressed as milliseconds-since-boot into an
  # absolute Time, using the exporter clock values carried in the packet
  # header.
  #
  # msec              - event time, milliseconds since device boot
  # uptime            - device uptime (ms) at export time
  # current_unix_time - export wall-clock seconds (unix epoch)
  # current_nsec      - export wall-clock nanosecond remainder
  def msec_from_boot_to_time(msec, uptime, current_unix_time, current_nsec)
    millis = uptime - msec
    seconds = current_unix_time - (millis / 1000)
    micros = (current_nsec / 1000) - ((millis % 1000) * 1000)
    # Borrow a second when the microsecond remainder went negative.
    if micros < 0
      seconds -= 1
      micros += 1000000
    end
    Time.at(seconds, micros)
  end # def msec_from_boot_to_time

  # ISO8601 UTC with millisecond precision, used for first/last_switched.
  def format_for_switched(time)
    time.utc.strftime("%Y-%m-%dT%H:%M:%S.%3NZ".freeze)
  end # def format_for_switched(time)

  # ISO8601 UTC, whole seconds.
  # NOTE(review): unlike the other formatters this pattern has no trailing
  # "Z" — presumably intentional, but worth confirming against consumers.
  def format_for_flowSeconds(time)
    time.utc.strftime("%Y-%m-%dT%H:%M:%S".freeze)
  end # def format_for_flowSeconds(time)

  # ISO8601 UTC with millisecond precision.
  # FIXED(DRY): previously duplicated the exact strftime pattern of
  # format_for_switched; now delegates to it (identical output).
  def format_for_flowMilliSeconds(time)
    format_for_switched(time)
  end # def format_for_flowMilliSeconds(time)

  # ISO8601 UTC with microsecond precision.
  def format_for_flowMicroSeconds(time)
    time.utc.strftime("%Y-%m-%dT%H:%M:%S.%6NZ".freeze)
  end # def format_for_flowMicroSeconds(time)

  # ISO8601 UTC with nanosecond precision.
  def format_for_flowNanoSeconds(time)
    time.utc.strftime("%Y-%m-%dT%H:%M:%S.%9NZ".freeze)
  end # def format_for_flowNanoSeconds(time)
end # class ParserNetflowBase
|
64
|
+
|
65
|
+
|
66
|
+
# Parser for NetFlow v5 datagrams. v5 uses a fixed record layout, so there
# is no template handling: each flow record is mapped directly to a hash.
class ParserNetflowv5 < ParserNetflowBase

  # Prepare the parser.
  #
  # FIXED: the original body was `super`, but no ancestor defines
  # #configure (ParserNetflowBase has none), so invoking this raised
  # NoMethodError. It now initializes the flag that #handle reads.
  def configure(conf)
    # When falsy (the default), first/last_switched are converted from
    # msec-since-boot into absolute ISO8601 timestamps in #handle.
    @switched_times_from_uptime = false
  end # def configure

  private

  # Emit one event per v5 flow record in the packet.
  #
  # host   - source address of the datagram (unused here; kept for parity
  #          with the v9/v10 handlers)
  # packet - decoded v5 PDU (BinData record with header fields and .records)
  # block  - callable receiving (time, record)
  def handle(host, packet, block)
    packet.records.each do |flowset|
      record = {
        # Header fields, repeated on every record of the packet.
        "version" => packet.version,
        "uptime" => packet.uptime,
        "flow_records" => packet.flow_records,
        "flow_seq_num" => packet.flow_seq_num,
        "engine_type" => packet.engine_type,
        "engine_id" => packet.engine_id,
        "sampling_algorithm" => packet.sampling_algorithm,
        "sampling_interval" => packet.sampling_interval,

        # Per-flow fields.
        "ipv4_src_addr" => flowset.ipv4_src_addr,
        "ipv4_dst_addr" => flowset.ipv4_dst_addr,
        "ipv4_next_hop" => flowset.ipv4_next_hop,
        "input_snmp" => flowset.input_snmp,
        "output_snmp" => flowset.output_snmp,
        "in_pkts" => flowset.in_pkts,
        "in_bytes" => flowset.in_bytes,
        "first_switched" => flowset.first_switched,
        "last_switched" => flowset.last_switched,
        "l4_src_port" => flowset.l4_src_port,
        "l4_dst_port" => flowset.l4_dst_port,
        "tcp_flags" => flowset.tcp_flags,
        "protocol" => flowset.protocol,
        "src_tos" => flowset.src_tos,
        "src_as" => flowset.src_as,
        "dst_as" => flowset.dst_as,
        "src_mask" => flowset.src_mask,
        "dst_mask" => flowset.dst_mask
      }

      # Convert relative switch times to absolute timestamps unless the
      # raw msec-since-boot values were explicitly requested.
      unless @switched_times_from_uptime
        record["first_switched"] = format_for_switched(msec_from_boot_to_time(record["first_switched"], packet.uptime, packet.unix_sec, packet.unix_nsec))
        record["last_switched"] = format_for_switched(msec_from_boot_to_time(record["last_switched"] , packet.uptime, packet.unix_sec, packet.unix_nsec))
      end # unless

      time = Time.at(packet.unix_sec, packet.unix_nsec / 1000).to_i # TODO: Fluent::EventTime
      block.call(time, record)
    end # do flowset
  end # def handle
end # class ParserNetflowv5
|
125
|
+
|
126
|
+
end # class NetflowipfixInput
|
127
|
+
end # module Plugin
|
128
|
+
end # module Fluent
|
129
|
+
|
@@ -0,0 +1,346 @@
|
|
1
|
+
#
|
2
|
+
# Copyright 2018 Yves Desharnais
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
|
16
|
+
require "fluent/plugin/parser"
|
17
|
+
require "bindata"
|
18
|
+
require 'yaml'
|
19
|
+
|
20
|
+
# Cisco NetFlow Export Datagram Format
|
21
|
+
# http://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html
|
22
|
+
# Cisco NetFlow Version 9 Flow-Record Format
|
23
|
+
# http://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html
|
24
|
+
|
25
|
+
|
26
|
+
module Fluent
|
27
|
+
module Plugin
|
28
|
+
class NetflowipfixInput < Fluent::Plugin::Input
|
29
|
+
|
30
|
+
# Shared machinery for the template-based protocols, NetFlow v9 and IPFIX
# (v10): learning (options) templates, mapping wire field types to BinData
# field declarations, and decoding data flowsets against cached templates.
class ParserNetflowIpfix < ParserNetflowBase

  # Initialize shared parser state.
  #
  # cache_ttl   - TTL (passed to the Vash template cache) for learned templates
  # definitions - optional path to a user YAML file of extra field definitions
  def configure(cache_ttl, definitions)
    @cache_ttl = cache_ttl
    @switched_times_from_uptime = false #, :bool, default: false
    @definitions = definitions
    # Counters keyed by "host|source_id|template_id" used to throttle
    # repeated warnings (missing template / unsupported field).
    @missingTemplates = {}
    @skipUnsupportedField = {}
  end # def configure

  private

  # Learn the (data) templates announced in a template flowset and cache a
  # BinData::Struct for each under "host|source_id|template_id".
  def handle_flowset_template(host, pdu, flowset, templates, p_fields)
    flowset.flowset_data.templates.each do |template|
      key = "#{host}|#{pdu.source_id}|#{template.template_id}"
      # :field aborts this template only; later templates are still learned.
      catch(:field) do
        fields = []
        template.template_fields.each do |field|
          entry = netflowipfix_field_for(field.field_type, field.field_length, p_fields, key)
          throw :field unless entry
          fields += entry
        end # do field
        if !@missingTemplates[key].nil? && @missingTemplates[key] > 0
          $log.warn "Template received after missing #{@missingTemplates[key]} packets",
            host: host, source_id: pdu.source_id, flowset_id: template.template_id
          @missingTemplates[key] = 0
        end
        # We get this far, we have a list of fields
        templates[key, @cache_ttl] = BinData::Struct.new(endian: :big, fields: fields)
        # Purge any expired templates
        templates.cleanup!
      end # catch
    end # each do |template|
  end # def handle_flowset_template

  # Map one wire (type, length) pair to a list of BinData field
  # declarations, looking the type up in the loaded YAML definitions.
  # Unknown or malformed definitions degrade to a :skip field of the right
  # length so the rest of the record still decodes.
  # Note: optional `category` sits before required `key` — legal Ruby; a
  # 4-argument call gets category 'option'.
  def netflowipfix_field_for(type, length, p_fields, category='option', key)
    unless field = p_fields[category][type]
      # Warn only once per host|template|type|length combination.
      fkey = "#{key}|#{type}|#{length}"
      if @skipUnsupportedField[fkey].nil? || @skipUnsupportedField[fkey] == 0
        $log.warn "Skip unsupported field", type: type, length: length, key:key
        @skipUnsupportedField[fkey] = 1
      else
        @skipUnsupportedField[fkey] = @skipUnsupportedField[fkey] + 1
      end
      return [[:skip, nil, {length: length}]]
    end # unless

    unless field.is_a?(Array)
      $log.warn "Skip non-Array definition", fields: field
      return [[:skip, nil, {length: length}]]
    end # unless

    # Small bit of fixup for numeric value, :skip or :string field length,
    # which are dynamic
    case field[0]
    when Integer
      [[uint_field(length, field[0]), field[1]]]
    when :skip
      [field + [nil, {length: length}]]
    when :string
      [field + [{length: length, trim_padding: true}]]
    when "octetArray"
      oField = octetArray(length)
      [[oField, field[1]]]
    else
      [field]
    end # case
  end # def netflowipfix_field_for

  NETFLOWIPFIX_FIELD_CATEGORIES = ['scope', 'option'].freeze

  # Learn options templates; like handle_flowset_template but fields come
  # from both the scope and option sections of the template.
  def handle_flowset_options_template(host, pdu, flowset, templates, p_fields)
    flowset.flowset_data.templates.each do |template|
      catch(:field) do
        key = "#{host}|#{pdu.source_id}|#{template.template_id}"

        fields = []
        NETFLOWIPFIX_FIELD_CATEGORIES.each do |category|
          template["#{category}_fields"].each do |field|
            entry = netflowipfix_field_for(field.field_type, field.field_length, p_fields, category, key)
            throw :field unless entry
            fields += entry
          end # do field
        end # do category

        templates[key, @cache_ttl] = BinData::Struct.new(endian: :big, fields: fields)
        # Purge any expired templates
        templates.cleanup!
      end # catch
    end # do templates
  end # def handle_flowset_options_template

  # Header fields copied verbatim onto every emitted event.
  FIELDS_FOR_COPY_v9_10 = ['version', 'flow_seq_num'].freeze

  # Decode a data flowset against a previously learned template and emit
  # one event per record via block.call(time, event).
  # `fields` (the loaded YAML definitions) and `ver` are currently unused
  # here but kept for interface compatibility with the call sites.
  def handle_flowset_data(host, packet, flowset, block, templates, fields, ver)
    template_key = "#{host}|#{packet.source_id}|#{flowset.flowset_id}"
    template = templates[template_key]
    if !template
      # Repeating the error adds no value: warn once, then just count the
      # packets dropped until the template arrives.
      if @missingTemplates[template_key].nil? || @missingTemplates[template_key] == 0
        @missingTemplates[template_key] = 1
        $log.warn 'No matching template for', host: host, source_id: packet.source_id, flowset_id: flowset.flowset_id
      else
        @missingTemplates[template_key] = @missingTemplates[template_key] + 1
      end

      return
    end # if !template

    # Payload length excludes the 4-byte flowset header.
    length = flowset.flowset_length - 4

    if packet.version == 9
      # Template shouldn't be longer than the flowset and there should be
      # at most 3 padding bytes. Warn but still attempt the decode.
      if template.num_bytes > flowset.flowset_length or ! (length % template.num_bytes).between?(0, 3)
        $log.warn "v9 Template length doesn't fit cleanly into flowset",
          template_id: flowset.flowset_id,
          template_length: template.num_bytes,
          flowset_length: length
      end

      array = BinData::Array.new(type: template, initial_length: length / template.num_bytes)
    elsif packet.version == 10
      # IPFIX records may be variable-length; read until the data runs out.
      array = BinData::Array.new(type: template, :read_until => :eof)
    end

    # FIXED: the decoded records previously overwrote the `fields`
    # parameter; use a distinct local instead.
    decoded = array.read(flowset.flowset_data)
    decoded.each do |r|
      time = packet.unix_sec # TODO: pending from netflow plugin: Fluent::EventTime (see: forV5)
      event = {}

      # Fewer fields in the v9 header
      FIELDS_FOR_COPY_v9_10.each do |f|
        event[f] = packet[f]
      end

      event['flowset_id'] = flowset.flowset_id

      r.each_pair { |k,v| event[k.to_s] = v }
      # Switch-time conversion is disabled below (the author noted it
      # caused crashes); events keep the raw first/last_switched values.
      unless @switched_times_from_uptime
        if packet.version == 9
          # event['first_switched'] = format_for_switched(msec_from_boot_to_time(event['first_switched'], packet.uptime, time, 0))
          # event['last_switched'] = format_for_switched(msec_from_boot_to_time(event['last_switched'] , packet.uptime, time, 0))
        elsif packet.version == 10
          # event['first_switched'] = format_for_switched(msec_from_boot_to_time(event['first_switched'], packet.unix_sec, time, 0))
          # event['last_switched'] = format_for_switched(msec_from_boot_to_time(event['last_switched'] , packet.unix_sec, time, 0))
        end
      end

      block.call(time, event)
    end # decoded.each
  end # def handle_flowset_data

  # covers Netflow v9 and v10 (a.k.a IPFIX)
  def is_sampler?(record)
    record['flow_sampler_id'] && record['flow_sampler_mode'] && record['flow_sampler_random_interval']
  end # def is_sampler?(record)

  # Return the BinData uint symbol for a field of `length` bytes
  # (e.g. 4 -> :uint32); `default` is used when length is 0.
  def uint_field(length, default)
    ("uint" + (((length > 0) ? length : default) * 8).to_s).to_sym
  end # def uint_field

  # Return the custom OctetArray BinData type symbol for small byte arrays.
  # FIXED: removed a dead leading statement whose value was discarded.
  # Lengths other than 1 or 2 log an error and return nil — the caller
  # will then build an invalid field; worth hardening later.
  def octetArray(length)
    case length
    when 1,"1"
      ("OctetArray1").to_sym
    when 2,"2"
      ("OctetArray2").to_sym
    else
      $log.error "No octet array of #{length} bytes"
    end
  end # def octetArray

end # class ParserNetflowIpfix
|
252
|
+
|
253
|
+
|
254
|
+
|
255
|
+
|
256
|
+
# NetFlow v9 parser: binds the shared template machinery to the bundled
# v9 field definitions and the v9 flowset-id layout.
class ParserNetflowv9 < ParserNetflowIpfix

  # cache_ttl   - TTL for learned templates in the Vash cache
  # definitions - optional user YAML file overriding/extending v9 fields
  def configure(cache_ttl, definitions)
    super(cache_ttl, definitions)

    @templates9 = Vash.new
    @samplers_v9 = Vash.new

    # The default v9 field definitions ship next to this file.
    filename9 = File.expand_path('../netflow_fields.yaml', __FILE__)
    begin
      @fields9 = YAML.load_file(filename9)
    rescue => e
      raise ConfigError, "Bad syntax in definitions file #{filename9}, error_class = #{e.class.name}, error = #{e.message}"
    end

    # User-supplied augmentations/overrides, merged over the defaults.
    return unless @definitions

    raise ConfigError, "definitions file #{@definitions} doesn't exist" unless File.exist?(@definitions)
    begin
      @fields9['option'].merge!(YAML.load_file(@definitions))
    rescue => e
      raise ConfigError, "Bad syntax in definitions file #{@definitions}, error_class = #{e.class.name}, error = #{e.message}"
    end
  end # def configure

  # Dispatch every flowset in a v9 PDU to the matching handler:
  # 0 = templates, 1 = options templates, 256+ = data.
  def handle_v9(host, pdu, block)
    pdu.records.each do |flowset|
      id = flowset.flowset_id
      case id
      when 0
        handle_flowset_template(host, pdu, flowset, @templates9, @fields9)
      when 1
        handle_flowset_options_template(host, pdu, flowset, @templates9, @fields9)
      when 256..65535
        handle_flowset_data(host, pdu, flowset, block, @templates9, @fields9, 9)
      else
        $log.warn 'v9 Unsupported flowset', flowset_id: id
      end
    end
  end # def handle_v9

end # class ParserNetflowv9
|
298
|
+
|
299
|
+
# IPFIX (NetFlow v10) parser: binds the shared template machinery to the
# bundled IPFIX field definitions and the v10 set-id layout.
class ParserIPfixv10 < ParserNetflowIpfix

  # cache_ttl   - TTL for learned templates in the Vash cache
  # definitions - optional user YAML file overriding/extending IPFIX fields
  def configure(cache_ttl, definitions)
    super(cache_ttl, definitions)
    @templates10 = Vash.new()
    @samplers_v10 = Vash.new()

    # Path to default Netflow v10 field definitions
    filename10 = File.expand_path('../ipfix_fields.yaml', __FILE__)

    begin
      @fields10 = YAML.load_file(filename10)
    rescue => e
      raise ConfigError, "Bad syntax in definitions file #{filename10}, error_class = #{e.class.name}, error = #{e.message}"
    end

    # Allow the user to augment/override/rename the supported Netflow fields
    if @definitions
      raise ConfigError, "definitions file #{@definitions} doesn't exist" unless File.exist?(@definitions)
      begin
        @fields10['option'].merge!(YAML.load_file(@definitions))
      rescue => e
        raise ConfigError, "Bad syntax in definitions file #{@definitions}, error_class = #{e.class.name}, error = #{e.message}"
      end
    end
  end # def configure

  # Dispatch every set in a v10 PDU to the matching handler:
  # 2 = templates, 3 = options templates, 256+ = data.
  def handle_v10(host, pdu, block)
    pdu.records.each do |flowset|
      case flowset.flowset_id
      when 2
        handle_flowset_template(host, pdu, flowset, @templates10, @fields10)
      when 3
        handle_flowset_options_template(host, pdu, flowset, @templates10, @fields10)
      when 256..65535
        handle_flowset_data(host, pdu, flowset, block, @templates10, @fields10, 10)
      else
        # FIXED: was `flowset.set_id`, an accessor used nowhere else — the
        # case selector above reads `flowset.flowset_id` — so hitting this
        # branch raised NoMethodError instead of logging the warning.
        $log.warn 'v10 Unsupported set', set_id: flowset.flowset_id
      end # case
    end # do
  end # def handle_v10

end # class ParserIPfixv10
|
342
|
+
|
343
|
+
end # class NetflowipfixInput
|
344
|
+
end # module Plugin
|
345
|
+
end # module Fluent
|
346
|
+
|