logstash-codec-netflow 2.0.5 → 2.1.0
This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +15 -0
- data/CONTRIBUTORS +12 -0
- data/Gemfile +1 -1
- data/LICENSE +1 -1
- data/README.md +12 -3
- data/lib/logstash/codecs/netflow.rb +314 -32
- data/lib/logstash/codecs/netflow/iana2yaml.rb +77 -0
- data/lib/logstash/codecs/netflow/ipfix.yaml +2333 -0
- data/lib/logstash/codecs/netflow/netflow.yaml +86 -2
- data/lib/logstash/codecs/netflow/util.rb +77 -10
- data/logstash-codec-netflow.gemspec +4 -3
- data/spec/codecs/ipfix.dat +0 -0
- data/spec/codecs/netflow5_test_invalid01.dat +0 -0
- data/spec/codecs/netflow5_test_invalid02.dat +0 -0
- data/spec/codecs/netflow9_test_invalid01.dat +0 -0
- data/spec/codecs/netflow9_test_macaddr_data.dat +0 -0
- data/spec/codecs/netflow9_test_macaddr_tpl.dat +0 -0
- data/spec/codecs/netflow9_test_nprobe_data.dat +0 -0
- data/spec/codecs/netflow9_test_nprobe_tpl.dat +0 -0
- data/spec/codecs/netflow9_test_softflowd_tpl_data.dat +0 -0
- data/spec/codecs/netflow9_test_valid01.dat +0 -0
- data/spec/codecs/netflow9_test_valid_cisco_asa_data.dat +0 -0
- data/spec/codecs/netflow9_test_valid_cisco_asa_tpl.dat +0 -0
- data/spec/codecs/netflow_spec.rb +524 -53
- metadata +49 -25
- data/spec/codecs/netflow9.dat +0 -0
data/lib/logstash/codecs/netflow/netflow.yaml
CHANGED
@@ -164,10 +164,10 @@
 - :dst_tos
 56:
 - :mac_addr
-- :
+- :in_src_mac
 57:
 - :mac_addr
-- :
+- :out_dst_mac
 58:
 - :uint16
 - :src_vlan
@@ -213,3 +213,87 @@
 83:
 - :string
 - :if_desc
+84:
+- :string
+- :sampler_name
+85:
+- :uint32
+- :in_permanent_bytes
+86:
+- :uint32
+- :in_permanent_pkts
+148:
+- :uint32
+- :conn_id
+176:
+- :uint8
+- :icmp_type
+177:
+- :uint8
+- :icmp_code
+178:
+- :uint8
+- :icmp_type_ipv6
+179:
+- :uint8
+- :icmp_code_ipv6
+225:
+- :ip4_addr
+- :xlate_src_addr_ipv4
+226:
+- :ip4_addr
+- :xlate_dst_addr_ipv4
+227:
+- :uint16
+- :xlate_src_port
+228:
+- :uint16
+- :xlate_dst_port
+233:
+- :uint8
+- :fw_event
+281:
+- :ip6_addr
+- :xlate_src_addr_ipv6
+282:
+- :ip6_addr
+- :xlate_dst_addr_ipv6
+33002:
+- :uint16
+- :fw_ext_event
+323:
+- 8
+- :event_time_msec
+152:
+- 8
+- :flow_start_msec
+231:
+- :uint32
+- :fwd_flow_delta_bytes
+232:
+- :uint32
+- :rev_flow_delta_bytes
+33000:
+- :acl_id_asa
+- :ingress_acl_id
+33001:
+- :acl_id_asa
+- egress_acl_id
+40000:
+- :string
+- :username
+40001:
+- :ip4_addr
+- :xlate_src_addr_ipv4
+40002:
+- :ip4_addr
+- :xlate_dst_addr_ipv4
+40003:
+- :uint16
+- :xlate_src_port
+40004:
+- :uint16
+- :xlate_dst_port
+40005:
+- :uint8
+- :fw_event
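Editor's note (not part of the diff): the entries added above follow netflow.yaml's existing convention of mapping a NetFlow v9 field type ID to a two-element list, the decoder type (a symbol such as :uint8, or a plain byte length as in entries 323 and 152) and the event field name. A minimal lookup sketch, assuming the YAML file is read from the gem's lib directory (the path and the standalone loading shown here are assumptions; the codec loads this file itself):

require 'yaml'

# Hypothetical standalone lookup of the field dictionary; newer Psych versions may
# require permitting Symbol (e.g. permitted_classes: [Symbol]) to load the :symbols.
fields = YAML.load_file("lib/logstash/codecs/netflow/netflow.yaml")

fields[233]    # => [:uint8, :fw_event]            (Cisco ASA firewall event)
fields[33000]  # => [:acl_id_asa, :ingress_acl_id] (decoded by the ACLIdASA primitive below)
fields[323]    # => [8, :event_time_msec]          (byte length instead of a type symbol)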
data/lib/logstash/codecs/netflow/util.rb
CHANGED
@@ -47,7 +47,20 @@ class MacAddr < BinData::Primitive
   end
 
   def get
-    self.bytes.collect { |byte| byte.to_s(16) }.join(":")
+    self.bytes.collect { |byte| byte.value.to_s(16).rjust(2,'0') }.join(":")
+  end
+end
+
+class ACLIdASA < BinData::Primitive
+  array :bytes, :type => :uint8, :initial_length => 12
+
+  def set(val)
+    self.bytes = val.split("-").collect { |aclid| aclid.scan(/../).collect { |hex| hex.to_i(16)} }.flatten
+  end
+
+  def get
+    hexstring = self.bytes.collect { |byte| byte.value.to_s(16).rjust(2,'0') }.join
+    hexstring.scan(/......../).collect { |aclid| aclid }.join("-")
   end
 end
 
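Editor's note (not part of the diff): a self-contained round-trip sketch of the ACLIdASA primitive added above, showing why the rjust(2,'0') zero-padding matters for the dashed hex ACL IDs seen in the Cisco ASA fixtures. The class body is copied from the hunk; the only assumption is that the bindata gem (a dependency of this codec) is installed, and that set/get are called directly rather than through the BinData read/write path.

require 'bindata'

class ACLIdASA < BinData::Primitive
  array :bytes, :type => :uint8, :initial_length => 12

  def set(val)
    # "aaaaaaaa-bbbbbbbb-cccccccc" -> 12 raw bytes
    self.bytes = val.split("-").collect { |aclid| aclid.scan(/../).collect { |hex| hex.to_i(16)} }.flatten
  end

  def get
    # 12 raw bytes -> zero-padded dashed hex string
    hexstring = self.bytes.collect { |byte| byte.value.to_s(16).rjust(2,'0') }.join
    hexstring.scan(/......../).collect { |aclid| aclid }.join("-")
  end
end

acl = ACLIdASA.new
acl.set("0f8e7ff3-fc1a030f-00000000")
acl.get  # => "0f8e7ff3-fc1a030f-00000000" (matches the spec fixture values)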
@@ -59,7 +72,7 @@ end
 class Netflow5PDU < BinData::Record
   endian :big
   uint16 :version
-  uint16 :flow_records
+  uint16 :flow_records, :assert => lambda { flow_records.value.between?(1,30) }
   uint32 :uptime
   uint32 :unix_sec
   uint32 :unix_nsec
@@ -68,7 +81,7 @@ class Netflow5PDU < BinData::Record
   uint8 :engine_id
   bit2 :sampling_algorithm
   bit14 :sampling_interval
-  array
+  array :records, :initial_length => :flow_records do
     ip4_addr :ipv4_src_addr
     ip4_addr :ipv4_dst_addr
     ip4_addr :ipv4_next_hop
@@ -92,7 +105,7 @@ class Netflow5PDU < BinData::Record
   end
 end
 
-class
+class NetflowTemplateFlowset < BinData::Record
   endian :big
   array :templates, :read_until => lambda { array.num_bytes == flowset_length - 4 } do
     uint16 :template_id
@@ -104,7 +117,7 @@ class TemplateFlowset < BinData::Record
   end
 end
 
-class
+class NetflowOptionFlowset < BinData::Record
   endian :big
   array :templates, :read_until => lambda { flowset_length - 4 - array.num_bytes <= 2 } do
     uint16 :template_id
@@ -131,12 +144,66 @@ class Netflow9PDU < BinData::Record
   uint32 :flow_seq_num
   uint32 :source_id
   array :records, :read_until => :eof do
-    uint16 :flowset_id
-    uint16 :flowset_length
+    uint16 :flowset_id, :assert => lambda { [0, 1, *(256..65535)].include?(flowset_id) }
+    uint16 :flowset_length, :assert => lambda { flowset_length > 4 }
+    choice :flowset_data, :selection => :flowset_id do
+      netflow_template_flowset 0
+      netflow_option_flowset 1
+      string :default, :read_length => lambda { flowset_length - 4 }
+    end
+  end
+end
+
+class IpfixTemplateFlowset < BinData::Record
+  endian :big
+  array :templates, :read_until => lambda { flowset_length - 4 - array.num_bytes <= 2 } do
+    uint16 :template_id
+    uint16 :field_count
+    array :fields, :initial_length => :field_count do
+      bit1 :enterprise
+      bit15 :field_type
+      uint16 :field_length
+      uint32 :enterprise_id, :onlyif => lambda { enterprise != 0 }
+    end
+  end
+  # skip :length => lambda { flowset_length - 4 - set.num_bytes } ?
+end
+
+class IpfixOptionFlowset < BinData::Record
+  endian :big
+  array :templates, :read_until => lambda { flowset_length - 4 - array.num_bytes <= 2 } do
+    uint16 :template_id
+    uint16 :field_count
+    uint16 :scope_count, :assert => lambda { scope_count > 0 }
+    array :scope_fields, :initial_length => lambda { scope_count } do
+      bit1 :enterprise
+      bit15 :field_type
+      uint16 :field_length
+      uint32 :enterprise_id, :onlyif => lambda { enterprise != 0 }
+    end
+    array :option_fields, :initial_length => lambda { field_count - scope_count } do
+      bit1 :enterprise
+      bit15 :field_type
+      uint16 :field_length
+      uint32 :enterprise_id, :onlyif => lambda { enterprise != 0 }
+    end
+  end
+end
+
+class IpfixPDU < BinData::Record
+  endian :big
+  uint16 :version
+  uint16 :pdu_length
+  uint32 :unix_sec
+  uint32 :flow_seq_num
+  uint32 :observation_domain_id
+  array :records, :read_until => lambda { array.num_bytes == pdu_length - 16 } do
+    uint16 :flowset_id, :assert => lambda { [2, 3, *(256..65535)].include?(flowset_id) }
+    uint16 :flowset_length, :assert => lambda { flowset_length > 4 }
     choice :flowset_data, :selection => :flowset_id do
-
-
-      string
+      ipfix_template_flowset 2
+      ipfix_option_flowset 3
+      string :default, :read_length => lambda { flowset_length - 4 }
     end
   end
 end
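Editor's note (not part of the diff): a trimmed illustration of how the IpfixPDU header added above maps onto raw bytes. The record below is a reduced, hypothetical copy (message header only, no flowset parsing) so it runs with nothing but the bindata gem; the class name and the synthetic packet are assumptions for the example.

require 'bindata'

# Header-only sketch; the real IpfixPDU goes on to parse flowsets via the choice above.
class IpfixHeaderOnly < BinData::Record
  endian :big
  uint16 :version
  uint16 :pdu_length
  uint32 :unix_sec
  uint32 :flow_seq_num
  uint32 :observation_domain_id
end

raw = [10, 16, 1431516026, 0, 0].pack("nnNNN")  # a bare 16-byte IPFIX message header
hdr = IpfixHeaderOnly.read(raw)
hdr.version   # => 10
hdr.unix_sec  # => 1431516026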
data/logstash-codec-netflow.gemspec
CHANGED
@@ -1,10 +1,10 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-codec-netflow'
-  s.version = '2.0
+  s.version = '2.1.0'
   s.licenses = ['Apache License (2.0)']
-  s.summary = "The netflow codec is for decoding Netflow v5/v9 flows."
-  s.description = "This gem is a
+  s.summary = "The netflow codec is for decoding Netflow v5/v9/IPFIX flows."
+  s.description = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
   s.authors = ["Elastic"]
   s.email = 'info@elastic.co'
   s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
@@ -25,3 +25,4 @@ Gem::Specification.new do |s|
   s.add_development_dependency 'logstash-devutils'
 end
 
+
Binary spec fixture files (data/spec/codecs/*.dat) added or changed; binary contents are not shown in this diff.
data/spec/codecs/netflow_spec.rb
CHANGED
@@ -13,16 +13,19 @@ describe LogStash::Codecs::Netflow do
 
   let(:decode) do
     [].tap do |events|
-      subject.decode(
+      data.each { |packet| subject.decode(packet){|event| events << event}}
    end
  end
 
-
+  ### NETFLOW v5
+
+  context "Netflow 5 valid 01" do
    let(:data) do
      # this netflow raw data was produced with softflowd and captured with netcat
      # softflowd -D -i eth0 -v 5 -t maxlife=1 -n 127.0.01:8765
      # nc -k -4 -u -l 127.0.0.1 8765 > netflow5.dat
-
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow5.dat"), :mode => "rb")
    end
 
    let(:json_events) do
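Editor's note (not part of the diff): the reworked decode helper above now feeds a list of packets to the codec, which is what lets a template packet and its dependent data packet come from separate .dat fixtures. Outside RSpec the same pattern looks roughly like this; the require, register call, and fixture paths are assumptions that resolve only in a Logstash runtime.

require 'logstash/codecs/netflow'  # assumes a Logstash environment on the load path

codec = LogStash::Codecs::Netflow.new
codec.register  # standard plugin lifecycle step before decoding
events = []

# Template packet first, then the data packet that depends on it.
["netflow9_test_macaddr_tpl.dat", "netflow9_test_macaddr_data.dat"].each do |file|
  packet = IO.read(File.join("spec", "codecs", file), :mode => "rb")
  codec.decode(packet) { |event| events << event }
end

puts events.size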
@@ -123,61 +126,162 @@ describe LogStash::Codecs::Netflow do
     end
   end
 
-  context "Netflow
+  context "Netflow 5 invalid 01 " do
+    let(:data) do
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow5_test_invalid01.dat"), :mode => "rb")
+    end
+
+    it "should not raise_error " do
+      expect{decode.size}.not_to raise_error
+    end
+  end
+
+  context "Netflow 5 invalid 02 " do
+    let(:data) do
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow5_test_invalid02.dat"), :mode => "rb")
+    end
+
+    it "should not raise_error" do
+      expect{decode.size}.not_to raise_error
+    end
+  end
+
+  ### NETFLOW v9
+
+  context "Netflow 9 valid 01" do
     let(:data) do
       # this netflow raw data was produced with softflowd and captured with netcat
-      # softflowd -
-      # nc -
-
+      # softflowd -v 9 -n 172.16.32.202:2055
+      # nc -4 -u -l 172.16.32.202 8765 > netflow9_test_valid01.dat
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_valid01.dat"), :mode => "rb")
     end
 
     let(:json_events) do
       events = []
       events << <<-END
        {
-          "@timestamp": "2015-
+          "@timestamp": "2015-10-08T19:04:30.000Z",
          "netflow": {
            "version": 9,
-            "flow_seq_num":
-            "flowset_id":
-            "ipv4_src_addr": "
-            "ipv4_dst_addr":
-            "last_switched":
-            "first_switched":
-            "in_bytes":
-            "in_pkts":
-            "input_snmp":
-            "output_snmp":
-            "l4_src_port":
-            "l4_dst_port":
-            "protocol":
-            "tcp_flags":
-            "ip_protocol_version":
+            "flow_seq_num":1,
+            "flowset_id":1024,
+            "ipv4_src_addr": "172.16.32.100",
+            "ipv4_dst_addr":"172.16.32.248",
+            "last_switched":"2015-10-08T19:03:47.999Z",
+            "first_switched":"2015-10-08T19:03:47.999Z",
+            "in_bytes":76,
+            "in_pkts":1,
+            "input_snmp":0,
+            "output_snmp":0,
+            "l4_src_port":123,
+            "l4_dst_port":123,
+            "protocol":17,
+            "tcp_flags":0,
+            "ip_protocol_version":4,
+            "src_tos":0
          },
          "@version": "1"
        }
      END
 
+      events.map{|event| event.gsub(/\s+/, "")}
+
+    end
+
+    it "should decode raw data" do
+      expect(decode.size).to eq(7)
+      expect(decode[0]["[netflow][version]"]).to eq(9)
+    end
+
+    it "should serialize to json" do
+      # generated json order can change with different implementation, convert back to hash to compare.
+      expect(JSON.parse(decode[0].to_json)).to eq(JSON.parse(json_events[0]))
+    end
+
+  end
+
+  context "Netflow 9 macaddress" do
+    let(:data) do
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_macaddr_tpl.dat"), :mode => "rb")
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_macaddr_data.dat"), :mode => "rb")
+    end
+
+    let(:json_events) do
+      events = []
+      events << <<-END
+        {
+          "@timestamp":"2015-10-10T08:47:01.000Z",
+          "netflow":{
+            "version":9,
+            "flow_seq_num":2,
+            "flowset_id":257,
+            "protocol":6,
+            "l4_src_port":65058,
+            "ipv4_src_addr":"172.16.32.1",
+            "l4_dst_port":22,
+            "ipv4_dst_addr":"172.16.32.201",
+            "in_src_mac":"00:50:56:c0:00:01",
+            "in_dst_mac":"00:0c:29:70:86:09"
+          },
+          "@version":"1"
+        }
+      END
+
+      events.map{|event| event.gsub(/\s+/, "")}
+    end
+
+    it "should decode the mac address" do
+      expect(decode[1]["[netflow][in_src_mac]"]).to eq("00:50:56:c0:00:01")
+      expect(decode[1]["[netflow][in_dst_mac]"]).to eq("00:0c:29:70:86:09")
+    end
+
+    it "should serialize to json" do
+      expect(JSON.parse(decode[1].to_json)).to eq(JSON.parse(json_events[0]))
+    end
+  end
+
+  context "Netflow 9 Cisco ASA" do
+    let(:data) do
+      packets = []
+      packets << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_valid_cisco_asa_tpl.dat"), :mode => "rb")
+      packets << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_valid_cisco_asa_data.dat"), :mode => "rb")
+    end
+
+    let(:json_events) do
+      events = []
      events << <<-END
        {
-          "@timestamp": "2015-
+          "@timestamp": "2015-10-09T09:47:51.000Z",
          "netflow": {
            "version": 9,
-            "flow_seq_num":
-            "flowset_id":
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "flow_seq_num": 662,
+            "flowset_id": 265,
+            "conn_id": 8501,
+            "ipv4_src_addr": "192.168.23.22",
+            "l4_src_port": 17549,
+            "input_snmp": 2,
+            "ipv4_dst_addr": "164.164.37.11",
+            "l4_dst_port": 0,
+            "output_snmp": 3,
+            "protocol": 1,
+            "icmp_type": 8,
+            "icmp_code": 0,
+            "xlate_src_addr_ipv4": "192.168.23.22",
+            "xlate_dst_addr_ipv4": "164.164.37.11",
+            "xlate_src_port": 17549,
+            "xlate_dst_port": 0,
+            "fw_event": 2,
+            "fw_ext_event": 2025,
+            "event_time_msec": 1444384070179,
+            "in_permanent_bytes": 56,
+            "flow_start_msec": 1444384068169,
+            "ingress_acl_id": "0f8e7ff3-fc1a030f-00000000",
+            "egress_acl_id": "00000000-00000000-00000000",
+            "username": ""
          },
          "@version": "1"
        }
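Editor's note (not part of the diff): the ASA fixture above pairs with the expectations at the start of the next hunk, where decode[1] is compared against json_events[0]. Additional expectations consistent with that fixture, written in the surrounding RSpec style, would look like the following; this is an illustrative sketch, not lines from the release.

it "should expose the new ASA fields" do
  expect(decode[1]["[netflow][fw_event]"]).to eq(2)
  expect(decode[1]["[netflow][fw_ext_event]"]).to eq(2025)
  expect(decode[1]["[netflow][ingress_acl_id]"]).to eq("0f8e7ff3-fc1a030f-00000000")
  expect(decode[1]["[netflow][xlate_src_addr_ipv4]"]).to eq("192.168.23.22")
end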
@@ -187,27 +291,394 @@ describe LogStash::Codecs::Netflow do
     end
 
     it "should decode raw data" do
-      expect(decode.size).to eq(
+      expect(decode.size).to eq(14)
+      expect(decode[1]["[netflow][version]"]).to eq(9)
+    end
 
-
-      expect(decode[
-
-
-      expect(decode[0]["[netflow][l4_dst_port]"]).to eq(22)
-      expect(decode[0]["[netflow][tcp_flags]"]).to eq(16)
+    it "should serialize to json" do
+      expect(JSON.parse(decode[1].to_json)).to eq(JSON.parse(json_events[0]))
+    end
+  end
 
-
-
-
-
-
-
+  context "Netflow 9 multple netflow exporters" do
+    let(:data) do
+      # This tests whether a template from a 2nd netflow exporter overwrites the template sent from the first.
+      # In this test the 3rd packet (from nprobe) should still decode succesfully.
+      # Note that in this case the SourceID from exporter 1 is different from exporter 2, otherwise we hit issue #9
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_nprobe_tpl.dat"), :mode => "rb")
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_softflowd_tpl_data.dat"), :mode => "rb")
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_nprobe_data.dat"), :mode => "rb")
+    end
+
+    let(:json_events) do
+      events = []
+      events << <<-END
+        {
+          "@timestamp": "2015-10-08T19:04:30.000Z",
+          "netflow": {
+            "version":9,
+            "flow_seq_num":1,
+            "flowset_id":1024,
+            "ipv4_src_addr":"172.16.32.100",
+            "ipv4_dst_addr":"172.16.32.248",
+            "last_switched":"2015-10-08T19:03:47.999Z",
+            "first_switched":"2015-10-08T19:03:47.999Z",
+            "in_bytes":76,
+            "in_pkts":1,
+            "input_snmp":0,
+            "output_snmp":0,
+            "l4_src_port":123,
+            "l4_dst_port":123,
+            "protocol":17,
+            "tcp_flags":0,
+            "ip_protocol_version":4,
+            "src_tos":0
+          },
+          "@version":"1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp":"2015-10-08T19:06:29.000Z",
+          "netflow": {
+            "version":9,
+            "flow_seq_num":1,
+            "flowset_id":257,
+            "in_bytes":200,
+            "in_pkts":2,
+            "protocol":6,
+            "src_tos":16,
+            "tcp_flags":24,
+            "l4_src_port":22,
+            "ipv4_src_addr":"172.16.32.201",
+            "src_mask":0,
+            "input_snmp":0,
+            "l4_dst_port":65058,
+            "ipv4_dst_addr":"172.16.32.1",
+            "dst_mask":0,
+            "output_snmp":0,
+            "ipv4_next_hop":"0.0.0.0",
+            "src_as":0,
+            "dst_as":0,
+            "last_switched":"2015-10-08T19:05:56.999Z",
+            "first_switched":"2015-10-08T19:05:56.999Z"
+          },
+          "@version":"1"
+        }
+      END
+
+      events.map{|event| event.gsub(/\s+/, "")}
+    end
+
+    # These tests will start to fail whenever options template decoding is added.
+    # Nprobe includes options templates, which this test included a sample from.
+    # Currently it is not decoded, but if it is, decode.size will be 9, and
+    # the packet currently identified with decode[7] will be decode[8]
+
+    it "should decode raw data" do
+      expect(decode.size).to eq(9)
+      expect(decode[1]["[netflow][l4_src_port]"]).to eq(123)
+      expect(decode[8]["[netflow][l4_src_port]"]).to eq(22)
+    end
+
+    it "should serialize to json" do
+      expect(JSON.parse(decode[1].to_json)).to eq(JSON.parse(json_events[0]))
+      expect(JSON.parse(decode[8].to_json)).to eq(JSON.parse(json_events[1]))
+    end
+  end
+
+  context "Netflow 9 invalid 01 " do
+    let(:data) do
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_invalid01.dat"), :mode => "rb")
+    end
+
+    it "should not raise_error" do
+      expect{decode.size}.not_to raise_error
+    end
+  end
+
+  context "Netflow 9 options template with scope fields" do
+    let(:data) do
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "netflow9_test_nprobe_tpl.dat"), :mode => "rb")
+    end
+
+    let(:json_events) do
+      events = []
+      events << <<-END
+        {
+          "@timestamp":"2015-10-08T19:06:29.000Z",
+          "netflow": {
+            "version":9,
+            "flow_seq_num":0,
+            "flowset_id":259,
+            "scope_system":0,
+            "total_flows_exp":1,
+            "total_pkts_exp":0
+          },
+          "@version":"1"
+        }
+      END
+
+      events.map{|event| event.gsub(/\s+/, "")}
+    end
+
+    it "should serialize to json" do
+      expect(JSON.parse(decode[0].to_json)).to eq(JSON.parse(json_events[0]))
+    end
+
+    it "should decode raw data" do
+      expect(decode[0]["[netflow][scope_system]"]).to eq(0)
+      expect(decode[0]["[netflow][total_flows_exp]"]).to eq(1)
+    end
+  end
+
+  context "IPFIX" do
+    let(:data) do
+      # this netflow raw data was produced with softflowd and captured with netcat
+      # softflowd -D -i eth0 -v 10 -t maxlife=1 -n 127.0.01:8765
+      # nc -k -4 -u -l 127.0.0.1 8765 > ipfix.dat
+      data = []
+      data << IO.read(File.join(File.dirname(__FILE__), "ipfix.dat"), :mode => "rb")
+    end
+
+    let(:json_events) do
+      events = []
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "meteringProcessId": 2679,
+            "systemInitTimeMilliseconds": 1431516013506,
+            "selectorAlgorithm": 1,
+            "samplingPacketInterval": 1,
+            "samplingPacketSpace": 0
+          },
+          "@version": "1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "sourceIPv4Address": "192.168.253.1",
+            "destinationIPv4Address": "192.168.253.128",
+            "octetDeltaCount": 260,
+            "packetDeltaCount": 5,
+            "ingressInterface": 0,
+            "egressInterface": 0,
+            "sourceTransportPort": 60560,
+            "destinationTransportPort": 22,
+            "protocolIdentifier": 6,
+            "tcpControlBits": 16,
+            "ipVersion": 4,
+            "ipClassOfService": 0,
+            "icmpTypeCodeIPv4": 0,
+            "vlanId": 0,
+            "flowStartSysUpTime": 0,
+            "flowEndSysUpTime": 12726
+          },
+          "@version": "1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "sourceIPv4Address": "192.168.253.128",
+            "destinationIPv4Address": "192.168.253.1",
+            "octetDeltaCount": 1000,
+            "packetDeltaCount": 6,
+            "ingressInterface": 0,
+            "egressInterface": 0,
+            "sourceTransportPort": 22,
+            "destinationTransportPort": 60560,
+            "protocolIdentifier": 6,
+            "tcpControlBits": 24,
+            "ipVersion": 4,
+            "ipClassOfService": 0,
+            "icmpTypeCodeIPv4": 0,
+            "vlanId": 0,
+            "flowStartSysUpTime": 0,
+            "flowEndSysUpTime": 12726
+          },
+          "@version": "1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "sourceIPv4Address": "192.168.253.2",
+            "destinationIPv4Address": "192.168.253.132",
+            "octetDeltaCount": 601,
+            "packetDeltaCount": 2,
+            "ingressInterface": 0,
+            "egressInterface": 0,
+            "sourceTransportPort": 53,
+            "destinationTransportPort": 35262,
+            "protocolIdentifier": 17,
+            "tcpControlBits": 0,
+            "ipVersion": 4,
+            "ipClassOfService": 0,
+            "icmpTypeCodeIPv4": 0,
+            "vlanId": 0,
+            "flowStartSysUpTime": 1104,
+            "flowEndSysUpTime": 1142
+          },
+          "@version": "1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "sourceIPv4Address": "192.168.253.132",
+            "destinationIPv4Address": "192.168.253.2",
+            "octetDeltaCount": 148,
+            "packetDeltaCount": 2,
+            "ingressInterface": 0,
+            "egressInterface": 0,
+            "sourceTransportPort": 35262,
+            "destinationTransportPort": 53,
+            "protocolIdentifier": 17,
+            "tcpControlBits": 0,
+            "ipVersion": 4,
+            "ipClassOfService": 0,
+            "icmpTypeCodeIPv4": 0,
+            "vlanId": 0,
+            "flowStartSysUpTime": 1104,
+            "flowEndSysUpTime": 1142
+          },
+          "@version": "1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "sourceIPv4Address": "54.214.9.161",
+            "destinationIPv4Address": "192.168.253.132",
+            "octetDeltaCount": 5946,
+            "packetDeltaCount": 14,
+            "ingressInterface": 0,
+            "egressInterface": 0,
+            "sourceTransportPort": 443,
+            "destinationTransportPort": 49935,
+            "protocolIdentifier": 6,
+            "tcpControlBits": 26,
+            "ipVersion": 4,
+            "ipClassOfService": 0,
+            "icmpTypeCodeIPv4": 0,
+            "vlanId": 0,
+            "flowStartSysUpTime": 1142,
+            "flowEndSysUpTime": 2392
+          },
+          "@version": "1"
+        }
+      END
+
+      events << <<-END
+        {
+          "@timestamp": "2015-05-13T11:20:26.000Z",
+          "netflow": {
+            "version": 10,
+            "sourceIPv4Address": "192.168.253.132",
+            "destinationIPv4Address": "54.214.9.161",
+            "octetDeltaCount": 2608,
+            "packetDeltaCount": 13,
+            "ingressInterface": 0,
+            "egressInterface": 0,
+            "sourceTransportPort": 49935,
+            "destinationTransportPort": 443,
+            "protocolIdentifier": 6,
+            "tcpControlBits": 26,
+            "ipVersion": 4,
+            "ipClassOfService": 0,
+            "icmpTypeCodeIPv4": 0,
+            "vlanId": 0,
+            "flowStartSysUpTime": 1142,
+            "flowEndSysUpTime": 2392
+          },
+          "@version": "1"
+        }
+      END
+
+      events.map{|event| event.gsub(/\s+/, "")}
+    end
+
+    it "should decode raw data" do
+      expect(decode.size).to eq(7)
+
+      expect(decode[0]["[netflow][version]"]).to eq(10)
+      expect(decode[0]["[netflow][systemInitTimeMilliseconds]"]).to eq(1431516013506)
+
+      expect(decode[1]["[netflow][version]"]).to eq(10)
+      expect(decode[1]["[netflow][sourceIPv4Address]"]).to eq("192.168.253.1")
+      expect(decode[1]["[netflow][destinationIPv4Address]"]).to eq("192.168.253.128")
+      expect(decode[1]["[netflow][sourceTransportPort]"]).to eq(60560)
+      expect(decode[1]["[netflow][destinationTransportPort]"]).to eq(22)
+      expect(decode[1]["[netflow][protocolIdentifier]"]).to eq(6)
+      expect(decode[1]["[netflow][tcpControlBits]"]).to eq(16)
+
+      expect(decode[2]["[netflow][version]"]).to eq(10)
+      expect(decode[2]["[netflow][sourceIPv4Address]"]).to eq("192.168.253.128")
+      expect(decode[2]["[netflow][destinationIPv4Address]"]).to eq("192.168.253.1")
+      expect(decode[2]["[netflow][sourceTransportPort]"]).to eq(22)
+      expect(decode[2]["[netflow][destinationTransportPort]"]).to eq(60560)
+      expect(decode[2]["[netflow][protocolIdentifier]"]).to eq(6)
+      expect(decode[2]["[netflow][tcpControlBits]"]).to eq(24)
+
+      expect(decode[3]["[netflow][sourceIPv4Address]"]).to eq("192.168.253.2")
+      expect(decode[3]["[netflow][destinationIPv4Address]"]).to eq("192.168.253.132")
+      expect(decode[3]["[netflow][sourceTransportPort]"]).to eq(53)
+      expect(decode[3]["[netflow][destinationTransportPort]"]).to eq(35262)
+      expect(decode[3]["[netflow][protocolIdentifier]"]).to eq(17)
+
+      expect(decode[4]["[netflow][sourceIPv4Address]"]).to eq("192.168.253.132")
+      expect(decode[4]["[netflow][destinationIPv4Address]"]).to eq("192.168.253.2")
+      expect(decode[4]["[netflow][sourceTransportPort]"]).to eq(35262)
+      expect(decode[4]["[netflow][destinationTransportPort]"]).to eq(53)
+      expect(decode[4]["[netflow][protocolIdentifier]"]).to eq(17)
+
+      expect(decode[5]["[netflow][sourceIPv4Address]"]).to eq("54.214.9.161")
+      expect(decode[5]["[netflow][destinationIPv4Address]"]).to eq("192.168.253.132")
+      expect(decode[5]["[netflow][sourceTransportPort]"]).to eq(443)
+      expect(decode[5]["[netflow][destinationTransportPort]"]).to eq(49935)
+      expect(decode[5]["[netflow][protocolIdentifier]"]).to eq(6)
+      expect(decode[5]["[netflow][tcpControlBits]"]).to eq(26)
+
+      expect(decode[6]["[netflow][sourceIPv4Address]"]).to eq("192.168.253.132")
+      expect(decode[6]["[netflow][destinationIPv4Address]"]).to eq("54.214.9.161")
+      expect(decode[6]["[netflow][sourceTransportPort]"]).to eq(49935)
+      expect(decode[6]["[netflow][destinationTransportPort]"]).to eq(443)
+      expect(decode[6]["[netflow][protocolIdentifier]"]).to eq(6)
+      expect(decode[6]["[netflow][tcpControlBits]"]).to eq(26)
     end
 
     it "should serialize to json" do
-      # generated json order can change with different implementation, convert back to hash to compare.
       expect(JSON.parse(decode[0].to_json)).to eq(JSON.parse(json_events[0]))
       expect(JSON.parse(decode[1].to_json)).to eq(JSON.parse(json_events[1]))
+      expect(JSON.parse(decode[2].to_json)).to eq(JSON.parse(json_events[2]))
+      expect(JSON.parse(decode[3].to_json)).to eq(JSON.parse(json_events[3]))
+      expect(JSON.parse(decode[4].to_json)).to eq(JSON.parse(json_events[4]))
+      expect(JSON.parse(decode[5].to_json)).to eq(JSON.parse(json_events[5]))
+      expect(JSON.parse(decode[6].to_json)).to eq(JSON.parse(json_events[6]))
     end
   end
+
 end