logstash-codec-netflow 0.1.5 → 0.1.6

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 0c0d1bab4ccf9556c23efa021fbe4793b0284510
-   data.tar.gz: e8479587a6bb5449a9d8441cc2de89ff3ddd670c
+   metadata.gz: e032fc3de23093eb169875c297a10d3cf47f1bb6
+   data.tar.gz: c168272c6b4b61c0795358f8fc4629aa788720a1
  SHA512:
-   metadata.gz: a04238f6f20d9b27d57c116ea0f0469dec8b92083c40b75302336cdbc7f9a24d8f9fb7ef507d30e1711e693274d1ab1d51022a21c0f53035085f0ab82c0bcd41
-   data.tar.gz: 7455790420b9afa7c6ad4b03d4eef7591ca9d859d4ae6f6dfd3a8c8a02b8af494e5b26bb04dd0675a72cc5517c3144f59c062d468142e7fe1eb6b5e758c3d733
+   metadata.gz: 05316c177d6c74883b70e8a1bff3a027f2a1205a958ab050fa529241472c321bc5bbe9087e629e33d1295cd276431f777b1d8ac3c9d3662ce0ea3a7b7373bce4
+   data.tar.gz: acaf980bd49a558f75c79f522aff5254c2c10714a19376b12274acc8abb12b0ab9398c1a86e2398cfdfa66783c3ead834285def904b4476ee66e33c1d6b254c5
lib/logstash/codecs/netflow.rb CHANGED
@@ -33,13 +33,16 @@ class LogStash::Codecs::Netflow < LogStash::Codecs::Base
  # See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
  config :definitions, :validate => :path

- public
- def initialize(params={})
+ NETFLOW5_FIELDS = ['version', 'flow_seq_num', 'engine_type', 'engine_id', 'sampling_algorithm', 'sampling_interval', 'flow_records']
+ NETFLOW9_FIELDS = ['version', 'flow_seq_num']
+ SWITCHED = /_switched$/
+ FLOWSET_ID = "flowset_id"
+
+ def initialize(params = {})
    super(params)
    @threadsafe = false
  end

- public
  def register
    require "logstash/codecs/netflow/util"
    @templates = Vash.new()
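Note: register keeps parsed v9 templates in a Vash, an expiring hash; later hunks write entries with @templates[key, @cache_ttl] = ... and evict stale ones with cleanup!. A minimal sketch of those semantics (the 4000-second TTL and template_struct value are illustrative stand-ins, not values shown in this hunk):

  cache = Vash.new
  cache["9|260", 4000] = template_struct  # value expires 4000 seconds after insertion
  cache.cleanup!                          # evict entries whose TTL has elapsed
  cache["9|260"]                          # => template_struct, or nil once expired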
@@ -64,7 +67,6 @@ class LogStash::Codecs::Netflow < LogStash::Codecs::Base
    end
  end # def register

- public
  def decode(payload, &block)
    header = Header.read(payload)

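Note: after this refactor, decode is purely block-based; events are built by the new per-version helpers and handed to the caller's block. A hypothetical driver, mirroring what the specs added below do:

  codec = LogStash::Codecs::Netflow.new
  codec.register
  payload = IO.read("spec/codecs/netflow5.dat", :mode => "rb")  # one raw NetFlow datagram
  codec.decode(payload) do |event|
    puts event["[netflow][version]"]  # => 5
  end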
@@ -75,169 +77,166 @@ class LogStash::Codecs::Netflow < LogStash::Codecs::Base

    if header.version == 5
      flowset = Netflow5PDU.read(payload)
+     flowset.records.each do |record|
+       yield(decode_netflow5(flowset, record))
+     end
    elsif header.version == 9
      flowset = Netflow9PDU.read(payload)
+     flowset.records.each do |record|
+       decode_netflow9(flowset, record).each{|event| yield(event)}
+     end
    else
      @logger.warn("Unsupported Netflow version v#{header.version}")
-     return
    end
+ end

-   flowset.records.each do |record|
-     if flowset.version == 5
-       event = LogStash::Event.new
+ private

-       # FIXME Probably not doing this right WRT JRuby?
-       #
-       # The flowset header gives us the UTC epoch seconds along with
-       # residual nanoseconds so we can set @timestamp to that easily
-       event.timestamp = LogStash::Timestamp.at(flowset.unix_sec, flowset.unix_nsec / 1000)
-       event[@target] = {}
+ def decode_netflow5(flowset, record)
+   event = {
+     LogStash::Event::TIMESTAMP => LogStash::Timestamp.at(flowset.unix_sec.snapshot, flowset.unix_nsec.snapshot / 1000),
+     @target => {}
+   }

-       # Copy some of the pertinent fields in the header to the event
-       ['version', 'flow_seq_num', 'engine_type', 'engine_id', 'sampling_algorithm', 'sampling_interval', 'flow_records'].each do |f|
-         event[@target][f] = flowset[f]
-       end
+   # Copy some of the pertinent fields in the header to the event
+   NETFLOW5_FIELDS.each do |f|
+     event[@target][f] = flowset[f].snapshot
+   end

-       # Create fields in the event from each field in the flow record
-       record.each_pair do |k,v|
-         case k.to_s
-         when /_switched$/
-           # The flow record sets the first and last times to the device
-           # uptime in milliseconds. Given the actual uptime is provided
-           # in the flowset header along with the epoch seconds we can
-           # convert these into absolute times
-           millis = flowset.uptime - v
-           seconds = flowset.unix_sec - (millis / 1000)
-           micros = (flowset.unix_nsec / 1000) - (millis % 1000)
-           if micros < 0
-             seconds--
-             micros += 1000000
-           end
-           # FIXME Again, probably doing this wrong WRT JRuby?
-           event[@target][k.to_s] = Time.at(seconds, micros).utc.strftime("%Y-%m-%dT%H:%M:%S.%3NZ")
-         else
-           event[@target][k.to_s] = v
-         end
+   # Create fields in the event from each field in the flow record
+   record.each_pair do |k, v|
+     case k.to_s
+     when SWITCHED
+       # The flow record sets the first and last times to the device
+       # uptime in milliseconds. Given the actual uptime is provided
+       # in the flowset header along with the epoch seconds we can
+       # convert these into absolute times
+       millis = flowset.uptime - v
+       seconds = flowset.unix_sec - (millis / 1000)
+       micros = (flowset.unix_nsec / 1000) - (millis % 1000)
+       if micros < 0
+         seconds -= 1
+         micros += 1000000
        end
+       event[@target][k.to_s] = LogStash::Timestamp.at(seconds, micros).to_iso8601
+     else
+       event[@target][k.to_s] = v.snapshot
+     end
+   end

-       yield event
-     elsif flowset.version == 9
-       case record.flowset_id
-       when 0
-         # Template flowset
-         record.flowset_data.templates.each do |template|
-           catch (:field) do
-             fields = []
-             template.fields.each do |field|
-               entry = netflow_field_for(field.field_type, field.field_length)
-               if ! entry
-                 throw :field
-               end
-               fields += entry
-             end
-             # We get this far, we have a list of fields
-             #key = "#{flowset.source_id}|#{event["source"]}|#{template.template_id}"
-             key = "#{flowset.source_id}|#{template.template_id}"
-             @templates[key, @cache_ttl] = BinData::Struct.new(:endian => :big, :fields => fields)
-             # Purge any expired templates
-             @templates.cleanup!
-           end
-         end
-       when 1
-         # Options template flowset
-         record.flowset_data.templates.each do |template|
-           catch (:field) do
-             fields = []
-             template.option_fields.each do |field|
-               entry = netflow_field_for(field.field_type, field.field_length)
-               if ! entry
-                 throw :field
-               end
-               fields += entry
-             end
-             # We get this far, we have a list of fields
-             #key = "#{flowset.source_id}|#{event["source"]}|#{template.template_id}"
-             key = "#{flowset.source_id}|#{template.template_id}"
-             @templates[key, @cache_ttl] = BinData::Struct.new(:endian => :big, :fields => fields)
-             # Purge any expired templates
-             @templates.cleanup!
-           end
+   LogStash::Event.new(event)
+ end
+
+ def decode_netflow9(flowset, record)
+   events = []
+
+   case record.flowset_id
+   when 0
+     # Template flowset
+     record.flowset_data.templates.each do |template|
+       catch (:field) do
+         fields = []
+         template.fields.each do |field|
+           entry = netflow_field_for(field.field_type, field.field_length)
+           throw :field unless entry
+           fields += entry
          end
-       when 256..65535
-         # Data flowset
-         #key = "#{flowset.source_id}|#{event["source"]}|#{record.flowset_id}"
-         key = "#{flowset.source_id}|#{record.flowset_id}"
-         template = @templates[key]
-
-         if ! template
-           #@logger.warn("No matching template for flow id #{record.flowset_id} from #{event["source"]}")
-           @logger.warn("No matching template for flow id #{record.flowset_id}")
-           next
+         # We get this far, we have a list of fields
+         #key = "#{flowset.source_id}|#{event["source"]}|#{template.template_id}"
+         key = "#{flowset.source_id}|#{template.template_id}"
+         @templates[key, @cache_ttl] = BinData::Struct.new(:endian => :big, :fields => fields)
+         # Purge any expired templates
+         @templates.cleanup!
+       end
+     end
+   when 1
+     # Options template flowset
+     record.flowset_data.templates.each do |template|
+       catch (:field) do
+         fields = []
+         template.option_fields.each do |field|
+           entry = netflow_field_for(field.field_type, field.field_length)
+           throw :field unless entry
+           fields += entry
          end
+         # We get this far, we have a list of fields
+         #key = "#{flowset.source_id}|#{event["source"]}|#{template.template_id}"
+         key = "#{flowset.source_id}|#{template.template_id}"
+         @templates[key, @cache_ttl] = BinData::Struct.new(:endian => :big, :fields => fields)
+         # Purge any expired templates
+         @templates.cleanup!
+       end
+     end
+   when 256..65535
+     # Data flowset
+     #key = "#{flowset.source_id}|#{event["source"]}|#{record.flowset_id}"
+     key = "#{flowset.source_id}|#{record.flowset_id}"
+     template = @templates[key]
+
+     unless template
+       #@logger.warn("No matching template for flow id #{record.flowset_id} from #{event["source"]}")
+       @logger.warn("No matching template for flow id #{record.flowset_id}")
+       return events
+     end

-         length = record.flowset_length - 4
+     length = record.flowset_length - 4

-         # Template shouldn't be longer than the record and there should
-         # be at most 3 padding bytes
-         if template.num_bytes > length or ! (length % template.num_bytes).between?(0, 3)
-           @logger.warn("Template length doesn't fit cleanly into flowset", :template_id => record.flowset_id, :template_length => template.num_bytes, :record_length => length)
-           next
-         end
+     # Template shouldn't be longer than the record and there should
+     # be at most 3 padding bytes
+     if template.num_bytes > length or ! (length % template.num_bytes).between?(0, 3)
+       @logger.warn("Template length doesn't fit cleanly into flowset", :template_id => record.flowset_id, :template_length => template.num_bytes, :record_length => length)
+       return events
+     end
+
+     array = BinData::Array.new(:type => template, :initial_length => length / template.num_bytes)
+     records = array.read(record.flowset_data)
+
+     records.each do |r|
+       event = {
+         LogStash::Event::TIMESTAMP => LogStash::Timestamp.at(flowset.unix_sec),
+         @target => {}
+       }

-         array = BinData::Array.new(:type => template, :initial_length => length / template.num_bytes)
-
-         records = array.read(record.flowset_data)
-
-         records.each do |r|
-           event = LogStash::Event.new(
-             LogStash::Event::TIMESTAMP => LogStash::Timestamp.at(flowset.unix_sec),
-             @target => {}
-           )
-
-           # Fewer fields in the v9 header
-           ['version', 'flow_seq_num'].each do |f|
-             event[@target][f] = flowset[f]
-           end
-
-           event[@target]['flowset_id'] = record.flowset_id
-
-           r.each_pair do |k,v|
-             case k.to_s
-             when /_switched$/
-               millis = flowset.uptime - v
-               seconds = flowset.unix_sec - (millis / 1000)
-               # v9 did away with the nanosecs field
-               micros = 1000000 - (millis % 1000)
-               event[@target][k.to_s] = Time.at(seconds, micros).utc.strftime("%Y-%m-%dT%H:%M:%S.%3NZ")
-             else
-               event[@target][k.to_s] = v
-             end
-           end
-
-           yield event
+       # Fewer fields in the v9 header
+       NETFLOW9_FIELDS.each do |f|
+         event[@target][f] = flowset[f].snapshot
+       end
+
+       event[@target][FLOWSET_ID] = record.flowset_id.snapshot
+
+       r.each_pair do |k, v|
+         case k.to_s
+         when SWITCHED
+           millis = flowset.uptime - v
+           seconds = flowset.unix_sec - (millis / 1000)
+           # v9 did away with the nanosecs field
+           micros = 1000000 - (millis % 1000)
+           event[@target][k.to_s] = LogStash::Timestamp.at(seconds, micros).to_iso8601
+         else
+           event[@target][k.to_s] = v.snapshot
          end
-       else
-         @logger.warn("Unsupported flowset id #{record.flowset_id}")
        end
+
+       events << LogStash::Event.new(event)
      end
+   else
+     @logger.warn("Unsupported flowset id #{record.flowset_id}")
    end
- end # def filter

- private
+   events
+ end
+
  def uint_field(length, default)
    # If length is 4, return :uint32, etc. and use default if length is 0
    ("uint" + (((length > 0) ? length : default) * 8).to_s).to_sym
  end # def uint_field

- private
  def netflow_field_for(type, length)
    if @fields.include?(type)
      field = @fields[type]
      if field.is_a?(Array)

-       if field[0].is_a?(Integer)
-         field[0] = uint_field(length, field[0])
-       end
+       field[0] = uint_field(length, field[0]) if field[0].is_a?(Integer)

        # Small bit of fixup for skip or string field types where the length
        # is dynamic
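Note: to make the _switched arithmetic above concrete, here is a worked example with made-up numbers for a v5 flow last switched 2.5 seconds before the export packet was sent:

  uptime   = 100_000        # device uptime at export, in milliseconds
  v        = 97_500         # last_switched, also in milliseconds of uptime
  unix_sec = 1_430_589_488  # export wall-clock seconds from the flowset header
  millis   = uptime - v                              # => 2_500
  seconds  = unix_sec - (millis / 1000)              # => 1_430_589_486
  micros   = (280_000_000 / 1000) - (millis % 1000)  # => 279_500, taking unix_nsec = 280_000_000
  LogStash::Timestamp.at(seconds, micros).to_iso8601 # absolute ISO8601 time stored on the event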
@@ -248,7 +247,8 @@ class LogStash::Codecs::Netflow < LogStash::Codecs::Base
          field += [{:length => length, :trim_padding => true}]
        end

-       @logger.debug("Definition complete", :field => field)
+       @logger.debug? and @logger.debug("Definition complete", :field => field)
+
        [field]
      else
        @logger.warn("Definition should be an array", :field => field)
logstash-codec-netflow.gemspec CHANGED
@@ -1,7 +1,7 @@
  Gem::Specification.new do |s|

    s.name = 'logstash-codec-netflow'
-   s.version = '0.1.5'
+   s.version = '0.1.6'
    s.licenses = ['Apache License (2.0)']
    s.summary = "The netflow codec is for decoding Netflow v5/v9 flows."
    s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
spec/codecs/netflow5.dat (new binary file)
spec/codecs/netflow9.dat (new binary file)

spec/codecs/netflow_spec.rb CHANGED
@@ -1 +1,208 @@
  require "logstash/devutils/rspec/spec_helper"
+ require "logstash/codecs/netflow"
+
+ describe LogStash::Codecs::Netflow do
+   subject do
+     LogStash::Codecs::Netflow.new.tap do |codec|
+       expect{codec.register}.not_to raise_error
+     end
+   end
+
+   let(:decode) do
+     [].tap do |events|
+       subject.decode(data){|event| events << event}
+     end
+   end
+
+   context "Netflow 5" do
+     let(:data) do
+       # this netflow raw data was produced with softflowd and captured with netcat
+       # softflowd -D -i eth0 -v 5 -t maxlife=1 -n 127.0.01:8765
+       # nc -k -4 -u -l 127.0.0.1 8765 > netflow5.dat
+       IO.read(File.join(File.dirname(__FILE__), "netflow5.dat"), :mode => "rb")
+     end
+
+     let(:json_events) do
+       events = []
+       events << <<-END
+         {
+           "@timestamp": "2015-05-02T18:38:08.280Z",
+           "netflow": {
+             "version": 5,
+             "flow_seq_num": 0,
+             "engine_type": 0,
+             "engine_id": 0,
+             "sampling_algorithm": 0,
+             "sampling_interval": 0,
+             "flow_records": 2,
+             "ipv4_src_addr": "10.0.2.2",
+             "ipv4_dst_addr": "10.0.2.15",
+             "ipv4_next_hop": "0.0.0.0",
+             "input_snmp": 0,
+             "output_snmp": 0,
+             "in_pkts": 5,
+             "in_bytes": 230,
+             "first_switched": "2015-06-21T11:40:52.280Z",
+             "last_switched": "2015-05-02T18:38:08.279Z",
+             "l4_src_port": 54435,
+             "l4_dst_port": 22,
+             "tcp_flags": 16,
+             "protocol": 6,
+             "src_tos": 0,
+             "src_as": 0,
+             "dst_as": 0,
+             "src_mask": 0,
+             "dst_mask": 0
+           },
+           "@version": "1"
+         }
+       END
+
+       events << <<-END
+         {
+           "@timestamp": "2015-05-02T18:38:08.280Z",
+           "netflow": {
+             "version": 5,
+             "flow_seq_num": 0,
+             "engine_type": 0,
+             "engine_id": 0,
+             "sampling_algorithm": 0,
+             "sampling_interval": 0,
+             "flow_records": 2,
+             "ipv4_src_addr": "10.0.2.15",
+             "ipv4_dst_addr": "10.0.2.2",
+             "ipv4_next_hop": "0.0.0.0",
+             "input_snmp": 0,
+             "output_snmp": 0,
+             "in_pkts": 4,
+             "in_bytes": 304,
+             "first_switched": "2015-06-21T11:40:52.280Z",
+             "last_switched": "2015-05-02T18:38:08.279Z",
+             "l4_src_port": 22,
+             "l4_dst_port": 54435,
+             "tcp_flags": 24,
+             "protocol": 6,
+             "src_tos": 0,
+             "src_as": 0,
+             "dst_as": 0,
+             "src_mask": 0,
+             "dst_mask": 0
+           },
+           "@version": "1"
+         }
+       END
+
+       events.map{|event| event.gsub(/\s+/, "")}
+     end
+
+     it "should decode raw data" do
+       expect(decode.size).to eq(2)
+
+       expect(decode[0]["[netflow][version]"]).to eq(5)
+       expect(decode[0]["[netflow][ipv4_src_addr]"]).to eq("10.0.2.2")
+       expect(decode[0]["[netflow][ipv4_dst_addr]"]).to eq("10.0.2.15")
+       expect(decode[0]["[netflow][l4_src_port]"]).to eq(54435)
+       expect(decode[0]["[netflow][l4_dst_port]"]).to eq(22)
+       expect(decode[0]["[netflow][tcp_flags]"]).to eq(16)
+
+       expect(decode[1]["[netflow][version]"]).to eq(5)
+       expect(decode[1]["[netflow][ipv4_src_addr]"]).to eq("10.0.2.15")
+       expect(decode[1]["[netflow][ipv4_dst_addr]"]).to eq("10.0.2.2")
+       expect(decode[1]["[netflow][l4_src_port]"]).to eq(22)
+       expect(decode[1]["[netflow][l4_dst_port]"]).to eq(54435)
+       expect(decode[1]["[netflow][tcp_flags]"]).to eq(24)
+     end
+
+     it "should serialize to json" do
+       expect(decode[0].to_json).to eq(json_events[0])
+       expect(decode[1].to_json).to eq(json_events[1])
+     end
+   end
+
+   context "Netflow 9" do
+     let(:data) do
+       # this netflow raw data was produced with softflowd and captured with netcat
+       # softflowd -D -i eth0 -v 9 -t maxlife=1 -n 127.0.01:8765
+       # nc -k -4 -u -l 127.0.0.1 8765 > netflow9.dat
+       IO.read(File.join(File.dirname(__FILE__), "netflow9.dat"), :mode => "rb")
+     end
+
+     let(:json_events) do
+       events = []
+       events << <<-END
+         {
+           "@timestamp": "2015-05-02T22:10:07.000Z",
+           "netflow": {
+             "version": 9,
+             "flow_seq_num": 0,
+             "flowset_id": 1024,
+             "ipv4_src_addr": "10.0.2.2",
+             "ipv4_dst_addr": "10.0.2.15",
+             "last_switched": "2015-05-02T22:10:07.999Z",
+             "first_switched": "2015-06-21T15:12:49.999Z",
+             "in_bytes": 230,
+             "in_pkts": 5,
+             "input_snmp": 0,
+             "output_snmp": 0,
+             "l4_src_port": 57369,
+             "l4_dst_port": 22,
+             "protocol": 6,
+             "tcp_flags": 16,
+             "ip_protocol_version": 4
+           },
+           "@version": "1"
+         }
+       END
+
+       events << <<-END
+         {
+           "@timestamp": "2015-05-02T22:10:07.000Z",
+           "netflow": {
+             "version": 9,
+             "flow_seq_num": 0,
+             "flowset_id": 1024,
+             "ipv4_src_addr": "10.0.2.15",
+             "ipv4_dst_addr": "10.0.2.2",
+             "last_switched": "2015-05-02T22:10:07.999Z",
+             "first_switched": "2015-06-21T15:12:49.999Z",
+             "in_bytes": 352,
+             "in_pkts": 4,
+             "input_snmp": 0,
+             "output_snmp": 0,
+             "l4_src_port": 22,
+             "l4_dst_port": 57369,
+             "protocol": 6,
+             "tcp_flags": 24,
+             "ip_protocol_version": 4
+           },
+           "@version": "1"
+         }
+       END
+
+       events.map{|event| event.gsub(/\s+/, "")}
+     end
+
+     it "should decode raw data" do
+       expect(decode.size).to eq(2)
+
+       expect(decode[0]["[netflow][version]"]).to eq(9)
+       expect(decode[0]["[netflow][ipv4_src_addr]"]).to eq("10.0.2.2")
+       expect(decode[0]["[netflow][ipv4_dst_addr]"]).to eq("10.0.2.15")
+       expect(decode[0]["[netflow][l4_src_port]"]).to eq(57369)
+       expect(decode[0]["[netflow][l4_dst_port]"]).to eq(22)
+       expect(decode[0]["[netflow][tcp_flags]"]).to eq(16)
+
+       expect(decode[1]["[netflow][version]"]).to eq(9)
+       expect(decode[1]["[netflow][ipv4_src_addr]"]).to eq("10.0.2.15")
+       expect(decode[1]["[netflow][ipv4_dst_addr]"]).to eq("10.0.2.2")
+       expect(decode[1]["[netflow][l4_src_port]"]).to eq(22)
+       expect(decode[1]["[netflow][l4_dst_port]"]).to eq(57369)
+       expect(decode[1]["[netflow][tcp_flags]"]).to eq(24)
+     end
+
+     it "should serialize to json" do
+       expect(decode[0].to_json).to eq(json_events[0])
+       expect(decode[1].to_json).to eq(json_events[1])
+     end
+   end
+ end
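Note: the decode helper above captures the codec's contract: events come back only through the block, so callers collect them themselves, and nested NetFlow keys are read back with Logstash field-reference syntax. Inside these specs, with the v5 fixture, that looks like:

  events = []
  subject.decode(data) { |event| events << event }
  events.size                       # => 2
  events[0]["[netflow][version]"]   # => 5
  events[0].to_json                 # compact JSON, compared against the heredocs above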
metadata CHANGED
@@ -1,17 +1,18 @@
  --- !ruby/object:Gem::Specification
  name: logstash-codec-netflow
  version: !ruby/object:Gem::Version
-   version: 0.1.5
+   version: 0.1.6
  platform: ruby
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2015-04-20 00:00:00.000000000 Z
+ date: 2015-05-05 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
-   requirement: !ruby/object:Gem::Requirement
+   name: logstash-core
+   version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - '>='
        - !ruby/object:Gem::Version
@@ -19,10 +20,7 @@ dependencies:
      - - <
        - !ruby/object:Gem::Version
          version: 2.0.0
-   name: logstash-core
-   prerelease: false
-   type: :runtime
-   version_requirements: !ruby/object:Gem::Requirement
+   requirement: !ruby/object:Gem::Requirement
      requirements:
      - - '>='
        - !ruby/object:Gem::Version
@@ -30,34 +28,36 @@ dependencies:
      - - <
        - !ruby/object:Gem::Version
          version: 2.0.0
+   prerelease: false
+   type: :runtime
  - !ruby/object:Gem::Dependency
+   name: bindata
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: 1.5.0
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - '>='
        - !ruby/object:Gem::Version
          version: 1.5.0
-   name: bindata
    prerelease: false
    type: :runtime
+ - !ruby/object:Gem::Dependency
+   name: logstash-devutils
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - '>='
        - !ruby/object:Gem::Version
-         version: 1.5.0
- - !ruby/object:Gem::Dependency
+         version: '0'
    requirement: !ruby/object:Gem::Requirement
      requirements:
      - - '>='
        - !ruby/object:Gem::Version
          version: '0'
-   name: logstash-devutils
    prerelease: false
    type: :development
-   version_requirements: !ruby/object:Gem::Requirement
-     requirements:
-     - - '>='
-       - !ruby/object:Gem::Version
-         version: '0'
  description: This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program
  email: info@elastic.co
  executables: []
@@ -74,6 +74,8 @@ files:
  - lib/logstash/codecs/netflow/netflow.yaml
  - lib/logstash/codecs/netflow/util.rb
  - logstash-codec-netflow.gemspec
+ - spec/codecs/netflow5.dat
+ - spec/codecs/netflow9.dat
  - spec/codecs/netflow_spec.rb
  homepage: http://www.elastic.co/guide/en/logstash/current/index.html
  licenses:
@@ -102,4 +104,6 @@ signing_key:
  specification_version: 4
  summary: The netflow codec is for decoding Netflow v5/v9 flows.
  test_files:
+ - spec/codecs/netflow5.dat
+ - spec/codecs/netflow9.dat
  - spec/codecs/netflow_spec.rb