DIY-pcap 0.2.5 → 0.2.6
- data/bin/pcap +2 -62
- data/bin/rpcap +2 -63
- data/lib/diy/command.rb +80 -0
- data/lib/diy/device_finder.rb +1 -1
- data/lib/diy/dig.rb +3 -1
- data/lib/diy/live.rb +5 -0
- data/lib/diy/parser/mu/fixnum_ext.rb +7 -0
- data/lib/diy/parser/mu/pcap/ethernet.rb +148 -0
- data/lib/diy/parser/mu/pcap/header.rb +75 -0
- data/lib/diy/parser/mu/pcap/io_pair.rb +67 -0
- data/lib/diy/parser/mu/pcap/io_wrapper.rb +76 -0
- data/lib/diy/parser/mu/pcap/ip.rb +61 -0
- data/lib/diy/parser/mu/pcap/ipv4.rb +257 -0
- data/lib/diy/parser/mu/pcap/ipv6.rb +148 -0
- data/lib/diy/parser/mu/pcap/packet.rb +104 -0
- data/lib/diy/parser/mu/pcap/pkthdr.rb +155 -0
- data/lib/diy/parser/mu/pcap/reader.rb +61 -0
- data/lib/diy/parser/mu/pcap/reader/http_family.rb +170 -0
- data/lib/diy/parser/mu/pcap/sctp.rb +367 -0
- data/lib/diy/parser/mu/pcap/sctp/chunk.rb +123 -0
- data/lib/diy/parser/mu/pcap/sctp/chunk/data.rb +134 -0
- data/lib/diy/parser/mu/pcap/sctp/chunk/init.rb +100 -0
- data/lib/diy/parser/mu/pcap/sctp/chunk/init_ack.rb +68 -0
- data/lib/diy/parser/mu/pcap/sctp/parameter.rb +110 -0
- data/lib/diy/parser/mu/pcap/sctp/parameter/ip_address.rb +48 -0
- data/lib/diy/parser/mu/pcap/stream_packetizer.rb +72 -0
- data/lib/diy/parser/mu/pcap/tcp.rb +505 -0
- data/lib/diy/parser/mu/pcap/udp.rb +69 -0
- data/lib/diy/parser/mu/scenario/pcap.rb +172 -0
- data/lib/diy/parser/mu/scenario/pcap/fields.rb +50 -0
- data/lib/diy/parser/mu/scenario/pcap/rtp.rb +71 -0
- data/lib/diy/parser/pcap.rb +113 -0
- data/lib/diy/parser/readme.md +72 -0
- data/lib/diy/utils.rb +9 -1
- data/lib/diy/version.rb +1 -1
- data/lib/diy/worker.rb +3 -2
- data/lib/diy/worker_keeper.rb +6 -0
- data/spec/helper/tcp.dat +0 -0
- data/spec/live_spec.rb +9 -0
- data/spec/mu_parser_spec.rb +12 -0
- data/spec/utils_spec.rb +1 -1
- metadata +34 -3
@@ -0,0 +1,48 @@
+# http://www.mudynamics.com
+# http://labs.mudynamics.com
+# http://www.pcapr.net
+
+module Mu
+class Pcap
+class SCTP
+class Parameter
+
+class IpAddress < Parameter
+    attr_accessor :value
+
+    def initialize
+        super
+
+        @value = nil
+    end
+
+    def self.from_bytes type, size, bytes
+        # Basic validation
+        if PARAM_IPV4 == type
+            Pcap.assert(size == 8, "Invalid IPv4 address: 4 != #{size}")
+        else
+            Pcap.assert(size == 20, "Invalid IPv6 address: 16 != #{size}")
+        end
+
+        # Create IP address parameter
+        ip_address = IpAddress.new
+        ip_address.type = type
+        ip_address.size = size
+        ip_address.value = IPAddr.new_ntoh(bytes[0, size - 4])
+
+        # Set raw payload
+        ip_address.payload_raw = bytes[0, size - 4]
+
+        # Return the result
+        return ip_address
+    end
+
+    def to_s
+        return "address(%s)" % [@value]
+    end
+end # class IpAddress
+
+end # class Parameter
+end # class SCTP
+end # class Pcap
+end # module Mu
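Note: `IpAddress.from_bytes` above receives the value portion of a TLV-encoded SCTP address parameter together with the full parameter size (8 bytes for IPv4, 20 for IPv6) and decodes the address with Ruby's `IPAddr.new_ntoh`. A minimal standalone sketch of that decoding step, using only the `ipaddr` standard library and a made-up sample value (not part of the gem):

```ruby
require 'ipaddr'

# Hypothetical IPv4 address parameter: size is the full TLV length
# (4-byte type/length header + 4 address bytes); bytes is the value part.
size  = 8
bytes = [192, 0, 2, 1].pack('C4')   # 192.0.2.1 in network byte order

address = IPAddr.new_ntoh(bytes[0, size - 4])
puts address   # => 192.0.2.1
```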
@@ -0,0 +1,72 @@
+# http://www.mudynamics.com
+# http://labs.mudynamics.com
+# http://www.pcapr.net
+
+require 'mu/pcap/io_pair'
+require 'mu/pcap/io_wrapper'
+
+module Mu
+class Pcap
+class StreamPacketizer
+    attr_reader :io_pair, :parser
+    def initialize parser
+        @parser = parser
+        @key_to_idx = Hash.new do |hash,key|
+            if hash.size >= 2
+                raise ArgumentError, "Only two endpoints are allowed in a transaction"
+            end
+            hash[key] = hash.size
+        end
+        @sent_messages = [[], []].freeze
+        @inner_pair = IOPair.stream_pair
+        @io_pair = @inner_pair.map{|io| IOWrapper.new io, parser}.freeze
+    end
+
+    def msg_count key
+        key = key.inspect
+        widx = @key_to_idx[key]
+        messages = @sent_messages[widx]
+        messages.size
+    end
+
+    def extra_bytes w_key
+        w_key = w_key.inspect
+
+        ridx = @key_to_idx[w_key] ^ 1
+        reader = @io_pair[ridx]
+        incomplete = reader.unread
+        incomplete.empty? ? nil : incomplete.dup
+    end
+
+    def push key, bytes
+        key = key.inspect
+        widx = @key_to_idx[key]
+        writer = @io_pair[widx]
+        raw_writer = @inner_pair[widx]
+        raw_writer.write bytes
+
+        messages = @sent_messages[widx]
+
+        ridx = widx ^ 1
+        reader = @io_pair[ridx]
+        while msg = reader.read
+            messages << msg
+            writer.record_write bytes
+        end
+
+        nil
+    end
+
+    def next_msg key
+        key = key.inspect
+        idx = @key_to_idx[key]
+        if m = @sent_messages[idx].shift
+            return m.dup
+        else
+            nil
+        end
+    end
+end
+end
+end
+
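`StreamPacketizer` pairs each of the two endpoints of a TCP transaction with a slot in `@sent_messages` and `@io_pair`; the `@key_to_idx` default block hands slot 0 to the first endpoint key it sees, slot 1 to the second, and raises on a third. A standalone sketch of just that idiom (the keys here are illustrative, not from the gem):

```ruby
# Map each endpoint key to a stable slot index, allowing at most two endpoints.
key_to_idx = Hash.new do |hash, key|
  if hash.size >= 2
    raise ArgumentError, 'Only two endpoints are allowed in a transaction'
  end
  hash[key] = hash.size
end

client = [:tcp, 50000, 80].inspect   # keys are #inspect strings, as in push/msg_count
server = [:tcp, 80, 50000].inspect

p key_to_idx[client]   # => 0  (first endpoint seen gets slot 0)
p key_to_idx[server]   # => 1  (second endpoint gets slot 1)
p key_to_idx[client]   # => 0  (repeat lookups are stable)
# key_to_idx['third endpoint']   # would raise ArgumentError
```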
@@ -0,0 +1,505 @@
+# http://www.mudynamics.com
+# http://labs.mudynamics.com
+# http://www.pcapr.net
+
+require 'mu/pcap/reader'
+require 'mu/pcap/stream_packetizer'
+
+module Mu
+class Pcap
+
+class TCP < Packet
+    attr_accessor :src_port, :dst_port, :seq, :ack, :flags, :window, :urgent, :mss, :proto_family
+
+    TH_FIN = 0x01
+    TH_SYN = 0x02
+    TH_RST = 0x04
+    TH_PUSH = 0x08
+    TH_ACK = 0x10
+    TH_URG = 0x20
+    TH_ECE = 0x40
+    TH_CWR = 0x80
+
+    MSS = 2
+
+    def initialize
+        super
+        @src_port = 0
+        @dst_port = 0
+        @seq = 0
+        @ack = 0
+        @flags = 0
+        @window = 0
+        @urgent = 0
+        @mss = 0
+        @proto_family = nil
+    end
+
+    def flow_id
+        return [:tcp, @src_port, @dst_port]
+    end
+
+    def self.from_bytes bytes
+        Pcap.assert bytes.length >= 20, 'Truncated TCP header: ' +
+            "expected 20 bytes, got #{bytes.length} bytes"
+        sport, dport, seq, ack, offset, flags, win, sum, urp =
+            bytes.unpack('nnNNCCnnn')
+        offset = (offset >> 4) * 4
+        Pcap.assert offset >= 20, 'Truncated TCP header: ' +
+            "expected at least 20 bytes, got #{offset} bytes"
+        Pcap.assert bytes.length >= offset, 'Truncated TCP header: ' +
+            "expected at least #{offset} bytes, got #{bytes.length} bytes"
+
+        if TH_SYN == flags
+            ss = TCP.get_option bytes[20, offset-20], MSS
+        else
+            ss = 0
+        end
+
+        IPv4.check_options bytes[20, offset-20], 'TCP'
+
+        tcp = TCP.new
+        tcp.src_port = sport
+        tcp.dst_port = dport
+        tcp.seq = seq
+        tcp.ack = ack
+        tcp.flags = flags
+        tcp.window = win
+        tcp.urgent = urp
+        tcp.mss = ss
+        tcp.payload = tcp.payload_raw = bytes[offset..-1]
+        return tcp
+    end
+
+    def self.get_option options, option_type
+        while not options.empty?
+            type = options.slice!(0, 1)[0].ord
+            if type == 0 or type == 1
+                next
+            end
+            length = options.slice!(0, 1)[0].ord
+            if 2 < length
+                case length
+                when 3
+                    format = "C"
+                when 4
+                    format = "n"
+                when 6
+                    format = "N"
+                when 10
+                    format = "Q"
+                else
+                    Pcap.warning "Bad TCP option length: #{length}"
+                end
+                option = options.slice!(0, length - 2).unpack(format)[0]
+            end
+            if option_type == type
+                return option
+            end
+        end
+        return 0
+    end
+
+    def write io, ip
+        if @payload.length + 40 > 65535
+            raise NotImplementedError, "TCP segment too large"
+        end
+        pseudo_header = ip.pseudo_header 20 + @payload.length
+        header = [@src_port, @dst_port, @seq, @ack, 5 << 4, @flags, @window,
+            0, @urgent].pack('nnNNCCnnn')
+        checksum = IP.checksum pseudo_header + header + @payload
+        header = [@src_port, @dst_port, @seq, @ack, 5 << 4, @flags, @window,
+            checksum, @urgent].pack('nnNNCCnnn')
+        io.write header
+        io.write @payload
+    end
+
+    class ReorderError < StandardError ; end
+
+    ReorderState = ::Struct.new(:next_seq, :queued)
+
+    # Reorder packets by TCP sequence number. TCP packets are assumed to
+    # be over IP over Ethernet.
+    def self.reorder packets
+        packets = packets.dup
+        reordered_packets = []
+        flow_to_state = {}
+        while not packets.empty?
+            packet = packets.shift
+            # Don't reorder non-TCP packets
+            if not tcp? packet
+                reordered_packets << packet
+                next
+            end
+            # Sanity check: must not be a fragment
+            if packet.payload.v4? and packet.payload.fragment?
+                raise ReorderError, "TCP stream contains IP fragments"
+            end
+            tcp = packet.payload.payload
+            # Must not contain urgent data
+            if tcp.flags & TH_URG != 0
+                raise ReorderError, "TCP stream contains urgent data: "+
+                    pretty_flow_name(packet)
+            end
+            # Get/create state
+            if flow_to_state.member? packet.flow_id
+                state = flow_to_state[packet.flow_id]
+            else
+                state = ReorderState.new nil, []
+                flow_to_state[packet.flow_id] = state
+            end
+            if not state.next_seq
+                # First packet in TCP stream
+                reordered_packets << packet
+                state.next_seq = tcp.seq + tcp.payload.length
+                if tcp.flags & TCP::TH_SYN != 0
+                    state.next_seq += 1
+                end
+                if tcp.flags & TCP::TH_FIN != 0
+                    state.next_seq += 1
+                end
+                state.next_seq %= 2**32
+            elsif seq_eq(tcp.seq, state.next_seq)
+                # Next expected sequence number in TCP stream
+
+                # SYN must not appear in middle of stream
+                if tcp.flags & TCP::TH_SYN != 0
+                    raise ReorderError, "SYN in middle of TCP stream " +
+                        pretty_flow_name(packet)
+                end
+
+                reordered_packets << packet
+                state.next_seq += tcp.payload.length
+                if tcp.flags & TCP::TH_FIN != 0
+                    state.next_seq += 1
+                end
+                state.next_seq %= 2**32
+
+                # Reinject any packets in the queue into the packet stream
+                if not state.queued.empty?
+                    packets.unshift(*state.queued)
+                    state.queued.clear
+                end
+            elsif seq_lt(tcp.seq, state.next_seq)
+                # Old sequence number
+                if seq_lte(tcp.seq + tcp.payload.length, state.next_seq)
+                    # No overlap: retransmitted packet, ignore
+                else
+                    # Overlap: reassembler must slice in overlapping data
+                    reordered_packets << packet
+                end
+            else
+                # Future sequence number - queue
+                state.queued << packet
+            end
+        end
+
+        flow_to_state.each do |flow_id, state|
+            if not state.queued.empty?
+                raise ReorderError, "Data missing from TCP stream "+
+                    pretty_flow_name(state.queued[0]) + ': ' +
+                    "expecting sequence number #{state.next_seq}"
+            end
+        end
+
+        return reordered_packets
+    end
+
+    class MergeError < StandardError ; end
+
+    # Merge adjacent TCP packets. Non-data TCP packets are also removed.
+    # reorder() should be run first. This can create packets that are larger
+    # than the maximum possible IPv4 packet - use split() to make them smaller.
+    def self.merge packets
+        merged_packets = []
+        merged_packet = nil
+        next_seq = nil
+        packets.each do |packet|
+            if not tcp? packet
+                # Skip non-TCP packets.
+                if merged_packet
+                    merged_packets << merged_packet
+                    merged_packet = nil
+                end
+                merged_packets << packet
+            elsif packet.payload.v4? and packet.payload.fragment?
+                # Sanity check: must not be a fragment
+                raise MergeError, 'TCP stream contains IP fragments'
+            else
+                tcp = packet.payload.payload
+                if tcp.flags & TCP::TH_SYN == 0 and tcp.payload == ''
+                    # Ignore non-data packets. SYNs are kept so the TCP
+                    # transport is created at the correct spot.
+                elsif not merged_packet or
+                       merged_packet.flow_id != packet.flow_id
+                    # New TCP stream
+                    if merged_packet
+                        merged_packets << merged_packet
+                    end
+                    merged_packet = packet.deepdup
+                    next_seq = tcp.seq + tcp.payload.length
+                elsif seq_eq tcp.seq, next_seq
+                    # Next expected sequence number
+                    merged_packet.payload.payload.payload << tcp.payload
+                    next_seq += tcp.payload.length
+                elsif seq_lte(tcp.seq + tcp.payload.length, next_seq)
+                    # Old data: ignore
+                elsif seq_lt tcp.seq, next_seq
+                    # Overlapping segment: merge newest part
+                    length = seq_sub(tcp.seq + tcp.payload.length, next_seq)
+                    bytes = tcp.payload[-length..-1]
+                    merged_packet.payload.payload.payload << bytes
+                    next_seq += length
+                else
+                    # Error (sanify check, reorder_tcp will raise an error)
+                    raise MergeError, 'TCP stream is missing segments'
+                end
+                if next_seq
+                    if tcp.flags & TCP::TH_SYN != 0
+                        next_seq += 1
+                    end
+                    if tcp.flags & TCP::TH_FIN != 0
+                        next_seq += 1
+                    end
+                    next_seq %= 2**32
+                end
+            end
+        end
+        if merged_packet
+            merged_packets << merged_packet
+        end
+
+        merged_packets = create_message_boundaries(merged_packets)
+
+        return merged_packets
+    end
+
+    def self.create_message_boundaries packets
+        # Get complete bytes for each tcp flow before trying to
+        # identify the protocol.
+        flow_to_bytes = {}
+        packets.each do |packet|
+            if tcp? packet
+                tcp = packet.payload.payload
+                flow = packet.flow_id
+                bytes = flow_to_bytes[flow] ||= ""
+                bytes << tcp.payload.to_s
+            end
+        end
+
+        # If any proto plugin can parse a message off of the stream we will
+        # use that plugin to detect message boundaries and guide message
+        # reassembly.
+        flow_to_packetizer = {}
+        flow_to_bytes.each_pair do |flow, bytes|
+            [ Reader::HttpFamily ].each do |klass|
+                reader = klass.new
+                reader.pcap2scenario = true
+                if reader.read_message bytes
+                    tx_key = flow.flatten.sort_by {|o| o.to_s}
+
+                    tx = flow_to_packetizer[tx_key] ||= StreamPacketizer.new(klass.new)
+                    break
+                end
+            end
+        end
+
+        # Merge/split packets along message boundaries. This is done as an
+        # atomic transaction per tcp connection. The loop below adds merged
+        # packets alongside the original unmerged packets. If the stream
+        # is completely merged (no fragments left at end) we remove the
+        # original packets otherwise we rollback by removing the newly
+        # created packets.
+        changes = Hash.new do |hash,key|
+            # tuple of original/replacement packets per flow.
+            hash[key] = [[], []]
+        end
+        rollback_list = []
+
+        merged = []
+        partial_messages = Hash.new {|hash,key| hash[key] = []}
+        packets.each do |packet|
+            merged << packet
+
+            next if not tcp? packet
+            tcp = packet.payload.payload
+
+            flow = packet.flow_id
+
+            # Check if we have message boundaries for this flow
+            tx_key = flow.flatten.sort_by {|o| o.to_s}
+            if not tx = flow_to_packetizer[tx_key]
+                next
+            end
+
+            # Keep track of new vs orig packets so we can delete one set at the end.
+            orig_packets, new_packets = changes[flow]
+            orig_packets << packet
+
+            if tcp.payload.empty?
+                p = packet.deepdup
+                new_packets << p
+                p.payload.payload.proto_family = tx.parser.family
+                next
+            end
+
+            # Does the current packet result in any completed messages?
+            tx.push(flow, tcp.payload)
+            fragments = partial_messages[flow]
+            if tx.msg_count(flow) == 0
+                # No, record packet as a fragment and move to next packet.
+                fragments << packet
+                next
+            end
+
+            # Yes, packet did result in completed messages. Create a new
+            # tcp packet for each higher level protocol message.
+            first_inc_packet = (fragments.empty? ? packet : fragments[0])
+            next_seq = first_inc_packet.payload.payload.seq
+            while tcp_payload = tx.next_msg(flow)
+                if tcp_payload.size > MAX_SEGMENT_PAYLOAD
+                    # Abort merging for this flow because this packet
+                    # will be split and result in a scenario where
+                    # we send one logical message but try and receive
+                    # two.
+                    rollback_list << tx_key
+                    $stderr.puts "Warning: Message too big, cannot enforce " \
+                        "message boundaries."
+                end
+                next_packet = packet.deepdup
+                new_packets << next_packet
+                next_tcp = next_packet.payload.payload
+                next_tcp.seq = next_seq
+                next_tcp.payload = tcp_payload
+                next_tcp.proto_family = tx.parser.family
+                next_seq += tcp_payload.size
+                merged << next_packet
+            end
+            fragments.clear
+
+            # If there are unconsumed bytes then add a fragment to the
+            # incomplete list.
+            if extra_bytes = tx.extra_bytes(flow)
+                frag = packet.deepdup
+                new_packets << frag
+                fragments << frag
+                tcp = frag.payload.payload
+                tcp.payload = extra_bytes
+                tcp.seq = next_seq
+                tcp.proto_family = tx.parser.family
+            end
+        end
+
+        # Figure out which connections have incompletely merged flows.
+        # Rollback for those and commit the rest.
+        partial_messages.each_pair do |flow, list|
+            if not list.empty?
+                tx_key = flow.flatten.sort_by {|o| o.to_s}
+                $stderr.puts "Warning: Left over fragments, cannot force message boundaries."
+                rollback_list << tx_key
+            end
+        end
+        changes.each_pair do |flow, orig_new|
+            orig, new = orig_new
+            tx_key = flow.flatten.sort_by {|o| o.to_s}
+            if rollback_list.include?(tx_key)
+                new.each {|p| p.payload = :remove}
+            else
+                orig.each {|p| p.payload = :remove}
+            end
+        end
+        merged.reject! {|p| p.payload == :remove}
+
+        merged
+    end
+
+
+    # Split-up TCP packets that are too large to serialize. (I.e., total
+    # length including all headers greater than 65535 - 20 - 20 - 14.)
+    MAX_SEGMENT_PAYLOAD = 65535 - 20 - 20 - 14
+    def self.split packets
+        split_packets = []
+        packets.each do |packet|
+            if not tcp? packet
+                # Skip non-TCP packets.
+                split_packets << packet
+                next
+            elsif packet.payload.v4? and packet.payload.fragment?
+                # Sanity check: must not be a fragment
+                raise MergeError, 'TCP stream contains IP fragments'
+            elsif packet.payload.payload.payload.length <= MAX_SEGMENT_PAYLOAD
+                split_packets << packet
+            else
+                tcp = packet.payload.payload
+                payload = tcp.payload
+                tcp.payload = payload.slice! 0, MAX_SEGMENT_PAYLOAD
+                next_seq = tcp.seq + tcp.payload.length
+                split_packets << packet
+                while payload != ''
+                    next_packet = packet.deepdup
+                    next_tcp = next_packet.payload.payload
+                    next_tcp.seq = next_seq
+                    next_tcp.payload = payload.slice! 0, MAX_SEGMENT_PAYLOAD
+                    next_seq += next_tcp.payload.length
+                    split_packets << next_packet
+                end
+            end
+        end
+        return split_packets
+    end
+
+    def self.tcp? packet
+        return packet.is_a?(Ethernet) &&
+            packet.payload.is_a?(IP) &&
+            packet.payload.payload.is_a?(TCP)
+    end
+
+    # Subtract two sequence numbers module 2**32.
+    def self.seq_sub a, b
+        if a - b > 2**31
+            return -((b - a) % 2**32)
+        elsif a - b < -2**31
+            return (a - b) % 2**32
+        else
+            return a - b
+        end
+    end
+
+    # Compare TCP sequence numbers modulo 2**32.
+    def self.seq_eq a, b
+        return seq_sub(a, b) == 0
+    end
+
+    def self.seq_lt a, b
+        return seq_sub(a, b) < 0
+    end
+
+    def self.seq_lte a, b
+        return seq_sub(a, b) <= 0
+    end
+
+    # Generate a pretty name for a TCP flow
+    def self.pretty_flow_name packet
+        ip = packet.payload
+        return "#{ip.src}:#{ip.payload.src_port} <-> " +
+            "#{ip.dst}:#{ip.payload.dst_port}"
+    end
+
+    def to_s
+        return "tcp(%d, %d, %s)" % [@src_port, @dst_port, @payload.inspect]
+    end
+
+    def == other
+        return super &&
+            self.src_port == other.src_port &&
+            self.dst_port == other.dst_port &&
+            self.seq == other.seq &&
+            self.ack == other.ack &&
+            self.flags == other.flags &&
+            self.window == other.window &&
+            self.urgent == other.urgent
+    end
+end
+
+end
+end
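The reorder/merge bookkeeping above relies on the modulo-2**32 sequence-number helpers at the bottom of the file (`seq_sub`, `seq_eq`, `seq_lt`, `seq_lte`), which keep comparisons correct when a TCP sequence number wraps past 2**32 - 1. The same arithmetic, lifted out of the class as a runnable sketch to show the wraparound behaviour:

```ruby
# Signed difference of two 32-bit TCP sequence numbers, mirroring TCP.seq_sub above.
def seq_sub(a, b)
  if a - b > 2**31
    -((b - a) % 2**32)
  elsif a - b < -2**31
    (a - b) % 2**32
  else
    a - b
  end
end

def seq_lt(a, b)
  seq_sub(a, b) < 0
end

p seq_sub(10, 5)           # => 5    (ordinary case)
p seq_sub(5, 2**32 - 10)   # => 15   (b is just before the wrap, a just after it)
p seq_lt(2**32 - 10, 5)    # => true (pre-wrap number sorts before the post-wrap one)
```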