traffic-taffy 0.2.tar.gz → 0.3.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/PKG-INFO +1 -1
  2. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/setup.py +8 -1
  3. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/cache_info.py +6 -0
  4. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/compare.py +106 -35
  5. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/dissectmany.py +1 -1
  6. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/dissector.py +60 -5
  7. traffic-taffy-0.3.5/traffic_taffy/explore.py +222 -0
  8. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/graph.py +0 -16
  9. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/pcap_splitter.py +36 -17
  10. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/PKG-INFO +1 -1
  11. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/SOURCES.txt +2 -0
  12. traffic-taffy-0.3.5/traffic_taffy.egg-info/requires.txt +5 -0
  13. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/README.md +0 -0
  14. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/pyproject.toml +0 -0
  15. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/setup.cfg +0 -0
  16. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/__init__.py +0 -0
  17. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/dissectorresults.py +0 -0
  18. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/dependency_links.txt +0 -0
  19. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/entry_points.txt +0 -0
  20. {traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/top_level.txt +0 -0
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: traffic-taffy
-Version: 0.2
+Version: 0.3.5
 Summary: A tool for doing differential analysis of pcap files
 Home-page: https://github.com/hardaker/traffic-taffy
 Author: Wes Hardaker
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

 setuptools.setup(
     name="traffic-taffy",
-    version="0.2",
+    version="0.3.5",
     author="Wes Hardaker",
     author_email="opensource@hardakers.net",
     description="A tool for doing differential analysis of pcap files",
@@ -28,4 +28,11 @@ setuptools.setup(
     python_requires=">=3.7",
     test_suite="nose.collector",
     tests_require=["nose"],
+    install_requires=[
+        "pandas",
+        "rich",
+        "seaborn",
+        "scapy",
+        "dpkt",
+    ],
 )
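
The 0.3.5 sdist now declares its runtime dependencies (pandas, rich, seaborn, scapy, dpkt) instead of assuming they are already present. As a rough check of what an installed copy declares (a sketch assuming Python 3.8+ for importlib.metadata; the exact strings depend on the installed release):

    from importlib.metadata import requires

    # expected to list pandas, rich, seaborn, scapy and dpkt for 0.3.5
    print(requires("traffic-taffy"))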
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/cache_info.py
@@ -47,6 +47,12 @@ def main():
     for key in contents["parameters"]:
         print(f" {key:<16} {contents['parameters'][key]}")

+    print("data info:")
+    timestamps = list(contents["dissection"].keys())
+    print(f" timestamps: {len(timestamps)}")
+    print(f" first: {timestamps[1]}")  # skips 0 = global
+    print(f" last: {timestamps[-1]}")
+

 if __name__ == "__main__":
     main()
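
The new block assumes the cached dissection is a dict keyed by timestamp, with key 0 reserved as the global whole-capture bucket, which is why the "first" line reads timestamps[1]. With a hypothetical cache of hourly bins, the extra output would look roughly like:

    data info:
     timestamps: 25
     first: 1700000400
     last: 1700083200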
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/compare.py
@@ -1,7 +1,7 @@
 """Takes a set of pcap files to compare and creates a report"""

 import logging
-from logging import info
+from logging import info, debug
 from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
 from typing import List
 from rich.console import Console
@@ -33,6 +33,7 @@ class PcapCompare:
         only_negative: bool = False,
         cache_results: bool = False,
         dissection_level: PCAPDissectorType = PCAPDissectorType.COUNT_ONLY,
+        between_times: List[int] | None = None,
     ) -> None:
         self.pcaps = pcaps
         self.deep = deep
@@ -45,40 +46,45 @@ class PcapCompare:
         self.only_negative = only_negative
         self.cache_results = cache_results
         self.dissection_level = dissection_level
+        self.between_times = between_times

-    def compare_results(self, report1: dict, report2: dict) -> dict:
+    @property
+    def reports(self):
+        return self._reports
+
+    @reports.setter
+    def reports(self, newvalue):
+        self._reports = newvalue
+
+    def compare_dissections(self, dissection1: dict, dissection2: dict) -> dict:
         "compares the results from two reports"

         # TODO: handle recursive depths, where items are subtrees rather than Counters

         report = {}

-        # TODO: we're only (currently) doing full pcap compares
-        report1 = report1[0]
-        report2 = report2[0]
-
-        # TODO: missing key in report2 (major items added)
-        for key in report1:
+        # TODO: missing key in dissection2 (major items added)
+        for key in dissection1:
             # TODO: deal with missing keys from one set
-            report1_total = report1[key].total()
-            report2_total = report2[key].total()
+            dissection1_total = dissection1[key].total()
+            dissection2_total = dissection2[key].total()
             report[key] = {}

-            for subkey in report1[key].keys():
+            for subkey in dissection1[key].keys():
                 delta = 0.0
                 total = 0
-                if subkey in report1[key] and subkey in report2[key]:
+                if subkey in dissection1[key] and subkey in dissection2[key]:
                     delta = (
-                        report2[key][subkey] / report2_total
-                        - report1[key][subkey] / report1_total
+                        dissection2[key][subkey] / dissection2_total
+                        - dissection1[key][subkey] / dissection1_total
                     )
-                    total = report2[key][subkey] + report1[key][subkey]
-                    ref_count = report1[key][subkey]
-                    comp_count = report2[key][subkey]
+                    total = dissection2[key][subkey] + dissection1[key][subkey]
+                    ref_count = dissection1[key][subkey]
+                    comp_count = dissection2[key][subkey]
                 else:
                     delta = -1.0
-                    total = report1[key][subkey]
-                    ref_count = report1[key][subkey]
+                    total = dissection1[key][subkey]
+                    ref_count = dissection1[key][subkey]
                     comp_count = 0

                 report[key][subkey] = {
@@ -88,12 +94,12 @@ class PcapCompare:
                     "comp_count": comp_count,
                 }

-            for subkey in report2[key].keys():
+            for subkey in dissection2[key].keys():
                 if subkey not in report[key]:
                     delta = 1.0
-                    total = report2[key][subkey]
+                    total = dissection2[key][subkey]
                     ref_count = 0
-                    comp_count = report2[key][subkey]
+                    comp_count = dissection2[key][subkey]

                     report[key][subkey] = {
                         "delta": delta,
@@ -136,6 +142,7 @@ class PcapCompare:

     def print_report(self, report: dict) -> None:
         "prints a report to the console"
+
         console = Console()
         for key in sorted(report):
             reported: bool = False
@@ -180,16 +187,11 @@ class PcapCompare:
     def print(self) -> None:
         "outputs the results"
         for n, report in enumerate(self.reports):
-            print(f"************ report #{n}")
-            self.print_report(report)
-
-    def compare(self) -> None:
-        "Compares each pcap against the original source"
-
-        reports = []
-
-        # TODO: use parallel processes to load multiple at a time
+            title = report.get("title", f"report #{n}")
+            print(f"************ {title}")
+            self.print_report(report["report"])

+    def load_pcaps(self) -> None:
         # load the first as a reference pcap
         info(f"reading pcap files using level={self.dissection_level}")
         pdm = PCAPDissectMany(
@@ -201,11 +203,71 @@ class PcapCompare:
             dissector_level=self.dissection_level,
         )
         results = pdm.load_all()
+        return results
+
+    def compare(self) -> None:
+        "Compares each pcap against the original source"
+
+        results = self.load_pcaps()
+        self.compare_all(results)

-        reference = next(results)
-        for other in results:
-            # compare the two
-            reports.append(self.compare_results(reference["data"], other["data"]))
+    def compare_all(self, results):
+        reports = []
+        if len(self.pcaps) > 1:
+            # multiple file comparison
+            reference = next(results)
+            for other in results:
+                # compare the two global summaries
+                reports.append(
+                    {
+                        "report": self.compare_dissections(
+                            reference["data"][0], other["data"][0]
+                        ),
+                        "title": f"{reference['file']} vs {other['file']}",
+                    }
+                )
+
+        else:
+            # deal with timestamps within a single file
+            results = list(results)
+            reference = results[0]
+            timestamps = list(reference["data"].keys())
+            debug(
+                f"found {len(timestamps)} timestamps from {timestamps[2]} to {timestamps[-1]}"
+            )
+            for timestamp in range(
+                2, len(timestamps)
+            ):  # second real non-zero timestamp to last
+                time_left = timestamps[timestamp - 1]
+                time_right = timestamps[timestamp]
+
+                # see if we were asked to only use particular time ranges
+                if self.between_times and (
+                    time_left < self.between_times[0]
+                    or time_right > self.between_times[1]
+                ):
+                    # debug(f"skipping timestamps {time_left} and {time_right}")
+                    continue
+
+                debug(f"comparing timestamps {time_left} and {time_right}")
+
+                report = self.compare_dissections(
+                    reference["data"][time_left], reference["data"][time_right]
+                )
+
+                title = f"time {time_left} vs time {time_right}"
+                print(f"************ {title}")
+                self.print_report(report)
+
+                continue
+
+                # takes way too much memory to do it "right"
+                # reports.append(
+                #     {
+                #         "report": report,
+                #         "title": f"time {time_left} vs time {time_right}",
+                #     }
+                # )

         self.reports = reports

@@ -236,6 +298,14 @@ def parse_args():
         "-N", "--only-negative", action="store_true", help="Only show negative entries"
     )

+    limiting_parser.add_argument(
+        "-T",
+        "--between-times",
+        nargs=2,
+        type=int,
+        help="For single files, only display results between these timestamps",
+    )
+
     dissector_add_parseargs(parser)

     debugging_group = parser.add_argument_group("Debugging options")
@@ -270,6 +340,7 @@ def main():
         only_negative=args.only_negative,
         cache_results=args.cache_pcap_results,
         dissection_level=args.dissection_level,
+        between_times=args.between_times,
     )

     # compare the pcaps
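
Taken together, compare() now delegates loading to load_pcaps() and branches in compare_all(): with two or more pcaps each file's global summary (key 0) is compared against the first file, while a single pcap is compared bin against adjacent bin, optionally clamped by -T/--between-times. A minimal sketch of driving this from Python, assuming a hypothetical single capture and defaults for the remaining constructor arguments:

    from traffic_taffy.compare import PcapCompare

    # a single input file selects the adjacent-timestamp comparison path
    pc = PcapCompare(
        ["capture.pcap"],                        # hypothetical file name
        between_times=[1700000000, 1700007200],  # hypothetical epoch bounds (-T)
    )
    pc.compare()  # loads the pcap and, for single files, prints each time-slice report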
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/dissectmany.py
@@ -32,7 +32,7 @@ class PCAPDissectMany:
         pd.load()
         return pd.data

-    def load_pcap(self, pcap_file, split_size=100000, maximum_count=0):
+    def load_pcap(self, pcap_file, split_size=None, maximum_count=0):
         pd = PCAPDissector(
             pcap_file,
             *self.args,
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/dissector.py
@@ -110,6 +110,50 @@ class PCAPDissector:
                     data[timestamp][key]
                 )

+    @staticmethod
+    def open_maybe_compressed(filename):
+        """Opens a pcap file, potentially decompressing it."""
+
+        magic_dict = {
+            bytes([0x1F, 0x8B, 0x08]): "gz",
+            bytes([0x42, 0x5A, 0x68]): "bz2",
+            bytes([0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00]): "xz",
+        }
+        max_len = max(len(x) for x in magic_dict)
+
+        base_handle = open(filename, "rb")
+        file_start = base_handle.read(max_len)
+        base_handle.close()
+
+        for magic, filetype in magic_dict.items():
+            if file_start.startswith(magic):
+                try:
+                    if filetype == "gz":
+                        import gzip
+
+                        return_handle = gzip.open(filename, "rb")
+                        return return_handle
+                    elif filetype == "bz2":
+                        import bz2
+
+                        return_handle = bz2.open(filename, "rb")
+                        setattr(return_handle, "name", filename)
+                        return return_handle
+                    elif filetype == "xz":
+                        import lzma
+
+                        return_handle = lzma.open(filename, "rb")
+                        return return_handle
+                    else:
+                        raise ValueError("unknown compression error")
+                except Exception:
+                    # likely we failed to find a compression module
+                    debug(f"failed to use {filetype} module to decode the input stream")
+                    raise ValueError("cannot decode file")
+
+        # return a raw file and hope it's not compressed'
+        return open(filename, "rb")
+
     def incr(self, key: str, value: Any, count: int = 1):
         # always save a total count at the zero bin
         # note: there should be no recorded tcpdump files from 1970 Jan 01 :-)
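
The new open_maybe_compressed() helper sniffs the leading magic bytes (gzip 1f 8b 08, bzip2 42 5a 68, xz fd 37 7a 58 5a 00) and returns a decompressing handle when one matches, falling back to a plain binary open otherwise. A small usage sketch with a hypothetical compressed capture:

    from traffic_taffy.dissector import PCAPDissector

    # behaves the same for capture.pcap, capture.pcap.gz, .bz2 or .xz
    handle = PCAPDissector.open_maybe_compressed("capture.pcap.gz")  # hypothetical path
    print(handle.read(4))  # start of the decompressed stream, e.g. b"\xd4\xc3\xb2\xa1" for classic pcap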
@@ -158,7 +202,7 @@ class PCAPDissector:
             if parameter == "dissector_level":
                 debug("------------ here 1")
             if parameter == "dissector_level" and specified <= cached:
-                debug("here with dissector_level {specified} and {cached}")
+                debug(f"here with dissector_level {specified} and {cached}")
                 # loading a more detailed cache is ok
                 continue

@@ -206,7 +250,7 @@ class PCAPDissector:
         self.timestamp = self.timestamp - self.timestamp % self.bin_size
         self.incr(self.TOTAL_COUNT, self.TOTAL_SUBKEY)

-        if self.dissector_level == PCAPDissectorType.THROUGH_IP.value:
+        if self.dissector_level >= PCAPDissectorType.THROUGH_IP.value:
             eth = dpkt.ethernet.Ethernet(packet)
             # these names are designed to match scapy names
             self.incr("Ethernet.dst", eth.dst)
@@ -262,7 +306,7 @@ class PCAPDissector:
     def load_via_dpkt(self) -> dict:
         self.data = {0: defaultdict(Counter)}
         if isinstance(self.pcap_file, str):
-            pcap = dpkt.pcap.Reader(open(self.pcap_file, "rb"))
+            pcap = dpkt.pcap.Reader(PCAPDissector.open_maybe_compressed(self.pcap_file))
         else:
             # it's an open handle already
             pcap = dpkt.pcap.Reader(self.pcap_file)
@@ -335,8 +379,11 @@ class PCAPDissector:

     def load_via_scapy(self) -> dict:
         "Loads a pcap file into a nested dictionary of statistical counts"
+        load_this = self.pcap_file
+        if isinstance(self.pcap_file, str):
+            load_this = PCAPDissector.open_maybe_compressed(self.pcap_file)
         sniff(
-            offline=self.pcap_file,
+            offline=load_this,
             prn=self.scapy_callback,
             store=0,
             count=self.maximum_count,
@@ -452,6 +499,14 @@ def dissector_add_parseargs(parser, add_subgroup: bool = True):
         help="Maximum number of packets to analyze",
     )

+    parser.add_argument(
+        "-b",
+        "--bin-size",
+        type=int,
+        default=3600,
+        help="Bin results into this many seconds",
+    )
+
     parser.add_argument(
         "-C",
         "--cache-pcap-results",
@@ -550,7 +605,7 @@ def main():

     pd = PCAPDissector(
         args.input_file,
-        bin_size=0,
+        bin_size=args.bin_size,
         dissector_level=args.dissection_level,
         maximum_count=args.packet_count,
         cache_results=args.cache_pcap_results,
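
The dissector CLI also gains -b/--bin-size (default 3600) and passes it through instead of the old hard-coded bin_size=0, so packets are grouped into fixed-width buckets via timestamp - timestamp % bin_size. The binning arithmetic, worked through with a made-up packet time:

    bin_size = 3600            # the new --bin-size default, in seconds
    timestamp = 1700003725     # hypothetical packet timestamp
    bucket = timestamp - timestamp % bin_size
    print(bucket)              # 1700002800, the start of that hour's bucket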
traffic-taffy-0.3.5/traffic_taffy/explore.py (new file)
@@ -0,0 +1,222 @@
+import sys
+import logging
+from logging import info, debug
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+from PyQt6.QtWidgets import QPushButton, QVBoxLayout, QLineEdit, QTextEdit, QWidget, QApplication, QLabel
+from traffic_taffy.dissector import (
+    PCAPDissectorType,
+    dissector_add_parseargs,
+    limitor_add_parseargs,
+    PCAPDissector,
+    check_dissector_level,
+)
+from traffic_taffy.compare import PcapCompare
+
+# https://stackoverflow.com/questions/32476006/how-to-make-an-expandable-collapsable-section-widget-in-qt
+
+# class Widget(QWidget):
+#     def __init__(self):
+#         super().__init__()
+#         self.__initUi()
+
+#     def __initUi(self):
+#         addBtn = QPushButton('Add')
+#         addBtn.clicked.connect(self.__add)
+#         self.__foldableListWidget = FoldableListWidget()
+#         lay = QVBoxLayout()
+#         lay.addWidget(addBtn)
+#         lay.addWidget(self.__foldableListWidget)
+#         self.setLayout(lay)
+
+#     def __add(self):
+#         foldedItem = QLabel("folded")
+#         # foldedItem.setPlaceholderText('Input...')
+
+#         sublist = FoldableListWidget()
+#         subitem1 = QLabel("main item")
+#         subitem2 = QLabel("sub item")
+#         sublist.setFoldableListWidgetItem(subitem1, subitem2)
+
+#         self.__foldableListWidget.setFoldableListWidgetItem(foldedItem, sublist)
+
+
+from PyQt6.QtWidgets import (QPushButton, QDialog, QTreeWidget,
+                             QTreeWidgetItem, QVBoxLayout,
+                             QHBoxLayout, QFrame, QLabel,
+                             QApplication)
+
+class SectionExpandButton(QPushButton):
+    """a QPushbutton that can expand or collapse its section
+    """
+    def __init__(self, item, text = "", parent = None):
+        super().__init__(text, parent)
+        self.section = item
+        self.clicked.connect(self.on_clicked)
+
+    def on_clicked(self):
+        """toggle expand/collapse of section by clicking
+        """
+        if self.section.isExpanded():
+            self.section.setExpanded(False)
+        else:
+            self.section.setExpanded(True)
+
+
+class TaffyExplorer(QDialog):
+    """Explore PCAP files by comparison slices"""
+    def __init__(self, args):
+        super().__init__()
+        self.tree = QTreeWidget()
+        self.tree.setHeaderHidden(True)
+        self.mainLayout = QVBoxLayout()
+        self.mainLayout.addWidget(self.tree)
+        self.setLayout(self.mainLayout)
+        self.tree.setIndentation(0)
+
+        self.sections = []
+        self.define_sections()
+        self.add_sections()
+
+        self.plusone = QPushButton("Add one")
+        self.mainLayout.addWidget(self.plusone)
+        self.plusone.clicked.connect(self.addone)
+
+        self.args = args
+
+    def addone(self):
+        print("here")
+        self.add_section("new item", QLabel("one thing"))
+
+    def add_section(self, title, widget):
+        button1 = self.add_button(title)
+        section1 = self.add_widget(button1, widget)
+        button1.addChild(section1)
+
+    def add_sections(self):
+        """adds a collapsible sections for every
+        (title, widget) tuple in self.sections
+        """
+        #self.tree.clear()
+        for (title, widget) in self.sections:
+            self.add_section(title, widget)
+
+    def define_sections(self):
+        """reimplement this to define all your sections
+        and add them as (title, widget) tuples to self.sections
+        """
+        widget = QFrame(self.tree)
+        layout = QHBoxLayout(widget)
+        layout.addWidget(QLabel("Bla"))
+        layout.addWidget(QLabel("Blubb"))
+        title = "Section 1"
+        self.sections.append((title, widget))
+
+    def add_button(self, title):
+        """creates a QTreeWidgetItem containing a button
+        to expand or collapse its section
+        """
+        item = QTreeWidgetItem()
+        self.tree.addTopLevelItem(item)
+        self.tree.setItemWidget(item, 0, SectionExpandButton(item, text = title))
+        return item
+
+    def add_widget(self, button, widget):
+        """creates a QWidgetItem containing the widget,
+        as child of the button-QWidgetItem
+        """
+        section = QTreeWidgetItem(button)
+        section.setDisabled(True)
+        self.tree.setItemWidget(section, 0, widget)
+        return section
+
+    def create_comparison(self):
+        self.pc = PcapCompare(
+            self.args.pcap_files,
+            maximum_count=self.args.packet_count,
+            print_threshold=float(self.args.print_threshold) / 100.0,
+            print_minimum_count=self.args.minimum_count,
+            print_match_string=self.args.match_string,
+            only_positive=self.args.only_positive,
+            only_negative=self.args.only_negative,
+            cache_results=self.args.cache_pcap_results,
+            dissection_level=self.args.dissection_level,
+            between_times=self.args.between_times,
+        )
+
+        # compare the pcaps
+        self.pcap_data = list(self.pc.load_pcaps())
+
+    def show_comparison(self, pcap_one, timestamp_one, pcap_two, timestamp_two):
+
+
+
+def parse_args():
+    "Parse the command line arguments."
+    parser = ArgumentParser(
+        formatter_class=ArgumentDefaultsHelpFormatter,
+        description=__doc__,
+        epilog="Exmaple Usage: ",
+    )
+
+    limiting_parser = limitor_add_parseargs(parser)
+
+    limiting_parser.add_argument(
+        "-t",
+        "--print-threshold",
+        default=0.0,
+        type=float,
+        help="Don't print results with abs(percent) less than this threshold",
+    )
+
+    limiting_parser.add_argument(
+        "-P", "--only-positive", action="store_true", help="Only show positive entries"
+    )
+
+    limiting_parser.add_argument(
+        "-N", "--only-negative", action="store_true", help="Only show negative entries"
+    )
+
+    limiting_parser.add_argument(
+        "-T",
+        "--between-times",
+        nargs=2,
+        type=int,
+        help="For single files, only display results between these timestamps",
+    )
+
+    dissector_add_parseargs(parser)
+
+    debugging_group = parser.add_argument_group("Debugging options")
+
+    debugging_group.add_argument(
+        "--log-level",
+        "--ll",
+        default="info",
+        help="Define the logging verbosity level (debug, info, warning, error, ...).",
+    )
+
+    parser.add_argument("pcap_files", type=str, nargs="*", help="PCAP files to analyze")
+
+    args = parser.parse_args()
+    log_level = args.log_level.upper()
+    logging.basicConfig(level=log_level, format="%(levelname)-10s:\t%(message)s")
+
+    check_dissector_level(args.dissection_level)
+
+    return args
+
+
+def main():
+    args = parse_args()
+
+    app = QApplication(sys.argv)
+    window = TaffyExplorer(args)
+    window.create_comparison()
+    window.show()
+    sys.exit(app.exec())
+
+
+
+if __name__ == "__main__":
+    main()
+
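
explore.py is a new, clearly work-in-progress PyQt6 GUI: show_comparison() is left without a body in this release (so the module as shipped will not import cleanly until that stub is filled in), and PyQt6 itself is not listed in install_requires, so it has to be installed separately. Its main() wiring amounts to the following sketch, assuming a hypothetical capture file and a completed module:

    import sys
    from PyQt6.QtWidgets import QApplication
    from traffic_taffy.explore import TaffyExplorer, parse_args

    sys.argv = ["taffy-explore", "capture.pcap"]  # hypothetical invocation
    args = parse_args()

    app = QApplication(sys.argv)
    window = TaffyExplorer(args)
    window.create_comparison()  # dissects the pcaps via PcapCompare.load_pcaps()
    window.show()
    sys.exit(app.exec())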
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/graph.py
@@ -26,14 +26,6 @@ def parse_args():
         epilog="Exmaple Usage: ",
     )

-    parser.add_argument(
-        "-g",
-        "--graph-elements",
-        default=None,
-        type=str,
-        help="Graph these particular elements; the default is packet counts",
-    )
-
     parser.add_argument(
         "-o",
         "--output-file",
@@ -49,14 +41,6 @@ def parse_args():
         help="Define verbosity level (debug, info, warning, error, fotal, critical).",
     )

-    parser.add_argument(
-        "-b",
-        "--bin-size",
-        type=int,
-        default=1,
-        help="Bin results into this many seconds",
-    )
-
     parser.add_argument(
         "-i",
         "--interactive",
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy/pcap_splitter.py
@@ -3,10 +3,11 @@
 import io
 import os
 import multiprocessing
+from traffic_taffy.dissector import PCAPDissector
 from typing import List
 import dpkt
 from concurrent.futures import ProcessPoolExecutor, Future
-from logging import debug
+from logging import debug, info


 class PCAPSplitter:
@@ -26,6 +27,7 @@ class PCAPSplitter:
         self.split_size: int = split_size
         self.maximum_count: int = maximum_count
         self.pcap_filter: str | None = pcap_filter
+        self.maximum_cores = maximum_cores

         self.header: bytes = None
         self.buffer: bytes = None
@@ -38,29 +40,46 @@ class PCAPSplitter:
         if not os.path.exists(self.pcap_file):
             raise ValueError(f"failed to find pcap file '{self.pcap_file}'")

-        if not self.split_size:
-            cores = multiprocessing.cpu_count()
-            if maximum_cores and cores > maximum_cores:
-                cores = maximum_cores
+    def set_split_size(self):
+        "Attempt to calculate a reasonable split size"
+        if self.split_size:
+            info(f"split size already set to {self.split_size}")
+            return self.split_size

-            if self.maximum_count:
-                # not ideal math, but better than nothing
-                self.split_size = int(self.maximum_count / cores)
+        cores = multiprocessing.cpu_count()
+        if self.maximum_cores and cores > self.maximum_cores:
+            cores = self.maximum_cores
+
+        if self.maximum_count and self.maximum_count > 0:
+            # not ideal math, but better than nothing
+            self.split_size = int(self.maximum_count / cores)
+        else:
+            if isinstance(self.our_data, io.BufferedReader):
+                # raw uncompressed file
+                divide_size = 1200
             else:
-                # even worse math and assumes generally large packets
-                stats = os.stat(self.pcap_file)
-                file_size = stats.st_size
-                self.split_size = int(file_size / 1200 / cores)
+                # likely a compressed file
+                divide_size = 5000
+
+            # even worse math and assumes generally large packets
+            stats = os.stat(self.pcap_file)
+            file_size = stats.st_size
+            self.split_size = int(file_size / divide_size / cores)
+            debug(
+                f"split info: {file_size=}, {divide_size=}, {cores=}, {self.split_size=}"
+            )

-            # even 1000 is kinda silly to split, but is better than nothing
-            self.split_size = min(self.split_size, 1000)
-            debug(f"setting PCAPSplitter split size to {self.split_size} for {cores}")
+        # even 1000 is kinda silly to split, but is better than nothing
+        self.split_size = max(self.split_size, 1000)
+        debug(f"setting PCAPSplitter split size to {self.split_size} for {cores} cores")

     def split(self) -> List[io.BytesIO] | List[Future]:
         "Does the actual reading and splitting"
         # open one for the dpkt reader and one for us independently
-        self.our_data = open(self.pcap_file, "rb")
-        self.dpkt_data = open(self.pcap_file, "rb")
+        self.our_data = PCAPDissector.open_maybe_compressed(self.pcap_file)
+        self.dpkt_data = PCAPDissector.open_maybe_compressed(self.pcap_file)
+
+        self.set_split_size()

         # read the first 24 bytes which is the pcap header
         self.header = self.our_data.read(24)
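
set_split_size() now runs after the input handles are opened, so it can tell a raw pcap (an io.BufferedReader) from a decompressed stream and assume roughly 1200 bytes per packet for the former versus 5000 for the latter; it also replaces the old min() cap, which forced every split to at most 1000 packets, with a max() floor of 1000. The heuristic reduced to plain arithmetic, with made-up numbers:

    # hypothetical: a 1.2 GB uncompressed capture processed on 8 cores
    file_size = 1_200_000_000
    divide_size = 1200   # assumed bytes per packet for raw files (5000 when compressed)
    cores = 8
    split_size = max(int(file_size / divide_size / cores), 1000)
    print(split_size)    # 125000 packets handed to each worker chunk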
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: traffic-taffy
-Version: 0.2
+Version: 0.3.5
 Summary: A tool for doing differential analysis of pcap files
 Home-page: https://github.com/hardaker/traffic-taffy
 Author: Wes Hardaker
{traffic-taffy-0.2 → traffic-taffy-0.3.5}/traffic_taffy.egg-info/SOURCES.txt
@@ -7,10 +7,12 @@ traffic_taffy/compare.py
 traffic_taffy/dissectmany.py
 traffic_taffy/dissector.py
 traffic_taffy/dissectorresults.py
+traffic_taffy/explore.py
 traffic_taffy/graph.py
 traffic_taffy/pcap_splitter.py
 traffic_taffy.egg-info/PKG-INFO
 traffic_taffy.egg-info/SOURCES.txt
 traffic_taffy.egg-info/dependency_links.txt
 traffic_taffy.egg-info/entry_points.txt
+traffic_taffy.egg-info/requires.txt
 traffic_taffy.egg-info/top_level.txt
traffic-taffy-0.3.5/traffic_taffy.egg-info/requires.txt (new file)
@@ -0,0 +1,5 @@
+pandas
+rich
+seaborn
+scapy
+dpkt