traffic-taffy 0.3.6__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. traffic_taffy/cache_info.py +0 -6
  2. traffic_taffy/compare.py +154 -250
  3. traffic_taffy/comparison.py +26 -0
  4. traffic_taffy/dissection.py +383 -0
  5. traffic_taffy/dissectmany.py +20 -18
  6. traffic_taffy/dissector.py +128 -476
  7. traffic_taffy/dissector_engine/__init__.py +35 -0
  8. traffic_taffy/dissector_engine/dpkt.py +98 -0
  9. traffic_taffy/dissector_engine/scapy.py +98 -0
  10. traffic_taffy/graph.py +23 -90
  11. traffic_taffy/graphdata.py +35 -20
  12. traffic_taffy/output/__init__.py +118 -0
  13. traffic_taffy/output/console.py +72 -0
  14. traffic_taffy/output/fsdb.py +50 -0
  15. traffic_taffy/output/memory.py +51 -0
  16. traffic_taffy/pcap_splitter.py +17 -36
  17. traffic_taffy/tools/cache_info.py +65 -0
  18. traffic_taffy/tools/compare.py +110 -0
  19. traffic_taffy/tools/dissect.py +77 -0
  20. traffic_taffy/tools/explore.py +686 -0
  21. traffic_taffy/tools/graph.py +85 -0
  22. {traffic_taffy-0.3.6.dist-info → traffic_taffy-0.4.1.dist-info}/METADATA +1 -1
  23. traffic_taffy-0.4.1.dist-info/RECORD +29 -0
  24. traffic_taffy-0.4.1.dist-info/entry_points.txt +6 -0
  25. pcap_compare/cache_info.py +0 -46
  26. pcap_compare/compare.py +0 -288
  27. pcap_compare/dissectmany.py +0 -21
  28. pcap_compare/dissector.py +0 -512
  29. pcap_compare/dissectorresults.py +0 -21
  30. pcap_compare/graph.py +0 -210
  31. traffic_taffy/explore.py +0 -221
  32. traffic_taffy-0.3.6.dist-info/RECORD +0 -22
  33. traffic_taffy-0.3.6.dist-info/entry_points.txt +0 -5
  34. {pcap_compare → traffic_taffy/tools}/__init__.py +0 -0
  35. {traffic_taffy-0.3.6.dist-info → traffic_taffy-0.4.1.dist-info}/WHEEL +0 -0
  36. {traffic_taffy-0.3.6.dist-info → traffic_taffy-0.4.1.dist-info}/top_level.txt +0 -0
traffic_taffy/dissector_engine/__init__.py ADDED
@@ -0,0 +1,35 @@
+ """Base class for a dissection engine with subclasses overriding load()"""
+
+ from traffic_taffy.dissection import Dissection, PCAPDissectorLevel
+
+
+ class DissectionEngine:
+     def __init__(
+         self,
+         pcap_file,
+         pcap_filter: str = "",
+         maximum_count: int = 0,
+         bin_size: int = 0,
+         dissector_level: PCAPDissectorLevel = PCAPDissectorLevel.DETAILED,
+         cache_file_suffix: str = "pkl",
+         ignore_list: list = [],
+     ):
+         self.pcap_file = pcap_file
+         self.dissector_level = dissector_level
+         self.bin_size = bin_size
+         self.maximum_count = maximum_count
+         self.pcap_filter = pcap_filter
+         self.cache_file_suffix = cache_file_suffix
+         self.ignore_list = set(ignore_list)
+
+     def init_dissection(self) -> Dissection:
+         self.dissection = Dissection(
+             pcap_file=self.pcap_file,
+             dissector_level=self.dissector_level,
+             bin_size=self.bin_size,
+             pcap_filter=self.pcap_filter,
+             maximum_count=self.maximum_count,
+             cache_file_suffix=self.cache_file_suffix,
+             ignore_list=self.ignore_list,
+         )
+         return self.dissection
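The subclass contract implied here: call init_dissection() to build the shared Dissection from the constructor settings, fill it by iterating packets, then finish with calculate_metadata(). A minimal sketch of that contract; the DissectionEngineNull name is invented for illustration and is not part of the package:

```python
from traffic_taffy.dissector_engine import DissectionEngine
from traffic_taffy.dissection import Dissection


class DissectionEngineNull(DissectionEngine):
    """Hypothetical do-nothing engine illustrating the subclass contract."""

    def load(self) -> Dissection:
        self.init_dissection()  # build the Dissection from our settings
        # a real engine would iterate packets here, calling self.dissection.incr()
        self.dissection.calculate_metadata()
        return self.dissection
```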
traffic_taffy/dissector_engine/dpkt.py ADDED
@@ -0,0 +1,98 @@
+ from traffic_taffy.dissector_engine import DissectionEngine
+ from traffic_taffy.dissection import Dissection, PCAPDissectorLevel
+ from pcap_parallel import PCAPParallel as pcapp
+
+ import dpkt
+
+
+ class DissectionEngineDpkt(DissectionEngine):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def load(self) -> Dissection:
+         self.init_dissection()
+         if isinstance(self.pcap_file, str):
+             pcap = dpkt.pcap.Reader(pcapp.open_maybe_compressed(self.pcap_file))
+         else:
+             # it's an open handle already
+             pcap = dpkt.pcap.Reader(self.pcap_file)
+         if self.pcap_filter:
+             pcap.setfilter(self.pcap_filter)
+         pcap.dispatch(self.maximum_count, self.callback)
+
+         self.dissection.calculate_metadata()
+         return self.dissection
+
+     def incr(self, dissection, name, value):
+         if name not in self.ignore_list:
+             dissection.incr(name, value)
+
+     def callback(self, timestamp: float, packet: bytes):
+         # if binning is requested, save it in a binned time slot
+         dissection: Dissection = self.dissection
+
+         dissection.timestamp = int(timestamp)
+         if dissection.bin_size:
+             dissection.timestamp = (
+                 dissection.timestamp - dissection.timestamp % dissection.bin_size
+             )
+
+         dissection.incr(Dissection.TOTAL_COUNT, dissection.TOTAL_SUBKEY)
+
+         level = self.dissector_level
+         if isinstance(level, PCAPDissectorLevel):
+             level = level.value
+         if level >= PCAPDissectorLevel.THROUGH_IP.value:
+             eth = dpkt.ethernet.Ethernet(packet)
+             # these names are designed to match scapy names
+             self.incr(dissection, "Ethernet.dst", eth.dst)
+             self.incr(dissection, "Ethernet.src", eth.src)
+             self.incr(dissection, "Ethernet.type", eth.type)
+
+             if isinstance(eth.data, dpkt.ip.IP):
+                 ip = eth.data
+
+                 IPVER = "IP"
+                 if ip.v == 6:
+                     IPVER = "IPv6"
+
+                 prefix = f"Ethernet.{IPVER}."
+
+                 # TODO: make sure all these match scapy
+                 self.incr(dissection, prefix + "dst", ip.dst)
+                 self.incr(dissection, prefix + "src", ip.src)
+                 self.incr(dissection, prefix + "df", ip.df)
+                 self.incr(dissection, prefix + "offset", ip.offset)
+                 self.incr(dissection, prefix + "tos", ip.tos)
+                 self.incr(dissection, prefix + "len", ip.len)
+                 self.incr(dissection, prefix + "id", ip.id)
+                 self.incr(dissection, prefix + "hl", ip.hl)
+                 self.incr(dissection, prefix + "rf", ip.rf)
+                 self.incr(dissection, prefix + "p", ip.p)
+                 self.incr(dissection, prefix + "chksum", ip.sum)
+                 self.incr(dissection, prefix + "tos", ip.tos)
+                 self.incr(dissection, prefix + "version", ip.v)
+                 self.incr(dissection, prefix + "ttl", ip.ttl)
+
+                 if isinstance(ip.data, dpkt.udp.UDP):
+                     udp = ip.data
+                     self.incr(dissection, prefix + "UDP.sport", udp.sport)
+                     self.incr(dissection, prefix + "UDP.dport", udp.dport)
+                     self.incr(dissection, prefix + "UDP.len", udp.ulen)
+                     self.incr(dissection, prefix + "UDP.chksum", udp.sum)
+
+                     # TODO: handle DNS and others for level 3
+
+                 elif isinstance(ip.data, dpkt.tcp.TCP):
+                     # TODO
+                     tcp = ip.data
+                     self.incr(dissection, prefix + "TCP.sport", tcp.sport)
+                     self.incr(dissection, prefix + "TCP.dport", tcp.dport)
+                     self.incr(dissection, prefix + "TCP.seq", tcp.seq)
+                     self.incr(dissection, prefix + "TCP.flags", tcp.flags)
+                     # self.incr(dissection, prefix + "TCP.reserved", tcp.reserved)
+                     self.incr(dissection, prefix + "TCP.window", tcp.win)
+                     self.incr(dissection, prefix + "TCP.chksum", tcp.sum)
+                     self.incr(dissection, prefix + "TCP.options", tcp.opts)
+
+                     # TODO: handle DNS and others for level 3
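A hedged usage sketch of the engine above; the constructor arguments come from DissectionEngine.__init__ earlier in this diff, but the file name and values here are placeholders:

```python
from traffic_taffy.dissector_engine.dpkt import DissectionEngineDpkt
from traffic_taffy.dissection import PCAPDissectorLevel

engine = DissectionEngineDpkt(
    "example.pcap",                                 # placeholder path
    bin_size=60,                                    # one-minute time bins
    dissector_level=PCAPDissectorLevel.THROUGH_IP,  # parse Ethernet/IP/TCP/UDP
)
dissection = engine.load()  # a Dissection holding per-bin counters
```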
traffic_taffy/dissector_engine/scapy.py ADDED
@@ -0,0 +1,98 @@
+ from traffic_taffy.dissector_engine import DissectionEngine
+ from traffic_taffy.dissection import Dissection
+ from pcap_parallel import PCAPParallel as pcapp
+ from logging import warning
+
+ from scapy.all import sniff
+
+
+ class DissectionEngineScapy(DissectionEngine):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     def load(self) -> Dissection:
+         "Loads a pcap file into a nested dictionary of statistical counts"
+         self.init_dissection()
+         load_this = self.pcap_file
+         if isinstance(self.pcap_file, str):
+             load_this = pcapp.open_maybe_compressed(self.pcap_file)
+         sniff(
+             offline=load_this,
+             prn=self.callback,
+             store=0,
+             count=self.maximum_count,
+             filter=self.pcap_filter,
+         )
+         self.dissection.calculate_metadata()
+         # TODO: for some reason this fails on xz compressed files when processing in parallel
+         return self.dissection
+
+     def add_item(self, field_value, prefix: str) -> None:
+         "Adds an item to the self.dissection regardless of its various types"
+
+         if isinstance(field_value, list):
+             if len(field_value) > 0:
+                 # if it's a list of tuples, count the (eg TCP option) names
+                 # TODO: values can be always the same or things like timestamps
+                 # that will always change or are too unique
+                 if isinstance(field_value[0], tuple):
+                     for item in field_value:
+                         self.dissection.incr(prefix, item[0])
+                 else:
+                     for item in field_value:
+                         self.add_item(item, prefix)
+             # else:
+             #     debug(f"ignoring empty-list: {field_value}")
+         elif (
+             isinstance(field_value, str)
+             or isinstance(field_value, int)
+             or isinstance(field_value, float)
+         ):
+             self.dissection.incr(prefix, field_value)
+
+         elif isinstance(field_value, bytes):
+             try:
+                 converted = field_value.decode("utf-8")
+                 self.dissection.incr(prefix, converted)
+             except Exception:
+                 converted = "0x" + field_value.hex()
+                 self.dissection.incr(prefix, converted)
+
+     def add_layer(self, layer, prefix: str | None = "") -> None:
+         "Analyzes a layer to add counts to each layer sub-component"
+
+         if hasattr(layer, "fields_desc"):
+             name_list = [field.name for field in layer.fields_desc]
+         elif hasattr(layer, "fields"):
+             name_list = [field.name for field in layer.fields]
+         else:
+             warning(f"unavailable to deep dive into: {layer}")
+             return
+
+         for field_name in name_list:
+             new_prefix = prefix + field_name
+
+             if new_prefix in self.ignore_list:
+                 continue
+
+             try:
+                 field_value = getattr(layer, field_name)
+                 if hasattr(field_value, "fields"):
+                     self.add_layer(field_value, new_prefix + ".")
+                 else:
+                     self.add_item(field_value, new_prefix)
+             except Exception as e:
+                 warning(f"scapy error at '{prefix}' in field '{field_name}'")
+                 warning(e)
+
+     def callback(self, packet):
+         prefix = "."
+         self.timestamp = int(packet.time)
+         if self.bin_size:
+             self.timestamp = self.timestamp - self.timestamp % self.bin_size
+
+         self.dissection.timestamp = int(self.timestamp)
+         self.dissection.incr(Dissection.TOTAL_COUNT, Dissection.TOTAL_SUBKEY)
+         for payload in packet.iterpayloads():
+             prefix = f"{prefix}{payload.name}."
+             self.add_layer(payload, prefix[1:])
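Both engines share DissectionEngine's constructor, so they are interchangeable; the scapy engine walks every layer scapy can decode rather than gating on dissector_level. A hedged sketch (the file name is a placeholder, and the BPF filter is passed straight through to sniff()):

```python
from traffic_taffy.dissector_engine.scapy import DissectionEngineScapy

engine = DissectionEngineScapy("example.pcap", pcap_filter="udp port 53")
dissection = engine.load()
# counter keys mirror the dotted layer/field names built in callback(),
# e.g. "Ethernet.IP.UDP.dport"
```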
traffic_taffy/graph.py CHANGED
@@ -1,62 +1,12 @@
- """Read a PCAP file and graph it or parts of it"""
-
  import seaborn as sns
  import matplotlib.pyplot as plt
- from traffic_taffy.dissector import (
-     PCAPDissectorType,
-     dissector_add_parseargs,
-     limitor_add_parseargs,
-     check_dissector_level,
- )
+ from logging import debug, info
+ from typing import List
+
+ from traffic_taffy.dissector import PCAPDissectorLevel
  from traffic_taffy.dissectmany import PCAPDissectMany
  from traffic_taffy.graphdata import PcapGraphData

- from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
- from logging import debug, info
- import logging
-
-
- def parse_args():
-     "Parse the command line arguments."
-     parser = ArgumentParser(
-         formatter_class=ArgumentDefaultsHelpFormatter,
-         description=__doc__,
-         epilog="Exmaple Usage: ",
-     )
-
-     parser.add_argument(
-         "-o",
-         "--output-file",
-         default=None,
-         type=str,
-         help="Where to save the output (png)",
-     )
-
-     parser.add_argument(
-         "--log-level",
-         "--ll",
-         default="info",
-         help="Define verbosity level (debug, info, warning, error, fotal, critical).",
-     )
-
-     parser.add_argument(
-         "-i",
-         "--interactive",
-         action="store_true",
-         help="Prompt repeatedly for graph data to create",
-     )
-
-     dissector_add_parseargs(parser)
-     limitor_add_parseargs(parser)
-
-     parser.add_argument("input_file", type=str, help="PCAP file to graph", nargs="+")
-
-     args = parser.parse_args()
-     log_level = args.log_level.upper()
-     logging.basicConfig(level=log_level, format="%(levelname)-10s:\t%(message)s")
-     logging.getLogger("matplotlib.font_manager").setLevel(logging.ERROR)
-     return args
-

  class PcapGraph(PcapGraphData):
      def __init__(
@@ -66,11 +16,13 @@ class PcapGraph(PcapGraphData):
          maximum_count: int = None,
          minimum_count: int = None,
          bin_size: int = None,
-         match_key: str = None,
+         match_string: str = None,
          match_value: str = None,
          cache_pcap_results: bool = False,
-         dissector_level: PCAPDissectorType = PCAPDissectorType.COUNT_ONLY,
+         dissector_level: PCAPDissectorLevel = PCAPDissectorLevel.COUNT_ONLY,
          interactive: bool = False,
+         ignore_list: List[str] = [],
+         by_percentage: bool = False,
      ):
          self.pcap_files = pcap_files
          self.output_file = output_file
@@ -79,11 +31,13 @@ class PcapGraph(PcapGraphData):
          self.bin_size = bin_size
          self.subsections = None
          self.pkt_filter = None
-         self.match_key = match_key
+         self.match_string = match_string
          self.match_value = match_value
          self.cache_pcap_results = cache_pcap_results
          self.dissector_level = dissector_level
          self.interactive = interactive
+         self.ignore_list = ignore_list
+         self.by_percentage = by_percentage

          super().__init__()

@@ -99,26 +53,29 @@ class PcapGraph(PcapGraphData):
              dissector_level=self.dissector_level,
              pcap_filter=self.pkt_filter,
              cache_results=self.cache_pcap_results,
+             ignore_list=self.ignore_list,
          )
-         results = pdm.load_all()
-
-         for result in results:
-             self.data[result["file"]] = result["data"]
+         self.dissections = pdm.load_all()
          info("done reading pcap files")

      def create_graph(self):
-         df = self.merge_datasets()
-         debug(df)
+         df = self.get_dataframe(merge=True, calculate_load_fraction=self.by_percentage)

          hue_variable = "index"
          if df[hue_variable].nunique() == 1:
              hue_variable = None

+         if self.by_percentage:
+             df["load_fraction"]
+             y_column = "load_fraction"
+         else:
+             y_column = "count"
+
          ax = sns.relplot(
              data=df,
              kind="line",
              x="time",
-             y="count",
+             y=y_column,
              hue=hue_variable,
              aspect=1.77,
          )
@@ -143,9 +100,9 @@ class PcapGraph(PcapGraphData):
          self.create_graph()

          if self.interactive:
-             self.match_key = input("search key: ")
+             self.match_string = input("search key: ")
              self.match_value = input("value key: ")
-             if not self.match_key and not self.match_value:
+             if not self.match_string and not self.match_value:
                  self.interactive = False

      def graph_it(self):
@@ -153,27 +110,3 @@ class PcapGraph(PcapGraphData):
          self.load_pcaps()
          debug("--- creating graph")
          self.show_graph()
-
-
- def main():
-     args = parse_args()
-
-     check_dissector_level(args.dissection_level)
-
-     pc = PcapGraph(
-         args.input_file,
-         args.output_file,
-         maximum_count=args.packet_count,
-         minimum_count=args.minimum_count,
-         bin_size=args.bin_size,
-         match_key=args.match_string,
-         match_value=args.match_value,
-         cache_pcap_results=args.cache_pcap_results,
-         dissector_level=args.dissection_level,
-         interactive=args.interactive,
-     )
-     pc.graph_it()
-
-
- if __name__ == "__main__":
-     main()
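With parse_args() and main() moved out to traffic_taffy/tools/graph.py, graph.py is now import-only. A hedged sketch of driving it programmatically under the 0.4.1 signature above (file names and values are placeholders):

```python
from traffic_taffy.graph import PcapGraph
from traffic_taffy.dissector import PCAPDissectorLevel

pg = PcapGraph(
    ["before.pcap", "after.pcap"],
    "traffic.png",
    bin_size=1,
    match_string="UDP.dport",
    dissector_level=PCAPDissectorLevel.DETAILED,
    by_percentage=True,  # plot load_fraction instead of raw counts
)
pg.graph_it()  # load_pcaps(), then show_graph()
```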
traffic_taffy/graphdata.py CHANGED
@@ -1,35 +1,32 @@
  import os
- from traffic_taffy.dissector import PCAPDissector
  from pandas import DataFrame, to_datetime, concat


  class PcapGraphData:
      def __init__(self):
+         self.dissections = []
          pass

      @property
-     def data(self):
-         return self._data
+     def dissections(self):
+         return self._dissections

-     @data.setter
-     def data(self, newvalue):
-         self._data = newvalue
+     @dissections.setter
+     def dissections(self, newvalue):
+         self._dissections = newvalue

-     def normalize_bins(self, counters):
+     def normalize_bins(self, dissection):
          results = {}
-         time_keys = list(counters.keys())
+         time_keys = list(dissection.data.keys())
          if time_keys[0] == 0:  # likely always
              time_keys.pop(0)
-         time_keys[0]
-         time_keys[-1]

-         results = {"time": [], "count": [], "index": []}
+         results = {"time": [], "count": [], "index": [], "key": [], "subkey": []}

          # TODO: this could likely be made much more efficient and needs hole-filling
-         for timestamp, key, subkey, value in PCAPDissector.find_data(
-             counters,
+         for timestamp, key, subkey, value in dissection.find_data(
              timestamps=time_keys,
-             match_string=self.match_key,
+             match_string=self.match_string,
              match_value=self.match_value,
              minimum_count=self.minimum_count,
              make_printable=True,
@@ -37,17 +34,35 @@ class PcapGraphData:
              index = key + "=" + subkey
              results["count"].append(int(value))
              results["index"].append(index)
+             results["key"].append(key)
+             results["subkey"].append(subkey)
              results["time"].append(timestamp)

          return results

-     def merge_datasets(self):
+     def get_dataframe(self, merge=False, calculate_load_fraction=False):
          datasets = []
-         for dataset in self.data:
-             data = self.normalize_bins(self.data[dataset])
+         if merge:
+             dissection = next(self.dissections).clone()
+             for tomerge in self.dissections:
+                 dissection.merge(tomerge)
+             dissections = [dissection]
+         else:
+             dissections = self.dissections
+
+         for dissection in dissections:
+             data = self.normalize_bins(dissection)
              data = DataFrame.from_records(data)
-             data["filename"] = os.path.basename(dataset)
-             data["time"] = to_datetime(data["time"], unit="s")
+             data["filename"] = os.path.basename(dissection.pcap_file)
+             data["time"] = to_datetime(data["time"], unit="s", utc=True)
+             data["key"] = data["index"]
              datasets.append(data)
-         datasets = concat(datasets)
+         datasets = concat(datasets, ignore_index=True)
+
+         if calculate_load_fraction:
+             time_groups = datasets.groupby("time")
+             datasets["load_fraction"] = (
+                 100 * datasets["count"] / time_groups.transform("max")["count"]
+             )
+
          return datasets
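The new load_fraction column normalizes each count against the largest count sharing its time bin, so series plot as a percentage of the busiest one. A self-contained sketch of the same pandas idiom, with made-up numbers:

```python
from pandas import DataFrame

df = DataFrame({
    "time":  [0, 0, 60, 60],
    "index": ["a", "b", "a", "b"],
    "count": [10, 40, 30, 30],
})
# transform("max") broadcasts each time bin's maximum back onto its rows
df["load_fraction"] = 100 * df["count"] / df.groupby("time").transform("max")["count"]
# time 0: a -> 25.0, b -> 100.0; time 60: both -> 100.0
```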
traffic_taffy/output/__init__.py ADDED
@@ -0,0 +1,118 @@
+ class Output:
+     def __init__(self, report, options={}):
+         self.report = report
+         self.output_options = options
+
+     @property
+     def report(self):
+         return self._report
+
+     @report.setter
+     def report(self, new_report):
+         self._report = new_report
+
+     @property
+     def output_options(self):
+         return self._output_options
+
+     @output_options.setter
+     def output_options(self, new_output_options):
+         self._output_options = new_output_options
+
+     def output(self, report=None):
+         if not report:
+             report = self.report
+         contents = report.contents
+
+         first_of_anything: bool = True
+
+         top_records = self.output_options.get("top_records")
+
+         # intentionally reversed, as it should default to high to low
+         sort_order = not self.output_options.get("reverse_sort", False)
+
+         for key in sorted(contents):
+             reported: bool = False
+
+             if (
+                 self.output_options.get("match_string") is not None
+                 and self.output_options["match_string"] not in key
+             ):
+                 continue
+
+             # TODO: we don't do match_value here?
+
+             record_count = 0
+             for subkey, data in sorted(
+                 contents[key].items(),
+                 key=lambda x: x[1]["delta_percentage"],
+                 reverse=sort_order,
+             ):
+                 if not self.filter_check(data):
+                     continue
+
+                 # print the header
+                 if not reported:
+                     if first_of_anything:
+                         self.output_start(report)
+                         first_of_anything = False
+
+                     self.output_new_section(key)
+                     reported = True
+
+                 self.output_record(key, subkey, data)
+
+                 record_count += 1
+
+                 if top_records and record_count >= top_records:
+                     break
+
+         self.output_close()
+
+     def output_new_section(self, key):
+         return
+
+     def output_close(self):
+         return
+
+     def filter_check(self, data: dict) -> bool:
+         "Return true if we should include it."
+         delta: float = data["delta_percentage"]
+         total: int = data["total"]
+
+         if self.output_options["only_positive"] and delta <= 0:
+             return False
+
+         if self.output_options["only_negative"] and delta >= 0:
+             return False
+
+         if (
+             not self.output_options["print_threshold"]
+             and not self.output_options["minimum_count"]
+         ):
+             # always print
+             return True
+
+         if (
+             self.output_options["print_threshold"]
+             and not self.output_options["minimum_count"]
+         ):
+             # check output_options["print_threshold"] as a fraction
+             if abs(delta) > self.output_options["print_threshold"]:
+                 return True
+         elif (
+             not self.output_options["print_threshold"]
+             and self.output_options["minimum_count"]
+         ):
+             # just check output_options["minimum_count"]
+             if total > self.output_options["minimum_count"]:
+                 return True
+         else:
+             # require both
+             if (
+                 total > self.output_options["minimum_count"]
+                 and abs(delta) > self.output_options["print_threshold"]
+             ):
+                 return True
+
+         return False
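The hooks a concrete output implements are output_start(), output_new_section(), output_record(), and output_close(); Output.output() drives the iteration and filtering. A runnable sketch under stated assumptions: the OneLine class is invented for illustration, and the SimpleNamespace report is a stand-in for what the compare machinery produces. Note that filter_check() indexes four option keys directly, so they must be present:

```python
from types import SimpleNamespace
from traffic_taffy.output import Output


class OneLine(Output):
    """Hypothetical: one 'key=subkey delta%' line per surviving record."""

    def output_start(self, report):
        print(f"# {report.title}")

    def output_record(self, key, subkey, data):
        print(f"{key}={subkey} {100 * data['delta_percentage']:+.2f}%")


# stand-in report; real ones come out of traffic-taffy's compare machinery
report = SimpleNamespace(
    title="demo",
    contents={"Ethernet.IP.UDP.dport": {
        "53": {"delta_percentage": 0.42, "total": 100},
    }},
)
OneLine(report, options={
    # filter_check() indexes these keys directly, so they must exist
    "only_positive": False, "only_negative": False,
    "print_threshold": 0, "minimum_count": 0,
}).output()
# prints "# demo" followed by "Ethernet.IP.UDP.dport=53 +42.00%"
```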
traffic_taffy/output/console.py ADDED
@@ -0,0 +1,72 @@
+ from traffic_taffy.output import Output
+ from traffic_taffy.dissection import Dissection
+ from rich.console import Console as RichConsole
+
+
+ class Console(Output):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.console = None
+         self.have_done_header = False
+
+     # actual routines to print stuff
+     def init_console(self):
+         if not self.console:
+             self.console = RichConsole()
+
+     def output_start(self, report):
+         "Prints the header about columns being displayed"
+         # This should match the spacing in print_contents()
+         self.init_console()
+
+         self.console.print(f"======== {report.title}")
+         if self.have_done_header:
+             return
+
+         self.have_done_header = True
+
+         style = ""
+         subkey = "Value"
+         endstyle = ""
+         left_count = "Left"
+         right_count = "Right"
+         actual_delta = "Delta"
+
+         left_percent = "Left %"
+         right_percent = "Right %"
+         percent_delta = "Delta-%"
+
+         line = f" {style}{subkey:<50}{endstyle}"
+         line += f" {left_count:>8} {right_count:>8} {actual_delta:>8}"
+         line += f" {left_percent:>8} {right_percent:>8} {percent_delta:>7}"
+
+         self.console.print(line)
+
+     def output_new_section(self, key):
+         print(f"----- {key}")
+
+     def output_record(self, key, subkey, data) -> None:
+         "prints a report to the console"
+
+         delta_percentage: float = data["delta_percentage"]
+
+         # apply some fancy styling
+         style = ""
+         if delta_percentage < -0.5:
+             style = "[bold red]"
+         elif delta_percentage < 0.0:
+             style = "[red]"
+         elif delta_percentage > 0.5:
+             style = "[bold green]"
+         elif delta_percentage > 0.0:
+             style = "[green]"
+         endstyle = style.replace("[", "[/")
+
+         # construct the output line with styling
+         subkey = Dissection.make_printable(key, subkey)
+         line = f" {style}{subkey:<50}{endstyle}"
+         line += f" {data['left_count']:>8} {data['right_count']:>8} {data['delta_absolute']:>8}"
+         line += f" {100*data['left_percentage']:>7.2f} {100*data['right_percentage']:>7.2f} {100*delta_percentage:>7.2f}"
+
+         # print it to the rich console
+         self.console.print(line)
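A hedged sketch of the console output end to end, again with a stand-in report since the compare side is not shown in this diff; the data keys match the ones output_record() reads, and the threshold value is illustrative:

```python
from types import SimpleNamespace
from traffic_taffy.output.console import Console

# stand-in report; real ones come from traffic-taffy's compare machinery
report = SimpleNamespace(
    title="left.pcap vs right.pcap",
    contents={"Ethernet.IP.UDP.dport": {
        "53": {"delta_percentage": 0.42, "total": 100, "left_count": 30,
               "right_count": 70, "delta_absolute": 40,
               "left_percentage": 0.3, "right_percentage": 0.7},
    }},
)
Console(report, options={
    "only_positive": False, "only_negative": False,
    "print_threshold": 0.1, "minimum_count": 0,  # show only |delta %| > 10
}).output()
```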