traffic-taffy 0.8.1__py3-none-any.whl → 0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- traffic_taffy/__init__.py +1 -1
- traffic_taffy/algorithms/__init__.py +14 -7
- traffic_taffy/algorithms/comparecorrelation.py +164 -0
- traffic_taffy/algorithms/comparecorrelationchanges.py +210 -0
- traffic_taffy/algorithms/compareseries.py +117 -0
- traffic_taffy/algorithms/compareslices.py +116 -0
- traffic_taffy/algorithms/statistical.py +9 -9
- traffic_taffy/compare.py +149 -159
- traffic_taffy/comparison.py +18 -4
- traffic_taffy/config.py +133 -0
- traffic_taffy/dissection.py +171 -6
- traffic_taffy/dissectmany.py +26 -16
- traffic_taffy/dissector.py +189 -77
- traffic_taffy/dissector_engine/scapy.py +41 -8
- traffic_taffy/graph.py +54 -53
- traffic_taffy/graphdata.py +13 -2
- traffic_taffy/hooks/ip2asn.py +20 -7
- traffic_taffy/hooks/labels.py +45 -0
- traffic_taffy/hooks/psl.py +21 -3
- traffic_taffy/iana/tables.msgpak +0 -0
- traffic_taffy/output/__init__.py +8 -48
- traffic_taffy/output/console.py +37 -25
- traffic_taffy/output/fsdb.py +24 -18
- traffic_taffy/reports/__init__.py +5 -0
- traffic_taffy/reports/compareslicesreport.py +85 -0
- traffic_taffy/reports/correlationchangereport.py +54 -0
- traffic_taffy/reports/correlationreport.py +42 -0
- traffic_taffy/taffy_config.py +44 -0
- traffic_taffy/tests/test_compare_results.py +22 -7
- traffic_taffy/tests/test_config.py +149 -0
- traffic_taffy/tests/test_global_config.py +33 -0
- traffic_taffy/tests/test_normalize.py +1 -0
- traffic_taffy/tests/test_pcap_dissector.py +12 -2
- traffic_taffy/tests/test_pcap_splitter.py +21 -10
- traffic_taffy/tools/cache_info.py +3 -2
- traffic_taffy/tools/compare.py +32 -24
- traffic_taffy/tools/config.py +83 -0
- traffic_taffy/tools/dissect.py +51 -59
- traffic_taffy/tools/explore.py +5 -4
- traffic_taffy/tools/export.py +28 -17
- traffic_taffy/tools/graph.py +25 -27
- {traffic_taffy-0.8.1.dist-info → traffic_taffy-0.9.dist-info}/METADATA +4 -1
- traffic_taffy-0.9.dist-info/RECORD +56 -0
- {traffic_taffy-0.8.1.dist-info → traffic_taffy-0.9.dist-info}/entry_points.txt +1 -0
- traffic_taffy/report.py +0 -12
- traffic_taffy/tests/test_dpkt_engine.py +0 -15
- traffic_taffy-0.8.1.dist-info/RECORD +0 -43
- {traffic_taffy-0.8.1.dist-info → traffic_taffy-0.9.dist-info}/WHEEL +0 -0
- {traffic_taffy-0.8.1.dist-info → traffic_taffy-0.9.dist-info}/licenses/LICENSE.txt +0 -0
traffic_taffy/graphdata.py
CHANGED
@@ -8,9 +8,19 @@ from traffic_taffy.dissection import Dissection
 class PcapGraphData:
     """A base class for storing/transforming data (frequently to be graphed)."""

-    def __init__(
+    def __init__(
+        self,
+        match_string: str = None,
+        match_value: str = None,
+        minimum_count: int = None,
+        match_expression: str = None,
+    ):
         """Create an instance of a PcapGraphData."""
         self.dissections = []
+        self.match_string = match_string
+        self.match_value = match_value
+        self.minimum_count = minimum_count
+        self.match_expression = match_expression

     @property
     def dissections(self) -> list:
@@ -21,7 +31,7 @@ class PcapGraphData:
     def dissections(self, newvalue: list) -> None:
         self._dissections = newvalue

-    def normalize_bins(self, dissection: Dissection) -> dict:
+    def normalize_bins(self, dissection: Dissection, minimalize: bool = False) -> dict:
         """Transform a dissection's list of data into a dictionary."""
         results: dict = {}
         time_keys: list = list(dissection.data.keys())
@@ -37,6 +47,7 @@ class PcapGraphData:
             match_value=self.match_value,
             minimum_count=self.minimum_count,
             make_printable=True,
+            match_expression=self.match_expression,
         ):
             index = key + "=" + subkey
             results["count"].append(int(value))
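Note: normalize_bins() flattens a dissection's nested {timestamp: {key: {subkey: count}}} structure into parallel lists for graphing, and the new match_expression argument is simply threaded through to the filtering step. A self-contained sketch of that flattening, using a plain dict in place of a real Dissection (the names and numbers below are illustrative only):

    from collections import defaultdict

    # stand-in for Dissection.data: {timestamp: {key: {subkey: count}}}
    data = {
        1700000000: {"IP_src": {"10.0.0.1": 12, "10.0.0.2": 3}},
        1700000060: {"IP_src": {"10.0.0.1": 7}},
    }

    results = defaultdict(list)
    for timestamp, keys in data.items():
        for key, subkeys in keys.items():
            for subkey, count in subkeys.items():
                results["time"].append(timestamp)
                results["index"].append(key + "=" + subkey)  # same "key=subkey" label used above
                results["count"].append(int(count))

    print(results["index"])
    # ['IP_src=10.0.0.1', 'IP_src=10.0.0.2', 'IP_src=10.0.0.1']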
traffic_taffy/hooks/ip2asn.py
CHANGED
@@ -5,18 +5,29 @@ import ip2asn
 from traffic_taffy.hooks import register_hook
 from traffic_taffy.dissector import POST_DISSECT_HOOK
 from traffic_taffy.dissection import Dissection
+from traffic_taffy.taffy_config import taffy_default, TaffyConfig

-
-error("The ip2asn plugin requires a ip2asn-combined.tsv in this directory")
-error("Please download it from https://iptoasn.com/")
+i2a = None

-
-i2a = ip2asn.IP2ASN("ip2asn-combined.tsv")
-info(" ... loaded")
+taffy_default("modules.ip2asn.database", "ip2asn-combined.tsv")


 @register_hook(POST_DISSECT_HOOK)
 def ip_to_asn(dissection: Dissection, **kwargs):
+    global i2a
+
+    if i2a is None:
+        config = TaffyConfig()
+        db_path = config.get_dotnest("modules.ip2asn.database")
+
+        if not Path(db_path).exists():
+            error("The ip2asn plugin requires a ip2asn-combined.tsv in this directory")
+            error("Please download it from https://iptoasn.com/")
+
+        info(f"loading {db_path}")
+        i2a = ip2asn.IP2ASN(db_path)
+        info(" ... loaded")
+
     timestamps = dissection.data.keys()

     for timestamp in timestamps:
@@ -33,10 +44,12 @@ def ip_to_asn(dissection: Dissection, **kwargs):
             for value in dissection.data[timestamp][key]:
                 count = dissection.data[timestamp][key][value]
                 details = None
+
+                # TODO(hardaker): doesn't handle bytes addresses from dpkt
                 try:
                     details = i2a.lookup_address(value)
                 except Exception:
-                    debug("failed to parse address: {value}")
+                    debug(f"failed to parse address: {value}")
                 if not details:
                     continue

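Because the hook now resolves its database path lazily from the shared configuration on first use, a caller can presumably point it elsewhere before any dissection runs. This is a hedged sketch only, and the path below is made up:

    from traffic_taffy.taffy_config import TaffyConfig

    config = TaffyConfig()  # singleton: the hook will see the same instance
    # hypothetical location; the default is "ip2asn-combined.tsv" in the current directory
    config.set_dotnest("modules.ip2asn.database", "/var/lib/ip2asn/ip2asn-combined.tsv")
    # a later dissection that triggers ip_to_asn() should then load this file instead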
traffic_taffy/hooks/labels.py
ADDED
@@ -0,0 +1,45 @@
+"""A traffic taffy module to split the last five labels and count them"""
+from traffic_taffy.hooks import register_hook
+from traffic_taffy.dissector import POST_DISSECT_HOOK
+from traffic_taffy.dissection import Dissection
+
+import dnssplitter
+
+splitter = dnssplitter.DNSSplitter()
+splitter.init_tree()
+
+
+@register_hook(POST_DISSECT_HOOK)
+def split_dns_names(dissection: Dissection, **kwargs):
+    """Split a DNS name into pieces and count the last 5 labels"""
+    timestamps = dissection.data.keys()
+
+    for timestamp in timestamps:
+        keys = list(dissection.data[timestamp].keys())
+
+        for key in keys:
+            key = str(key)
+            if (
+                key.endswith("_qname")
+                or key.endswith("_mname")
+                or key.endswith("_rrname")
+            ):
+                for value in dissection.data[timestamp][key]:
+                    count = dissection.data[timestamp][key][value]
+
+                    parts = value.split(".")
+                    if parts[-1] == "":
+                        parts = parts[:-1]  # drop the empty end "." split
+                    dissection.data[timestamp][key + "_tld"][parts[-1]] += count
+                    if len(parts) > 1:
+                        dissection.data[timestamp][key + "_sld"][parts[-2]] += count
+                    if len(parts) > 2:
+                        dissection.data[timestamp][key + "_3ld"][parts[-3]] += count
+                    if len(parts) > 3:
+                        dissection.data[timestamp][key + "_4ld"][
+                            parts[-4]
+                        ] += count
+                    if len(parts) > 4:
+                        dissection.data[timestamp][key + "_5ld"][
+                            parts[-5]
+                        ] += count
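The hook above counts the trailing labels of every *_qname/_mname/_rrname value into new per-level keys (_tld, _sld, _3ld, ...). A self-contained illustration of that bookkeeping with plain dictionaries (the real hook updates Dissection.data in place):

    from collections import defaultdict

    counts = {"dns_qname": {"www.example.com.": 4, "mail.example.co.uk.": 2}}
    derived = defaultdict(lambda: defaultdict(int))

    for name, count in counts["dns_qname"].items():
        parts = name.split(".")
        if parts[-1] == "":
            parts = parts[:-1]  # drop the empty label left by the trailing dot
        for depth, suffix in enumerate(["_tld", "_sld", "_3ld", "_4ld", "_5ld"], start=1):
            if len(parts) >= depth:
                derived["dns_qname" + suffix][parts[-depth]] += count

    print(dict(derived["dns_qname_tld"]))  # {'com': 4, 'uk': 2}
    print(dict(derived["dns_qname_sld"]))  # {'example': 4, 'co': 2}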
traffic_taffy/hooks/psl.py
CHANGED
@@ -1,15 +1,33 @@
+from logging import info
+import dnssplitter
+
 from traffic_taffy.hooks import register_hook
 from traffic_taffy.dissector import POST_DISSECT_HOOK
 from traffic_taffy.dissection import Dissection
+from traffic_taffy.taffy_config import taffy_default, TaffyConfig

-
+splitter = None

-
-splitter.init_tree()
+taffy_default("modules.psl.database", "__internal__")


 @register_hook(POST_DISSECT_HOOK)
 def split_dns_names(dissection: Dissection, **kwargs):
+    global splitter
+
+    if not splitter:
+        config = TaffyConfig()
+        splitter = dnssplitter.DNSSplitter()
+
+        path = config.get_dotnest("modules.psl.database")
+
+        if path == "__internal__":
+            splitter.init_tree()
+            info("loading PSL data from internal")
+        else:
+            info(f"loading PSL from {path}")
+            splitter.load_psl_file(path)
+
     timestamps = dissection.data.keys()

     for timestamp in timestamps:
traffic_taffy/iana/tables.msgpak
CHANGED
Binary file
traffic_taffy/output/__init__.py
CHANGED
@@ -33,11 +33,11 @@ class Output:
     def output_options(self, new_output_options: dict) -> None:
         self._output_options = new_output_options

-    def output(self,
-        """Dump a
-        if not
-
-        contents =
+    def output(self, comparison: Comparison | None = None) -> None:
+        """Dump a comparison to the output stream."""
+        if not comparison:
+            comparison = self.report
+        contents = comparison.contents

         first_of_anything: bool = True

@@ -72,19 +72,20 @@ class Output:

         # TODO(hardaker): we don't do match_value here?

+        sort_by = comparison.sort_by
         record_count = 0
         for subkey, data in sorted(
             contents[key].items(),
             key=lambda x: getattr(x[1], sort_by),
             reverse=sort_order,
         ):
-            if not
+            if not data.filter_check(self.output_options):
                 continue

             # print the header
             if not reported:
                 if first_of_anything:
-                    self.output_start(
+                    self.output_start(comparison, contents[key][subkey])
                     first_of_anything = False

                 self.output_new_section(key)
@@ -106,44 +107,3 @@ class Output:
     def output_close(self) -> None:
         """Close the output stream."""
         return
-
-    def filter_check(self, data: dict) -> bool:
-        """Return true if we should include it."""
-        delta: float = data.delta_percentage
-        total: int = data.total
-
-        if self.output_options["only_positive"] and delta <= 0:
-            return False
-
-        if self.output_options["only_negative"] and delta >= 0:
-            return False
-
-        if (
-            not self.output_options["print_threshold"]
-            and not self.output_options["minimum_count"]
-        ):
-            # always print
-            return True
-
-        if (
-            self.output_options["print_threshold"]
-            and not self.output_options["minimum_count"]
-        ):
-            # check output_options["print_threshold"] as a fraction
-            if abs(delta) > self.output_options["print_threshold"]:
-                return True
-        elif (
-            not self.output_options["print_threshold"]
-            and self.output_options["minimum_count"]
-        ):
-            # just check output_options["minimum_count"]
-            if total > self.output_options["minimum_count"]:
-                return True
-        elif (
-            total > self.output_options["minimum_count"]
-            and abs(delta) > self.output_options["print_threshold"]
-        ):
-            # require both
-            return True
-
-        return False
traffic_taffy/output/console.py
CHANGED
@@ -7,8 +7,11 @@ from rich.console import Console as RichConsole
 from traffic_taffy.output import Output
 from traffic_taffy.dissection import Dissection

+import dataclasses
+
 if TYPE_CHECKING:
     from traffic_taffy.comparison import Comparison
+    from traffic_taffy.reports import Report


 class Console(Output):
@@ -29,12 +32,12 @@ class Console(Output):
         if not self.console:
             self.console = RichConsole()

-    def output_start(self, report:
+    def output_start(self, comparison: Comparison, report: Report) -> None:
         """Print the header about columns being displayed."""
         # This should match the spacing in print_contents()
         self.init_console()

-        self.console.print(f"======== {
+        self.console.print(f"======== {comparison.title}")
         if self.have_done_header:
             return

@@ -43,17 +46,15 @@ class Console(Output):
         style = ""
         subkey = "Value"
         endstyle = ""
-        left_count = "Left"
-        right_count = "Right"
-        actual_delta = "Delta"

-
-        right_percent = "Right %"
-        percent_delta = "Delta-%"
+        field_values = {field.name: field.name for field in dataclasses.fields(report)}

-        line =
-
-
+        line = report.header_string.format(
+            style=style,
+            endstyle=endstyle,
+            subkey=subkey,
+            **field_values,
+        )

         self.console.print(line)

@@ -63,25 +64,36 @@ class Console(Output):

     def output_record(self, key: str, subkey: Any, data: Dict[str, Any]) -> None:
         """Print a report to the console."""
-        delta_percentage: float = data.delta_percentage

-        # apply some fancy styling
         style = ""
-
-
-
-
-
-
-
-
-
+        endstyle = ""
+        if getattr(data, "delta_percentage", None):
+            delta_percentage: float = data.delta_percentage
+
+            # apply some styling depending on range
+            if delta_percentage < -Console.BOLD_LIMIT:
+                style = "[bold red]"
+            elif delta_percentage < Console.POSITIVE:
+                style = "[red]"
+            elif delta_percentage > Console.BOLD_LIMIT:
+                style = "[bold green]"
+            elif delta_percentage > Console.POSITIVE:
+                style = "[green]"
+            endstyle = style.replace("[", "[/")

         # construct the output line with styling
         subkey = Dissection.make_printable(key, subkey)
-
-
-
+
+        field_values = {
+            field.name: getattr(data, field.name) for field in dataclasses.fields(data)
+        }
+
+        line = data.format_string.format(
+            style=style,
+            endstyle=endstyle,
+            subkey=subkey,
+            **field_values,
+        )

         # print it to the rich console
         self.console.print(line)
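With this change the console no longer hard-codes its columns: each report dataclass supplies header_string/format_string templates and dataclasses.fields() supplies the values. A self-contained sketch of that rendering pattern (toy dataclass and numbers, not the real Report API):

    import dataclasses

    @dataclasses.dataclass
    class ToyReport:
        left_count: int
        right_count: int
        delta_percentage: float

        @property
        def format_string(self) -> str:
            return " {style}{subkey:<20}{endstyle} {left_count:>8} {right_count:>8} {delta_percentage:>7.2f}"

    row = ToyReport(left_count=100, right_count=125, delta_percentage=12.5)
    field_values = {f.name: getattr(row, f.name) for f in dataclasses.fields(row)}
    line = row.format_string.format(style="", endstyle="", subkey="IP_src=10.0.0.1", **field_values)
    print(line)  # one aligned row, mirroring what Console.output_record() sends to rich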
traffic_taffy/output/fsdb.py
CHANGED
@@ -6,6 +6,9 @@ from typing import Any
 from traffic_taffy.output import Output
 from traffic_taffy.dissection import Dissection
 from traffic_taffy.comparison import Comparison
+from traffic_taffy.reports import Report
+
+import dataclasses


 class Fsdb(Output):
@@ -17,39 +20,42 @@ class Fsdb(Output):
         self.console = None
         self.have_done_header = False
         self.in_report = None
+        self.fsdb = None
+
+    def init_fsdb(self, firstreport):
+        self.fields = dataclasses.fields(firstreport)
+        self.columns = []
+        self.converters = []
+
+        for field in self.fields:
+            self.columns.append(field.name)
+            self.converters.append(field.type)

         self.fsdb = pyfsdb.Fsdb(out_file_handle=sys.stdout)
+
         self.fsdb.out_column_names = [
             "report",
-            "
+            "key",
             "subkey",
-
-
-
-
-            "right_fraction",
-            "delta_fraction",
-        ]
-        self.fsdb.converters = [str, str, str, int, int, int, float, float, float]
-
-    def output_start(self, report: Comparison) -> None:
+        ] + self.columns
+        self.fsdb.converters = [str, str, str] + self.converters
+
+    def output_start(self, comparison: Comparison, report: Report) -> None:
         """Print the header about columns being displayed."""
         # This should match the spacing in print_contents()
-        self.in_report =
+        self.in_report = comparison.title

     def output_record(self, key: str, subkey: Any, data: dict) -> None:
         """Print a report to the console."""
+        if self.fsdb is None:
+            self.init_fsdb(data)
+
         subkey = Dissection.make_printable(key, subkey)
         self.fsdb.append(
             [
                 self.in_report,
                 key,
                 subkey,
-                data.left_count,
-                data.right_count,
-                data.delta_absolute,
-                data.left_percentage,
-                data.right_percentage,
-                data.delta_percentage,
             ]
+            + [getattr(data, field.name) for field in dataclasses.fields(data)]
         )
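The FSDB writer likewise derives its column names and converters from whatever report dataclass it receives first; a short illustration of that derivation (again with a toy dataclass, not the real Report):

    import dataclasses

    @dataclasses.dataclass
    class ToyReport:
        left_count: int
        right_count: int
        delta_percentage: float

    columns = ["report", "key", "subkey"] + [f.name for f in dataclasses.fields(ToyReport)]
    converters = [str, str, str] + [f.type for f in dataclasses.fields(ToyReport)]
    print(columns)  # ['report', 'key', 'subkey', 'left_count', 'right_count', 'delta_percentage']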
traffic_taffy/reports/compareslicesreport.py
ADDED
@@ -0,0 +1,85 @@
+"""Report for storing a comparison between two different dissections."""
+
+from __future__ import annotations
+from typing import Dict
+from dataclasses import dataclass
+from traffic_taffy.reports import Report
+
+
+@dataclass
+class CompareSlicesReport(Report):
+    """Report for storing a comparison between two different dissections."""
+
+    delta_percentage: float
+    delta_absolute: int
+    total: int
+    left_count: int
+    right_count: int
+    left_percentage: float
+    right_percentage: float
+
+    @property
+    def formatting(self) -> Dict[str, str]:
+        """Formatting field recommendations."""
+        return {
+            "delta_percentage": ">7.2f",
+            "delta_absolute": ">8",
+            "total": None,
+            "left_count": ">8",
+            "right_count": ">8",
+            "left_percentage": ">7.2f",
+            "right_percentage": ">7.2f",
+        }
+
+    @property
+    def header_string(self) -> str:
+        """Header string."""
+        line = " {style}{subkey:<50}{endstyle}"
+        line += " {left_count:>8} {right_count:>8} {delta_absolute:>8}"
+        line += " {left_percentage:>7} {right_percentage:>7} {delta_percentage:>7}"
+
+        return line
+
+    @property
+    def format_string(self) -> str:
+        """Formatting string for each printed line."""
+        line = " {style}{subkey:<50}{endstyle}"
+        line += " {left_count:>8} {right_count:>8} {delta_absolute:>8}"
+        line += " {left_percentage:>7.2f} {right_percentage:>7.2f} {delta_percentage:>7.2f}"
+
+        return line
+
+    def filter_check(self, output_options: dict) -> bool:
+        """Return true if we should include it."""
+        delta: float = self.delta_percentage
+        total: int = self.total
+
+        if output_options["only_positive"] and delta <= 0:
+            return False
+
+        if output_options["only_negative"] and delta >= 0:
+            return False
+
+        if (
+            not output_options["print_threshold"]
+            and not output_options["minimum_count"]
+        ):
+            # always print
+            return True
+
+        if output_options["print_threshold"] and not output_options["minimum_count"]:
+            # check output_options["print_threshold"] as a fraction
+            if abs(delta) > output_options["print_threshold"]:
+                return True
+        elif not output_options["print_threshold"] and output_options["minimum_count"]:
+            # just check output_options["minimum_count"]
+            if total > output_options["minimum_count"]:
+                return True
+        elif (
+            total > output_options["minimum_count"]
+            and abs(delta) > output_options["print_threshold"]
+        ):
+            # require both
+            return True
+
+        return False
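A hypothetical usage of the new report class and its filter_check() contract, assuming the Report base class adds no required fields of its own (the numbers are made-up samples):

    from traffic_taffy.reports.compareslicesreport import CompareSlicesReport

    report = CompareSlicesReport(
        delta_percentage=12.5, delta_absolute=25, total=225,
        left_count=100, right_count=125,
        left_percentage=40.0, right_percentage=45.5,
    )

    options = {"only_positive": False, "only_negative": False,
               "print_threshold": 5.0, "minimum_count": None}
    print(report.filter_check(options))  # True: |12.5| exceeds the 5.0 threshold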
traffic_taffy/reports/correlationchangereport.py
ADDED
@@ -0,0 +1,54 @@
+"""Report for storing correlations between signals."""
+
+from __future__ import annotations
+from typing import Dict
+from dataclasses import dataclass
+from traffic_taffy.reports import Report
+
+
+@dataclass
+class CorrelationChangeReport(Report):
+    """Report for storing correlations between signals."""
+
+    left_correlation: float
+    right_correlation: float
+    delta_correlation: float
+    timestamp: int
+
+    @property
+    def formatting(self) -> Dict[str, str]:
+        """Formatting field recommendations."""
+        return {
+            "timestamp": ">7",
+            "left_correlation": ">7.2f",
+            "right_correlation": ">7.2f",
+            "delta_correlation": ">7.2f",
+        }
+
+    @property
+    def header_string(self) -> str:
+        """Formatting string for each printed line."""
+        line = " {style}{subkey:<50}{endstyle}"
+        line += " {timestamp:>10}"
+        line += " {left_correlation:>17}"
+        line += " {right_correlation:>17}"
+        line += " {delta_correlation:>17}"
+
+        return line
+
+    @property
+    def format_string(self) -> str:
+        """Formatting string for each printed line."""
+        line = " {style}{subkey:<50}{endstyle}"
+        line += " {timestamp:>10}"
+        line += " {left_correlation:>17.2f}"
+        line += " {right_correlation:>17.2f}"
+        line += " {delta_correlation:>17.2f}"
+
+        return line
+
+    def filter_check(self, output_options: dict) -> bool:
+        """Return true if we should include it
+
+        (which is always as we pre-filter for correlations)."""
+        return True
traffic_taffy/reports/correlationreport.py
ADDED
@@ -0,0 +1,42 @@
+"""Report for storing correlations between signals."""
+
+from __future__ import annotations
+from typing import Dict
+from dataclasses import dataclass
+from traffic_taffy.reports import Report
+
+
+@dataclass
+class CorrelationReport(Report):
+    """Report for storing correlations between signals."""
+
+    correlation: float
+
+    @property
+    def formatting(self) -> Dict[str, str]:
+        """Formatting field recommendations."""
+        return {
+            "correlation": ">7.2f",
+        }
+
+    @property
+    def header_string(self) -> str:
+        """Formatting string for each printed line."""
+        line = " {style}{subkey:<50}{endstyle}"
+        line += " {correlation:>11}"
+
+        return line
+
+    @property
+    def format_string(self) -> str:
+        """Formatting string for each printed line."""
+        line = " {style}{subkey:<50}{endstyle}"
+        line += " {correlation:>11.2f}"
+
+        return line
+
+    def filter_check(self, output_options: dict) -> bool:
+        """Return true if we should include it
+
+        (which is always as we pre-filter for correlations)."""
+        return True
traffic_taffy/taffy_config.py
ADDED
@@ -0,0 +1,44 @@
+"""A global configuration storage class that can be easily accessed."""
+
+from typing import Any
+
+from traffic_taffy.config import Config
+
+
+class TT_CFG:
+    LOG_LEVEL: str = "log_level"
+    CACHE_RESULTS: str = "cache_results"
+
+
+taffy_global_defaults = {"log_level": "info"}
+
+
+class TaffyConfig(object):
+    """A global configuration storage class that can be easily accessed."""
+
+    _instance = None
+
+    def __new__(class_obj, *args, **kwargs):
+        if not isinstance(class_obj._instance, Config):
+            class_obj._instance = Config(*args, **kwargs)
+            class_obj._instance.update(taffy_global_defaults)
+        return class_obj._instance
+
+
+def taffy_default(parameter: str, value: Any) -> bool:
+    """"""
+    config = TaffyConfig()
+    try:
+        value = (
+            config.get_dotnest(parameter, return_none=False) is not None
+        )  # ignore any value
+    except ValueError:
+        # a value doesn't exist, so create it
+        config.set_dotnest(parameter, value)
+        return True
+
+    if value is None:
+        config.set_dotnest(parameter, value)
+        return True
+
+    return False
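Taken together, TaffyConfig acts as a process-wide singleton wrapping a Config object, and taffy_default() only fills in keys that are not already set. A short usage sketch based on the code above; it assumes get_dotnest() raises ValueError for unset keys (as the except clause implies), and the key name is illustrative:

    from traffic_taffy.taffy_config import TaffyConfig, taffy_default

    a = TaffyConfig()
    b = TaffyConfig()
    print(a is b)  # True: every call returns the same underlying Config instance

    taffy_default("modules.example.option", "builtin")   # unset, so this stores "builtin"
    taffy_default("modules.example.option", "override")  # already set, so this is a no-op
    print(a.get_dotnest("modules.example.option"))       # "builtin"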