metripy 0.2.7__py3-none-any.whl → 0.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- metripy/Application/Analyzer.py +23 -3
- metripy/Application/Application.py +16 -2
- metripy/Application/Config/Config.py +34 -0
- metripy/Application/Config/File/ConfigFileReaderFactory.py +6 -5
- metripy/Application/Config/File/ConfigFileReaderInterface.py +70 -3
- metripy/Application/Config/File/JsonConfigFileReader.py +5 -70
- metripy/Application/Config/File/YamlConfigFileReader.py +17 -0
- metripy/Application/Config/Parser.py +24 -11
- metripy/Application/Config/ProjectConfig.py +64 -0
- metripy/Application/Info.py +61 -0
- metripy/Dependency/Dependency.py +17 -1
- metripy/Dependency/Pip/Pip.py +21 -31
- metripy/Dependency/Pip/PyPi.py +1 -0
- metripy/Git/GitAnalyzer.py +0 -3
- metripy/Import/Json/JsonImporter.py +17 -0
- metripy/LangAnalyzer/AbstractLangAnalyzer.py +4 -3
- metripy/LangAnalyzer/Php/PhpAnalyzer.py +2 -1
- metripy/LangAnalyzer/Python/PythonAnalyzer.py +31 -9
- metripy/LangAnalyzer/Python/PythonHalSteadAnalyzer.py +55 -0
- metripy/LangAnalyzer/Typescript/TypescriptAnalyzer.py +12 -9
- metripy/LangAnalyzer/Typescript/TypescriptAstParser.py +1 -1
- metripy/Metric/Code/AggregatedMetrics.py +12 -5
- metripy/Metric/Code/FileMetrics.py +32 -1
- metripy/Metric/Code/ModuleMetrics.py +5 -5
- metripy/Metric/Code/SegmentedMetrics.py +72 -36
- metripy/Metric/Code/Segmentor.py +44 -0
- metripy/Metric/FileTree/FileTreeParser.py +0 -4
- metripy/Metric/Git/GitMetrics.py +1 -1
- metripy/Metric/ProjectMetrics.py +29 -0
- metripy/Metric/Trend/AggregatedTrendMetric.py +101 -0
- metripy/Metric/Trend/ClassTrendMetric.py +20 -0
- metripy/Metric/Trend/FileTrendMetric.py +46 -0
- metripy/Metric/Trend/FunctionTrendMetric.py +28 -0
- metripy/Metric/Trend/SegmentedTrendMetric.py +29 -0
- metripy/Report/Html/DependencyPageRenderer.py +21 -0
- metripy/Report/Html/FilesPageRenderer.py +28 -0
- metripy/Report/Html/GitAnalysisPageRenderer.py +55 -0
- metripy/Report/Html/IndexPageRenderer.py +47 -0
- metripy/Report/Html/PageRenderer.py +43 -0
- metripy/Report/Html/PageRendererFactory.py +37 -0
- metripy/Report/Html/Reporter.py +78 -137
- metripy/Report/Html/TopOffendersPageRenderer.py +84 -0
- metripy/Report/Html/TrendsPageRenderer.py +137 -0
- metripy/Report/Json/GitJsonReporter.py +3 -0
- metripy/Report/Json/JsonReporter.py +6 -2
- metripy/Report/ReporterFactory.py +6 -3
- metripy/Tree/ClassNode.py +21 -0
- metripy/Tree/FunctionNode.py +66 -1
- metripy/Trend/TrendAnalyzer.py +150 -0
- metripy/templates/html_report/css/styles.css +1386 -0
- metripy/templates/html_report/dependencies.html +411 -0
- metripy/templates/html_report/files.html +1080 -0
- metripy/templates/html_report/git_analysis.html +325 -0
- metripy/templates/html_report/images/logo.svg +31 -0
- metripy/templates/html_report/index.html +374 -0
- metripy/templates/html_report/js/charts.js +313 -0
- metripy/templates/html_report/js/dashboard.js +546 -0
- metripy/templates/html_report/js/git_analysis.js +383 -0
- metripy/templates/html_report/top_offenders.html +267 -0
- metripy/templates/html_report/trends.html +468 -0
- {metripy-0.2.7.dist-info → metripy-0.3.6.dist-info}/METADATA +27 -9
- metripy-0.3.6.dist-info/RECORD +96 -0
- {metripy-0.2.7.dist-info → metripy-0.3.6.dist-info}/licenses/LICENSE +1 -1
- metripy-0.2.7.dist-info/RECORD +0 -66
- {metripy-0.2.7.dist-info → metripy-0.3.6.dist-info}/WHEEL +0 -0
- {metripy-0.2.7.dist-info → metripy-0.3.6.dist-info}/entry_points.txt +0 -0
- {metripy-0.2.7.dist-info → metripy-0.3.6.dist-info}/top_level.txt +0 -0
metripy/Import/Json/JsonImporter.py ADDED

@@ -0,0 +1,17 @@
+import json
+
+from metripy.Component.Output.CliOutput import CliOutput
+from metripy.Metric.ProjectMetrics import ProjectMetrics
+
+
+class JsonImporter:
+    def __init__(self, output: CliOutput):
+        self.output = output
+
+    def import_data(self, path: str) -> ProjectMetrics:
+        self.output.writeln(f"<info>Importing data from {path}...</info>")
+        with open(path, "r") as file:
+            data = json.load(file)
+        project_metrics = ProjectMetrics.from_dict(data)
+        self.output.writeln("<success>Data imported successfuly</success>")
+        return project_metrics
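The importer pairs with the new ProjectMetrics.from_dict shown further down: it loads the JSON written by the JSON reporter and rebuilds the metrics object. A minimal usage sketch, assuming CliOutput can be constructed without arguments (only its writeln method appears in this diff) and using an illustrative file name:

from metripy.Component.Output.CliOutput import CliOutput
from metripy.Import.Json.JsonImporter import JsonImporter

# Assumption: CliOutput() takes no required arguments; only writeln() is visible in this diff.
importer = JsonImporter(output=CliOutput())
project_metrics = importer.import_data("metripy-report.json")  # illustrative path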
metripy/LangAnalyzer/AbstractLangAnalyzer.py CHANGED

@@ -40,13 +40,13 @@ class AbstractLangAnalyzer(ABC):
         full_name = module.full_name

         if len(module.functions) > 0:
-            avgCcPerFunction = sum(
-                function.complexity for function in module.functions
-            ) / len(module.functions)
+            totalCc = sum(function.complexity for function in module.functions)
+            avgCcPerFunction = totalCc / len(module.functions)
             avgLocPerFunction = (
                 module.lloc - module.comments - len(module.functions)
             ) / len(module.functions)
         else:
+            totalCc = 0
             avgCcPerFunction = 0
             avgLocPerFunction = 0
         maintainabilityIndex = module.maintainability_index

@@ -54,6 +54,7 @@ class AbstractLangAnalyzer(ABC):
         file_metric = FileMetrics(
             full_name=full_name,
             loc=module.loc,
+            totalCc=totalCc,
             avgCcPerFunction=avgCcPerFunction,
             maintainabilityIndex=maintainabilityIndex,
             avgLocPerFunction=avgLocPerFunction,
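AbstractLangAnalyzer now records the raw complexity sum (totalCc) next to the average it already computed; a small worked example of the arithmetic, with illustrative values:

# Illustrative values, not taken from the package.
complexities = [1, 3, 7]                        # cyclomatic complexity per function
totalCc = sum(complexities)                     # 11
avgCcPerFunction = totalCc / len(complexities)  # 3.67 after the 2-decimal rounding used in reports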
metripy/LangAnalyzer/Php/PhpAnalyzer.py CHANGED

@@ -131,7 +131,7 @@ class PhpAnalyzer(AbstractLangAnalyzer):

         code_lines = code.split("\n")
         for func_name, function_node in functions.items():
-            lines = code_lines[function_node.lineno:function_node.line_end]
+            lines = code_lines[function_node.lineno : function_node.line_end]
             function_metrics = self.halstead_analyzer.calculate_halstead_metrics(
                 "\n".join(lines)
             )

@@ -147,6 +147,7 @@ class PhpAnalyzer(AbstractLangAnalyzer):
             function_node.calculated_length = function_metrics["calculated_length"]
             function_node.bugs = function_metrics["bugs"]
             function_node.time = function_metrics["time"]
+            function_node.calc_mi()

         maintainability_index = self._calculate_maintainability_index(
             functions.values(), module_node
metripy/LangAnalyzer/Python/PythonAnalyzer.py CHANGED

@@ -5,6 +5,7 @@ from radon.visitors import Class, Function

 from metripy.Component.Output.ProgressBar import ProgressBar
 from metripy.LangAnalyzer.AbstractLangAnalyzer import AbstractLangAnalyzer
+from metripy.LangAnalyzer.Python.PythonHalSteadAnalyzer import PythonHalSteadAnalyzer
 from metripy.Metric.Code.FileMetrics import FileMetrics
 from metripy.Tree.ClassNode import ClassNode
 from metripy.Tree.FunctionNode import FunctionNode

@@ -15,6 +16,7 @@ class PythonAnalyzer(AbstractLangAnalyzer):

     def __init__(self):
         super().__init__()
+        self.fallback_halstead_analyzer = PythonHalSteadAnalyzer()

     def get_lang_name(self) -> str:
         return "Python"

@@ -62,6 +64,7 @@ class PythonAnalyzer(AbstractLangAnalyzer):
                 function_node = FunctionNode(
                     full_name, item.name, item.lineno, item.col_offset, item.complexity
                 )
+                function_node.line_end = item.endline
                 if item.is_method:
                     class_node = classes.get(full_class_name)
                     if class_node is not None:

@@ -74,11 +77,6 @@ class PythonAnalyzer(AbstractLangAnalyzer):
             else:
                 raise ValueError(f"Unknown item type: {type(item)}")

-        # print("--------------------------------")
-        # print(json.dumps([c.__dict__() for c in classes.values()], indent=4))
-        # print("--------------------------------")
-        # print(json.dumps([f.__dict__() for f in functions.values()], indent=4))
-        # exit()
         module = analyze(code)
         full_name = self.full_name(filename)
         module_node = ModuleNode(

@@ -93,12 +91,9 @@ class PythonAnalyzer(AbstractLangAnalyzer):
         )
         module_node.classes.extend(classes.values())
         module_node.functions.extend(functions.values())
-
-        # print(json.dumps([m.to_dict() for m in modules.values()], indent=4))
-        # exit()
+
         h = h_visit(code)
         assert isinstance(h, Halstead)
-        # print(h.total)
         function_name: str
         report: HalsteadReport
         for function_name, report in h.functions:

@@ -117,9 +112,36 @@ class PythonAnalyzer(AbstractLangAnalyzer):
                 function_node.effort = report.effort
                 function_node.bugs = report.bugs
                 function_node.time = report.time
+                function_node.calc_mi()
             else:
                 raise ValueError(f"Function node not found for function {full_name}")

+        code_lines = code.split("\n")
+        for func_name, function_node in functions.items():
+            if function_node.maintainability_index != 0:
+                continue
+            # if MI is 0, we want to take another look, radon does not like boring functions
+
+            lines = code_lines[function_node.lineno : function_node.line_end]
+            function_metrics = (
+                self.fallback_halstead_analyzer.calculate_halstead_metrics(
+                    "\n".join(lines)
+                )
+            )
+            function_node.h1 = function_metrics["n1"]
+            function_node.h2 = function_metrics["n2"]
+            function_node.N1 = function_metrics["N1"]
+            function_node.N2 = function_metrics["N2"]
+            function_node.vocabulary = function_metrics["vocabulary"]
+            function_node.length = function_metrics["length"]
+            function_node.volume = function_metrics["volume"]
+            function_node.difficulty = function_metrics["difficulty"]
+            function_node.effort = function_metrics["effort"]
+            function_node.calculated_length = function_metrics["calculated_length"]
+            function_node.bugs = function_metrics["bugs"]
+            function_node.time = function_metrics["time"]
+            function_node.calc_mi()
+
         maintainability_index = mi_visit(code, True)
         module_node.maintainability_index = maintainability_index

metripy/LangAnalyzer/Python/PythonHalSteadAnalyzer.py ADDED

@@ -0,0 +1,55 @@
+from metripy.LangAnalyzer.Generic.HalSteadAnalyzer import HalSteadAnalyzer
+
+
+class PythonHalSteadAnalyzer:
+    def __init__(self):
+        self.operators = [
+            "+",
+            "-",
+            "*",
+            "/",
+            "//",
+            "%",
+            "**",
+            "==",
+            "!=",
+            ">",
+            "<",
+            ">=",
+            "<=",
+            "=",
+            "+=",
+            "-=",
+            "*=",
+            "/=",
+            "%=",
+            "//=",
+            "**=",
+            "and",
+            "or",
+            "not",
+            "&",
+            "|",
+            "^",
+            "~",
+            "<<",
+            ">>",
+            "in",
+            "not in",
+            "is",
+            "is not",
+            ":",
+            ",",
+            ".",
+            "(",
+            ")",
+            "[",
+            "]",
+            "{",
+            "}",
+        ]
+
+        self.analyzer = HalSteadAnalyzer(self.operators)
+
+    def calculate_halstead_metrics(self, code: str):
+        return self.analyzer.calculate_halstead_metrics(code)
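The new class only supplies a Python operator table to the generic HalSteadAnalyzer and forwards the call. A rough usage sketch; the returned dictionary keys (n1, n2, N1, N2, volume, difficulty, and so on) are inferred from how PythonAnalyzer consumes the result above:

from metripy.LangAnalyzer.Python.PythonHalSteadAnalyzer import PythonHalSteadAnalyzer

analyzer = PythonHalSteadAnalyzer()
# Keys assumed from the fallback block in PythonAnalyzer above (n1, n2, N1, N2, volume, ...).
metrics = analyzer.calculate_halstead_metrics("def add(a, b):\n    return a + b\n")
print(metrics["volume"], metrics["difficulty"])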
metripy/LangAnalyzer/Typescript/TypescriptAnalyzer.py CHANGED

@@ -4,14 +4,16 @@ import lizard

 from metripy.Component.Output.ProgressBar import ProgressBar
 from metripy.LangAnalyzer.AbstractLangAnalyzer import AbstractLangAnalyzer
-from metripy.LangAnalyzer.Typescript.TypescriptAstParser import
-
-
-
-from metripy.LangAnalyzer.Typescript.TypescriptBasicLocAnalyzer import
-    TypescriptBasicLocAnalyzer
-
-
+from metripy.LangAnalyzer.Typescript.TypescriptAstParser import TypescriptAstParser
+from metripy.LangAnalyzer.Typescript.TypescriptBasicComplexityAnalyzer import (
+    TypescriptBasicComplexityAnalzyer,
+)
+from metripy.LangAnalyzer.Typescript.TypescriptBasicLocAnalyzer import (
+    TypescriptBasicLocAnalyzer,
+)
+from metripy.LangAnalyzer.Typescript.TypescriptHalSteadAnalyzer import (
+    TypeScriptHalSteadAnalyzer,
+)
 from metripy.Tree.ClassNode import ClassNode
 from metripy.Tree.FunctionNode import FunctionNode
 from metripy.Tree.ModuleNode import ModuleNode

@@ -146,7 +148,7 @@ class TypescriptAnalyzer(AbstractLangAnalyzer):

         code_lines = code.split("\n")
         for func_name, function_node in functions.items():
-            lines = code_lines[function_node.lineno:function_node.line_end]
+            lines = code_lines[function_node.lineno : function_node.line_end]
             function_metrics = self.halstead_analyzer.calculate_halstead_metrics(
                 "\n".join(lines)
             )

@@ -162,6 +164,7 @@ class TypescriptAnalyzer(AbstractLangAnalyzer):
             function_node.calculated_length = function_metrics["calculated_length"]
             function_node.bugs = function_metrics["bugs"]
             function_node.time = function_metrics["time"]
+            function_node.calc_mi()

         maintainability_index = self._calculate_maintainability_index(
             functions.values(), module_node

metripy/LangAnalyzer/Typescript/TypescriptAstParser.py CHANGED

@@ -8,7 +8,7 @@ class TypescriptAstParser:
         self.parser = get_parser("typescript")

     def _get_node_text(self, code: str, node) -> str:
-        return code[node.start_byte:node.end_byte].decode("utf-8")
+        return code[node.start_byte : node.end_byte].decode("utf-8")

     def extract_structure(self, code: str) -> dict:
         tree = self.parser.parse(bytes(code, "utf8"))
metripy/Metric/Code/AggregatedMetrics.py CHANGED

@@ -1,4 +1,5 @@
 from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics
+from metripy.Metric.Trend.AggregatedTrendMetric import AggregatedTrendMetric


 class AggregatedMetrics:

@@ -29,13 +30,19 @@ class AggregatedMetrics:
             "methodSize": segmented_method_size,
         }

+        self.trend: AggregatedTrendMetric | None = None
+
     def to_dict(self) -> dict:
         return {
-            "loc":
-            "avgCcPerFunction":
-            "maintainabilityIndex":
-            "avgLocPerFunction":
-            "num_files":
+            "loc": self.loc,
+            "avgCcPerFunction": round(self.avgCcPerFunction, 2),
+            "maintainabilityIndex": round(self.maintainabilityIndex, 2),
+            "avgLocPerFunction": round(self.avgLocPerFunction, 2),
+            "num_files": self.num_files,
+            "trend": self.trend.to_dict() if self.trend else None,
+            "trend_segmentation": (
+                self.trend.to_dict_segmentation() if self.trend else None
+            ),
         }

     def to_dict_segmentation(self) -> dict:
metripy/Metric/Code/FileMetrics.py CHANGED

@@ -1,3 +1,7 @@
+from typing import Self
+
+from metripy.Metric.Code.Segmentor import Segmentor
+from metripy.Metric.Trend.FileTrendMetric import FileTrendMetric
 from metripy.Tree.ClassNode import ClassNode
 from metripy.Tree.FunctionNode import FunctionNode


@@ -7,6 +11,7 @@ class FileMetrics:
         self,
         full_name: str,
         loc: int,
+        totalCc: int,
         avgCcPerFunction: float,
         maintainabilityIndex: float,
         avgLocPerFunction: float,

@@ -15,19 +20,45 @@ class FileMetrics:
     ):
         self.full_name = full_name
         self.loc = loc
+        self.totalCc = totalCc
         self.avgCcPerFunction = avgCcPerFunction
         self.maintainabilityIndex = maintainabilityIndex
         self.avgLocPerFunction = avgLocPerFunction
         self.class_nodes = class_nodes
         self.function_nodes = function_nodes
+        self.trend: FileTrendMetric | None = None

     def to_dict(self) -> dict:
         return {
             "full_name": self.full_name,
             "loc": self.loc,
+            "loc_segment": Segmentor.get_loc_segment(self.loc),
+            "totalCc": self.totalCc,
+            "complexity_segment": Segmentor.get_complexity_segment(
+                self.avgCcPerFunction
+            ),
             "avgCcPerFunction": self.avgCcPerFunction,
-            "maintainabilityIndex":
+            "maintainabilityIndex": round(self.maintainabilityIndex, 2),
+            "maintainability_segment": Segmentor.get_maintainability_segment(
+                self.maintainabilityIndex
+            ),
             "avgLocPerFunction": self.avgLocPerFunction,
+            "method_size_segment": Segmentor.get_method_size_segment(
+                self.avgLocPerFunction
+            ),
             "class_nodes": [node.to_dict() for node in self.class_nodes],
             "function_nodes": [node.to_dict() for node in self.function_nodes],
         }
+
+    @staticmethod
+    def from_dict(data: dict) -> Self:
+        return FileMetrics(
+            full_name=data["full_name"],
+            loc=data["loc"],
+            totalCc=data["totalCc"],
+            avgCcPerFunction=data["avgCcPerFunction"],
+            maintainabilityIndex=data["maintainabilityIndex"],
+            avgLocPerFunction=data["avgLocPerFunction"],
+            class_nodes=[ClassNode.from_dict(d) for d in data["class_nodes"]],
+            function_nodes=[FunctionNode.from_dict(d) for d in data["function_nodes"]],
+        )
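to_dict now emits a segment label next to each metric, and from_dict restores only the constructor fields (segments are recomputed, trend starts empty). A round-trip sketch with illustrative values; ClassNode.from_dict and FunctionNode.from_dict are referenced by the hunk but not shown here:

from metripy.Metric.Code.FileMetrics import FileMetrics

fm = FileMetrics(
    full_name="metripy/Application/Analyzer.py",  # illustrative
    loc=180,
    totalCc=24,
    avgCcPerFunction=3.0,
    maintainabilityIndex=72.4,
    avgLocPerFunction=12.5,
    class_nodes=[],
    function_nodes=[],
)
restored = FileMetrics.from_dict(fm.to_dict())
assert restored.loc == 180 and restored.trend is None
assert fm.to_dict()["maintainability_segment"] == "ok"  # 72.4 falls in the 60-80 band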
metripy/Metric/Code/ModuleMetrics.py CHANGED

@@ -22,11 +22,11 @@ class ModuleMetrics:

     def to_dict(self) -> dict:
         return {
-            "loc":
-            "avgCcPerFunction":
-            "maintainabilityIndex":
-            "avgLocPerFunction":
-            "num_files":
+            "loc": self.loc,
+            "avgCcPerFunction": round(self.avgCcPerFunction, 2),
+            "maintainabilityIndex": round(self.maintainabilityIndex, 2),
+            "avgLocPerFunction": round(self.avgLocPerFunction, 2),
+            "num_files": self.num_files,
             "class_nodes": [node.to_dict() for node in self.class_nodes],
             "function_nodes": [node.to_dict() for node in self.function_nodes],
         }
metripy/Metric/Code/SegmentedMetrics.py CHANGED

@@ -1,5 +1,7 @@
 from typing import Self

+from metripy.Metric.Code.Segmentor import Segmentor
+

 class SegmentedMetrics:
     def __init__(self):

@@ -16,50 +18,84 @@ class SegmentedMetrics:
             "critical": self.critical,
         }

+    def to_dict_with_percent(self) -> dict:
+        return {
+            "good": self.good,
+            "good_percent": round(
+                self.good / (self.good + self.ok + self.warning + self.critical) * 100,
+                2,
+            ),
+            "ok": self.ok,
+            "ok_percent": round(
+                self.ok / (self.good + self.ok + self.warning + self.critical) * 100, 2
+            ),
+            "warning": self.warning,
+            "warning_percent": round(
+                self.warning
+                / (self.good + self.ok + self.warning + self.critical)
+                * 100,
+                2,
+            ),
+            "critical": self.critical,
+            "critical_percent": round(
+                self.critical
+                / (self.good + self.ok + self.warning + self.critical)
+                * 100,
+                2,
+            ),
+        }
+
+    def _set_values(self, values: dict[str, int]) -> Self:
+        self.good = values["good"]
+        self.ok = values["ok"]
+        self.warning = values["warning"]
+        self.critical = values["critical"]
+        return self
+
     def set_loc(self, values: list[int]) -> Self:
+        d = {
+            "good": self.good,
+            "ok": self.ok,
+            "warning": self.warning,
+            "critical": self.critical,
+        }
         for value in values:
-
-
-
-                self.ok += 1
-            elif value <= 1000:
-                self.warning += 1
-            else:
-                self.critical += 1
-        return self
+            d[Segmentor.get_loc_segment(value)] += 1
+
+        return self._set_values(d)

     def set_complexity(self, values: list[int]) -> Self:
+        d = {
+            "good": self.good,
+            "ok": self.ok,
+            "warning": self.warning,
+            "critical": self.critical,
+        }
         for value in values:
-
-
-
-                self.ok += 1
-            elif value <= 20:
-                self.warning += 1
-            else:
-                self.critical += 1
-        return self
+            d[Segmentor.get_complexity_segment(value)] += 1
+
+        return self._set_values(d)

     def set_maintainability(self, values: list[int]) -> Self:
+        d = {
+            "good": self.good,
+            "ok": self.ok,
+            "warning": self.warning,
+            "critical": self.critical,
+        }
         for value in values:
-
-
-
-                self.warning += 1
-            elif value <= 40:
-                self.ok += 1
-            else:
-                self.good += 1
-        return self
+            d[Segmentor.get_maintainability_segment(value)] += 1
+
+        return self._set_values(d)

     def set_method_size(self, values: list[int]) -> Self:
+        d = {
+            "good": self.good,
+            "ok": self.ok,
+            "warning": self.warning,
+            "critical": self.critical,
+        }
         for value in values:
-
-
-
-                self.ok += 1
-            elif value <= 50:
-                self.warning += 1
-            else:
-                self.critical += 1
-        return self
+            d[Segmentor.get_method_size_segment(value)] += 1
+
+        return self._set_values(d)
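With the thresholds moved into Segmentor (the new file below), the four set_* methods now just bucket each value, and to_dict_with_percent adds the share of each bucket. A sketch of the counting path, assuming the constructor (not shown in this hunk) initialises the four counters to zero; note the percentages divide by the bucket total, so at least one value must have been recorded:

from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics

# Assumes __init__ starts good/ok/warning/critical at 0, which this hunk does not show.
seg = SegmentedMetrics().set_loc([120, 450, 800, 1500])  # one file per bucket
print(seg.to_dict_with_percent())
# Expected: each bucket holds 1 with a 25.0 percent share.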
metripy/Metric/Code/Segmentor.py ADDED

@@ -0,0 +1,44 @@
+class Segmentor:
+    @staticmethod
+    def get_loc_segment(loc: int) -> str:
+        if loc <= 200:
+            return "good"
+        elif loc <= 500:
+            return "ok"
+        elif loc <= 1000:
+            return "warning"
+        else:
+            return "critical"
+
+    @staticmethod
+    def get_complexity_segment(complexity: float) -> str:
+        if complexity <= 5:
+            return "good"
+        elif complexity <= 10:
+            return "ok"
+        elif complexity <= 20:
+            return "warning"
+        else:
+            return "critical"
+
+    @staticmethod
+    def get_maintainability_segment(maintainability: float) -> str:
+        if maintainability > 80:
+            return "good"
+        elif maintainability > 60:
+            return "ok"
+        elif maintainability > 40:
+            return "warning"
+        else:
+            return "critical"
+
+    @staticmethod
+    def get_method_size_segment(method_size: float) -> str:
+        if method_size <= 15:
+            return "good"
+        elif method_size <= 30:
+            return "ok"
+        elif method_size <= 50:
+            return "warning"
+        else:
+            return "critical"
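Segmentor centralises the rating thresholds that SegmentedMetrics and FileMetrics now delegate to; a quick check of the bands defined above:

from metripy.Metric.Code.Segmentor import Segmentor

assert Segmentor.get_loc_segment(200) == "good"               # up to 200 lines
assert Segmentor.get_loc_segment(750) == "warning"            # 501-1000 lines
assert Segmentor.get_complexity_segment(12) == "warning"      # average CC 11-20
assert Segmentor.get_maintainability_segment(85) == "good"    # MI above 80
assert Segmentor.get_method_size_segment(60) == "critical"    # more than 50 lines per method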
metripy/Metric/Git/GitMetrics.py CHANGED

@@ -136,7 +136,7 @@ class GitMetrics:
     def to_dict(self) -> dict[str, any]:
         return {
             "analysis_start_date": self.analysis_start_date,
-            "avg_commit_size":
+            "avg_commit_size": round(self.get_avg_commit_size(), 2),
             "commit_stats_per_month": self.get_commit_stats_per_month(),
             "churn_per_month": self.get_churn_per_month(),
             "total_commits": self.total_commits,
metripy/Metric/ProjectMetrics.py CHANGED

@@ -1,3 +1,5 @@
+from typing import Self
+
 from metripy.Dependency.Dependency import Dependency
 from metripy.Metric.Code.AggregatedMetrics import AggregatedMetrics
 from metripy.Metric.Code.FileMetrics import FileMetrics

@@ -53,3 +55,30 @@ class ProjectMetrics:

     def _avg(self, items: list[float | int]) -> float:
         return sum(items) / len(items)
+
+    def to_dict(self) -> dict:
+        data = {
+            "file_metrics": [m.to_dict() for m in self.file_metrics],
+            "aggregated": self.total_code_metrics.to_dict(),
+            "aggregated_segmented": self.total_code_metrics.to_dict_segmentation(),
+        }
+        if self.git_metrics:
+            data["git_metrics"] = self.git_metrics.to_dict()
+        if self.dependencies:
+            data["dependencies"] = [d.to_dict() for d in self.dependencies]
+            data["license_distribution"] = Dependency.get_lisence_distribution(
+                self.dependencies
+            )
+        return data
+
+    @staticmethod
+    def from_dict(data: dict) -> Self:
+        # TODO: not needed yet
+        # git_metrics = GitMetrics.from_dict(data["git_metrics"]) if "git_metrics" in data.keys() else None
+        # dependencies = [Dependency.from_dict(d) for d in data["dependencies"]] if "dependencies" in data.keys() else None
+
+        return ProjectMetrics(
+            file_metrics=[FileMetrics.from_dict(m) for m in data["file_metrics"]],
+            git_metrics=None,
+            dependencies=None,
+        )
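These two methods close the loop with the JsonImporter at the top of this diff: the JSON reporter writes to_dict() output and from_dict() rebuilds a ProjectMetrics from it, currently restoring only the file metrics (git metrics and dependencies are left as None, as the TODO notes). A hedged sketch of reading a previously exported report, assuming the constructor keywords implied by from_dict and an illustrative file name:

import json

from metripy.Metric.ProjectMetrics import ProjectMetrics

with open("metripy-report.json") as fh:  # illustrative path
    restored = ProjectMetrics.from_dict(json.load(fh))

# Only file-level metrics survive the round trip for now.
assert restored.git_metrics is None and restored.dependencies is None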