metripy 0.2.8__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of metripy might be problematic. Click here for more details.

Files changed (56)
  1. metripy/Application/Analyzer.py +23 -3
  2. metripy/Application/Application.py +16 -2
  3. metripy/Application/Config/Config.py +34 -0
  4. metripy/Application/Config/File/ConfigFileReaderFactory.py +6 -5
  5. metripy/Application/Config/File/ConfigFileReaderInterface.py +70 -3
  6. metripy/Application/Config/File/JsonConfigFileReader.py +5 -70
  7. metripy/Application/Config/File/YamlConfigFileReader.py +17 -0
  8. metripy/Application/Config/Parser.py +24 -11
  9. metripy/Application/Config/ProjectConfig.py +64 -0
  10. metripy/Application/Info.py +36 -0
  11. metripy/Dependency/Dependency.py +2 -1
  12. metripy/Dependency/Pip/Pip.py +1 -2
  13. metripy/Dependency/Pip/PyPi.py +1 -0
  14. metripy/Git/GitAnalyzer.py +0 -3
  15. metripy/Import/Json/JsonImporter.py +17 -0
  16. metripy/LangAnalyzer/AbstractLangAnalyzer.py +4 -3
  17. metripy/LangAnalyzer/Php/PhpAnalyzer.py +2 -1
  18. metripy/LangAnalyzer/Python/PythonAnalyzer.py +31 -9
  19. metripy/LangAnalyzer/Python/PythonHalSteadAnalyzer.py +55 -0
  20. metripy/LangAnalyzer/Typescript/TypescriptAnalyzer.py +12 -9
  21. metripy/LangAnalyzer/Typescript/TypescriptAstParser.py +1 -1
  22. metripy/Metric/Code/AggregatedMetrics.py +12 -5
  23. metripy/Metric/Code/FileMetrics.py +32 -1
  24. metripy/Metric/Code/ModuleMetrics.py +5 -5
  25. metripy/Metric/Code/SegmentedMetrics.py +72 -36
  26. metripy/Metric/Code/Segmentor.py +44 -0
  27. metripy/Metric/FileTree/FileTreeParser.py +0 -4
  28. metripy/Metric/Git/GitMetrics.py +1 -1
  29. metripy/Metric/ProjectMetrics.py +17 -2
  30. metripy/Metric/Trend/AggregatedTrendMetric.py +101 -0
  31. metripy/Metric/Trend/ClassTrendMetric.py +20 -0
  32. metripy/Metric/Trend/FileTrendMetric.py +46 -0
  33. metripy/Metric/Trend/FunctionTrendMetric.py +28 -0
  34. metripy/Metric/Trend/SegmentedTrendMetric.py +29 -0
  35. metripy/Report/Html/DependencyPageRenderer.py +21 -0
  36. metripy/Report/Html/FilesPageRenderer.py +28 -0
  37. metripy/Report/Html/GitAnalysisPageRenderer.py +55 -0
  38. metripy/Report/Html/IndexPageRenderer.py +40 -0
  39. metripy/Report/Html/PageRenderer.py +43 -0
  40. metripy/Report/Html/PageRendererFactory.py +37 -0
  41. metripy/Report/Html/Reporter.py +49 -130
  42. metripy/Report/Html/TopOffendersPageRenderer.py +84 -0
  43. metripy/Report/Html/TrendsPageRenderer.py +114 -0
  44. metripy/Report/Json/GitJsonReporter.py +3 -1
  45. metripy/Report/Json/JsonReporter.py +4 -1
  46. metripy/Report/ReporterFactory.py +4 -2
  47. metripy/Tree/ClassNode.py +21 -0
  48. metripy/Tree/FunctionNode.py +66 -1
  49. metripy/Trend/TrendAnalyzer.py +150 -0
  50. {metripy-0.2.8.dist-info → metripy-0.3.1.dist-info}/METADATA +3 -3
  51. metripy-0.3.1.dist-info/RECORD +85 -0
  52. metripy-0.2.8.dist-info/RECORD +0 -66
  53. {metripy-0.2.8.dist-info → metripy-0.3.1.dist-info}/WHEEL +0 -0
  54. {metripy-0.2.8.dist-info → metripy-0.3.1.dist-info}/entry_points.txt +0 -0
  55. {metripy-0.2.8.dist-info → metripy-0.3.1.dist-info}/licenses/LICENSE +0 -0
  56. {metripy-0.2.8.dist-info → metripy-0.3.1.dist-info}/top_level.txt +0 -0
@@ -5,6 +5,7 @@ from radon.visitors import Class, Function
5
5
 
6
6
  from metripy.Component.Output.ProgressBar import ProgressBar
7
7
  from metripy.LangAnalyzer.AbstractLangAnalyzer import AbstractLangAnalyzer
8
+ from metripy.LangAnalyzer.Python.PythonHalSteadAnalyzer import PythonHalSteadAnalyzer
8
9
  from metripy.Metric.Code.FileMetrics import FileMetrics
9
10
  from metripy.Tree.ClassNode import ClassNode
10
11
  from metripy.Tree.FunctionNode import FunctionNode
@@ -15,6 +16,7 @@ class PythonAnalyzer(AbstractLangAnalyzer):
15
16
 
16
17
  def __init__(self):
17
18
  super().__init__()
19
+ self.fallback_halstead_analyzer = PythonHalSteadAnalyzer()
18
20
 
19
21
  def get_lang_name(self) -> str:
20
22
  return "Python"
@@ -62,6 +64,7 @@ class PythonAnalyzer(AbstractLangAnalyzer):
62
64
  function_node = FunctionNode(
63
65
  full_name, item.name, item.lineno, item.col_offset, item.complexity
64
66
  )
67
+ function_node.line_end = item.endline
65
68
  if item.is_method:
66
69
  class_node = classes.get(full_class_name)
67
70
  if class_node is not None:
@@ -74,11 +77,6 @@ class PythonAnalyzer(AbstractLangAnalyzer):
74
77
  else:
75
78
  raise ValueError(f"Unknown item type: {type(item)}")
76
79
 
77
- # print("--------------------------------")
78
- # print(json.dumps([c.__dict__() for c in classes.values()], indent=4))
79
- # print("--------------------------------")
80
- # print(json.dumps([f.__dict__() for f in functions.values()], indent=4))
81
- # exit()
82
80
  module = analyze(code)
83
81
  full_name = self.full_name(filename)
84
82
  module_node = ModuleNode(
@@ -93,12 +91,9 @@ class PythonAnalyzer(AbstractLangAnalyzer):
93
91
  )
94
92
  module_node.classes.extend(classes.values())
95
93
  module_node.functions.extend(functions.values())
96
- # print(module)
97
- # print(json.dumps([m.to_dict() for m in modules.values()], indent=4))
98
- # exit()
94
+
99
95
  h = h_visit(code)
100
96
  assert isinstance(h, Halstead)
101
- # print(h.total)
102
97
  function_name: str
103
98
  report: HalsteadReport
104
99
  for function_name, report in h.functions:
@@ -117,9 +112,36 @@ class PythonAnalyzer(AbstractLangAnalyzer):
117
112
  function_node.effort = report.effort
118
113
  function_node.bugs = report.bugs
119
114
  function_node.time = report.time
115
+ function_node.calc_mi()
120
116
  else:
121
117
  raise ValueError(f"Function node not found for function {full_name}")
122
118
 
119
+ code_lines = code.split("\n")
120
+ for func_name, function_node in functions.items():
121
+ if function_node.maintainability_index != 0:
122
+ continue
123
+ # if MI is 0, we want to take another look, radon does not like boring functions
124
+
125
+ lines = code_lines[function_node.lineno : function_node.line_end]
126
+ function_metrics = (
127
+ self.fallback_halstead_analyzer.calculate_halstead_metrics(
128
+ "\n".join(lines)
129
+ )
130
+ )
131
+ function_node.h1 = function_metrics["n1"]
132
+ function_node.h2 = function_metrics["n2"]
133
+ function_node.N1 = function_metrics["N1"]
134
+ function_node.N2 = function_metrics["N2"]
135
+ function_node.vocabulary = function_metrics["vocabulary"]
136
+ function_node.length = function_metrics["length"]
137
+ function_node.volume = function_metrics["volume"]
138
+ function_node.difficulty = function_metrics["difficulty"]
139
+ function_node.effort = function_metrics["effort"]
140
+ function_node.calculated_length = function_metrics["calculated_length"]
141
+ function_node.bugs = function_metrics["bugs"]
142
+ function_node.time = function_metrics["time"]
143
+ function_node.calc_mi()
144
+
123
145
  maintainability_index = mi_visit(code, True)
124
146
  module_node.maintainability_index = maintainability_index
125
147
 
@@ -0,0 +1,55 @@
1
+ from metripy.LangAnalyzer.Generic.HalSteadAnalyzer import HalSteadAnalyzer
2
+
3
+
4
+ class PythonHalSteadAnalyzer:
5
+ def __init__(self):
6
+ self.operators = [
7
+ "+",
8
+ "-",
9
+ "*",
10
+ "/",
11
+ "//",
12
+ "%",
13
+ "**",
14
+ "==",
15
+ "!=",
16
+ ">",
17
+ "<",
18
+ ">=",
19
+ "<=",
20
+ "=",
21
+ "+=",
22
+ "-=",
23
+ "*=",
24
+ "/=",
25
+ "%=",
26
+ "//=",
27
+ "**=",
28
+ "and",
29
+ "or",
30
+ "not",
31
+ "&",
32
+ "|",
33
+ "^",
34
+ "~",
35
+ "<<",
36
+ ">>",
37
+ "in",
38
+ "not in",
39
+ "is",
40
+ "is not",
41
+ ":",
42
+ ",",
43
+ ".",
44
+ "(",
45
+ ")",
46
+ "[",
47
+ "]",
48
+ "{",
49
+ "}",
50
+ ]
51
+
52
+ self.analyzer = HalSteadAnalyzer(self.operators)
53
+
54
+ def calculate_halstead_metrics(self, code: str):
55
+ return self.analyzer.calculate_halstead_metrics(code)
@@ -4,14 +4,16 @@ import lizard
4
4
 
5
5
  from metripy.Component.Output.ProgressBar import ProgressBar
6
6
  from metripy.LangAnalyzer.AbstractLangAnalyzer import AbstractLangAnalyzer
7
- from metripy.LangAnalyzer.Typescript.TypescriptAstParser import \
8
- TypescriptAstParser
9
- from metripy.LangAnalyzer.Typescript.TypescriptBasicComplexityAnalyzer import \
10
- TypescriptBasicComplexityAnalzyer
11
- from metripy.LangAnalyzer.Typescript.TypescriptBasicLocAnalyzer import \
12
- TypescriptBasicLocAnalyzer
13
- from metripy.LangAnalyzer.Typescript.TypescriptHalSteadAnalyzer import \
14
- TypeScriptHalSteadAnalyzer
7
+ from metripy.LangAnalyzer.Typescript.TypescriptAstParser import TypescriptAstParser
8
+ from metripy.LangAnalyzer.Typescript.TypescriptBasicComplexityAnalyzer import (
9
+ TypescriptBasicComplexityAnalzyer,
10
+ )
11
+ from metripy.LangAnalyzer.Typescript.TypescriptBasicLocAnalyzer import (
12
+ TypescriptBasicLocAnalyzer,
13
+ )
14
+ from metripy.LangAnalyzer.Typescript.TypescriptHalSteadAnalyzer import (
15
+ TypeScriptHalSteadAnalyzer,
16
+ )
15
17
  from metripy.Tree.ClassNode import ClassNode
16
18
  from metripy.Tree.FunctionNode import FunctionNode
17
19
  from metripy.Tree.ModuleNode import ModuleNode
@@ -146,7 +148,7 @@ class TypescriptAnalyzer(AbstractLangAnalyzer):
146
148
 
147
149
  code_lines = code.split("\n")
148
150
  for func_name, function_node in functions.items():
149
- lines = code_lines[function_node.lineno:function_node.line_end]
151
+ lines = code_lines[function_node.lineno : function_node.line_end]
150
152
  function_metrics = self.halstead_analyzer.calculate_halstead_metrics(
151
153
  "\n".join(lines)
152
154
  )
@@ -162,6 +164,7 @@ class TypescriptAnalyzer(AbstractLangAnalyzer):
162
164
  function_node.calculated_length = function_metrics["calculated_length"]
163
165
  function_node.bugs = function_metrics["bugs"]
164
166
  function_node.time = function_metrics["time"]
167
+ function_node.calc_mi()
165
168
 
166
169
  maintainability_index = self._calculate_maintainability_index(
167
170
  functions.values(), module_node
@@ -8,7 +8,7 @@ class TypescriptAstParser:
8
8
  self.parser = get_parser("typescript")
9
9
 
10
10
  def _get_node_text(self, code: str, node) -> str:
11
- return code[node.start_byte:node.end_byte].decode("utf-8")
11
+ return code[node.start_byte : node.end_byte].decode("utf-8")
12
12
 
13
13
  def extract_structure(self, code: str) -> dict:
14
14
  tree = self.parser.parse(bytes(code, "utf8"))
@@ -1,4 +1,5 @@
1
1
  from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics
2
+ from metripy.Metric.Trend.AggregatedTrendMetric import AggregatedTrendMetric
2
3
 
3
4
 
4
5
  class AggregatedMetrics:
@@ -29,13 +30,19 @@ class AggregatedMetrics:
29
30
  "methodSize": segmented_method_size,
30
31
  }
31
32
 
33
+ self.trend: AggregatedTrendMetric | None = None
34
+
32
35
  def to_dict(self) -> dict:
33
36
  return {
34
- "loc": str(self.loc),
35
- "avgCcPerFunction": f"{self.avgCcPerFunction:.2f}",
36
- "maintainabilityIndex": f"{self.maintainabilityIndex:.2f}",
37
- "avgLocPerFunction": f"{self.avgLocPerFunction:.2f}",
38
- "num_files": str(self.num_files),
37
+ "loc": self.loc,
38
+ "avgCcPerFunction": round(self.avgCcPerFunction, 2),
39
+ "maintainabilityIndex": round(self.maintainabilityIndex, 2),
40
+ "avgLocPerFunction": round(self.avgLocPerFunction, 2),
41
+ "num_files": self.num_files,
42
+ "trend": self.trend.to_dict() if self.trend else None,
43
+ "trend_segmentation": (
44
+ self.trend.to_dict_segmentation() if self.trend else None
45
+ ),
39
46
  }
40
47
 
41
48
  def to_dict_segmentation(self) -> dict:
@@ -1,3 +1,7 @@
1
+ from typing import Self
2
+
3
+ from metripy.Metric.Code.Segmentor import Segmentor
4
+ from metripy.Metric.Trend.FileTrendMetric import FileTrendMetric
1
5
  from metripy.Tree.ClassNode import ClassNode
2
6
  from metripy.Tree.FunctionNode import FunctionNode
3
7
 
@@ -7,6 +11,7 @@ class FileMetrics:
7
11
  self,
8
12
  full_name: str,
9
13
  loc: int,
14
+ totalCc: int,
10
15
  avgCcPerFunction: float,
11
16
  maintainabilityIndex: float,
12
17
  avgLocPerFunction: float,
@@ -15,19 +20,45 @@ class FileMetrics:
15
20
  ):
16
21
  self.full_name = full_name
17
22
  self.loc = loc
23
+ self.totalCc = totalCc
18
24
  self.avgCcPerFunction = avgCcPerFunction
19
25
  self.maintainabilityIndex = maintainabilityIndex
20
26
  self.avgLocPerFunction = avgLocPerFunction
21
27
  self.class_nodes = class_nodes
22
28
  self.function_nodes = function_nodes
29
+ self.trend: FileTrendMetric | None = None
23
30
 
24
31
  def to_dict(self) -> dict:
25
32
  return {
26
33
  "full_name": self.full_name,
27
34
  "loc": self.loc,
35
+ "loc_segment": Segmentor.get_loc_segment(self.loc),
36
+ "totalCc": self.totalCc,
37
+ "complexity_segment": Segmentor.get_complexity_segment(
38
+ self.avgCcPerFunction
39
+ ),
28
40
  "avgCcPerFunction": self.avgCcPerFunction,
29
- "maintainabilityIndex": f"{self.maintainabilityIndex:.2f}",
41
+ "maintainabilityIndex": round(self.maintainabilityIndex, 2),
42
+ "maintainability_segment": Segmentor.get_maintainability_segment(
43
+ self.maintainabilityIndex
44
+ ),
30
45
  "avgLocPerFunction": self.avgLocPerFunction,
46
+ "method_size_segment": Segmentor.get_method_size_segment(
47
+ self.avgLocPerFunction
48
+ ),
31
49
  "class_nodes": [node.to_dict() for node in self.class_nodes],
32
50
  "function_nodes": [node.to_dict() for node in self.function_nodes],
33
51
  }
52
+
53
+ @staticmethod
54
+ def from_dict(data: dict) -> Self:
55
+ return FileMetrics(
56
+ full_name=data["full_name"],
57
+ loc=data["loc"],
58
+ totalCc=data["totalCc"],
59
+ avgCcPerFunction=data["avgCcPerFunction"],
60
+ maintainabilityIndex=data["maintainabilityIndex"],
61
+ avgLocPerFunction=data["avgLocPerFunction"],
62
+ class_nodes=[ClassNode.from_dict(d) for d in data["class_nodes"]],
63
+ function_nodes=[FunctionNode.from_dict(d) for d in data["function_nodes"]],
64
+ )
@@ -22,11 +22,11 @@ class ModuleMetrics:
22
22
 
23
23
  def to_dict(self) -> dict:
24
24
  return {
25
- "loc": str(self.loc),
26
- "avgCcPerFunction": f"{self.avgCcPerFunction:.2f}",
27
- "maintainabilityIndex": f"{self.maintainabilityIndex:.2f}",
28
- "avgLocPerFunction": f"{self.avgLocPerFunction:.2f}",
29
- "num_files": str(self.num_files),
25
+ "loc": self.loc,
26
+ "avgCcPerFunction": round(self.avgCcPerFunction, 2),
27
+ "maintainabilityIndex": round(self.maintainabilityIndex, 2),
28
+ "avgLocPerFunction": round(self.avgLocPerFunction, 2),
29
+ "num_files": self.num_files,
30
30
  "class_nodes": [node.to_dict() for node in self.class_nodes],
31
31
  "function_nodes": [node.to_dict() for node in self.function_nodes],
32
32
  }
@@ -1,5 +1,7 @@
1
1
  from typing import Self
2
2
 
3
+ from metripy.Metric.Code.Segmentor import Segmentor
4
+
3
5
 
4
6
  class SegmentedMetrics:
5
7
  def __init__(self):
@@ -16,50 +18,84 @@ class SegmentedMetrics:
16
18
  "critical": self.critical,
17
19
  }
18
20
 
21
+ def to_dict_with_percent(self) -> dict:
22
+ return {
23
+ "good": self.good,
24
+ "good_percent": round(
25
+ self.good / (self.good + self.ok + self.warning + self.critical) * 100,
26
+ 2,
27
+ ),
28
+ "ok": self.ok,
29
+ "ok_percent": round(
30
+ self.ok / (self.good + self.ok + self.warning + self.critical) * 100, 2
31
+ ),
32
+ "warning": self.warning,
33
+ "warning_percent": round(
34
+ self.warning
35
+ / (self.good + self.ok + self.warning + self.critical)
36
+ * 100,
37
+ 2,
38
+ ),
39
+ "critical": self.critical,
40
+ "critical_percent": round(
41
+ self.critical
42
+ / (self.good + self.ok + self.warning + self.critical)
43
+ * 100,
44
+ 2,
45
+ ),
46
+ }
47
+
48
+ def _set_values(self, values: dict[str, int]) -> Self:
49
+ self.good = values["good"]
50
+ self.ok = values["ok"]
51
+ self.warning = values["warning"]
52
+ self.critical = values["critical"]
53
+ return self
54
+
19
55
  def set_loc(self, values: list[int]) -> Self:
56
+ d = {
57
+ "good": self.good,
58
+ "ok": self.ok,
59
+ "warning": self.warning,
60
+ "critical": self.critical,
61
+ }
20
62
  for value in values:
21
- if value <= 200:
22
- self.good += 1
23
- elif value <= 500:
24
- self.ok += 1
25
- elif value <= 1000:
26
- self.warning += 1
27
- else:
28
- self.critical += 1
29
- return self
63
+ d[Segmentor.get_loc_segment(value)] += 1
64
+
65
+ return self._set_values(d)
30
66
 
31
67
  def set_complexity(self, values: list[int]) -> Self:
68
+ d = {
69
+ "good": self.good,
70
+ "ok": self.ok,
71
+ "warning": self.warning,
72
+ "critical": self.critical,
73
+ }
32
74
  for value in values:
33
- if value <= 5:
34
- self.good += 1
35
- elif value <= 10:
36
- self.ok += 1
37
- elif value <= 20:
38
- self.warning += 1
39
- else:
40
- self.critical += 1
41
- return self
75
+ d[Segmentor.get_complexity_segment(value)] += 1
76
+
77
+ return self._set_values(d)
42
78
 
43
79
  def set_maintainability(self, values: list[int]) -> Self:
80
+ d = {
81
+ "good": self.good,
82
+ "ok": self.ok,
83
+ "warning": self.warning,
84
+ "critical": self.critical,
85
+ }
44
86
  for value in values:
45
- if value <= 80:
46
- self.critical += 1
47
- elif value <= 60:
48
- self.warning += 1
49
- elif value <= 40:
50
- self.ok += 1
51
- else:
52
- self.good += 1
53
- return self
87
+ d[Segmentor.get_maintainability_segment(value)] += 1
88
+
89
+ return self._set_values(d)
54
90
 
55
91
  def set_method_size(self, values: list[int]) -> Self:
92
+ d = {
93
+ "good": self.good,
94
+ "ok": self.ok,
95
+ "warning": self.warning,
96
+ "critical": self.critical,
97
+ }
56
98
  for value in values:
57
- if value <= 15:
58
- self.good += 1
59
- elif value <= 30:
60
- self.ok += 1
61
- elif value <= 50:
62
- self.warning += 1
63
- else:
64
- self.critical += 1
65
- return self
99
+ d[Segmentor.get_method_size_segment(value)] += 1
100
+
101
+ return self._set_values(d)
@@ -0,0 +1,44 @@
1
+ class Segmentor:
2
+ @staticmethod
3
+ def get_loc_segment(loc: int) -> str:
4
+ if loc <= 200:
5
+ return "good"
6
+ elif loc <= 500:
7
+ return "ok"
8
+ elif loc <= 1000:
9
+ return "warning"
10
+ else:
11
+ return "critical"
12
+
13
+ @staticmethod
14
+ def get_complexity_segment(complexity: float) -> str:
15
+ if complexity <= 5:
16
+ return "good"
17
+ elif complexity <= 10:
18
+ return "ok"
19
+ elif complexity <= 20:
20
+ return "warning"
21
+ else:
22
+ return "critical"
23
+
24
+ @staticmethod
25
+ def get_maintainability_segment(maintainability: float) -> str:
26
+ if maintainability > 80:
27
+ return "good"
28
+ elif maintainability > 60:
29
+ return "ok"
30
+ elif maintainability > 40:
31
+ return "warning"
32
+ else:
33
+ return "critical"
34
+
35
+ @staticmethod
36
+ def get_method_size_segment(method_size: float) -> str:
37
+ if method_size <= 15:
38
+ return "good"
39
+ elif method_size <= 30:
40
+ return "ok"
41
+ elif method_size <= 50:
42
+ return "warning"
43
+ else:
44
+ return "critical"
@@ -36,7 +36,3 @@ class FileTreeParser:
36
36
 
37
37
  for child in node.children:
38
38
  FileTreeParser._shorten_tree(child)
39
-
40
- # print(json.dumps(root.to_dict(), indent=4))
41
- # exit()
42
- # return root
@@ -136,7 +136,7 @@ class GitMetrics:
136
136
  def to_dict(self) -> dict[str, any]:
137
137
  return {
138
138
  "analysis_start_date": self.analysis_start_date,
139
- "avg_commit_size": f"{self.get_avg_commit_size():.2f}",
139
+ "avg_commit_size": round(self.get_avg_commit_size(), 2),
140
140
  "commit_stats_per_month": self.get_commit_stats_per_month(),
141
141
  "churn_per_month": self.get_churn_per_month(),
142
142
  "total_commits": self.total_commits,
@@ -1,9 +1,10 @@
1
+ from typing import Self
2
+
1
3
  from metripy.Dependency.Dependency import Dependency
2
4
  from metripy.Metric.Code.AggregatedMetrics import AggregatedMetrics
3
5
  from metripy.Metric.Code.FileMetrics import FileMetrics
4
6
  from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics
5
7
  from metripy.Metric.Git.GitMetrics import GitMetrics
6
- from metripy.Dependency.Dependency import Dependency
7
8
 
8
9
 
9
10
  class ProjectMetrics:
@@ -65,5 +66,19 @@ class ProjectMetrics:
65
66
  data["git_metrics"] = self.git_metrics.to_dict()
66
67
  if self.dependencies:
67
68
  data["dependencies"] = [d.to_dict() for d in self.dependencies]
68
- data["license_distribution"] = Dependency.get_lisence_distribution(self.dependencies)
69
+ data["license_distribution"] = Dependency.get_lisence_distribution(
70
+ self.dependencies
71
+ )
69
72
  return data
73
+
74
+ @staticmethod
75
+ def from_dict(data: dict) -> Self:
76
+ # TODO: not needed yet
77
+ # git_metrics = GitMetrics.from_dict(data["git_metrics"]) if "git_metrics" in data.keys() else None
78
+ # dependencies = [Dependency.from_dict(d) for d in data["dependencies"]] if "dependencies" in data.keys() else None
79
+
80
+ return ProjectMetrics(
81
+ file_metrics=[FileMetrics.from_dict(m) for m in data["file_metrics"]],
82
+ git_metrics=None,
83
+ dependencies=None,
84
+ )
@@ -0,0 +1,101 @@
1
+ from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics
2
+ from metripy.Metric.Trend.SegmentedTrendMetric import SegmentedTrendMetric
3
+
4
+
5
+ class AggregatedTrendMetric:
6
+ def __init__(
7
+ self,
8
+ historical_loc: int,
9
+ loc: int,
10
+ historical_avgCcPerFunction: float,
11
+ avgCcPerFunction: float,
12
+ historical_maintainabilityIndex: float,
13
+ maintainabilityIndex: float,
14
+ historical_avgLocPerFunction: float,
15
+ avgLocPerFunction: float,
16
+ historical_num_files: int,
17
+ num_files: int,
18
+ historical_segmented_loc: SegmentedMetrics,
19
+ segmented_loc: SegmentedMetrics,
20
+ historical_segmented_complexity: SegmentedMetrics,
21
+ segmented_complexity: SegmentedMetrics,
22
+ historical_segmented_maintainability: SegmentedMetrics,
23
+ segmented_maintainability: SegmentedMetrics,
24
+ historical_segmented_method_size: SegmentedMetrics,
25
+ segmented_method_size: SegmentedMetrics,
26
+ ):
27
+ self.historical_loc = historical_loc
28
+ self.loc_delta = loc - historical_loc
29
+ self.historical_avgCcPerFunction = historical_avgCcPerFunction
30
+ self.avgCcPerFunction_delta = avgCcPerFunction - historical_avgCcPerFunction
31
+ self.historical_maintainabilityIndex = historical_maintainabilityIndex
32
+ self.maintainabilityIndex_delta = (
33
+ maintainabilityIndex - historical_maintainabilityIndex
34
+ )
35
+ self.historical_avgLocPerFunction = historical_avgLocPerFunction
36
+ self.avgLocPerFunction_delta = avgLocPerFunction - historical_avgLocPerFunction
37
+ self.historical_num_files = historical_num_files
38
+ self.num_files_delta = num_files - historical_num_files
39
+
40
+ self.historical_segmentation_data = {
41
+ "loc": historical_segmented_loc,
42
+ "complexity": historical_segmented_complexity,
43
+ "maintainability": historical_segmented_maintainability,
44
+ "methodSize": historical_segmented_method_size,
45
+ }
46
+
47
+ self.segmentation_data_deltas = {
48
+ "loc": SegmentedTrendMetric(historical_segmented_loc, segmented_loc),
49
+ "complexity": SegmentedTrendMetric(
50
+ historical_segmented_complexity, segmented_complexity
51
+ ),
52
+ "maintainability": SegmentedTrendMetric(
53
+ historical_segmented_maintainability, segmented_maintainability
54
+ ),
55
+ "methodSize": SegmentedTrendMetric(
56
+ historical_segmented_method_size, segmented_method_size
57
+ ),
58
+ }
59
+
60
+ def get_trend_type(self, delta: float, up_is_good: bool) -> str:
61
+ if up_is_good:
62
+ return "positive" if delta > 0 else "negative" if delta < 0 else "neutral"
63
+ else:
64
+ return "negative" if delta > 0 else "positive" if delta < 0 else "neutral"
65
+
66
+ def get_trend_icon(self, delta: float) -> str:
67
+ return "arrow-up" if delta > 0 else "arrow-down" if delta < 0 else "arrow-right"
68
+
69
+ def to_dict(self) -> dict:
70
+ return {
71
+ "loc_delta": round(self.loc_delta, 2),
72
+ "loc_trend_type": self.get_trend_type(self.loc_delta, False),
73
+ "loc_trend_icon": self.get_trend_icon(self.loc_delta),
74
+ "avgCcPerFunction_delta": round(self.avgCcPerFunction_delta, 2),
75
+ "avgCcPerFunction_trend_type": self.get_trend_type(
76
+ self.avgCcPerFunction_delta, False
77
+ ),
78
+ "avgCcPerFunction_trend_icon": self.get_trend_icon(
79
+ self.avgCcPerFunction_delta
80
+ ),
81
+ "maintainabilityIndex_delta": round(self.maintainabilityIndex_delta, 2),
82
+ "maintainabilityIndex_trend_type": self.get_trend_type(
83
+ self.maintainabilityIndex_delta, True
84
+ ),
85
+ "maintainabilityIndex_trend_icon": self.get_trend_icon(
86
+ self.maintainabilityIndex_delta
87
+ ),
88
+ "avgLocPerFunction_delta": round(self.avgLocPerFunction_delta, 2),
89
+ "avgLocPerFunction_trend_type": self.get_trend_type(
90
+ self.avgLocPerFunction_delta, False
91
+ ),
92
+ "avgLocPerFunction_trend_icon": self.get_trend_icon(
93
+ self.avgLocPerFunction_delta
94
+ ),
95
+ "num_files_delta": self.num_files_delta,
96
+ "num_files_trend_type": self.get_trend_type(self.num_files_delta, False),
97
+ "num_files_trend_icon": self.get_trend_icon(self.num_files_delta),
98
+ }
99
+
100
+ def to_dict_segmentation(self) -> dict:
101
+ return {k: v.to_dict() for k, v in self.segmentation_data_deltas.items()}
@@ -0,0 +1,20 @@
1
+ class ClassTrendMetric:
2
+ def __init__(
3
+ self,
4
+ historical_lineno: int,
5
+ lineno: int,
6
+ historical_real_complexity: int,
7
+ real_complexity: int,
8
+ ):
9
+ self.historical_lineno = historical_lineno
10
+ self.lineno_delta = lineno - historical_lineno
11
+ self.historical_real_complexity = historical_real_complexity
12
+ self.real_complexity_delta = real_complexity - historical_real_complexity
13
+
14
+ def to_dict(self) -> dict:
15
+ return {
16
+ "historical_lineno": self.historical_lineno,
17
+ "lineno_delta": self.lineno_delta,
18
+ "historical_real_complexity": self.historical_real_complexity,
19
+ "real_complexity_delta": self.real_complexity_delta,
20
+ }