metripy-0.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. See the registry page for more details about this version of metripy.

Files changed (66)
  1. metripy/Application/Analyzer.py +106 -0
  2. metripy/Application/Application.py +54 -0
  3. metripy/Application/Config/Config.py +13 -0
  4. metripy/Application/Config/File/ConfigFileReaderFactory.py +24 -0
  5. metripy/Application/Config/File/ConfigFileReaderInterface.py +14 -0
  6. metripy/Application/Config/File/JsonConfigFileReader.py +82 -0
  7. metripy/Application/Config/GitConfig.py +10 -0
  8. metripy/Application/Config/Parser.py +31 -0
  9. metripy/Application/Config/ProjectConfig.py +27 -0
  10. metripy/Application/Config/ReportConfig.py +10 -0
  11. metripy/Application/__init__.py +0 -0
  12. metripy/Component/Debug/Debugger.py +20 -0
  13. metripy/Component/File/Finder.py +37 -0
  14. metripy/Component/Output/CliOutput.py +49 -0
  15. metripy/Component/Output/ProgressBar.py +27 -0
  16. metripy/Dependency/Composer/Composer.py +30 -0
  17. metripy/Dependency/Composer/Packegist.py +55 -0
  18. metripy/Dependency/Dependency.py +30 -0
  19. metripy/Dependency/Npm/Npm.py +30 -0
  20. metripy/Dependency/Npm/NpmOrg.py +47 -0
  21. metripy/Dependency/Pip/Pip.py +69 -0
  22. metripy/Dependency/Pip/PyPi.py +49 -0
  23. metripy/Git/GitAnalyzer.py +86 -0
  24. metripy/LangAnalyzer/AbstractLangAnalyzer.py +65 -0
  25. metripy/LangAnalyzer/Generic/HalSteadAnalyzer.py +58 -0
  26. metripy/LangAnalyzer/Generic/__init__.py +0 -0
  27. metripy/LangAnalyzer/Php/PhpAnalyzer.py +193 -0
  28. metripy/LangAnalyzer/Php/PhpBasicAstParser.py +56 -0
  29. metripy/LangAnalyzer/Php/PhpBasicLocAnalyzer.py +174 -0
  30. metripy/LangAnalyzer/Php/PhpHalSteadAnalyzer.py +44 -0
  31. metripy/LangAnalyzer/Python/PythonAnalyzer.py +129 -0
  32. metripy/LangAnalyzer/Typescript/TypescriptAnalyzer.py +208 -0
  33. metripy/LangAnalyzer/Typescript/TypescriptAstParser.py +68 -0
  34. metripy/LangAnalyzer/Typescript/TypescriptBasicComplexityAnalyzer.py +114 -0
  35. metripy/LangAnalyzer/Typescript/TypescriptBasicLocAnalyzer.py +69 -0
  36. metripy/LangAnalyzer/Typescript/TypescriptHalSteadAnalyzer.py +55 -0
  37. metripy/LangAnalyzer/__init__.py +0 -0
  38. metripy/Metric/Code/AggregatedMetrics.py +42 -0
  39. metripy/Metric/Code/FileMetrics.py +33 -0
  40. metripy/Metric/Code/ModuleMetrics.py +32 -0
  41. metripy/Metric/Code/SegmentedMetrics.py +65 -0
  42. metripy/Metric/FileTree/FileTree.py +15 -0
  43. metripy/Metric/FileTree/FileTreeParser.py +42 -0
  44. metripy/Metric/Git/GitCodeHotspot.py +37 -0
  45. metripy/Metric/Git/GitContributor.py +37 -0
  46. metripy/Metric/Git/GitKnowledgeSilo.py +27 -0
  47. metripy/Metric/Git/GitMetrics.py +148 -0
  48. metripy/Metric/ProjectMetrics.py +55 -0
  49. metripy/Report/Csv/Reporter.py +12 -0
  50. metripy/Report/Html/Reporter.py +210 -0
  51. metripy/Report/Json/AbstractJsonReporter.py +11 -0
  52. metripy/Report/Json/GitJsonReporter.py +21 -0
  53. metripy/Report/Json/JsonReporter.py +12 -0
  54. metripy/Report/ReporterFactory.py +22 -0
  55. metripy/Report/ReporterInterface.py +17 -0
  56. metripy/Tree/ClassNode.py +32 -0
  57. metripy/Tree/FunctionNode.py +49 -0
  58. metripy/Tree/ModuleNode.py +42 -0
  59. metripy/__init__.py +0 -0
  60. metripy/metripy.py +15 -0
  61. metripy-0.2.7.dist-info/METADATA +113 -0
  62. metripy-0.2.7.dist-info/RECORD +66 -0
  63. metripy-0.2.7.dist-info/WHEEL +5 -0
  64. metripy-0.2.7.dist-info/entry_points.txt +2 -0
  65. metripy-0.2.7.dist-info/licenses/LICENSE +21 -0
  66. metripy-0.2.7.dist-info/top_level.txt +1 -0
metripy/LangAnalyzer/Typescript/TypescriptBasicComplexityAnalyzer.py
@@ -0,0 +1,114 @@
+import re
+
+
+class TypescriptBasicComplexityAnalyzer:
+    def get_complexity(self, code: str, function_name: str) -> dict[str, int] | None:
+        function_scope = self._extract_function(code, function_name)
+        if not function_scope:
+            return None
+        (start_index, end_index, start_line, end_line) = function_scope
+        function_code = code[start_index:end_index]
+
+        complexity = self._determine_complexity(function_code)
+
+        return {
+            "start_line": start_line,
+            "end_line": end_line,
+            "complexity": complexity,
+        }
+
+    def _determine_complexity(self, function_code: str) -> int:
+        complexity = 1
+
+        # Keywords that increase complexity
+        keywords = [
+            r"\bif\b",
+            r"\bfor\b",
+            r"\bwhile\b",
+            r"\bcase\b",
+            r"\bcatch\b",
+            r"\?\s*",
+            r"\belse\s+if\b",
+            r"\breturn\b.*\?",
+            r"\bthrow\b.*\?",
+            r"&&",
+            r"\|\|",
+        ]
+
+        for keyword in keywords:
+            matches = re.findall(keyword, function_code)
+            complexity += len(matches)
+
+        return complexity
+
+    def _extract_function(
+        self, code: str, function_name: str
+    ) -> tuple[int, int, int, int] | None:
+        # This pattern matches various JavaScript/TypeScript function declarations:
+        # 1. Arrow functions:
+        #    - With or without `export`
+        #    - With or without `async`
+        #    - With or without generics (`<T>`)
+        #    - With or without return types (`: Type`)
+        #    - With multiline or single-line parameters
+        #    - Including curried arrow functions (arrow returning another arrow)
+        # 2. Traditional functions:
+        #    - With optional `export` and `async`
+        # 3. Class methods:
+        #    - With optional access modifiers (`public`, `private`, `protected`)
+        #    - With optional `static` and `async`
+        #    - With optional generics and return types
+        # 4. Bare class methods (no modifiers)
+
+        pattern = re.compile(
+            rf"""
+            (
+                (export\s+)?(const|let|var)\s+{re.escape(function_name)}\s*
+                (<[^>]*>)?\s*=\s*(async\s+)?\(\s*.*?\s*\)\s*
+                (:\s*[^=]+)?\s*=>\s*
+                (\s*\(\s*.*?\s*\)\s*=>)?\s*\{{?
+            )
+            |
+            (
+                (export\s+)?(async\s+)?function\s+{re.escape(function_name)}\s*
+                \(\s*.*?\s*\)\s*\{{
+            )
+            |
+            (
+                (?:public|private|protected)?\s*
+                (?:static\s+)?(?:async\s+)?{re.escape(function_name)}\s*
+                (<[^>]+>)?\s*\(\s*.*?\s*\)\s*
+                (:\s*[^\{{]+)?\s*\{{
+            )
+            |
+            (
+                {re.escape(function_name)}\s*\(\s*.*?\s*\)\s*\{{
+            )
+            """,
+            re.MULTILINE | re.DOTALL | re.VERBOSE,
+        )
+
+        match = pattern.search(code)
+        if not match:
+            print(f"Function '{function_name}' not found.")
+            return None
+
+        start_index = match.start()
+        start_line = code[:start_index].count("\n") + 1
+
+        # Find the matching closing brace
+        brace_count = 0
+        i = match.end() - 1
+        while i < len(code):
+            if code[i] == "{":
+                brace_count += 1
+            elif code[i] == "}":
+                brace_count -= 1
+                if brace_count == 0:
+                    end_index = i + 1
+                    end_line = code[:end_index].count("\n") + 1
+                    return (start_index, end_index, start_line, end_line)
+            i += 1
+
+        print(f"Function '{function_name}' seems to be incomplete or malformed.")
+        return None
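
For orientation, here is a minimal usage sketch of the analyzer above (not part of the package diff; the TypeScript snippet and the commented result are illustrative, assuming the wheel is installed so the module path from the file list resolves):

from metripy.LangAnalyzer.Typescript.TypescriptBasicComplexityAnalyzer import TypescriptBasicComplexityAnalyzer

ts_code = """function add(a, b) {
    if (a && b) {
        return a + b;
    }
    return 0;
}
"""

# The regex finds the `function add(...) {` declaration, brace matching locates the
# closing `}`, and the keyword scan adds 1 for `if` and 1 for `&&` to the base value of 1.
result = TypescriptBasicComplexityAnalyzer().get_complexity(ts_code, "add")
# result == {"start_line": 1, "end_line": 6, "complexity": 3}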
metripy/LangAnalyzer/Typescript/TypescriptBasicLocAnalyzer.py
@@ -0,0 +1,69 @@
+class TypescriptBasicLocAnalyzer:
+    @staticmethod
+    def get_loc_metrics(code: str, filename: str) -> dict:
+        """Fallback LOC calculation using manual analysis"""
+        try:
+            lines = code.split("\n")
+
+            total_lines = len(lines)
+            blank_lines = TypescriptBasicLocAnalyzer._count_blank_lines(lines)
+            comment_lines = TypescriptBasicLocAnalyzer._count_comment_lines(lines)
+            code_lines = total_lines - blank_lines - comment_lines
+
+            return {
+                "lines": total_lines,
+                "linesOfCode": code_lines,
+                "logicalLinesOfCode": code_lines,
+                "commentLines": comment_lines,
+                "blankLines": blank_lines,
+            }
+        except Exception as e:
+            print(f"Fallback LOC analysis failed: {e}")
+            return {
+                "lines": 0,
+                "linesOfCode": 0,
+                "logicalLinesOfCode": 0,
+                "commentLines": 0,
+                "blankLines": 0,
+            }
+
+    @staticmethod
+    def _count_blank_lines(lines: list) -> int:
+        """Count blank lines"""
+        return sum(1 for line in lines if not line.strip())
+
+    @staticmethod
+    def _count_comment_lines(lines: list) -> int:
+        """Count comment lines (single-line and multi-line)"""
+        comment_count = 0
+        in_multiline_comment = False
+
+        for line in lines:
+            stripped = line.strip()
+
+            # Handle multi-line comments
+            if "/*" in stripped:
+                in_multiline_comment = True
+                comment_count += 1
+                # Check if comment closes on same line
+                if "*/" in stripped:
+                    in_multiline_comment = False
+                continue
+
+            if in_multiline_comment:
+                comment_count += 1
+                if "*/" in stripped:
+                    in_multiline_comment = False
+                continue
+
+            # Handle single-line comments
+            if stripped.startswith("//") or stripped.startswith("#"):
+                comment_count += 1
+                continue
+
+            # Handle doc comments
+            if stripped.startswith("*") and not stripped.startswith("*/"):
+                comment_count += 1
+                continue
+
+        return comment_count
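
A quick illustration of the fallback LOC counting above (illustrative snippet and values, not from the package):

from metripy.LangAnalyzer.Typescript.TypescriptBasicLocAnalyzer import TypescriptBasicLocAnalyzer

code = "// header comment\n\nconst x = 1;\nconst y = 2;\n"
# split("\n") yields 5 entries (the trailing newline adds an empty string):
# 1 comment line, 2 blank lines, 2 code lines
metrics = TypescriptBasicLocAnalyzer.get_loc_metrics(code, "example.ts")
# metrics == {"lines": 5, "linesOfCode": 2, "logicalLinesOfCode": 2,
#             "commentLines": 1, "blankLines": 2}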
metripy/LangAnalyzer/Typescript/TypescriptHalSteadAnalyzer.py
@@ -0,0 +1,55 @@
+from metripy.LangAnalyzer.Generic.HalSteadAnalyzer import HalSteadAnalyzer
+
+
+class TypeScriptHalSteadAnalyzer:
+    def __init__(self):
+        self.operators = set(
+            [
+                "+",
+                "-",
+                "*",
+                "/",
+                "%",
+                "++",
+                "--",
+                "==",
+                "!=",
+                "===",
+                "!==",
+                "<",
+                ">",
+                "<=",
+                ">=",
+                "&&",
+                "||",
+                "!",
+                "=",
+                "+=",
+                "-=",
+                "*=",
+                "/=",
+                "%=",
+                "&",
+                "|",
+                "^",
+                "~",
+                "<<",
+                ">>",
+                "?",
+                ":",
+                ".",
+                ",",
+                ";",
+                "=>",
+                "(",
+                ")",
+                "[",
+                "]",
+                "{",
+                "}",
+            ]
+        )
+        self.analyzer = HalSteadAnalyzer(self.operators)
+
+    def calculate_halstead_metrics(self, code: str):
+        return self.analyzer.calculate_halstead_metrics(code)
metripy/LangAnalyzer/__init__.py
File without changes
metripy/Metric/Code/AggregatedMetrics.py
@@ -0,0 +1,42 @@
+from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics
+
+
+class AggregatedMetrics:
+    """Used to show aggregated metrics on the index page"""
+
+    def __init__(
+        self,
+        loc: int = 0,
+        avgCcPerFunction: float = 0.0,
+        maintainabilityIndex: float = 0.0,
+        avgLocPerFunction: float = 0.0,
+        num_files: int = 0,
+        segmented_loc: SegmentedMetrics | None = None,
+        segmented_complexity: SegmentedMetrics | None = None,
+        segmented_maintainability: SegmentedMetrics | None = None,
+        segmented_method_size: SegmentedMetrics | None = None,
+    ) -> None:
+        self.loc = loc
+        self.avgCcPerFunction = avgCcPerFunction
+        self.maintainabilityIndex = maintainabilityIndex
+        self.avgLocPerFunction = avgLocPerFunction
+        self.num_files = num_files
+        # Build fresh SegmentedMetrics instances to avoid a shared mutable default
+        self.segmentation_data = {
+            "loc": segmented_loc or SegmentedMetrics(),
+            "complexity": segmented_complexity or SegmentedMetrics(),
+            "maintainability": segmented_maintainability or SegmentedMetrics(),
+            "methodSize": segmented_method_size or SegmentedMetrics(),
+        }
+
+    def to_dict(self) -> dict:
+        return {
+            "loc": str(self.loc),
+            "avgCcPerFunction": f"{self.avgCcPerFunction:.2f}",
+            "maintainabilityIndex": f"{self.maintainabilityIndex:.2f}",
+            "avgLocPerFunction": f"{self.avgLocPerFunction:.2f}",
+            "num_files": str(self.num_files),
+        }
+
+    def to_dict_segmentation(self) -> dict:
+        return {k: v.to_dict() for k, v in self.segmentation_data.items()}
metripy/Metric/Code/FileMetrics.py
@@ -0,0 +1,33 @@
+from metripy.Tree.ClassNode import ClassNode
+from metripy.Tree.FunctionNode import FunctionNode
+
+
+class FileMetrics:
+    def __init__(
+        self,
+        full_name: str,
+        loc: int,
+        avgCcPerFunction: float,
+        maintainabilityIndex: float,
+        avgLocPerFunction: float,
+        class_nodes: list[ClassNode],
+        function_nodes: list[FunctionNode],
+    ):
+        self.full_name = full_name
+        self.loc = loc
+        self.avgCcPerFunction = avgCcPerFunction
+        self.maintainabilityIndex = maintainabilityIndex
+        self.avgLocPerFunction = avgLocPerFunction
+        self.class_nodes = class_nodes
+        self.function_nodes = function_nodes
+
+    def to_dict(self) -> dict:
+        return {
+            "full_name": self.full_name,
+            "loc": self.loc,
+            "avgCcPerFunction": self.avgCcPerFunction,
+            "maintainabilityIndex": f"{self.maintainabilityIndex:.2f}",
+            "avgLocPerFunction": self.avgLocPerFunction,
+            "class_nodes": [node.to_dict() for node in self.class_nodes],
+            "function_nodes": [node.to_dict() for node in self.function_nodes],
+        }
metripy/Metric/Code/ModuleMetrics.py
@@ -0,0 +1,32 @@
+from metripy.Tree.ClassNode import ClassNode
+from metripy.Tree.FunctionNode import FunctionNode
+
+
+class ModuleMetrics:
+    def __init__(
+        self,
+        loc: int,
+        avgCcPerFunction: float,
+        maintainabilityIndex: float,
+        avgLocPerFunction: float,
+        class_nodes: list[ClassNode],
+        function_nodes: list[FunctionNode],
+    ):
+        self.loc = loc
+        self.avgCcPerFunction = avgCcPerFunction
+        self.maintainabilityIndex = maintainabilityIndex
+        self.avgLocPerFunction = avgLocPerFunction
+        self.num_files = 1
+        self.class_nodes: list[ClassNode] = class_nodes
+        self.function_nodes: list[FunctionNode] = function_nodes
+
+    def to_dict(self) -> dict:
+        return {
+            "loc": str(self.loc),
+            "avgCcPerFunction": f"{self.avgCcPerFunction:.2f}",
+            "maintainabilityIndex": f"{self.maintainabilityIndex:.2f}",
+            "avgLocPerFunction": f"{self.avgLocPerFunction:.2f}",
+            "num_files": str(self.num_files),
+            "class_nodes": [node.to_dict() for node in self.class_nodes],
+            "function_nodes": [node.to_dict() for node in self.function_nodes],
+        }
metripy/Metric/Code/SegmentedMetrics.py
@@ -0,0 +1,65 @@
+from typing import Self
+
+
+class SegmentedMetrics:
+    def __init__(self):
+        self.good = 0
+        self.ok = 0
+        self.warning = 0
+        self.critical = 0
+
+    def to_dict(self) -> dict:
+        return {
+            "good": self.good,
+            "ok": self.ok,
+            "warning": self.warning,
+            "critical": self.critical,
+        }
+
+    def set_loc(self, values: list[int]) -> Self:
+        for value in values:
+            if value <= 200:
+                self.good += 1
+            elif value <= 500:
+                self.ok += 1
+            elif value <= 1000:
+                self.warning += 1
+            else:
+                self.critical += 1
+        return self
+
+    def set_complexity(self, values: list[int]) -> Self:
+        for value in values:
+            if value <= 5:
+                self.good += 1
+            elif value <= 10:
+                self.ok += 1
+            elif value <= 20:
+                self.warning += 1
+            else:
+                self.critical += 1
+        return self
+
+    def set_maintainability(self, values: list[int]) -> Self:
+        for value in values:
+            if value <= 40:
+                self.critical += 1
+            elif value <= 60:
+                self.warning += 1
+            elif value <= 80:
+                self.ok += 1
+            else:
+                self.good += 1
+        return self
+
+    def set_method_size(self, values: list[int]) -> Self:
+        for value in values:
+            if value <= 15:
+                self.good += 1
+            elif value <= 30:
+                self.ok += 1
+            elif value <= 50:
+                self.warning += 1
+            else:
+                self.critical += 1
+        return self
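
The bucket thresholds above can be read directly from the code; a small illustrative example (not part of the package):

from metripy.Metric.Code.SegmentedMetrics import SegmentedMetrics

# Cyclomatic complexity buckets: <=5 good, <=10 ok, <=20 warning, otherwise critical
seg = SegmentedMetrics().set_complexity([3, 7, 12, 25])
print(seg.to_dict())
# {'good': 1, 'ok': 1, 'warning': 1, 'critical': 1}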
metripy/Metric/FileTree/FileTree.py
@@ -0,0 +1,15 @@
+from typing import Self
+
+
+class FileTree:
+    def __init__(self, name: str, full_name: str, children: list[Self] | None = None):
+        self.name = name
+        self.full_name = full_name
+        self.children: list[Self] = children if children is not None else []
+
+    def to_dict(self) -> dict:
+        return {
+            "name": self.name,
+            "full_name": self.full_name,
+            "children": [child.to_dict() for child in self.children],
+        }
metripy/Metric/FileTree/FileTreeParser.py
@@ -0,0 +1,42 @@
+from metripy.Metric.FileTree.FileTree import FileTree
+
+
+class FileTreeParser:
+    @staticmethod
+    def parse(paths: list[str], shorten: bool = False) -> FileTree:
+        root = FileTree(".", ".")
+
+        for path in paths:
+            parts = path.strip("./").split("/")
+            current = root
+
+            for part in parts:
+                # Check if part already exists in current children
+                found = next(
+                    (child for child in current.children if child.name == part), None
+                )
+                if not found:
+                    found = FileTree(part, path)
+                    current.children.append(found)
+                current = found
+
+        if shorten:
+            FileTreeParser._shorten_tree(root)
+
+        return root
+
+    @staticmethod
+    def _shorten_tree(node: FileTree):
+        """shorten tree nodes that only have a single child"""
+        while len(node.children) == 1:
+            child = node.children[0]
+            node.name += "/" + child.name
+            node.full_name = child.full_name
+            node.children = child.children
+
+        for child in node.children:
+            FileTreeParser._shorten_tree(child)
+
+        # print(json.dumps(root.to_dict(), indent=4))
+        # exit()
+        # return root
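
For reference, a short sketch of how parse() and the shortening step behave (paths are illustrative, not from the package):

from metripy.Metric.FileTree.FileTreeParser import FileTreeParser

tree = FileTreeParser.parse(["./src/app/main.py", "./src/app/util.py"], shorten=True)
# Both paths share the single-child chain "." -> "src" -> "app", so _shorten_tree
# collapses it into one node with the two files as children.
print(tree.name)                                # ./src/app
print([child.name for child in tree.children])  # ['main.py', 'util.py']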
metripy/Metric/Git/GitCodeHotspot.py
@@ -0,0 +1,37 @@
+class GitCodeHotspot:
+
+    def __init__(self, file_path: str, changes_count: int, contributors_count: int):
+        self.file_path = file_path
+        self.changes_count = changes_count
+        self.contributors_count = contributors_count
+        self.risk_level = self._calc_risk_level(changes_count, contributors_count)
+        self.risk_label = self._label_from_risk_level()
+
+    def _calc_risk_level(self, changes_count: int, contributors_count: int) -> str:
+        if changes_count < 10:
+            if contributors_count < 5:
+                return "low"
+            else:
+                return "medium"
+        elif changes_count < 50:
+            if contributors_count < 5:
+                return "medium"
+            else:
+                return "high"
+        else:
+            if contributors_count < 1:
+                return "medium"
+            else:
+                return "high"
+
+    def _label_from_risk_level(self) -> str:
+        return self.risk_level.capitalize()
+
+    def to_dict(self) -> dict[str, str | int]:
+        return {
+            "file_path": self.file_path,
+            "changes_count": self.changes_count,
+            "risk_level": self.risk_level,
+            "risk_label": self.risk_label,
+            "contributors_count": self.contributors_count,
+        }
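
A brief illustration of the hotspot risk buckets above (file path and counts are made up):

from metripy.Metric.Git.GitCodeHotspot import GitCodeHotspot

# 50 or more changes with at least one contributor lands in the "high" bucket
hotspot = GitCodeHotspot("src/app/main.ts", changes_count=60, contributors_count=3)
print(hotspot.risk_level, hotspot.risk_label)  # high High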
metripy/Metric/Git/GitContributor.py
@@ -0,0 +1,37 @@
+class GitContributor:
+    def __init__(
+        self,
+        name: str,
+        commits_count: int,
+        lines_added: int,
+        lines_removed: int,
+        contribution_percentage: int,
+    ):
+        self.name = name
+        self.initials = self._get_initials(name)
+        self.commits_count = commits_count
+        self.lines_added = lines_added
+        self.lines_removed = lines_removed
+        self.contribution_percentage = contribution_percentage
+
+    def _get_initials(self, name: str) -> str:
+        try:
+            parts = name.split()
+            if len(parts) >= 2:
+                return (parts[0][0] + parts[1][0]).upper()
+            elif len(parts) == 1:
+                return parts[0][:2].upper()
+            else:
+                return "UN"
+        except Exception:
+            return "UN"
+
+    def to_dict(self) -> dict[str, str | int]:
+        return {
+            "name": self.name,
+            "initials": self.initials,
+            "commits_count": self.commits_count,
+            "lines_added": self.lines_added,
+            "lines_removed": self.lines_removed,
+            "contribution_percentage": self.contribution_percentage,
+        }
metripy/Metric/Git/GitKnowledgeSilo.py
@@ -0,0 +1,27 @@
+class GitKnowledgeSilo:
+    def __init__(self, file_path: str, owner: str, commits_count: int):
+        self.file_path = file_path
+        self.owner = owner
+        self.commits_count = commits_count
+        self.risk_level = self._calc_risk_level(commits_count)
+        self.risk_label = self._calc_risk_label(self.risk_level)
+
+    def _calc_risk_level(self, commits_count: int) -> str:
+        if commits_count >= 15:
+            return "high"
+        elif commits_count >= 8:
+            return "medium"
+        else:
+            return "low"
+
+    def _calc_risk_label(self, risk_level: str) -> str:
+        return risk_level.capitalize()
+
+    def to_dict(self) -> dict[str, str]:
+        return {
+            "file_path": f"{self.file_path}",
+            "owner": f"{self.owner}",
+            "commits_count": f"{self.commits_count}",
+            "risk_level": f"{self.risk_level}",
+            "risk_label": f"{self.risk_label}",
+        }