qgis-plugin-analyzer 1.4.0-py3-none-any.whl → 1.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- analyzer/__init__.py +2 -1
- analyzer/cli/__init__.py +14 -0
- analyzer/cli/app.py +147 -0
- analyzer/cli/base.py +93 -0
- analyzer/cli/commands/__init__.py +19 -0
- analyzer/cli/commands/analyze.py +47 -0
- analyzer/cli/commands/fix.py +58 -0
- analyzer/cli/commands/init.py +41 -0
- analyzer/cli/commands/list_rules.py +41 -0
- analyzer/cli/commands/security.py +46 -0
- analyzer/cli/commands/summary.py +52 -0
- analyzer/cli/commands/version.py +41 -0
- analyzer/cli.py +4 -281
- analyzer/commands.py +163 -0
- analyzer/engine.py +491 -245
- analyzer/fixer.py +206 -130
- analyzer/reporters/markdown_reporter.py +88 -14
- analyzer/reporters/summary_reporter.py +226 -49
- analyzer/rules/qgis_rules.py +3 -1
- analyzer/scanner.py +219 -711
- analyzer/secrets.py +84 -0
- analyzer/security_checker.py +85 -0
- analyzer/security_rules.py +127 -0
- analyzer/transformers.py +29 -8
- analyzer/utils/__init__.py +2 -0
- analyzer/utils/path_utils.py +53 -1
- analyzer/validators.py +90 -55
- analyzer/visitors/__init__.py +19 -0
- analyzer/visitors/base.py +75 -0
- analyzer/visitors/composite_visitor.py +73 -0
- analyzer/visitors/imports_visitor.py +85 -0
- analyzer/visitors/metrics_visitor.py +158 -0
- analyzer/visitors/security_visitor.py +52 -0
- analyzer/visitors/standards_visitor.py +284 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/METADATA +32 -10
- qgis_plugin_analyzer-1.6.0.dist-info/RECORD +52 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/WHEEL +1 -1
- qgis_plugin_analyzer-1.4.0.dist-info/RECORD +0 -30
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/entry_points.txt +0 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/licenses/LICENSE +0 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/top_level.txt +0 -0
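
The headline change in this release is the rewrite of `analyzer/engine.py` shown below: project scoring moves out of `ProjectAnalyzer` into a dedicated `ScoringEngine`, and the loosely typed tuples and dicts of 1.4.0 are replaced by a frozen `ProjectConfig` dataclass plus `TypedDict` results (`QGISChecksResult`, `SemanticAnalysisResult`, `ProjectScores`, `FullAnalysisResult`). A minimal sketch of how the new pieces compose, assuming the 1.6.0 wheel is installed; the module records here are hand-built stand-ins for real scanner output, not data from this package:

```python
# Illustrative only: drives the ScoringEngine added in 1.6.0 with fake inputs.
from analyzer.engine import ScoringEngine, SemanticAnalysisResult

# Hand-built stand-in for one scanned module (normally a ModuleAnalysisResult
# produced by analyzer.scanner).
modules = [
    {
        "path": "plugin/core.py",
        "lines": 220,
        "complexity": 18,
        "functions": [{"complexity": 4}, {"complexity": 12}],
        "security_issues": [{"severity": "medium"}],
    }
]
semantic: SemanticAnalysisResult = {"cycles": [], "metrics": {}, "missing_resources": []}

engine = ScoringEngine(project_type="generic")
scores = engine.calculate_project_scores(
    modules, ruff_findings=[], qgis_checks=None, semantic=semantic
)
# For a "generic" project qgis_score stays 0.0; the single medium-severity
# finding costs 5 points, so security_score comes out at 95.0.
print(scores)
```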
analyzer/engine.py
CHANGED
@@ -24,7 +24,8 @@ import os
 import pathlib
 import subprocess
 from concurrent.futures import ProcessPoolExecutor, as_completed
-from
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional, TypedDict, cast
 
 from .reporters import (
     generate_html_report,
@@ -32,6 +33,7 @@ from .reporters import (
     save_json_context,
 )
 from .scanner import (
+    ModuleAnalysisResult,
     analyze_module_worker,
     audit_qgis_standards,
 )
@@ -39,20 +41,240 @@ from .semantic import DependencyGraph, ResourceValidator
 from .utils import (
     IgnoreMatcher,
     ProgressTracker,
+    discover_project_files,
     load_ignore_patterns,
     load_profile_config,
     logger,
-    safe_path_resolve,
     setup_logger,
 )
 from .validators import (
-    calculate_package_size,
-    scan_for_binaries,
     validate_metadata,
     validate_metadata_urls,
+    validate_package_constraints,
     validate_plugin_structure,
 )
 
+# --- Types ---
+
+
+@dataclass(frozen=True)
+class ProjectConfig:
+    """Strongly typed project configuration."""
+
+    strict: bool = False
+    generate_html: bool = True
+    fail_on_error: bool = False
+    project_type: str = "auto"
+    rules: Dict[str, Any] = field(default_factory=dict)
+    fail_on_critical: bool = False
+
+
+class QGISChecksResult(TypedDict):
+    """Result of QGIS-specific validation checks."""
+
+    compliance: Dict[str, Any]
+    structure: Dict[str, Any]
+    metadata: Dict[str, Any]
+    binaries: List[str]
+    package_size: float
+    package_constraints: Dict[str, Any]
+    url_status: Dict[str, str]
+
+
+class SemanticAnalysisResult(TypedDict):
+    """Result of semantic analysis."""
+
+    cycles: List[List[str]]
+    metrics: Dict[str, Any]
+    missing_resources: List[str]
+
+
+class ProjectScores(TypedDict):
+    """Calculated project quality scores."""
+
+    code_score: float
+    maint_score: float
+    qgis_score: float
+    security_score: float
+
+
+class FullAnalysisResult(TypedDict, total=False):
+    """Consolidated analysis result for the entire project."""
+
+    project_name: str
+    project_type: str
+    metrics: Dict[str, Any]
+    ruff_findings: List[Dict[str, Any]]
+    security: Dict[str, Any]
+    semantic: Dict[str, Any]
+    modules: List[ModuleAnalysisResult]
+    research_summary: Dict[str, Any]
+    qgis_compliance: Dict[str, Any]
+    repository_compliance: Dict[str, Any]
+
+
+class ScoringEngine:
+    """Specialized engine for calculating project quality scores."""
+
+    def __init__(self, project_type: str) -> None:
+        self.project_type = project_type
+
+    def calculate_project_scores(
+        self,
+        modules_data: List[ModuleAnalysisResult],
+        ruff_findings: List[Dict[str, Any]],
+        qgis_checks: Optional[QGISChecksResult],
+        semantic: SemanticAnalysisResult,
+    ) -> ProjectScores:
+        """Calculates project quality scores based on industry-standard formulas."""
+        if not modules_data:
+            return {
+                "code_score": 0.0,
+                "maint_score": 0.0,
+                "qgis_score": 0.0,
+                "security_score": 0.0,
+            }
+
+        module_score = self._get_mi_score(modules_data)
+        maintainability_score = self._get_maint_score(modules_data, ruff_findings)
+        modernization_bonus = self._get_modernization_bonus(modules_data)
+        maintainability_score = min(100.0, maintainability_score + modernization_bonus)
+
+        # Security context
+        security_penalty = self._get_security_penalty(modules_data)
+        security_score = max(0.0, 100.0 - security_penalty)
+
+        # Global penalties (e.g., circular dependencies)
+        cycles = semantic["cycles"]
+        penalty = len(cycles) * 10
+        module_score = max(0, module_score - penalty)
+        maintainability_score = max(0, maintainability_score - penalty)
+
+        if self.project_type == "generic" or not qgis_checks:
+            return {
+                "code_score": round(module_score, 1),
+                "maint_score": round(maintainability_score, 1),
+                "qgis_score": 0.0,
+                "security_score": round(security_score, 1),
+            }
+
+        qgis_score = self._get_qgis_score(
+            qgis_checks["compliance"],
+            qgis_checks["structure"],
+            qgis_checks["metadata"],
+            semantic["missing_resources"],
+            qgis_checks["binaries"],
+            qgis_checks["package_size"],
+            security_penalty,
+        )
+
+        return {
+            "code_score": round(module_score, 1),
+            "maint_score": round(maintainability_score, 1),
+            "qgis_score": round(qgis_score, 1),
+            "security_score": round(security_score, 1),
+        }
+
+    def _get_mi_score(self, modules_data: List[ModuleAnalysisResult]) -> float:
+        """Calculates module stability based on Maintainability Index (MI)."""
+        mi_scores = []
+        for m in modules_data:
+            cc = m.get("complexity", 1)
+            sloc = max(1, m.get("lines", 1))
+            mi = (171 - 0.23 * cc - 16.2 * math.log(sloc)) * 100 / 171
+            mi_scores.append(max(0, mi))
+        return sum(mi_scores) / len(mi_scores) if mi_scores else 0.0
+
+    def _get_maint_score(
+        self,
+        modules_data: List[ModuleAnalysisResult],
+        ruff_findings: List[Dict[str, Any]],
+    ) -> float:
+        """Calculates maintainability based on function complexity and linting penalties."""
+        all_func_comp = []
+        for m in modules_data:
+            for f in m.get("functions", []):
+                all_func_comp.append(f["complexity"])
+
+        avg_func_comp = sum(all_func_comp) / len(all_func_comp) if all_func_comp else 1.0
+        func_score = max(0, 100 - (max(0, avg_func_comp - 10) * 5))
+
+        total_lines = sum(m.get("lines", 0) for m in modules_data)
+        errors = sum(1 for f in ruff_findings if f.get("code", "").startswith(("E", "F")))
+        others = len(ruff_findings) - errors
+
+        lint_penalty = ((5 * errors + others) / max(1, total_lines / 10)) * 10
+        lint_score = max(0, 100 - lint_penalty)
+
+        return float((func_score * 0.7) + (lint_score * 0.3))
+
+    def _get_modernization_bonus(self, modules_data: List[ModuleAnalysisResult]) -> float:
+        """Calculates modernization bonuses based on type hints and documentation styles."""
+        total_functions = 0
+        total_params = 0
+        annotated_params = 0
+        has_return_hint = 0
+        detected_styles = set()
+
+        for m in modules_data:
+            metrics = m.get("research_metrics", {})
+            t_stats = metrics.get("type_hint_stats", {})
+            total_functions += t_stats.get("total_functions", 0)
+            total_params += t_stats.get("total_parameters", 0)
+            annotated_params += t_stats.get("annotated_parameters", 0)
+            has_return_hint += t_stats.get("has_return_hint", 0)
+            detected_styles.update(metrics.get("docstring_styles", []))
+
+        bonus = 0.0
+        if total_params > 0 or total_functions > 0:
+            param_cov = annotated_params / max(1, total_params)
+            ret_cov = has_return_hint / max(1, total_functions)
+            if param_cov >= 0.8 and ret_cov >= 0.8:
+                bonus += 5.0
+
+        if detected_styles:
+            bonus += 2.0
+        return bonus
+
+    def _get_qgis_score(
+        self,
+        compliance: Dict[str, Any],
+        structure: Dict[str, Any],
+        metadata: Dict[str, Any],
+        missing_resources: List[str],
+        binaries: List[str],
+        package_size: float,
+        security_penalty: float = 0.0,
+    ) -> float:
+        """Calculates QGIS-specific compliance score."""
+        score = 100.0
+        score -= compliance.get("issues_count", 0) * 2
+        if not structure.get("is_valid", True):
+            score -= 20
+        if not metadata.get("is_valid", True):
+            score -= 10
+        score -= len(missing_resources) * 5
+        score -= len(binaries) * 50
+        if package_size > 20:
+            score -= 10
+
+        score -= security_penalty
+        return float(max(0, score))
+
+    def _get_security_penalty(self, modules_data: List[ModuleAnalysisResult]) -> float:
+        """Calculates total penalty for security vulnerabilities."""
+        penalty = 0.0
+        for m in modules_data:
+            for issue in m.get("security_issues", []):
+                sev = issue.get("severity", "medium").lower()
+                if sev == "high":
+                    penalty += 10.0
+                elif sev == "medium":
+                    penalty += 5.0
+                else:
+                    penalty += 2.0
+        return penalty
+
 
 class ProjectAnalyzer:
     def __init__(
@@ -79,47 +301,32 @@ class ProjectAnalyzer:
         self.max_workers = min(os.cpu_count() or 4, 4)
         self.max_file_size_kb = 500
 
-        # Load
-
+        # Load and wrap config
+        raw_config = load_profile_config(self.project_path, profile)
+        self.config = ProjectConfig(
+            strict=raw_config.get("strict", False),
+            generate_html=raw_config.get("generate_html", True),
+            fail_on_error=raw_config.get("fail_on_error", False),
+            project_type=raw_config.get("project_type", "auto"),
+            rules=raw_config.get("rules", {}),
+        )
 
         # Detect project type
-        self.project_type = self.config.
+        self.project_type = self.config.project_type
         if self.project_type == "auto":
             metadata_file = self.project_path / "metadata.txt"
             self.project_type = "qgis" if metadata_file.exists() else "generic"
 
         logger.info(f"📁 Project type: {self.project_type.upper()}")
 
+        # Initialize Engines
+        self.scoring = ScoringEngine(self.project_type)
+
         # Load .analyzerignore
         ignore_file = self.project_path / ".analyzerignore"
         patterns = load_ignore_patterns(ignore_file)
         self.matcher = IgnoreMatcher(self.project_path, patterns)
 
-    def get_python_files(self) -> List[pathlib.Path]:
-        """Scans Python files ignoring common folders and .analyzerignore patterns.
-
-        Returns:
-            A sorted list of pathlib.Path objects for all detected Python files.
-        """
-        python_files = []
-        for root, dirs, files in os.walk(self.project_path):
-            root_path = pathlib.Path(root)
-
-            # Filter directories
-            dirs[:] = [d for d in dirs if not self.matcher.is_ignored(root_path / d)]
-
-            for file in files:
-                file_path = root_path / file
-                if file.endswith(".py") and not self.matcher.is_ignored(file_path):
-                    # Skip very large files to avoid OOM
-                    if file_path.stat().st_size > self.max_file_size_kb * 1024:
-                        logger.warning(
-                            f"⚠️ Skipping large file: {file_path.name} (> {self.max_file_size_kb}KB)"
-                        )
-                        continue
-                    python_files.append(file_path)
-        return sorted(python_files)
-
     def run_ruff_audit(self) -> List[Dict[str, Any]]:
         """Executes Ruff linting via subprocess.
 
@@ -146,8 +353,8 @@ class ProjectAnalyzer:
             return []
 
     def _run_parallel_analysis(
-        self, files: List[pathlib.Path], rules_config:
-    ) -> List[
+        self, files: List[pathlib.Path], rules_config: Dict[str, Any]
+    ) -> List[ModuleAnalysisResult]:
         """Runs parallel analysis on all Python files.
 
         Args:
@@ -158,7 +365,7 @@ class ProjectAnalyzer:
             A list of module analysis results.
         """
         tracker = ProgressTracker(len(files))
-        modules_data = []
+        modules_data: List[ModuleAnalysisResult] = []
 
         with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
             futures = {
@@ -175,44 +382,51 @@ class ProjectAnalyzer:
         return modules_data
 
     def _run_qgis_specific_checks(
-        self,
-
+        self,
+        modules_data: List[ModuleAnalysisResult],
+        rules_config: Dict[str, Any],
+        discovery: Dict[str, Any],
+    ) -> QGISChecksResult:
         """Runs QGIS-specific validation checks.
 
         Args:
-            modules_data:
-            rules_config:
+            modules_data: Results from module analysis.
+            rules_config: Configuration for rules.
+            discovery: Discovery results from the project scanner.
 
         Returns:
-            A
+            A QGISChecksResult containing findings from all checks.
         """
-
-
-        )
-
-        # Official repository audit
-        metadata_path = safe_path_resolve(self.project_path, "metadata.txt")
+        metadata_file = self.project_path / "metadata.txt"
+        compliance = audit_qgis_standards(modules_data, self.project_path, rules_config)
         structure = validate_plugin_structure(self.project_path)
-        metadata = validate_metadata(
+        metadata = validate_metadata(metadata_file)
 
-        # Repository
-
-
-
-        url_status = {}
-        if metadata.get("is_valid") and "metadata" in metadata:
-            url_status = validate_metadata_urls(metadata["metadata"])
+        # New Repository Constraints
+        constraints = validate_package_constraints(
+            discovery["total_size_mb"], discovery["binaries"]
+        )
 
-        return
+        return {
+            "compliance": compliance,
+            "structure": structure,
+            "metadata": metadata,
+            "binaries": discovery["binaries"],
+            "package_size": discovery["total_size_mb"],
+            "package_constraints": constraints,
+            "url_status": validate_metadata_urls(metadata.get("metadata", {})),
+        }
 
-    def _run_semantic_analysis(
+    def _run_semantic_analysis(
+        self, modules_data: List[ModuleAnalysisResult]
+    ) -> SemanticAnalysisResult:
         """Runs semantic analysis including dependencies and resources.
 
         Args:
             modules_data: List of analyzed module entries.
 
         Returns:
-            A
+            A dictionary containing cycles, metrics, and missing resources.
         """
         dep_graph = DependencyGraph()
         all_resource_usages = []
@@ -223,9 +437,11 @@ class ProjectAnalyzer:
         res_validator.scan_project_resources(self.matcher)
 
         for m in modules_data:
-            dep_graph.add_node(m["path"], m)
+            dep_graph.add_node(m["path"], cast(Dict[str, Any], m))
             if self.project_type == "qgis" and "resource_usages" in m:
-
+                # Type safe usage of resource_usages from TypedDict
+                resource_usages = m.get("resource_usages", [])
+                all_resource_usages.extend(resource_usages)
 
         dep_graph.build_edges(self.project_path)
         cycles = dep_graph.detect_cycles()
@@ -235,68 +451,92 @@ class ProjectAnalyzer:
         if self.project_type == "qgis" and res_validator:
             missing_resources = res_validator.validate_usage(all_resource_usages)
 
-        return
+        return {
+            "cycles": cycles,
+            "metrics": metrics,
+            "missing_resources": missing_resources,
+        }
 
     def _build_analysis_results(
         self,
         files: List[pathlib.Path],
-        modules_data: List[
+        modules_data: List[ModuleAnalysisResult],
         ruff_findings: List[Dict[str, Any]],
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        scores: ProjectScores,
+        qgis_checks: Optional[QGISChecksResult],
+        semantic: SemanticAnalysisResult,
+    ) -> FullAnalysisResult:
+        """Consolidates analysis results into a single dictionary."""
+        analyses: FullAnalysisResult = {
+            "project_name": self.project_path.name,
+            "project_type": self.project_type,
+            "metrics": self._get_metrics_summary(files, modules_data, scores),
+            "ruff_findings": ruff_findings,
+            "security": self._get_security_summary(modules_data, scores),
+            "semantic": {
+                "circular_dependencies": semantic["cycles"],
+                "coupling_metrics": semantic["metrics"],
+            },
+            "modules": modules_data,
+            "research_summary": self._get_research_summary(modules_data),
+        }
 
-
-
-
-
-
-
-
-
-
-
-
-            missing_resources
-
-
-
+        if self.project_type == "qgis" and qgis_checks:
+            analyses["metrics"]["overall_score"] = round(
+                (scores["code_score"] * 0.5) + (scores["qgis_score"] * 0.5), 1
+            )
+            analyses["qgis_compliance"] = {
+                "compliance_score": round(scores["qgis_score"], 1),
+                "best_practices": qgis_checks["compliance"],
+                "repository_standards": {
+                    "structure": qgis_checks["structure"],
+                    "metadata": qgis_checks["metadata"],
+                },
+            }
+            analyses["semantic"]["missing_resources"] = semantic["missing_resources"]
+            analyses["repository_compliance"] = {
+                "binaries": qgis_checks["binaries"],
+                "package_size_mb": round(qgis_checks["package_size"], 2),
+                "url_validation": qgis_checks["url_status"],
+                "folder_name_valid": qgis_checks["structure"].get("folder_name_valid", True),
+                "constraint_errors": qgis_checks["package_constraints"].get("errors", []),
+                "is_compliant": qgis_checks["package_constraints"].get("is_valid", True)
+                and qgis_checks["structure"].get("is_valid", True),
+            }
 
-
-
-
-
+        return analyses
+
+    def _get_metrics_summary(
+        self,
+        files: List[pathlib.Path],
+        modules_data: List[ModuleAnalysisResult],
+        scores: ProjectScores,
+    ) -> Dict[str, Any]:
+        """Generates the metrics summary portion of the results."""
+        return {
             "total_files": len(files),
             "total_lines": sum(m["lines"] for m in modules_data),
-            "quality_score": round(code_score, 1),
-            "maintainability_score": round(maint_score, 1),
+            "quality_score": round(scores["code_score"], 1),
+            "maintainability_score": round(scores["maint_score"], 1),
+            "security_score": round(scores["security_score"], 1),
         }
 
-
-
+    def _get_security_summary(
+        self, modules_data: List[ModuleAnalysisResult], scores: ProjectScores
+    ) -> Dict[str, Any]:
+        """Generates the security summary portion of the results."""
+        all_security_issues = []
+        for m in modules_data:
+            all_security_issues.extend(m.get("security_issues", []))
 
-
-            "
-            "
-            "
-            "ruff_findings": ruff_findings,
-            "semantic": {"circular_dependencies": cycles, "coupling_metrics": metrics},
-            "modules": modules_data,
+        return {
+            "findings": all_security_issues,
+            "count": len(all_security_issues),
+            "score": round(scores["security_score"], 1),
         }
 
-
+    def _get_research_summary(self, modules_data: List[ModuleAnalysisResult]) -> Dict[str, Any]:
+        """Aggregates research metrics for summary."""
         total_functions = 0
         total_params = 0
         annotated_params = 0
@@ -319,7 +559,7 @@ class ProjectAnalyzer:
 
         detected_styles.update(r_metrics.get("docstring_styles", []))
 
-
+        return {
             "type_hint_coverage": round((annotated_params / max(1, total_params)) * 100, 1)
             if total_params > 0
             else 0.0,
@@ -332,32 +572,17 @@ class ProjectAnalyzer:
             "detected_docstring_styles": list(detected_styles),
         }
 
-
-        analyses["qgis_compliance"] = {
-            "compliance_score": round(qgis_score, 1),
-            "best_practices": compliance,
-            "repository_standards": {"structure": structure, "metadata": metadata},
-        }
-        analyses["semantic"]["missing_resources"] = missing_resources
-        analyses["repository_compliance"] = {
-            "binaries": binaries,
-            "package_size_mb": round(package_size, 2),
-            "url_validation": url_status,
-            "is_compliant": len(binaries) == 0 and package_size <= 20,
-        }
-
-        return analyses
-
-    def _save_reports(self, analyses: Dict[str, Any]) -> None:
+    def _save_reports(self, analyses: FullAnalysisResult) -> None:
         """Saves all generated analysis reports to the output directory.
 
         Args:
             analyses: The consolidated analysis results dictionary.
         """
-
-
-
-
+        data = cast(Dict[str, Any], analyses)
+        generate_markdown_summary(data, self.output_dir / "PROJECT_SUMMARY.md")
+        if self.config.generate_html:
+            generate_html_report(data, self.output_dir / "PROJECT_SUMMARY.html")
+        save_json_context(data, self.output_dir / "project_context.json")
 
     def run(self) -> bool:
         """Executes the complete analysis pipeline.
@@ -367,8 +592,16 @@ class ProjectAnalyzer:
             False if it failed due to critical system errors or strict mode violations.
         """
         logger.info(f"🔍 Analyzing: {self.project_path}")
-
-
+
+        # Unified Project Discovery
+        discovery = discover_project_files(self.project_path, self.matcher)
+        files = discovery["python_files"]
+        rules_config = self.config.rules
+
+        # Update Project Type if it was auto
+        if self.config.project_type == "auto":
+            self.project_type = "qgis" if discovery["has_metadata"] else "generic"
+            logger.info(f"📁 Project type: {self.project_type.upper()}")
 
         # Parallel analysis
         modules_data = self._run_parallel_analysis(files, rules_config)
@@ -377,59 +610,31 @@ class ProjectAnalyzer:
         ruff_findings = self.run_ruff_audit()
 
         # Initialize defaults
-
-        structure: Dict[str, Any] = {"is_valid": True}
-        metadata: Dict[str, Any] = {"is_valid": True}
-        binaries: List[str] = []
-        package_size = 0
-        url_status = {}
+        qgis_checks: Optional[QGISChecksResult] = None
 
         # QGIS-specific checks
         if self.project_type == "qgis":
-
-            self._run_qgis_specific_checks(modules_data, rules_config)
-            )
+            qgis_checks = self._run_qgis_specific_checks(modules_data, rules_config, discovery)
 
         # Semantic Analysis
-
-        cycles = semantic_res[0] if len(semantic_res) > 0 else []
-        metrics = semantic_res[1] if len(semantic_res) > 1 else {}
-        missing_resources = semantic_res[2] if len(semantic_res) > 2 else []
+        semantic = self._run_semantic_analysis(modules_data)
 
-        # Calculate scores
-        scores = self.
+        # Calculate scores via ScoringEngine
+        scores = self.scoring.calculate_project_scores(
             modules_data,
             ruff_findings,
-
-
-            metadata,
-            cycles,
-            missing_resources,
-            binaries,
-            package_size,
+            qgis_checks,
+            semantic,
         )
-        # Handle potential return length mismatches gracefully (Robustness v1.0.0+)
-        code_score = scores[0] if len(scores) > 0 else 0.0
-        maint_score = scores[1] if len(scores) > 1 else 0.0
-        qgis_score = scores[2] if len(scores) > 2 else 0.0
 
         # Build results
         analyses = self._build_analysis_results(
             files,
             modules_data,
             ruff_findings,
-
-
-
-            compliance,
-            structure,
-            metadata,
-            cycles,
-            metrics,
-            missing_resources,
-            binaries,
-            package_size,
-            url_status,
+            scores,
+            qgis_checks,
+            semantic,
         )
 
         # Save reports
@@ -438,11 +643,15 @@ class ProjectAnalyzer:
         logger.info(f"✅ Analysis completed. Reports in: {self.output_dir}")
 
         # Fail on error if strict mode is on
-        if self.config.
+        if self.config.fail_on_error and self.project_type == "qgis" and qgis_checks:
+            compliance = qgis_checks["compliance"]
+            structure = qgis_checks["structure"]
+            metadata = qgis_checks["metadata"]
             if (
                 int(compliance.get("issues_count", 0)) > 0
-                or not structure
-                or not metadata
+                or not structure.get("is_valid", True)
+                or not metadata.get("is_valid", True)
+                or not qgis_checks["package_constraints"].get("is_valid", True)
             ):
                 logger.error(
                     "❌ Strict Mode: Critical QGIS compliance issues detected. Failing analysis."
@@ -453,77 +662,108 @@ class ProjectAnalyzer:
 
     def _calculate_scores(
         self,
-        modules_data: List[
+        modules_data: List[ModuleAnalysisResult],
         ruff_findings: List[Dict[str, Any]],
-
-
-
-        cycles: List[List[str]],
-        missing_resources: List[str],
-        binaries: List[str],
-        package_size: float,
-    ) -> tuple:
+        qgis_checks: Optional[QGISChecksResult],
+        semantic: SemanticAnalysisResult,
+    ) -> ProjectScores:
         """Calculates project quality scores based on industry-standard formulas.
 
         Args:
            modules_data: Detailed analysis results for each module.
            ruff_findings: List of Ruff linting findings.
-
-
-            metadata: Results of metadata.txt validation.
-            cycles: List of circular dependency cycles.
-            missing_resources: List of missing QRC resource paths.
-            binaries: List of prohibited binary files.
-            package_size: Size of the plugin package in MB.
+            qgis_checks: Results of QGIS-specific validation checks.
+            semantic: Results of semantic analysis.
 
         Returns:
-            A
+            A ProjectScores TypedDict containing stability, maintainability, and qgis scores.
         """
         if not modules_data:
-            return
+            return {
+                "code_score": 0.0,
+                "maint_score": 0.0,
+                "qgis_score": 0.0,
+                "security_score": 0.0,
+            }
+
+        module_score = self._get_mi_score(modules_data)
+        maintainability_score = self._get_maint_score(modules_data, ruff_findings)
+        modernization_bonus = self._get_modernization_bonus(modules_data)
+        maintainability_score = min(100.0, maintainability_score + modernization_bonus)
+
+        # Security context
+        security_penalty = self._get_security_penalty(modules_data)
+        security_score = max(0.0, 100.0 - security_penalty)
 
-        #
-
+        # Global penalties (e.g., circular dependencies)
+        cycles = semantic["cycles"]
+        penalty = len(cycles) * 10
+        module_score = max(0, module_score - penalty)
+        maintainability_score = max(0, maintainability_score - penalty)
+
+        if self.project_type == "generic" or not qgis_checks:
+            return {
+                "code_score": round(module_score, 1),
+                "maint_score": round(maintainability_score, 1),
+                "qgis_score": 0.0,
+                "security_score": round(security_score, 1),
+            }
+
+        qgis_score = self._get_qgis_score(
+            qgis_checks["compliance"],
+            qgis_checks["structure"],
+            qgis_checks["metadata"],
+            semantic["missing_resources"],
+            qgis_checks["binaries"],
+            qgis_checks["package_size"],
+            security_penalty,
+        )
+
+        return {
+            "code_score": round(module_score, 1),
+            "maint_score": round(maintainability_score, 1),
+            "qgis_score": round(qgis_score, 1),
+            "security_score": round(security_score, 1),
+        }
+
+    def _get_mi_score(self, modules_data: List[ModuleAnalysisResult]) -> float:
+        """Calculates module stability based on Maintainability Index (MI)."""
         mi_scores = []
         for m in modules_data:
             cc = m.get("complexity", 1)
             sloc = max(1, m.get("lines", 1))
+            # Formula: MI = (171 - 0.23 * CC - 16.2 * ln(SLOC)) * 100 / 171
             mi = (171 - 0.23 * cc - 16.2 * math.log(sloc)) * 100 / 171
             mi_scores.append(max(0, mi))
+        return sum(mi_scores) / len(mi_scores) if mi_scores else 0.0
 
-
-
-
+    def _get_maint_score(
+        self,
+        modules_data: List[ModuleAnalysisResult],
+        ruff_findings: List[Dict[str, Any]],
+    ) -> float:
+        """Calculates maintainability based on function complexity and linting penalties."""
+        # 1. Function Complexity Score
         all_func_comp = []
         for m in modules_data:
             for f in m.get("functions", []):
                 all_func_comp.append(f["complexity"])
 
         avg_func_comp = sum(all_func_comp) / len(all_func_comp) if all_func_comp else 1.0
-        # Function complexity score: 100 is perfect, -5 per point over 10
         func_score = max(0, 100 - (max(0, avg_func_comp - 10) * 5))
 
-        #
-        # 10 - ((5*E + W + R + C) / statements) * 10
+        # 2. Lint Scoring (Pylint style)
         total_lines = sum(m.get("lines", 0) for m in modules_data)
-        errors =
-        others =
-        for find in ruff_findings:
-            code = find.get("code", "")
-            if code.startswith(("E", "F")):
-                errors += 1
-            else:
-                others += 1
+        errors = sum(1 for f in ruff_findings if f.get("code", "").startswith(("E", "F")))
+        others = len(ruff_findings) - errors
 
         lint_penalty = ((5 * errors + others) / max(1, total_lines / 10)) * 10
         lint_score = max(0, 100 - lint_penalty)
 
-
-        maintainability_score = (func_score * 0.7) + (lint_score * 0.3)
+        return float((func_score * 0.7) + (lint_score * 0.3))
 
-
-
-        has_docstring_count = 0
+    def _get_modernization_bonus(self, modules_data: List[ModuleAnalysisResult]) -> float:
+        """Calculates modernization bonuses based on type hints and documentation styles."""
         total_functions = 0
         total_params = 0
         annotated_params = 0
@@ -532,55 +772,61 @@ class ProjectAnalyzer:
 
         for m in modules_data:
             metrics = m.get("research_metrics", {})
-            d_stats = metrics.get("docstring_stats", {})
-            total_public_items += d_stats.get("total_public_items", 0)
-            has_docstring_count += d_stats.get("has_docstring", 0)
-
             t_stats = metrics.get("type_hint_stats", {})
             total_functions += t_stats.get("total_functions", 0)
             total_params += t_stats.get("total_parameters", 0)
             annotated_params += t_stats.get("annotated_parameters", 0)
             has_return_hint += t_stats.get("has_return_hint", 0)
-
             detected_styles.update(metrics.get("docstring_styles", []))
 
-
-        modernization_bonus = 0.0
-        # Type Hint Bonus: > 80% coverage on params and returns
+        bonus = 0.0
         if total_params > 0 or total_functions > 0:
             param_cov = annotated_params / max(1, total_params)
             ret_cov = has_return_hint / max(1, total_functions)
             if param_cov >= 0.8 and ret_cov >= 0.8:
-
+                bonus += 5.0
 
-        # Docstring Style Bonus: Standardized formats (Google/NumPy)
         if detected_styles:
-
-
-        maintainability_score = min(100.0, maintainability_score + modernization_bonus)
-
-        # Global penalties
-        penalty = len(cycles) * 10
-        module_score = max(0, module_score - penalty)
-        maintainability_score = max(0, maintainability_score - penalty)
+            bonus += 2.0
+        return bonus
 
-
-
-
-
-
-
+    def _get_qgis_score(
+        self,
+        compliance: Dict[str, Any],
+        structure: Dict[str, Any],
+        metadata: Dict[str, Any],
+        missing_resources: List[str],
+        binaries: List[str],
+        package_size: float,
+        security_penalty: float = 0.0,
+    ) -> float:
+        """Calculates QGIS-specific compliance score."""
+        score = 100.0
+        score -= compliance.get("issues_count", 0) * 2
         if not structure.get("is_valid", True):
-
+            score -= 20
         if not metadata.get("is_valid", True):
-
-
-
+            score -= 10
+        score -= len(missing_resources) * 5
+        score -= len(binaries) * 50
         if package_size > 20:
-
+            score -= 10
 
-
-
-
-
-
+        # Security penalty
+        score -= security_penalty
+
+        return float(max(0, score))
+
+    def _get_security_penalty(self, modules_data: List[ModuleAnalysisResult]) -> float:
+        """Calculates total penalty for security vulnerabilities."""
+        penalty = 0.0
+        for m in modules_data:
+            for issue in m.get("security_issues", []):
+                sev = issue.get("severity", "medium").lower()
+                if sev == "high":
+                    penalty += 10.0
+                elif sev == "medium":
+                    penalty += 5.0
+                else:
+                    penalty += 2.0
+        return penalty
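
For reference, the two formulas at the heart of the new `ScoringEngine` can be checked by hand. A small sketch with invented values, not taken from any real project:

```python
import math

# Maintainability Index per _get_mi_score:
# MI = (171 - 0.23 * CC - 16.2 * ln(SLOC)) * 100 / 171, floored at 0.
cc, sloc = 10, 300
mi = (171 - 0.23 * cc - 16.2 * math.log(sloc)) * 100 / 171
print(round(mi, 1))  # ~44.6 for a moderately complex 300-line module

# Lint penalty per _get_maint_score: E/F findings weigh 5x other codes,
# normalized per 10 lines of code.
errors, others, total_lines = 2, 6, 400
lint_penalty = ((5 * errors + others) / max(1, total_lines / 10)) * 10  # 4.0
lint_score = max(0, 100 - lint_penalty)  # 96.0
print(lint_score)
```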