qgis-plugin-analyzer 1.5.0__py3-none-any.whl → 1.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. analyzer/cli/__init__.py +14 -0
  2. analyzer/cli/app.py +147 -0
  3. analyzer/cli/base.py +93 -0
  4. analyzer/cli/commands/__init__.py +19 -0
  5. analyzer/cli/commands/analyze.py +47 -0
  6. analyzer/cli/commands/fix.py +58 -0
  7. analyzer/cli/commands/init.py +41 -0
  8. analyzer/cli/commands/list_rules.py +41 -0
  9. analyzer/cli/commands/security.py +46 -0
  10. analyzer/cli/commands/summary.py +52 -0
  11. analyzer/cli/commands/version.py +41 -0
  12. analyzer/cli.py +4 -184
  13. analyzer/commands.py +7 -7
  14. analyzer/engine.py +421 -238
  15. analyzer/fixer.py +206 -130
  16. analyzer/reporters/markdown_reporter.py +48 -15
  17. analyzer/reporters/summary_reporter.py +193 -80
  18. analyzer/scanner.py +218 -138
  19. analyzer/transformers.py +29 -8
  20. analyzer/utils/__init__.py +2 -0
  21. analyzer/utils/path_utils.py +53 -1
  22. analyzer/validators.py +90 -55
  23. analyzer/visitors/__init__.py +19 -0
  24. analyzer/visitors/base.py +75 -0
  25. analyzer/visitors/composite_visitor.py +73 -0
  26. analyzer/visitors/imports_visitor.py +85 -0
  27. analyzer/visitors/metrics_visitor.py +158 -0
  28. analyzer/visitors/security_visitor.py +52 -0
  29. analyzer/visitors/standards_visitor.py +284 -0
  30. {qgis_plugin_analyzer-1.5.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/METADATA +16 -7
  31. qgis_plugin_analyzer-1.6.0.dist-info/RECORD +52 -0
  32. analyzer/visitors.py +0 -455
  33. qgis_plugin_analyzer-1.5.0.dist-info/RECORD +0 -35
  34. {qgis_plugin_analyzer-1.5.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/WHEEL +0 -0
  35. {qgis_plugin_analyzer-1.5.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/entry_points.txt +0 -0
  36. {qgis_plugin_analyzer-1.5.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/licenses/LICENSE +0 -0
  37. {qgis_plugin_analyzer-1.5.0.dist-info → qgis_plugin_analyzer-1.6.0.dist-info}/top_level.txt +0 -0
analyzer/engine.py CHANGED
@@ -24,7 +24,8 @@ import os
  import pathlib
  import subprocess
  from concurrent.futures import ProcessPoolExecutor, as_completed
- from typing import Any, Dict, List, Optional
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, List, Optional, TypedDict, cast

  from .reporters import (
      generate_html_report,
@@ -32,6 +33,7 @@ from .reporters import (
      save_json_context,
  )
  from .scanner import (
+     ModuleAnalysisResult,
      analyze_module_worker,
      audit_qgis_standards,
  )
@@ -39,20 +41,240 @@ from .semantic import DependencyGraph, ResourceValidator
  from .utils import (
      IgnoreMatcher,
      ProgressTracker,
+     discover_project_files,
      load_ignore_patterns,
      load_profile_config,
      logger,
-     safe_path_resolve,
      setup_logger,
  )
  from .validators import (
-     calculate_package_size,
-     scan_for_binaries,
      validate_metadata,
      validate_metadata_urls,
+     validate_package_constraints,
      validate_plugin_structure,
  )

+ # --- Types ---
+
+
+ @dataclass(frozen=True)
+ class ProjectConfig:
+     """Strongly typed project configuration."""
+
+     strict: bool = False
+     generate_html: bool = True
+     fail_on_error: bool = False
+     project_type: str = "auto"
+     rules: Dict[str, Any] = field(default_factory=dict)
+     fail_on_critical: bool = False
+
+
+ class QGISChecksResult(TypedDict):
+     """Result of QGIS-specific validation checks."""
+
+     compliance: Dict[str, Any]
+     structure: Dict[str, Any]
+     metadata: Dict[str, Any]
+     binaries: List[str]
+     package_size: float
+     package_constraints: Dict[str, Any]
+     url_status: Dict[str, str]
+
+
+ class SemanticAnalysisResult(TypedDict):
+     """Result of semantic analysis."""
+
+     cycles: List[List[str]]
+     metrics: Dict[str, Any]
+     missing_resources: List[str]
+
+
+ class ProjectScores(TypedDict):
+     """Calculated project quality scores."""
+
+     code_score: float
+     maint_score: float
+     qgis_score: float
+     security_score: float
+
+
+ class FullAnalysisResult(TypedDict, total=False):
+     """Consolidated analysis result for the entire project."""
+
+     project_name: str
+     project_type: str
+     metrics: Dict[str, Any]
+     ruff_findings: List[Dict[str, Any]]
+     security: Dict[str, Any]
+     semantic: Dict[str, Any]
+     modules: List[ModuleAnalysisResult]
+     research_summary: Dict[str, Any]
+     qgis_compliance: Dict[str, Any]
+     repository_compliance: Dict[str, Any]
+
+
+ class ScoringEngine:
+     """Specialized engine for calculating project quality scores."""
+
+     def __init__(self, project_type: str) -> None:
+         self.project_type = project_type
+
+     def calculate_project_scores(
+         self,
+         modules_data: List[ModuleAnalysisResult],
+         ruff_findings: List[Dict[str, Any]],
+         qgis_checks: Optional[QGISChecksResult],
+         semantic: SemanticAnalysisResult,
+     ) -> ProjectScores:
+         """Calculates project quality scores based on industry-standard formulas."""
+         if not modules_data:
+             return {
+                 "code_score": 0.0,
+                 "maint_score": 0.0,
+                 "qgis_score": 0.0,
+                 "security_score": 0.0,
+             }
+
+         module_score = self._get_mi_score(modules_data)
+         maintainability_score = self._get_maint_score(modules_data, ruff_findings)
+         modernization_bonus = self._get_modernization_bonus(modules_data)
+         maintainability_score = min(100.0, maintainability_score + modernization_bonus)
+
+         # Security context
+         security_penalty = self._get_security_penalty(modules_data)
+         security_score = max(0.0, 100.0 - security_penalty)
+
+         # Global penalties (e.g., circular dependencies)
+         cycles = semantic["cycles"]
+         penalty = len(cycles) * 10
+         module_score = max(0, module_score - penalty)
+         maintainability_score = max(0, maintainability_score - penalty)
+
+         if self.project_type == "generic" or not qgis_checks:
+             return {
+                 "code_score": round(module_score, 1),
+                 "maint_score": round(maintainability_score, 1),
+                 "qgis_score": 0.0,
+                 "security_score": round(security_score, 1),
+             }
+
+         qgis_score = self._get_qgis_score(
+             qgis_checks["compliance"],
+             qgis_checks["structure"],
+             qgis_checks["metadata"],
+             semantic["missing_resources"],
+             qgis_checks["binaries"],
+             qgis_checks["package_size"],
+             security_penalty,
+         )
+
+         return {
+             "code_score": round(module_score, 1),
+             "maint_score": round(maintainability_score, 1),
+             "qgis_score": round(qgis_score, 1),
+             "security_score": round(security_score, 1),
+         }
+
+     def _get_mi_score(self, modules_data: List[ModuleAnalysisResult]) -> float:
+         """Calculates module stability based on Maintainability Index (MI)."""
+         mi_scores = []
+         for m in modules_data:
+             cc = m.get("complexity", 1)
+             sloc = max(1, m.get("lines", 1))
+             mi = (171 - 0.23 * cc - 16.2 * math.log(sloc)) * 100 / 171
+             mi_scores.append(max(0, mi))
+         return sum(mi_scores) / len(mi_scores) if mi_scores else 0.0
+
+     def _get_maint_score(
+         self,
+         modules_data: List[ModuleAnalysisResult],
+         ruff_findings: List[Dict[str, Any]],
+     ) -> float:
+         """Calculates maintainability based on function complexity and linting penalties."""
+         all_func_comp = []
+         for m in modules_data:
+             for f in m.get("functions", []):
+                 all_func_comp.append(f["complexity"])
+
+         avg_func_comp = sum(all_func_comp) / len(all_func_comp) if all_func_comp else 1.0
+         func_score = max(0, 100 - (max(0, avg_func_comp - 10) * 5))
+
+         total_lines = sum(m.get("lines", 0) for m in modules_data)
+         errors = sum(1 for f in ruff_findings if f.get("code", "").startswith(("E", "F")))
+         others = len(ruff_findings) - errors
+
+         lint_penalty = ((5 * errors + others) / max(1, total_lines / 10)) * 10
+         lint_score = max(0, 100 - lint_penalty)
+
+         return float((func_score * 0.7) + (lint_score * 0.3))
+
+     def _get_modernization_bonus(self, modules_data: List[ModuleAnalysisResult]) -> float:
+         """Calculates modernization bonuses based on type hints and documentation styles."""
+         total_functions = 0
+         total_params = 0
+         annotated_params = 0
+         has_return_hint = 0
+         detected_styles = set()
+
+         for m in modules_data:
+             metrics = m.get("research_metrics", {})
+             t_stats = metrics.get("type_hint_stats", {})
+             total_functions += t_stats.get("total_functions", 0)
+             total_params += t_stats.get("total_parameters", 0)
+             annotated_params += t_stats.get("annotated_parameters", 0)
+             has_return_hint += t_stats.get("has_return_hint", 0)
+             detected_styles.update(metrics.get("docstring_styles", []))
+
+         bonus = 0.0
+         if total_params > 0 or total_functions > 0:
+             param_cov = annotated_params / max(1, total_params)
+             ret_cov = has_return_hint / max(1, total_functions)
+             if param_cov >= 0.8 and ret_cov >= 0.8:
+                 bonus += 5.0
+
+         if detected_styles:
+             bonus += 2.0
+         return bonus
+
+     def _get_qgis_score(
+         self,
+         compliance: Dict[str, Any],
+         structure: Dict[str, Any],
+         metadata: Dict[str, Any],
+         missing_resources: List[str],
+         binaries: List[str],
+         package_size: float,
+         security_penalty: float = 0.0,
+     ) -> float:
+         """Calculates QGIS-specific compliance score."""
+         score = 100.0
+         score -= compliance.get("issues_count", 0) * 2
+         if not structure.get("is_valid", True):
+             score -= 20
+         if not metadata.get("is_valid", True):
+             score -= 10
+         score -= len(missing_resources) * 5
+         score -= len(binaries) * 50
+         if package_size > 20:
+             score -= 10
+
+         score -= security_penalty
+         return float(max(0, score))
+
+     def _get_security_penalty(self, modules_data: List[ModuleAnalysisResult]) -> float:
+         """Calculates total penalty for security vulnerabilities."""
+         penalty = 0.0
+         for m in modules_data:
+             for issue in m.get("security_issues", []):
+                 sev = issue.get("severity", "medium").lower()
+                 if sev == "high":
+                     penalty += 10.0
+                 elif sev == "medium":
+                     penalty += 5.0
+                 else:
+                     penalty += 2.0
+         return penalty
+

  class ProjectAnalyzer:
      def __init__(
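As a point of reference, the new ScoringEngine can be driven on its own. The snippet below is a minimal sketch, assuming the 1.6.0 wheel is installed; the single module record is invented toy data and only carries the keys the scoring methods actually read ("complexity", "lines", "functions", "security_issues").

# Minimal sketch (not part of the package) exercising ScoringEngine directly.
from analyzer.engine import ScoringEngine

modules_data = [
    {
        "path": "my_plugin/plugin.py",     # hypothetical module
        "complexity": 10,                  # module cyclomatic complexity
        "lines": 200,                      # SLOC
        "functions": [{"complexity": 4}],
        "security_issues": [],
    }
]
semantic = {"cycles": [], "metrics": {}, "missing_resources": []}

engine = ScoringEngine("generic")
scores = engine.calculate_project_scores(modules_data, [], None, semantic)
# MI for the single module: (171 - 0.23*10 - 16.2*ln(200)) * 100 / 171 ≈ 48.5,
# so with no Ruff findings, cycles, or security issues this should print roughly
# {"code_score": 48.5, "maint_score": 100.0, "qgis_score": 0.0, "security_score": 100.0}.
print(scores)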
@@ -79,56 +301,32 @@ class ProjectAnalyzer:
          self.max_workers = min(os.cpu_count() or 4, 4)
          self.max_file_size_kb = 500

-         # Load profile config
-         self.config = load_profile_config(self.project_path, profile)
+         # Load and wrap config
+         raw_config = load_profile_config(self.project_path, profile)
+         self.config = ProjectConfig(
+             strict=raw_config.get("strict", False),
+             generate_html=raw_config.get("generate_html", True),
+             fail_on_error=raw_config.get("fail_on_error", False),
+             project_type=raw_config.get("project_type", "auto"),
+             rules=raw_config.get("rules", {}),
+         )

          # Detect project type
-         self.project_type = self.config.get("project_type", "auto")
+         self.project_type = self.config.project_type
          if self.project_type == "auto":
              metadata_file = self.project_path / "metadata.txt"
              self.project_type = "qgis" if metadata_file.exists() else "generic"

          logger.info(f"📁 Project type: {self.project_type.upper()}")

+         # Initialize Engines
+         self.scoring = ScoringEngine(self.project_type)
+
          # Load .analyzerignore
          ignore_file = self.project_path / ".analyzerignore"
          patterns = load_ignore_patterns(ignore_file)
          self.matcher = IgnoreMatcher(self.project_path, patterns)

-     def get_python_files(self) -> List[pathlib.Path]:
-         """Scans Python files ignoring common folders and .analyzerignore patterns.
-
-         Returns:
-             A sorted list of pathlib.Path objects for all detected Python files.
-         """
-         python_files = []
-         project_path = pathlib.Path(self.project_path)
-
-         # Handle direct file input
-         if project_path.is_file():
-             if project_path.suffix == ".py":
-                 return [project_path]
-             return []
-
-         # Handle directory scan
-         for root, dirs, files in os.walk(self.project_path):
-             root_path = pathlib.Path(root)
-
-             # Filter directories
-             dirs[:] = [d for d in dirs if not self.matcher.is_ignored(root_path / d)]
-
-             for file in files:
-                 file_path = root_path / file
-                 if file.endswith(".py") and not self.matcher.is_ignored(file_path):
-                     # Skip very large files to avoid OOM
-                     if file_path.stat().st_size > self.max_file_size_kb * 1024:
-                         logger.warning(
-                             f"⚠️ Skipping large file: {file_path.name} (> {self.max_file_size_kb}KB)"
-                         )
-                         continue
-                     python_files.append(file_path)
-         return sorted(python_files)
-
      def run_ruff_audit(self) -> List[Dict[str, Any]]:
          """Executes Ruff linting via subprocess.

@@ -155,8 +353,8 @@ class ProjectAnalyzer:
              return []

      def _run_parallel_analysis(
-         self, files: List[pathlib.Path], rules_config: dict
-     ) -> List[Dict[str, Any]]:
+         self, files: List[pathlib.Path], rules_config: Dict[str, Any]
+     ) -> List[ModuleAnalysisResult]:
          """Runs parallel analysis on all Python files.

          Args:
@@ -167,7 +365,7 @@ class ProjectAnalyzer:
              A list of module analysis results.
          """
          tracker = ProgressTracker(len(files))
-         modules_data = []
+         modules_data: List[ModuleAnalysisResult] = []

          with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
              futures = {
@@ -184,44 +382,51 @@ class ProjectAnalyzer:
          return modules_data

      def _run_qgis_specific_checks(
-         self, modules_data: List[Dict[str, Any]], rules_config: dict
-     ) -> tuple:
+         self,
+         modules_data: List[ModuleAnalysisResult],
+         rules_config: Dict[str, Any],
+         discovery: Dict[str, Any],
+     ) -> QGISChecksResult:
          """Runs QGIS-specific validation checks.

          Args:
-             modules_data: List of already analyzed module data.
-             rules_config: Rule-specific configuration overrides.
+             modules_data: Results from module analysis.
+             rules_config: Configuration for rules.
+             discovery: Discovery results from the project scanner.

          Returns:
-             A tuple of (compliance, structure, metadata, binaries, package_size, url_status).
+             A QGISChecksResult containing findings from all checks.
          """
-         compliance = audit_qgis_standards(
-             modules_data, self.project_path, rules_config=rules_config
-         )
-
-         # Official repository audit
-         metadata_path = safe_path_resolve(self.project_path, "metadata.txt")
+         metadata_file = self.project_path / "metadata.txt"
+         compliance = audit_qgis_standards(modules_data, self.project_path, rules_config)
          structure = validate_plugin_structure(self.project_path)
-         metadata = validate_metadata(metadata_path)
+         metadata = validate_metadata(metadata_file)

-         # Repository Compliance Checks
-         logger.info("Running QGIS repository compliance checks...")
-         binaries = scan_for_binaries(self.project_path, self.matcher)
-         package_size = calculate_package_size(self.project_path, self.matcher)
-         url_status = {}
-         if metadata.get("is_valid") and "metadata" in metadata:
-             url_status = validate_metadata_urls(metadata["metadata"])
+         # New Repository Constraints
+         constraints = validate_package_constraints(
+             discovery["total_size_mb"], discovery["binaries"]
+         )

-         return compliance, structure, metadata, binaries, package_size, url_status
+         return {
+             "compliance": compliance,
+             "structure": structure,
+             "metadata": metadata,
+             "binaries": discovery["binaries"],
+             "package_size": discovery["total_size_mb"],
+             "package_constraints": constraints,
+             "url_status": validate_metadata_urls(metadata.get("metadata", {})),
+         }

-     def _run_semantic_analysis(self, modules_data: List[Dict[str, Any]]) -> tuple:
+     def _run_semantic_analysis(
+         self, modules_data: List[ModuleAnalysisResult]
+     ) -> SemanticAnalysisResult:
          """Runs semantic analysis including dependencies and resources.

          Args:
              modules_data: List of analyzed module entries.

          Returns:
-             A tuple of (cycles, metrics, missing_resources).
+             A dictionary containing cycles, metrics, and missing resources.
          """
          dep_graph = DependencyGraph()
          all_resource_usages = []
@@ -232,9 +437,11 @@ class ProjectAnalyzer:
              res_validator.scan_project_resources(self.matcher)

          for m in modules_data:
-             dep_graph.add_node(m["path"], m)
+             dep_graph.add_node(m["path"], cast(Dict[str, Any], m))
              if self.project_type == "qgis" and "resource_usages" in m:
-                 all_resource_usages.extend(m["resource_usages"])
+                 # Type safe usage of resource_usages from TypedDict
+                 resource_usages = m.get("resource_usages", [])
+                 all_resource_usages.extend(resource_usages)

          dep_graph.build_edges(self.project_path)
          cycles = dep_graph.detect_cycles()
@@ -244,76 +451,92 @@ class ProjectAnalyzer:
          if self.project_type == "qgis" and res_validator:
              missing_resources = res_validator.validate_usage(all_resource_usages)

-         return cycles, metrics, missing_resources
+         return {
+             "cycles": cycles,
+             "metrics": metrics,
+             "missing_resources": missing_resources,
+         }

      def _build_analysis_results(
          self,
          files: List[pathlib.Path],
-         modules_data: List[Dict[str, Any]],
+         modules_data: List[ModuleAnalysisResult],
          ruff_findings: List[Dict[str, Any]],
-         code_score: float,
-         maint_score: float,
-         qgis_score: float,
-         compliance: Dict[str, Any],
-         structure: Dict[str, Any],
-         metadata: Dict[str, Any],
-         cycles: List[List[str]],
-         metrics: Dict[str, Any],
-         missing_resources: List[str],
-         binaries: List[str],
-         package_size: float,
-         url_status: Dict[str, str],
-         security_score: float,
-         all_security_issues: List[Dict[str, Any]],
-     ) -> Dict[str, Any]:
-         """Consolidates analysis results into a single dictionary.
+         scores: ProjectScores,
+         qgis_checks: Optional[QGISChecksResult],
+         semantic: SemanticAnalysisResult,
+     ) -> FullAnalysisResult:
+         """Consolidates analysis results into a single dictionary."""
+         analyses: FullAnalysisResult = {
+             "project_name": self.project_path.name,
+             "project_type": self.project_type,
+             "metrics": self._get_metrics_summary(files, modules_data, scores),
+             "ruff_findings": ruff_findings,
+             "security": self._get_security_summary(modules_data, scores),
+             "semantic": {
+                 "circular_dependencies": semantic["cycles"],
+                 "coupling_metrics": semantic["metrics"],
+             },
+             "modules": modules_data,
+             "research_summary": self._get_research_summary(modules_data),
+         }

-         Args:
-             files: List of analyzed files.
-             modules_data: Detailed analysis for each module.
-             ruff_findings: Results from Ruff linting.
-             code_score: Calculated module stability score.
-             maint_score: Calculated maintainability score.
-             qgis_score: Calculated QGIS compliance score.
-             compliance: Detailed QGIS compliance findings.
-             structure: Plugin structure validation results.
-             metadata: Metadata validation results.
-             cycles: Detected circular dependency cycles.
-             metrics: Coupling and complexity metrics.
-             missing_resources: List of missing QRC resources.
-             binaries: List of prohibited binary files.
-             package_size: Size of the plugin package in MB.
-             url_status: Status of URLs in metadata.txt.
+         if self.project_type == "qgis" and qgis_checks:
+             analyses["metrics"]["overall_score"] = round(
+                 (scores["code_score"] * 0.5) + (scores["qgis_score"] * 0.5), 1
+             )
+             analyses["qgis_compliance"] = {
+                 "compliance_score": round(scores["qgis_score"], 1),
+                 "best_practices": qgis_checks["compliance"],
+                 "repository_standards": {
+                     "structure": qgis_checks["structure"],
+                     "metadata": qgis_checks["metadata"],
+                 },
+             }
+             analyses["semantic"]["missing_resources"] = semantic["missing_resources"]
+             analyses["repository_compliance"] = {
+                 "binaries": qgis_checks["binaries"],
+                 "package_size_mb": round(qgis_checks["package_size"], 2),
+                 "url_validation": qgis_checks["url_status"],
+                 "folder_name_valid": qgis_checks["structure"].get("folder_name_valid", True),
+                 "constraint_errors": qgis_checks["package_constraints"].get("errors", []),
+                 "is_compliant": qgis_checks["package_constraints"].get("is_valid", True)
+                 and qgis_checks["structure"].get("is_valid", True),
+             }

-         Returns:
-             The final analysis results dictionary.
-         """
-         metrics_summary = {
+         return analyses
+
+     def _get_metrics_summary(
+         self,
+         files: List[pathlib.Path],
+         modules_data: List[ModuleAnalysisResult],
+         scores: ProjectScores,
+     ) -> Dict[str, Any]:
+         """Generates the metrics summary portion of the results."""
+         return {
              "total_files": len(files),
              "total_lines": sum(m["lines"] for m in modules_data),
-             "quality_score": round(code_score, 1),
-             "maintainability_score": round(maint_score, 1),
-             "security_score": round(security_score, 1),
+             "quality_score": round(scores["code_score"], 1),
+             "maintainability_score": round(scores["maint_score"], 1),
+             "security_score": round(scores["security_score"], 1),
          }

-         if self.project_type == "qgis":
-             metrics_summary["overall_score"] = round((code_score * 0.5) + (qgis_score * 0.5), 1)
+     def _get_security_summary(
+         self, modules_data: List[ModuleAnalysisResult], scores: ProjectScores
+     ) -> Dict[str, Any]:
+         """Generates the security summary portion of the results."""
+         all_security_issues = []
+         for m in modules_data:
+             all_security_issues.extend(m.get("security_issues", []))

-         analyses = {
-             "project_name": self.project_path.name,
-             "project_type": self.project_type,
-             "metrics": metrics_summary,
-             "ruff_findings": ruff_findings,
-             "security": {
-                 "findings": all_security_issues,
-                 "count": len(all_security_issues),
-                 "score": round(security_score, 1),
-             },
-             "semantic": {"circular_dependencies": cycles, "coupling_metrics": metrics},
-             "modules": modules_data,
+         return {
+             "findings": all_security_issues,
+             "count": len(all_security_issues),
+             "score": round(scores["security_score"], 1),
          }

-         # Aggregate research metrics for summary
+     def _get_research_summary(self, modules_data: List[ModuleAnalysisResult]) -> Dict[str, Any]:
+         """Aggregates research metrics for summary."""
          total_functions = 0
          total_params = 0
          annotated_params = 0
336
559
 
337
560
  detected_styles.update(r_metrics.get("docstring_styles", []))
338
561
 
339
- analyses["research_summary"] = {
562
+ return {
340
563
  "type_hint_coverage": round((annotated_params / max(1, total_params)) * 100, 1)
341
564
  if total_params > 0
342
565
  else 0.0,
@@ -349,32 +572,17 @@ class ProjectAnalyzer:
349
572
  "detected_docstring_styles": list(detected_styles),
350
573
  }
351
574
 
352
- if self.project_type == "qgis":
353
- analyses["qgis_compliance"] = {
354
- "compliance_score": round(qgis_score, 1),
355
- "best_practices": compliance,
356
- "repository_standards": {"structure": structure, "metadata": metadata},
357
- }
358
- analyses["semantic"]["missing_resources"] = missing_resources
359
- analyses["repository_compliance"] = {
360
- "binaries": binaries,
361
- "package_size_mb": round(package_size, 2),
362
- "url_validation": url_status,
363
- "is_compliant": len(binaries) == 0 and package_size <= 20,
364
- }
365
-
366
- return analyses
367
-
368
- def _save_reports(self, analyses: Dict[str, Any]) -> None:
575
+ def _save_reports(self, analyses: FullAnalysisResult) -> None:
369
576
  """Saves all generated analysis reports to the output directory.
370
577
 
371
578
  Args:
372
579
  analyses: The consolidated analysis results dictionary.
373
580
  """
374
- generate_markdown_summary(analyses, self.output_dir / "PROJECT_SUMMARY.md")
375
- if self.config.get("generate_html", True):
376
- generate_html_report(analyses, self.output_dir / "PROJECT_SUMMARY.html")
377
- save_json_context(analyses, self.output_dir / "project_context.json")
581
+ data = cast(Dict[str, Any], analyses)
582
+ generate_markdown_summary(data, self.output_dir / "PROJECT_SUMMARY.md")
583
+ if self.config.generate_html:
584
+ generate_html_report(data, self.output_dir / "PROJECT_SUMMARY.html")
585
+ save_json_context(data, self.output_dir / "project_context.json")
378
586
 
379
587
  def run(self) -> bool:
380
588
  """Executes the complete analysis pipeline.
@@ -384,8 +592,16 @@ class ProjectAnalyzer:
384
592
  False if it failed due to critical system errors or strict mode violations.
385
593
  """
386
594
  logger.info(f"🔍 Analyzing: {self.project_path}")
387
- files = self.get_python_files()
388
- rules_config = self.config.get("rules", {})
595
+
596
+ # Unified Project Discovery
597
+ discovery = discover_project_files(self.project_path, self.matcher)
598
+ files = discovery["python_files"]
599
+ rules_config = self.config.rules
600
+
601
+ # Update Project Type if it was auto
602
+ if self.config.project_type == "auto":
603
+ self.project_type = "qgis" if discovery["has_metadata"] else "generic"
604
+ logger.info(f"📁 Project type: {self.project_type.upper()}")
389
605
 
390
606
  # Parallel analysis
391
607
  modules_data = self._run_parallel_analysis(files, rules_config)
@@ -394,66 +610,31 @@ class ProjectAnalyzer:
394
610
  ruff_findings = self.run_ruff_audit()
395
611
 
396
612
  # Initialize defaults
397
- compliance: Dict[str, Any] = {"issues": [], "issues_count": 0}
398
- structure: Dict[str, Any] = {"is_valid": True}
399
- metadata: Dict[str, Any] = {"is_valid": True}
400
- binaries: List[str] = []
401
- package_size = 0
402
- url_status = {}
613
+ qgis_checks: Optional[QGISChecksResult] = None
403
614
 
404
615
  # QGIS-specific checks
405
616
  if self.project_type == "qgis":
406
- compliance, structure, metadata, binaries, package_size, url_status = (
407
- self._run_qgis_specific_checks(modules_data, rules_config)
408
- )
617
+ qgis_checks = self._run_qgis_specific_checks(modules_data, rules_config, discovery)
409
618
 
410
619
  # Semantic Analysis
411
- semantic_res = self._run_semantic_analysis(modules_data)
412
- cycles = semantic_res[0] if len(semantic_res) > 0 else []
413
- metrics = semantic_res[1] if len(semantic_res) > 1 else {}
414
- missing_resources = semantic_res[2] if len(semantic_res) > 2 else []
415
- # Calculate scores
416
- scores = self._calculate_scores(
620
+ semantic = self._run_semantic_analysis(modules_data)
621
+
622
+ # Calculate scores via ScoringEngine
623
+ scores = self.scoring.calculate_project_scores(
417
624
  modules_data,
418
625
  ruff_findings,
419
- compliance,
420
- structure,
421
- metadata,
422
- cycles,
423
- missing_resources,
424
- binaries,
425
- package_size,
626
+ qgis_checks,
627
+ semantic,
426
628
  )
427
629
 
428
- code_score = scores[0] if len(scores) > 0 else 0.0
429
- maint_score = scores[1] if len(scores) > 1 else 0.0
430
- qgis_score = scores[2] if len(scores) > 2 else 0.0
431
- security_score = scores[3] if len(scores) > 3 else 0.0
432
-
433
- # Aggregate all security findings
434
- all_security_issues = []
435
- for m in modules_data:
436
- all_security_issues.extend(m.get("security_issues", []))
437
-
438
630
  # Build results
439
631
  analyses = self._build_analysis_results(
440
632
  files,
441
633
  modules_data,
442
634
  ruff_findings,
443
- code_score,
444
- maint_score,
445
- qgis_score,
446
- compliance,
447
- structure,
448
- metadata,
449
- cycles,
450
- metrics,
451
- missing_resources,
452
- binaries,
453
- package_size,
454
- url_status,
455
- security_score,
456
- all_security_issues,
635
+ scores,
636
+ qgis_checks,
637
+ semantic,
457
638
  )
458
639
 
459
640
  # Save reports
@@ -462,11 +643,15 @@ class ProjectAnalyzer:
462
643
  logger.info(f"✅ Analysis completed. Reports in: {self.output_dir}")
463
644
 
464
645
  # Fail on error if strict mode is on
465
- if self.config.get("fail_on_error") and self.project_type == "qgis":
646
+ if self.config.fail_on_error and self.project_type == "qgis" and qgis_checks:
647
+ compliance = qgis_checks["compliance"]
648
+ structure = qgis_checks["structure"]
649
+ metadata = qgis_checks["metadata"]
466
650
  if (
467
651
  int(compliance.get("issues_count", 0)) > 0
468
- or not structure["is_valid"]
469
- or not metadata["is_valid"]
652
+ or not structure.get("is_valid", True)
653
+ or not metadata.get("is_valid", True)
654
+ or not qgis_checks["package_constraints"].get("is_valid", True)
470
655
  ):
471
656
  logger.error(
472
657
  "❌ Strict Mode: Critical QGIS compliance issues detected. Failing analysis."
@@ -477,34 +662,29 @@ class ProjectAnalyzer:
477
662
 
478
663
  def _calculate_scores(
479
664
  self,
480
- modules_data: List[Dict[str, Any]],
665
+ modules_data: List[ModuleAnalysisResult],
481
666
  ruff_findings: List[Dict[str, Any]],
482
- compliance: Dict[str, Any],
483
- structure: Dict[str, Any],
484
- metadata: Dict[str, Any],
485
- cycles: List[List[str]],
486
- missing_resources: List[str],
487
- binaries: List[str],
488
- package_size: float,
489
- ) -> tuple:
667
+ qgis_checks: Optional[QGISChecksResult],
668
+ semantic: SemanticAnalysisResult,
669
+ ) -> ProjectScores:
490
670
  """Calculates project quality scores based on industry-standard formulas.
491
671
 
492
672
  Args:
493
673
  modules_data: Detailed analysis results for each module.
494
674
  ruff_findings: List of Ruff linting findings.
495
- compliance: Findings from QGIS standard audit.
496
- structure: Results of plugin structure validation.
497
- metadata: Results of metadata.txt validation.
498
- cycles: List of circular dependency cycles.
499
- missing_resources: List of missing QRC resource paths.
500
- binaries: List of prohibited binary files.
501
- package_size: Size of the plugin package in MB.
675
+ qgis_checks: Results of QGIS-specific validation checks.
676
+ semantic: Results of semantic analysis.
502
677
 
503
678
  Returns:
504
- A tuple of (module_stability, maintainability, qgis_compliance) scores out of 100.
679
+ A ProjectScores TypedDict containing stability, maintainability, and qgis scores.
505
680
  """
506
681
  if not modules_data:
507
- return 0.0, 0.0, 0.0, 0.0
682
+ return {
683
+ "code_score": 0.0,
684
+ "maint_score": 0.0,
685
+ "qgis_score": 0.0,
686
+ "security_score": 0.0,
687
+ }
508
688
 
509
689
  module_score = self._get_mi_score(modules_data)
510
690
  maintainability_score = self._get_maint_score(modules_data, ruff_findings)
@@ -516,36 +696,37 @@ class ProjectAnalyzer:
516
696
  security_score = max(0.0, 100.0 - security_penalty)
517
697
 
518
698
  # Global penalties (e.g., circular dependencies)
699
+ cycles = semantic["cycles"]
519
700
  penalty = len(cycles) * 10
520
701
  module_score = max(0, module_score - penalty)
521
702
  maintainability_score = max(0, maintainability_score - penalty)
522
703
 
523
- if self.project_type == "generic":
524
- return (
525
- round(module_score, 1),
526
- round(maintainability_score, 1),
527
- 0.0,
528
- round(security_score, 1),
529
- )
704
+ if self.project_type == "generic" or not qgis_checks:
705
+ return {
706
+ "code_score": round(module_score, 1),
707
+ "maint_score": round(maintainability_score, 1),
708
+ "qgis_score": 0.0,
709
+ "security_score": round(security_score, 1),
710
+ }
530
711
 
531
712
  qgis_score = self._get_qgis_score(
532
- compliance,
533
- structure,
534
- metadata,
535
- missing_resources,
536
- binaries,
537
- package_size,
713
+ qgis_checks["compliance"],
714
+ qgis_checks["structure"],
715
+ qgis_checks["metadata"],
716
+ semantic["missing_resources"],
717
+ qgis_checks["binaries"],
718
+ qgis_checks["package_size"],
538
719
  security_penalty,
539
720
  )
540
721
 
541
- return (
542
- round(module_score, 1),
543
- round(maintainability_score, 1),
544
- round(qgis_score, 1),
545
- round(security_score, 1),
546
- )
722
+ return {
723
+ "code_score": round(module_score, 1),
724
+ "maint_score": round(maintainability_score, 1),
725
+ "qgis_score": round(qgis_score, 1),
726
+ "security_score": round(security_score, 1),
727
+ }
547
728
 
548
- def _get_mi_score(self, modules_data: List[Dict[str, Any]]) -> float:
729
+ def _get_mi_score(self, modules_data: List[ModuleAnalysisResult]) -> float:
549
730
  """Calculates module stability based on Maintainability Index (MI)."""
550
731
  mi_scores = []
551
732
  for m in modules_data:
@@ -557,7 +738,9 @@ class ProjectAnalyzer:
557
738
  return sum(mi_scores) / len(mi_scores) if mi_scores else 0.0
558
739
 
559
740
  def _get_maint_score(
560
- self, modules_data: List[Dict[str, Any]], ruff_findings: List[Dict[str, Any]]
741
+ self,
742
+ modules_data: List[ModuleAnalysisResult],
743
+ ruff_findings: List[Dict[str, Any]],
561
744
  ) -> float:
562
745
  """Calculates maintainability based on function complexity and linting penalties."""
563
746
  # 1. Function Complexity Score
@@ -579,7 +762,7 @@ class ProjectAnalyzer:
579
762
 
580
763
  return float((func_score * 0.7) + (lint_score * 0.3))
581
764
 
582
- def _get_modernization_bonus(self, modules_data: List[Dict[str, Any]]) -> float:
765
+ def _get_modernization_bonus(self, modules_data: List[ModuleAnalysisResult]) -> float:
583
766
  """Calculates modernization bonuses based on type hints and documentation styles."""
584
767
  total_functions = 0
585
768
  total_params = 0
@@ -634,7 +817,7 @@ class ProjectAnalyzer:
634
817
 
635
818
  return float(max(0, score))
636
819
 
637
- def _get_security_penalty(self, modules_data: List[Dict[str, Any]]) -> float:
820
+ def _get_security_penalty(self, modules_data: List[ModuleAnalysisResult]) -> float:
638
821
  """Calculates total penalty for security vulnerabilities."""
639
822
  penalty = 0.0
640
823
  for m in modules_data: