qgis-plugin-analyzer 1.4.0__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
- analyzer/__init__.py +2 -1
- analyzer/cli.py +49 -146
- analyzer/commands.py +163 -0
- analyzer/engine.py +121 -58
- analyzer/reporters/markdown_reporter.py +41 -0
- analyzer/reporters/summary_reporter.py +67 -3
- analyzer/rules/qgis_rules.py +3 -1
- analyzer/scanner.py +31 -603
- analyzer/secrets.py +84 -0
- analyzer/security_checker.py +85 -0
- analyzer/security_rules.py +127 -0
- analyzer/visitors.py +455 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.5.0.dist-info}/METADATA +20 -7
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.5.0.dist-info}/RECORD +18 -13
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.5.0.dist-info}/WHEEL +1 -1
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.5.0.dist-info}/entry_points.txt +0 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.5.0.dist-info}/licenses/LICENSE +0 -0
- {qgis_plugin_analyzer-1.4.0.dist-info → qgis_plugin_analyzer-1.5.0.dist-info}/top_level.txt +0 -0
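The per-file line counts above can be reproduced locally from the two wheels. Below is a minimal sketch using only the standard library; the wheel filenames are assumptions based on the versions shown, and the registry's own diff algorithm may count slightly differently.

```python
# Hypothetical helper: reproduce per-file +/- line counts between two wheels.
# Wheel paths are assumptions; adjust to wherever the files were downloaded.
import difflib
import zipfile

OLD = "qgis_plugin_analyzer-1.4.0-py3-none-any.whl"
NEW = "qgis_plugin_analyzer-1.5.0-py3-none-any.whl"


def read_members(path: str) -> dict[str, list[str]]:
    """Map each archive member to its decoded lines (a wheel is a plain zip)."""
    with zipfile.ZipFile(path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace").splitlines()
            for name in zf.namelist()
            if not name.endswith("/")
        }


old_files, new_files = read_members(OLD), read_members(NEW)
for name in sorted(old_files.keys() | new_files.keys()):
    diff = difflib.unified_diff(old_files.get(name, []), new_files.get(name, []), lineterm="")
    added = removed = 0
    for line in diff:
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    if added or removed:
        print(f"{name} +{added} -{removed}")
```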
analyzer/__init__.py
CHANGED
analyzer/cli.py
CHANGED
@@ -23,7 +23,15 @@ import argparse
 import pathlib
 import sys
 
-from .
+from . import __version__
+from .commands import (
+    handle_analyze,
+    handle_fix,
+    handle_init,
+    handle_list_rules,
+    handle_security,
+    handle_summary,
+)
 from .utils import logger, setup_logger
 
 
@@ -36,6 +44,7 @@ def _setup_argument_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser(
         description="QGIS Plugin Analyzer - A guardian for your PyQGIS code"
     )
+    parser.add_argument("-v", "--version", action="version", version=f"%(prog)s {__version__}")
     subparsers = parser.add_subparsers(dest="command", help="Command to execute")
 
     # Analyze Command
@@ -60,6 +69,27 @@ def _setup_argument_parser() -> argparse.ArgumentParser:
         default="default",
     )
 
+    # Security Command
+    security_parser = subparsers.add_parser("security", help="Run a focused security scan")
+    security_parser.add_argument("project_path", help="Path to the QGIS project to scan")
+    security_parser.add_argument(
+        "-o",
+        "--output",
+        help="Output directory for reports",
+        default="./analysis_results",
+    )
+    security_parser.add_argument(
+        "-p",
+        "--profile",
+        help="Configuration profile from pyproject.toml",
+        default="default",
+    )
+    security_parser.add_argument(
+        "--deep",
+        action="store_true",
+        help="Run more intensive (but slower) security checks",
+    )
+
     # Fix Command
     fix_parser = subparsers.add_parser("fix", help="Auto-fix common QGIS plugin issues")
     fix_parser.add_argument("path", type=str, help="Path to the QGIS plugin directory")
@@ -90,6 +120,9 @@ def _setup_argument_parser() -> argparse.ArgumentParser:
     # List Rules Command
     subparsers.add_parser("list-rules", help="List all available QGIS audit rules")
 
+    # Version Command
+    subparsers.add_parser("version", help="Show the current version of the analyzer")
+
     # Init Command
     subparsers.add_parser("init", help="Initialize a new .analyzerignore with defaults")
 
@@ -114,141 +147,6 @@ def _setup_argument_parser() -> argparse.ArgumentParser:
     return parser
 
 
-def _handle_fix_command(args: argparse.Namespace) -> bool:
-    """Handles the execution of the 'fix' command.
-
-    Args:
-        args: Parsed command line arguments.
-
-    Returns:
-        True if the fix process completed successfully, False otherwise.
-    """
-    import json
-
-    from .fixer import AutoFixer
-
-    project_path = pathlib.Path(args.path).resolve()
-    if not project_path.exists():
-        print(f"❌ Path not found: {project_path}")
-        return False
-
-    # Run analysis first
-    print("🔍 Analyzing project for fixable issues...")
-    analyzer = ProjectAnalyzer(
-        str(project_path),
-        args.output if hasattr(args, "output") else "./analysis_results",
-        args.profile if hasattr(args, "profile") else "default",
-    )
-    analyzer.run()
-
-    # Load issues
-    context_file = analyzer.output_dir / "project_context.json"
-    with open(context_file) as f:
-        context = json.load(f)
-
-    all_issues = []
-    for module in context.get("modules", []):
-        all_issues.extend(module.get("ast_issues", []))
-
-    if args.rules:
-        rule_ids = [r.strip() for r in args.rules.split(",")]
-        all_issues = [i for i in all_issues if i.get("type") in rule_ids]
-
-    fixer = AutoFixer(project_path, dry_run=not args.apply)
-    fixable = fixer.get_fixable_issues(all_issues)
-
-    if not fixable:
-        print("✅ No fixable issues found!")
-        return True
-
-    print(f"\n📋 Found {len(fixable)} fixable issue(s)")
-    if not args.apply:
-        print("\n⚠️ DRY RUN MODE (use --apply to execute changes)\n")
-
-    stats = fixer.apply_fixes(fixable, interactive=not args.auto_approve)
-    print(
-        f"\n📊 Summary: Applied: {stats['applied']}, Skipped: {stats['skipped']}, Failed: {stats['failed']}"
-    )
-    return True
-
-
-def _handle_analyze_command(args: argparse.Namespace) -> None:
-    """Handles the execution of the 'analyze' command.
-
-    Args:
-        args: Parsed command line arguments.
-    """
-    # Force generate_html based on flag, overriding profile if necessary for CLI usage
-    # We pass it via a temporary config override or modify the analyzer init
-    # For now, let's pass it to the analyzer constructor or modify config after init
-
-    analyzer = ProjectAnalyzer(args.project_path, args.output, args.profile)
-
-    # Override config based on CLI flag
-    if hasattr(args, "report") and args.report:
-        analyzer.config["generate_html"] = True
-    else:
-        analyzer.config["generate_html"] = False
-
-    success = analyzer.run()
-
-    # Always show terminal summary
-    from .reporters.summary_reporter import report_summary
-
-    # If we didn't generate reports, we might still want to show the summary
-    # using the in-memory data or the context file if it was saved.
-    # Engine saves json context by default? Let's check engine.py.
-    # Assuming engine saves project_context.json always or we need to access results directly.
-    # To keep it simple, we depend on the engine saving the context or returning it.
-    # Current engine.run retuns bool.
-
-    context_path = analyzer.output_dir / "project_context.json"
-    if context_path.exists():
-        report_summary(context_path)
-
-    if not success:
-        sys.exit(1)
-
-
-def _handle_list_rules_command() -> None:
-    """Handles the 'list-rules' command by displaying available audit rules."""
-    from .rules import get_qgis_audit_rules
-
-    rules = get_qgis_audit_rules()
-    print("\n📋 QGIS Audit Rules Catalog:")
-    print("=" * 30)
-    for r in rules:
-        print(f"- [{r['severity'].upper()}] {r['id']}: {r['message']}")
-    print(f"\nTotal: {len(rules)} rules.\n")
-
-
-def _handle_init_command() -> None:
-    """Handles the 'init' command by creating a default .analyzerignore file."""
-    from .utils import DEFAULT_EXCLUDE
-
-    ignore_file = pathlib.Path(".analyzerignore")
-    if ignore_file.exists():
-        print("⚠️ .analyzerignore already exists. Skipping.")
-    else:
-        with open(ignore_file, "w") as f:
-            f.write("# QGIS Plugin Analyzer Ignore File\n")
-            for p in DEFAULT_EXCLUDE:
-                f.write(f"{p}\n")
-        print("✅ Created .analyzerignore with default excludes.")
-
-
-def _handle_summary_command(args: argparse.Namespace) -> None:
-    """Handles the 'summary' command by displaying a terminal report.
-
-    Args:
-        args: Parsed command line arguments.
-    """
-    from .reporters.summary_reporter import report_summary
-
-    input_path = pathlib.Path(args.input).resolve()
-    report_summary(input_path, by=args.by)
-
-
 def main() -> None:
     """Main entry point for the QGIS Plugin Analyzer CLI.
 
@@ -260,6 +158,8 @@ def main() -> None:
     # Legacy support / default to analyze if no command provided
     if len(sys.argv) > 1 and sys.argv[1] not in [
         "analyze",
+        "security",
+        "version",
         "fix",
         "list-rules",
         "init",
@@ -278,17 +178,20 @@ def main() -> None:
     output_dir.mkdir(parents=True, exist_ok=True)
     setup_logger(output_dir)
 
+    # Command Dispatcher
+    dispatch = {
+        "fix": lambda: handle_fix(args),
+        "analyze": lambda: handle_analyze(args),
+        "list-rules": lambda: handle_list_rules(),
+        "init": lambda: handle_init(),
+        "summary": lambda: handle_summary(args),
+        "security": lambda: handle_security(args),
+        "version": lambda: print(f"qgis-analyzer {__version__}"),
+    }
+
     try:
-        if args.command
-
-        elif args.command == "analyze":
-            _handle_analyze_command(args)
-        elif args.command == "list-rules":
-            _handle_list_rules_command()
-        elif args.command == "init":
-            _handle_init_command()
-        elif args.command == "summary":
-            _handle_summary_command(args)
+        if args.command in dispatch:
+            dispatch[args.command]()
         else:
             parser.print_help()
 
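Note: the last hunk above replaces the if/elif chain in main() with a dispatch table of zero-argument lambdas, so handlers with different signatures (some take args, some take none) sit behind a uniform interface. A stripped-down sketch of the same pattern, with placeholder handlers rather than the package's real ones:

```python
# Minimal sketch of the dispatch-table pattern used in the new main().
# The handlers here are placeholders, not analyzer's real command handlers.
import argparse


def handle_analyze(args: argparse.Namespace) -> None:
    print(f"analyzing {args.project_path}")


def handle_version() -> None:
    print("qgis-analyzer x.y.z")


def main() -> None:
    parser = argparse.ArgumentParser(prog="qgis-analyzer")
    subparsers = parser.add_subparsers(dest="command")
    analyze = subparsers.add_parser("analyze")
    analyze.add_argument("project_path")
    subparsers.add_parser("version")

    args = parser.parse_args()

    # Each entry defers execution until the command is actually selected.
    dispatch = {
        "analyze": lambda: handle_analyze(args),
        "version": lambda: handle_version(),
    }
    if args.command in dispatch:
        dispatch[args.command]()
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
```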
analyzer/commands.py
ADDED
@@ -0,0 +1,163 @@
+"""Command handlers for the QGIS Plugin Analyzer CLI.
+
+This module contains the implementation of individual CLI commands to separate
+interface definition (cli.py) from execution logic.
+"""
+
+import argparse
+import json
+import pathlib
+import sys
+
+from .engine import ProjectAnalyzer
+from .fixer import AutoFixer
+from .reporters.summary_reporter import report_summary
+from .rules import get_qgis_audit_rules
+from .utils import DEFAULT_EXCLUDE
+
+
+def handle_fix(args: argparse.Namespace) -> bool:
+    """Handles the execution of the 'fix' command.
+
+    Args:
+        args: Parsed command line arguments.
+
+    Returns:
+        True if the fix process completed successfully, False otherwise.
+    """
+    project_path = pathlib.Path(args.path).resolve()
+    if not project_path.exists():
+        print(f"❌ Path not found: {project_path}")
+        return False
+
+    # Run analysis first
+    print("🔍 Analyzing project for fixable issues...")
+    analyzer = ProjectAnalyzer(
+        str(project_path),
+        args.output if hasattr(args, "output") else "./analysis_results",
+        args.profile if hasattr(args, "profile") else "default",
+    )
+    analyzer.run()
+
+    # Load issues
+    context_file = analyzer.output_dir / "project_context.json"
+    if not context_file.exists():
+        print("❌ Analysis failed to generate context file.")
+        return False
+
+    with open(context_file) as f:
+        context = json.load(f)
+
+    all_issues = []
+    for module in context.get("modules", []):
+        all_issues.extend(module.get("ast_issues", []))
+
+    if args.rules:
+        rule_ids = [r.strip() for r in args.rules.split(",")]
+        all_issues = [i for i in all_issues if i.get("type") in rule_ids]
+
+    fixer = AutoFixer(project_path, dry_run=not args.apply)
+    fixable = fixer.get_fixable_issues(all_issues)
+
+    if not fixable:
+        print("✅ No fixable issues found!")
+        return True
+
+    print(f"\n📋 Found {len(fixable)} fixable issue(s)")
+    if not args.apply:
+        print("\n⚠️ DRY RUN MODE (use --apply to execute changes)\n")
+
+    stats = fixer.apply_fixes(fixable, interactive=not args.auto_approve)
+    print(
+        f"\n📊 Summary: Applied: {stats['applied']}, Skipped: {stats['skipped']}, Failed: {stats['failed']}"
+    )
+    return True
+
+
+def handle_analyze(args: argparse.Namespace) -> None:
+    """Handles the execution of the 'analyze' command.
+
+    Args:
+        args: Parsed command line arguments.
+    """
+    analyzer = ProjectAnalyzer(args.project_path, args.output, args.profile)
+
+    # Override config based on CLI flag
+    if hasattr(args, "report") and args.report:
+        analyzer.config["generate_html"] = True
+    else:
+        analyzer.config["generate_html"] = False
+
+    success = analyzer.run()
+
+    context_path = analyzer.output_dir / "project_context.json"
+    if context_path.exists():
+        report_summary(context_path)
+
+    if not success:
+        sys.exit(1)
+
+
+def handle_list_rules() -> None:
+    """Handles the 'list-rules' command by displaying available audit rules."""
+    rules = get_qgis_audit_rules()
+    print("\n📋 QGIS Audit Rules Catalog:")
+    print("=" * 30)
+    for r in rules:
+        print(f"- [{r['severity'].upper()}] {r['id']}: {r['message']}")
+    print(f"\nTotal: {len(rules)} rules.\n")
+
+
+def handle_init() -> None:
+    """Handles the 'init' command by creating a default .analyzerignore file."""
+    ignore_file = pathlib.Path(".analyzerignore")
+    if ignore_file.exists():
+        print("⚠️ .analyzerignore already exists. Skipping.")
+    else:
+        with open(ignore_file, "w") as f:
+            f.write("# QGIS Plugin Analyzer Ignore File\n")
+            for p in DEFAULT_EXCLUDE:
+                f.write(f"{p}\n")
+        print("✅ Created .analyzerignore with default excludes.")
+
+
+def handle_summary(args: argparse.Namespace) -> None:
+    """Handles the 'summary' command by displaying a terminal report.
+
+    Args:
+        args: Parsed command line arguments.
+    """
+    input_path = pathlib.Path(args.input).resolve()
+    report_summary(input_path, by=args.by)
+
+
+def handle_security(args: argparse.Namespace) -> None:
+    """Handles the execution of the 'security' command.
+
+    Args:
+        args: Parsed command line arguments.
+    """
+    project_path = pathlib.Path(args.project_path).resolve()
+    if not project_path.exists():
+        print(f"❌ Path not found: {project_path}")
+        sys.exit(1)
+
+    print(f"🛡️ Starting focused security scan for: {project_path.name}...")
+
+    # Run analyzer with current profile
+    analyzer = ProjectAnalyzer(str(project_path), args.output, args.profile)
+
+    # We could potentially add a flag to 'deep' mode in the analyzer config
+    if args.deep:
+        analyzer.config["security_deep_scan"] = True
+        print("🔍 Deep scan enabled (Entropy analysis and full secret detection)")
+
+    success = analyzer.run()
+
+    context_path = analyzer.output_dir / "project_context.json"
+    if context_path.exists():
+        # Use the specialized security reporter
+        report_summary(context_path, by="security")
+
+    if not success:
+        sys.exit(1)
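With the handlers extracted into analyzer/commands.py, they can also be driven without the argument parser, for example from a test or another script. A hedged sketch: the Namespace attributes mirror the 'security' subparser arguments defined in cli.py above, but calling the handler this way is an assumption, not documented API, and it may call sys.exit() on failure just as the CLI does.

```python
# Hypothetical programmatic call into the new security handler.
# Attribute names mirror the 'security' subparser flags added in cli.py.
import argparse

from analyzer.commands import handle_security

args = argparse.Namespace(
    project_path="./my_qgis_plugin",   # path to the plugin to scan (example value)
    output="./analysis_results",       # same default as the -o/--output flag
    profile="default",                 # same default as the -p/--profile flag
    deep=True,                         # enables analyzer.config["security_deep_scan"]
)
handle_security(args)
```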
analyzer/engine.py
CHANGED
@@ -102,6 +102,15 @@ class ProjectAnalyzer:
             A sorted list of pathlib.Path objects for all detected Python files.
         """
         python_files = []
+        project_path = pathlib.Path(self.project_path)
+
+        # Handle direct file input
+        if project_path.is_file():
+            if project_path.suffix == ".py":
+                return [project_path]
+            return []
+
+        # Handle directory scan
         for root, dirs, files in os.walk(self.project_path):
             root_path = pathlib.Path(root)
 
@@ -254,6 +263,8 @@ class ProjectAnalyzer:
         binaries: List[str],
         package_size: float,
         url_status: Dict[str, str],
+        security_score: float,
+        all_security_issues: List[Dict[str, Any]],
     ) -> Dict[str, Any]:
         """Consolidates analysis results into a single dictionary.
 
@@ -282,6 +293,7 @@ class ProjectAnalyzer:
             "total_lines": sum(m["lines"] for m in modules_data),
             "quality_score": round(code_score, 1),
             "maintainability_score": round(maint_score, 1),
+            "security_score": round(security_score, 1),
         }
 
         if self.project_type == "qgis":
@@ -292,6 +304,11 @@ class ProjectAnalyzer:
             "project_type": self.project_type,
             "metrics": metrics_summary,
             "ruff_findings": ruff_findings,
+            "security": {
+                "findings": all_security_issues,
+                "count": len(all_security_issues),
+                "score": round(security_score, 1),
+            },
             "semantic": {"circular_dependencies": cycles, "coupling_metrics": metrics},
             "modules": modules_data,
         }
@@ -395,7 +412,6 @@ class ProjectAnalyzer:
         cycles = semantic_res[0] if len(semantic_res) > 0 else []
         metrics = semantic_res[1] if len(semantic_res) > 1 else {}
         missing_resources = semantic_res[2] if len(semantic_res) > 2 else []
-
         # Calculate scores
         scores = self._calculate_scores(
             modules_data,
@@ -408,10 +424,16 @@ class ProjectAnalyzer:
             binaries,
             package_size,
         )
-
+
         code_score = scores[0] if len(scores) > 0 else 0.0
         maint_score = scores[1] if len(scores) > 1 else 0.0
         qgis_score = scores[2] if len(scores) > 2 else 0.0
+        security_score = scores[3] if len(scores) > 3 else 0.0
+
+        # Aggregate all security findings
+        all_security_issues = []
+        for m in modules_data:
+            all_security_issues.extend(m.get("security_issues", []))
 
         # Build results
         analyses = self._build_analysis_results(
@@ -430,6 +452,8 @@ class ProjectAnalyzer:
             binaries,
             package_size,
             url_status,
+            security_score,
+            all_security_issues,
        )
 
         # Save reports
@@ -480,50 +504,83 @@ class ProjectAnalyzer:
             A tuple of (module_stability, maintainability, qgis_compliance) scores out of 100.
         """
         if not modules_data:
-            return 0.0, 0.0, 0.0
+            return 0.0, 0.0, 0.0, 0.0
+
+        module_score = self._get_mi_score(modules_data)
+        maintainability_score = self._get_maint_score(modules_data, ruff_findings)
+        modernization_bonus = self._get_modernization_bonus(modules_data)
+        maintainability_score = min(100.0, maintainability_score + modernization_bonus)
+
+        # Security context
+        security_penalty = self._get_security_penalty(modules_data)
+        security_score = max(0.0, 100.0 - security_penalty)
+
+        # Global penalties (e.g., circular dependencies)
+        penalty = len(cycles) * 10
+        module_score = max(0, module_score - penalty)
+        maintainability_score = max(0, maintainability_score - penalty)
+
+        if self.project_type == "generic":
+            return (
+                round(module_score, 1),
+                round(maintainability_score, 1),
+                0.0,
+                round(security_score, 1),
+            )
+
+        qgis_score = self._get_qgis_score(
+            compliance,
+            structure,
+            metadata,
+            missing_resources,
+            binaries,
+            package_size,
+            security_penalty,
+        )
+
+        return (
+            round(module_score, 1),
+            round(maintainability_score, 1),
+            round(qgis_score, 1),
+            round(security_score, 1),
+        )
 
-
-
+    def _get_mi_score(self, modules_data: List[Dict[str, Any]]) -> float:
+        """Calculates module stability based on Maintainability Index (MI)."""
         mi_scores = []
         for m in modules_data:
             cc = m.get("complexity", 1)
             sloc = max(1, m.get("lines", 1))
+            # Formula: MI = (171 - 0.23 * CC - 16.2 * ln(SLOC)) * 100 / 171
             mi = (171 - 0.23 * cc - 16.2 * math.log(sloc)) * 100 / 171
             mi_scores.append(max(0, mi))
+        return sum(mi_scores) / len(mi_scores) if mi_scores else 0.0
 
-
-
-
+    def _get_maint_score(
+        self, modules_data: List[Dict[str, Any]], ruff_findings: List[Dict[str, Any]]
+    ) -> float:
+        """Calculates maintainability based on function complexity and linting penalties."""
+        # 1. Function Complexity Score
         all_func_comp = []
         for m in modules_data:
             for f in m.get("functions", []):
                 all_func_comp.append(f["complexity"])
 
         avg_func_comp = sum(all_func_comp) / len(all_func_comp) if all_func_comp else 1.0
-        # Function complexity score: 100 is perfect, -5 per point over 10
         func_score = max(0, 100 - (max(0, avg_func_comp - 10) * 5))
 
-        #
-        # 10 - ((5*E + W + R + C) / statements) * 10
+        # 2. Lint Scoring (Pylint style)
         total_lines = sum(m.get("lines", 0) for m in modules_data)
-        errors =
-        others =
-        for find in ruff_findings:
-            code = find.get("code", "")
-            if code.startswith(("E", "F")):
-                errors += 1
-            else:
-                others += 1
+        errors = sum(1 for f in ruff_findings if f.get("code", "").startswith(("E", "F")))
+        others = len(ruff_findings) - errors
 
         lint_penalty = ((5 * errors + others) / max(1, total_lines / 10)) * 10
         lint_score = max(0, 100 - lint_penalty)
 
-
-        maintainability_score = (func_score * 0.7) + (lint_score * 0.3)
+        return float((func_score * 0.7) + (lint_score * 0.3))
 
-
-
-        has_docstring_count = 0
+    def _get_modernization_bonus(self, modules_data: List[Dict[str, Any]]) -> float:
+        """Calculates modernization bonuses based on type hints and documentation styles."""
         total_functions = 0
         total_params = 0
         annotated_params = 0
@@ -532,55 +589,61 @@ class ProjectAnalyzer:
 
         for m in modules_data:
             metrics = m.get("research_metrics", {})
-            d_stats = metrics.get("docstring_stats", {})
-            total_public_items += d_stats.get("total_public_items", 0)
-            has_docstring_count += d_stats.get("has_docstring", 0)
-
             t_stats = metrics.get("type_hint_stats", {})
             total_functions += t_stats.get("total_functions", 0)
             total_params += t_stats.get("total_parameters", 0)
             annotated_params += t_stats.get("annotated_parameters", 0)
             has_return_hint += t_stats.get("has_return_hint", 0)
-
             detected_styles.update(metrics.get("docstring_styles", []))
 
-
-        modernization_bonus = 0.0
-        # Type Hint Bonus: > 80% coverage on params and returns
+        bonus = 0.0
         if total_params > 0 or total_functions > 0:
             param_cov = annotated_params / max(1, total_params)
             ret_cov = has_return_hint / max(1, total_functions)
             if param_cov >= 0.8 and ret_cov >= 0.8:
-
+                bonus += 5.0
 
-        # Docstring Style Bonus: Standardized formats (Google/NumPy)
         if detected_styles:
-
-
-        maintainability_score = min(100.0, maintainability_score + modernization_bonus)
-
-        # Global penalties
-        penalty = len(cycles) * 10
-        module_score = max(0, module_score - penalty)
-        maintainability_score = max(0, maintainability_score - penalty)
-
-        if self.project_type == "generic":
-            return round(module_score, 1), round(maintainability_score, 1), 0.0
+            bonus += 2.0
+        return bonus
 
-
-
-
+    def _get_qgis_score(
+        self,
+        compliance: Dict[str, Any],
+        structure: Dict[str, Any],
+        metadata: Dict[str, Any],
+        missing_resources: List[str],
+        binaries: List[str],
+        package_size: float,
+        security_penalty: float = 0.0,
+    ) -> float:
+        """Calculates QGIS-specific compliance score."""
+        score = 100.0
+        score -= compliance.get("issues_count", 0) * 2
         if not structure.get("is_valid", True):
-
+            score -= 20
         if not metadata.get("is_valid", True):
-
-
-
+            score -= 10
+        score -= len(missing_resources) * 5
+        score -= len(binaries) * 50
         if package_size > 20:
-
+            score -= 10
 
-
-
-
-
-
+        # Security penalty
+        score -= security_penalty
+
+        return float(max(0, score))
+
+    def _get_security_penalty(self, modules_data: List[Dict[str, Any]]) -> float:
+        """Calculates total penalty for security vulnerabilities."""
+        penalty = 0.0
+        for m in modules_data:
+            for issue in m.get("security_issues", []):
+                sev = issue.get("severity", "medium").lower()
+                if sev == "high":
+                    penalty += 10.0
+                elif sev == "medium":
+                    penalty += 5.0
+                else:
+                    penalty += 2.0
+        return penalty
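A worked example of the scoring added above: the Maintainability Index formula used in _get_mi_score, and the severity-weighted penalty from _get_security_penalty (high 10, medium 5, otherwise 2, with security_score = 100 - penalty, floored at 0). The input numbers below are illustrative only, not taken from any real plugin.

```python
# Illustrative numbers only; mirrors the formulas shown in the engine.py diff.
import math

# MI = (171 - 0.23 * CC - 16.2 * ln(SLOC)) * 100 / 171, clamped at 0
cc, sloc = 12, 400  # hypothetical module complexity and line count
mi = (171 - 0.23 * cc - 16.2 * math.log(sloc)) * 100 / 171
print(round(max(0, mi), 1))  # -> 41.6

# Severity-weighted security penalty: high=10, medium=5, anything else=2;
# missing severity defaults to "medium", as in _get_security_penalty.
issues = [
    {"severity": "high"},
    {"severity": "medium"},
    {"severity": "low"},
]
weights = {"high": 10.0, "medium": 5.0}
penalty = sum(weights.get(i.get("severity", "medium").lower(), 2.0) for i in issues)
security_score = max(0.0, 100.0 - penalty)
print(penalty, security_score)  # -> 17.0 83.0
```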