claude-mpm 4.3.20__py3-none-any.whl → 4.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/agent_loader.py +2 -2
- claude_mpm/agents/agent_loader_integration.py +2 -2
- claude_mpm/agents/async_agent_loader.py +2 -2
- claude_mpm/agents/base_agent_loader.py +2 -2
- claude_mpm/agents/frontmatter_validator.py +2 -2
- claude_mpm/agents/system_agent_config.py +2 -2
- claude_mpm/agents/templates/data_engineer.json +1 -2
- claude_mpm/cli/commands/doctor.py +2 -2
- claude_mpm/cli/commands/mpm_init.py +560 -47
- claude_mpm/cli/commands/mpm_init_handler.py +6 -0
- claude_mpm/cli/parsers/mpm_init_parser.py +39 -1
- claude_mpm/cli/startup_logging.py +11 -9
- claude_mpm/commands/mpm-init.md +76 -12
- claude_mpm/config/agent_config.py +2 -2
- claude_mpm/config/paths.py +2 -2
- claude_mpm/core/agent_name_normalizer.py +2 -2
- claude_mpm/core/config.py +2 -1
- claude_mpm/core/config_aliases.py +2 -2
- claude_mpm/core/file_utils.py +1 -0
- claude_mpm/core/log_manager.py +2 -2
- claude_mpm/core/tool_access_control.py +2 -2
- claude_mpm/core/unified_agent_registry.py +2 -2
- claude_mpm/core/unified_paths.py +2 -2
- claude_mpm/experimental/cli_enhancements.py +3 -2
- claude_mpm/hooks/base_hook.py +2 -2
- claude_mpm/hooks/instruction_reinforcement.py +2 -2
- claude_mpm/hooks/memory_integration_hook.py +1 -1
- claude_mpm/hooks/validation_hooks.py +2 -2
- claude_mpm/scripts/mpm_doctor.py +2 -2
- claude_mpm/services/agents/loading/agent_profile_loader.py +2 -2
- claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
- claude_mpm/services/agents/loading/framework_agent_loader.py +2 -2
- claude_mpm/services/agents/management/agent_capabilities_generator.py +2 -2
- claude_mpm/services/agents/management/agent_management_service.py +2 -2
- claude_mpm/services/agents/memory/content_manager.py +5 -2
- claude_mpm/services/agents/memory/memory_categorization_service.py +5 -2
- claude_mpm/services/agents/memory/memory_file_service.py +28 -6
- claude_mpm/services/agents/memory/memory_format_service.py +5 -2
- claude_mpm/services/agents/memory/memory_limits_service.py +4 -2
- claude_mpm/services/agents/registry/deployed_agent_discovery.py +2 -2
- claude_mpm/services/agents/registry/modification_tracker.py +4 -4
- claude_mpm/services/async_session_logger.py +2 -1
- claude_mpm/services/claude_session_logger.py +2 -2
- claude_mpm/services/core/path_resolver.py +3 -2
- claude_mpm/services/diagnostics/diagnostic_runner.py +4 -3
- claude_mpm/services/event_bus/direct_relay.py +2 -1
- claude_mpm/services/event_bus/event_bus.py +2 -1
- claude_mpm/services/event_bus/relay.py +2 -2
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +2 -2
- claude_mpm/services/infrastructure/daemon_manager.py +2 -2
- claude_mpm/services/memory/cache/simple_cache.py +2 -2
- claude_mpm/services/project/archive_manager.py +981 -0
- claude_mpm/services/project/documentation_manager.py +536 -0
- claude_mpm/services/project/enhanced_analyzer.py +491 -0
- claude_mpm/services/project/project_organizer.py +904 -0
- claude_mpm/services/response_tracker.py +2 -2
- claude_mpm/services/socketio/handlers/connection.py +14 -33
- claude_mpm/services/socketio/server/eventbus_integration.py +2 -2
- claude_mpm/services/unified/__init__.py +65 -0
- claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
- claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
- claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
- claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
- claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
- claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
- claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
- claude_mpm/services/unified/deployment_strategies/base.py +557 -0
- claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
- claude_mpm/services/unified/deployment_strategies/local.py +594 -0
- claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
- claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
- claude_mpm/services/unified/interfaces.py +499 -0
- claude_mpm/services/unified/migration.py +532 -0
- claude_mpm/services/unified/strategies.py +551 -0
- claude_mpm/services/unified/unified_analyzer.py +534 -0
- claude_mpm/services/unified/unified_config.py +688 -0
- claude_mpm/services/unified/unified_deployment.py +470 -0
- claude_mpm/services/version_control/version_parser.py +5 -4
- claude_mpm/storage/state_storage.py +2 -2
- claude_mpm/utils/agent_dependency_loader.py +49 -0
- claude_mpm/utils/common.py +542 -0
- claude_mpm/utils/database_connector.py +298 -0
- claude_mpm/utils/error_handler.py +2 -1
- claude_mpm/utils/log_cleanup.py +2 -2
- claude_mpm/utils/path_operations.py +2 -2
- claude_mpm/utils/robust_installer.py +56 -0
- claude_mpm/utils/session_logging.py +2 -2
- claude_mpm/utils/subprocess_utils.py +2 -2
- claude_mpm/validation/agent_validator.py +2 -2
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/METADATA +1 -1
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/RECORD +96 -71
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/WHEEL +0 -0
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/top_level.txt +0 -0
claude_mpm/services/unified/analyzer_strategies/security_analyzer.py
@@ -0,0 +1,661 @@
+"""
+Security Analyzer Strategy Implementation
+=========================================
+
+Analyzes code for security vulnerabilities and compliance issues.
+Consolidates security analysis functionality from multiple services.
+
+Author: Claude MPM Development Team
+Created: 2025-01-26
+"""
+
+import ast
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set, Tuple
+
+from claude_mpm.core.logging_utils import get_logger
+
+from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority
+
+logger = get_logger(__name__)
+
+
+class SecurityAnalyzerStrategy(AnalyzerStrategy):
+    """
+    Strategy for analyzing security vulnerabilities and compliance.
+
+    Consolidates:
+    - Vulnerability detection (SQL injection, XSS, etc.)
+    - Hardcoded secrets detection
+    - Insecure configuration detection
+    - Dependency vulnerability scanning
+    - Security best practices validation
+    """
+
+    # Common security vulnerability patterns
+    VULNERABILITY_PATTERNS = {
+        "sql_injection": {
+            "patterns": [
+                r'(execute|query)\s*\(\s*["\'].*%[s|d].*["\'].*%',
+                r'(execute|query)\s*\(\s*.*\+.*\)',
+                r'f["\'].*SELECT.*{.*}.*FROM',
+            ],
+            "severity": "critical",
+            "description": "Potential SQL injection vulnerability",
+        },
+        "hardcoded_secret": {
+            "patterns": [
+                r'(password|passwd|pwd|secret|token|api_key)\s*=\s*["\'][^"\']{8,}["\']',
+                r'(AWS|AZURE|GCP)_.*KEY\s*=\s*["\'][^"\']+["\']',
+                r'(private_key|secret_key)\s*=\s*["\'][^"\']+["\']',
+            ],
+            "severity": "high",
+            "description": "Hardcoded secret or credential detected",
+        },
+        "weak_crypto": {
+            "patterns": [
+                r'(MD5|SHA1)\s*\(',
+                r'DES\s*\(',
+                r'Random\(\)(?!\.SystemRandom)',
+            ],
+            "severity": "medium",
+            "description": "Weak cryptographic algorithm usage",
+        },
+        "command_injection": {
+            "patterns": [
+                r'os\.(system|popen|spawn.*)\s*\([^)]*\+[^)]*\)',
+                r'subprocess\.(run|call|Popen)\s*\([^)]*shell\s*=\s*True',
+                r'eval\s*\([^)]*input\s*\(',
+            ],
+            "severity": "critical",
+            "description": "Potential command injection vulnerability",
+        },
+        "path_traversal": {
+            "patterns": [
+                r'open\s*\([^)]*\.\.[/\\]',
+                r'(read_file|write_file)\s*\([^)]*user_input',
+                r'Path\s*\([^)]*\+[^)]*\)',
+            ],
+            "severity": "high",
+            "description": "Potential path traversal vulnerability",
+        },
+        "xss": {
+            "patterns": [
+                r'innerHTML\s*=\s*[^;]*user',
+                r'document\.write\s*\([^)]*user',
+                r'v-html\s*=\s*["\'][^"\']*user',
+            ],
+            "severity": "high",
+            "description": "Potential cross-site scripting (XSS) vulnerability",
+        },
+    }
+
+    # Insecure configuration patterns
+    CONFIG_ISSUES = {
+        "debug_enabled": {
+            "patterns": [
+                r'DEBUG\s*=\s*True',
+                r'debug\s*:\s*true',
+                r'app\.debug\s*=\s*True',
+            ],
+            "severity": "medium",
+            "description": "Debug mode enabled in production configuration",
+        },
+        "insecure_cors": {
+            "patterns": [
+                r'Access-Control-Allow-Origin.*\*',
+                r'cors\s*\(.*origin\s*:\s*["\'].*\*',
+                r'CORS_ORIGIN_ALLOW_ALL\s*=\s*True',
+            ],
+            "severity": "medium",
+            "description": "Insecure CORS configuration allowing all origins",
+        },
+        "missing_csrf": {
+            "patterns": [
+                r'csrf_enabled\s*=\s*False',
+                r'CSRF_ENABLED\s*=\s*False',
+                r'@csrf_exempt',
+            ],
+            "severity": "high",
+            "description": "CSRF protection disabled",
+        },
+    }
+
+    # Security headers to check
+    SECURITY_HEADERS = [
+        "Content-Security-Policy",
+        "X-Content-Type-Options",
+        "X-Frame-Options",
+        "Strict-Transport-Security",
+        "X-XSS-Protection",
+    ]
+
+    def __init__(self):
+        """Initialize security analyzer strategy."""
+        metadata = StrategyMetadata(
+            name="SecurityAnalyzer",
+            description="Analyzes code for security vulnerabilities and compliance",
+            supported_types=["file", "directory", "project"],
+            supported_operations=["analyze", "scan", "audit", "compliance"],
+            priority=StrategyPriority.CRITICAL,
+            tags={"security", "vulnerabilities", "compliance", "audit"},
+        )
+        super().__init__(metadata)
+
+        self._vulnerability_cache = {}
+
+    def can_handle(self, context: StrategyContext) -> bool:
+        """Check if strategy can handle the given context."""
+        return (
+            context.target_type in self.metadata.supported_types
+            and context.operation in self.metadata.supported_operations
+        )
+
+    def validate_input(self, input_data: Any) -> List[str]:
+        """Validate input data for strategy."""
+        errors = []
+
+        if not input_data:
+            errors.append("Input data is required")
+            return errors
+
+        if isinstance(input_data, (str, Path)):
+            path = Path(input_data)
+            if not path.exists():
+                errors.append(f"Path does not exist: {path}")
+        else:
+            errors.append(f"Invalid input type: {type(input_data).__name__}")
+
+        return errors
+
+    def analyze(
+        self, target: Any, options: Optional[Dict[str, Any]] = None
+    ) -> Dict[str, Any]:
+        """
+        Execute security analysis on target.
+
+        Args:
+            target: File, directory, or project to analyze
+            options: Analysis options (scan_depth, check_dependencies, etc.)
+
+        Returns:
+            Analysis results with security findings
+        """
+        options = options or {}
+
+        if isinstance(target, (str, Path)):
+            target_path = Path(target)
+
+            if target_path.is_file():
+                return self._analyze_file(target_path, options)
+            elif target_path.is_dir():
+                return self._analyze_directory(target_path, options)
+
+        return {
+            "status": "error",
+            "message": f"Unsupported target type: {type(target).__name__}",
+        }
+
+    def _analyze_file(self, file_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
+        """Analyze a single file for security issues."""
+        results = {
+            "status": "success",
+            "type": "file",
+            "path": str(file_path),
+            "vulnerabilities": [],
+            "risk_score": 0,
+        }
+
+        try:
+            content = file_path.read_text(encoding="utf-8")
+
+            # Check for vulnerability patterns
+            vulnerabilities = self._scan_for_vulnerabilities(content, file_path)
+            results["vulnerabilities"].extend(vulnerabilities)
+
+            # Check for configuration issues
+            config_issues = self._scan_for_config_issues(content, file_path)
+            results["vulnerabilities"].extend(config_issues)
+
+            # Language-specific analysis
+            if file_path.suffix == ".py":
+                python_issues = self._analyze_python_security(content, file_path)
+                results["vulnerabilities"].extend(python_issues)
+            elif file_path.suffix in [".js", ".jsx", ".ts", ".tsx"]:
+                js_issues = self._analyze_javascript_security(content, file_path)
+                results["vulnerabilities"].extend(js_issues)
+
+            # Calculate risk score
+            results["risk_score"] = self._calculate_risk_score(results["vulnerabilities"])
+
+            # Add summary
+            results["summary"] = self._generate_summary(results["vulnerabilities"])
+
+        except Exception as e:
+            logger.error(f"Error analyzing file {file_path}: {e}")
+            results["status"] = "error"
+            results["error"] = str(e)
+
+        return results
+
+    def _analyze_directory(self, dir_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
+        """Analyze all files in a directory for security issues."""
+        results = {
+            "status": "success",
+            "type": "directory",
+            "path": str(dir_path),
+            "files_analyzed": 0,
+            "total_vulnerabilities": 0,
+            "vulnerabilities_by_severity": {},
+            "files": [],
+            "risk_score": 0,
+        }
+
+        # Define file extensions to analyze
+        analyzable_extensions = {
+            ".py", ".js", ".jsx", ".ts", ".tsx", ".java", ".cs",
+            ".php", ".rb", ".go", ".rs", ".cpp", ".c", ".h",
+            ".yml", ".yaml", ".json", ".xml", ".conf", ".config",
+            ".env", ".ini", ".properties",
+        }
+
+        # Analyze each file
+        for file_path in dir_path.rglob("*"):
+            if file_path.is_file() and file_path.suffix in analyzable_extensions:
+                # Skip common ignore patterns
+                if any(part.startswith(".") for part in file_path.parts):
+                    continue
+                if "node_modules" in file_path.parts:
+                    continue
+                if "__pycache__" in file_path.parts:
+                    continue
+
+                file_result = self._analyze_file(file_path, options)
+                if file_result["status"] == "success" and file_result["vulnerabilities"]:
+                    results["files"].append(file_result)
+                    results["files_analyzed"] += 1
+                    results["total_vulnerabilities"] += len(file_result["vulnerabilities"])
+
+                    # Count by severity
+                    for vuln in file_result["vulnerabilities"]:
+                        severity = vuln.get("severity", "unknown")
+                        results["vulnerabilities_by_severity"][severity] = \
+                            results["vulnerabilities_by_severity"].get(severity, 0) + 1
+
+        # Calculate overall risk score
+        results["risk_score"] = self._calculate_overall_risk(results)
+
+        # Add recommendations
+        results["recommendations"] = self._generate_recommendations(results)
+
+        return results
+
+    def _scan_for_vulnerabilities(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
+        """Scan content for known vulnerability patterns."""
+        vulnerabilities = []
+
+        for vuln_type, vuln_info in self.VULNERABILITY_PATTERNS.items():
+            for pattern in vuln_info["patterns"]:
+                matches = re.finditer(pattern, content, re.IGNORECASE | re.MULTILINE)
+                for match in matches:
+                    line_num = content[:match.start()].count("\n") + 1
+
+                    vulnerabilities.append({
+                        "type": vuln_type,
+                        "severity": vuln_info["severity"],
+                        "description": vuln_info["description"],
+                        "file": str(file_path),
+                        "line": line_num,
+                        "code": match.group(0)[:100],  # Truncate long matches
+                        "pattern": pattern,
+                    })
+
+        return vulnerabilities
+
+    def _scan_for_config_issues(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
+        """Scan for insecure configuration patterns."""
+        issues = []
+
+        # Only check configuration files
+        config_extensions = {".yml", ".yaml", ".json", ".conf", ".config", ".ini", ".env"}
+        if file_path.suffix not in config_extensions and \
+           file_path.name not in ["settings.py", "config.py", "configuration.py"]:
+            return issues
+
+        for issue_type, issue_info in self.CONFIG_ISSUES.items():
+            for pattern in issue_info["patterns"]:
+                matches = re.finditer(pattern, content, re.IGNORECASE | re.MULTILINE)
+                for match in matches:
+                    line_num = content[:match.start()].count("\n") + 1
+
+                    issues.append({
+                        "type": f"config_{issue_type}",
+                        "severity": issue_info["severity"],
+                        "description": issue_info["description"],
+                        "file": str(file_path),
+                        "line": line_num,
+                        "code": match.group(0),
+                    })
+
+        return issues
+
+    def _analyze_python_security(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
+        """Perform Python-specific security analysis."""
+        issues = []
+
+        try:
+            tree = ast.parse(content)
+
+            # Check for dangerous functions
+            dangerous_functions = {
+                "eval": "critical",
+                "exec": "critical",
+                "compile": "high",
+                "__import__": "medium",
+                "pickle.loads": "high",
+                "pickle.load": "high",
+            }
+
+            class SecurityVisitor(ast.NodeVisitor):
+                def visit_Call(self, node):
+                    if isinstance(node.func, ast.Name):
+                        func_name = node.func.id
+                        if func_name in dangerous_functions:
+                            issues.append({
+                                "type": "dangerous_function",
+                                "severity": dangerous_functions[func_name],
+                                "description": f"Use of dangerous function: {func_name}",
+                                "file": str(file_path),
+                                "line": node.lineno,
+                                "code": func_name,
+                            })
+
+                    # Check for subprocess with shell=True
+                    elif isinstance(node.func, ast.Attribute):
+                        if (hasattr(node.func.value, "id") and
+                                node.func.value.id == "subprocess" and
+                                node.func.attr in ["run", "call", "Popen"]):
+
+                            for keyword in node.keywords:
+                                if keyword.arg == "shell" and \
+                                        isinstance(keyword.value, ast.Constant) and \
+                                        keyword.value.value is True:
+                                    issues.append({
+                                        "type": "shell_injection",
+                                        "severity": "critical",
+                                        "description": "subprocess with shell=True is vulnerable to injection",
+                                        "file": str(file_path),
+                                        "line": node.lineno,
+                                        "code": "subprocess with shell=True",
+                                    })
+
+                    self.generic_visit(node)
+
+            visitor = SecurityVisitor()
+            visitor.visit(tree)
+
+        except SyntaxError:
+            # Not valid Python, skip AST analysis
+            pass
+
+        return issues
+
+    def _analyze_javascript_security(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
+        """Perform JavaScript-specific security analysis."""
+        issues = []
+
+        # Check for dangerous JavaScript patterns
+        js_patterns = {
+            "eval_usage": {
+                "pattern": r'\beval\s*\(',
+                "severity": "critical",
+                "description": "Use of eval() is dangerous and should be avoided",
+            },
+            "innerhtml": {
+                "pattern": r'\.innerHTML\s*=',
+                "severity": "high",
+                "description": "Direct innerHTML assignment can lead to XSS",
+            },
+            "document_write": {
+                "pattern": r'document\.write\s*\(',
+                "severity": "medium",
+                "description": "document.write() can be dangerous with user input",
+            },
+            "localstorage_sensitive": {
+                "pattern": r'localStorage\.(setItem|getItem).*["\']*(password|token|secret)',
+                "severity": "high",
+                "description": "Sensitive data in localStorage is insecure",
+            },
+        }
+
+        for issue_type, issue_info in js_patterns.items():
+            matches = re.finditer(issue_info["pattern"], content, re.IGNORECASE)
+            for match in matches:
+                line_num = content[:match.start()].count("\n") + 1
+
+                issues.append({
+                    "type": f"js_{issue_type}",
+                    "severity": issue_info["severity"],
+                    "description": issue_info["description"],
+                    "file": str(file_path),
+                    "line": line_num,
+                    "code": match.group(0),
+                })
+
+        return issues
+
+    def _calculate_risk_score(self, vulnerabilities: List[Dict[str, Any]]) -> float:
+        """Calculate risk score based on vulnerabilities."""
+        if not vulnerabilities:
+            return 0.0
+
+        severity_scores = {
+            "critical": 10,
+            "high": 7,
+            "medium": 4,
+            "low": 1,
+            "unknown": 0,
+        }
+
+        total_score = sum(
+            severity_scores.get(v.get("severity", "unknown"), 0)
+            for v in vulnerabilities
+        )
+
+        # Normalize to 0-100 scale
+        # Maximum reasonable score would be ~50 critical issues = 500
+        normalized = min(100, (total_score / 50) * 100)
+
+        return round(normalized, 2)
+
+    def _calculate_overall_risk(self, results: Dict[str, Any]) -> float:
+        """Calculate overall risk score for directory analysis."""
+        severity_weights = {
+            "critical": 1.0,
+            "high": 0.7,
+            "medium": 0.4,
+            "low": 0.1,
+        }
+
+        weighted_score = 0
+        total_weight = 0
+
+        for severity, count in results["vulnerabilities_by_severity"].items():
+            weight = severity_weights.get(severity, 0)
+            weighted_score += count * weight * 10
+            total_weight += count
+
+        if total_weight == 0:
+            return 0.0
+
+        # Normalize and cap at 100
+        return min(100.0, round(weighted_score / max(results["files_analyzed"], 1), 2))
+
+    def _generate_summary(self, vulnerabilities: List[Dict[str, Any]]) -> Dict[str, Any]:
+        """Generate summary of security findings."""
+        summary = {
+            "total": len(vulnerabilities),
+            "by_severity": {},
+            "by_type": {},
+            "most_critical": None,
+        }
+
+        for vuln in vulnerabilities:
+            # Count by severity
+            severity = vuln.get("severity", "unknown")
+            summary["by_severity"][severity] = summary["by_severity"].get(severity, 0) + 1
+
+            # Count by type
+            vuln_type = vuln.get("type", "unknown")
+            summary["by_type"][vuln_type] = summary["by_type"].get(vuln_type, 0) + 1
+
+        # Find most critical issue
+        critical_vulns = [v for v in vulnerabilities if v.get("severity") == "critical"]
+        if critical_vulns:
+            summary["most_critical"] = critical_vulns[0]
+
+        return summary
+
+    def _generate_recommendations(self, results: Dict[str, Any]) -> List[str]:
+        """Generate security recommendations based on findings."""
+        recommendations = []
+
+        vuln_by_severity = results.get("vulnerabilities_by_severity", {})
+
+        if vuln_by_severity.get("critical", 0) > 0:
+            recommendations.append(
+                "URGENT: Address critical security vulnerabilities immediately"
+            )
+
+        if vuln_by_severity.get("high", 0) > 0:
+            recommendations.append(
+                "Prioritize fixing high-severity vulnerabilities"
+            )
+
+        # Type-specific recommendations
+        if results["files"]:
+            all_types = set()
+            for file_result in results["files"]:
+                for vuln in file_result.get("vulnerabilities", []):
+                    all_types.add(vuln.get("type"))
+
+            if "sql_injection" in all_types:
+                recommendations.append(
+                    "Use parameterized queries or prepared statements to prevent SQL injection"
+                )
+
+            if "hardcoded_secret" in all_types:
+                recommendations.append(
+                    "Move secrets to environment variables or secure vaults"
+                )
+
+            if "command_injection" in all_types or "shell_injection" in all_types:
+                recommendations.append(
+                    "Avoid shell=True in subprocess calls; use argument lists instead"
+                )
+
+            if "weak_crypto" in all_types:
+                recommendations.append(
+                    "Replace weak cryptographic algorithms with strong ones (e.g., SHA-256+)"
+                )
+
+            if any(t.startswith("config_") for t in all_types):
+                recommendations.append(
+                    "Review and harden configuration settings for production"
+                )
+
+        if not recommendations:
+            recommendations.append("No critical security issues found. Continue with regular security audits.")
+
+        return recommendations
+
+    def extract_metrics(self, analysis_result: Dict[str, Any]) -> Dict[str, Any]:
+        """Extract key metrics from analysis results."""
+        metrics = {}
+
+        if analysis_result.get("status") != "success":
+            return metrics
+
+        if analysis_result.get("type") == "file":
+            metrics.update({
+                "vulnerabilities": len(analysis_result.get("vulnerabilities", [])),
+                "risk_score": analysis_result.get("risk_score", 0),
+            })
+
+            # Count by severity
+            for vuln in analysis_result.get("vulnerabilities", []):
+                severity = vuln.get("severity", "unknown")
+                key = f"severity_{severity}"
+                metrics[key] = metrics.get(key, 0) + 1
+
+        elif analysis_result.get("type") == "directory":
+            metrics.update({
+                "files_analyzed": analysis_result.get("files_analyzed", 0),
+                "total_vulnerabilities": analysis_result.get("total_vulnerabilities", 0),
+                "risk_score": analysis_result.get("risk_score", 0),
+            })
+
+            # Add severity breakdown
+            for severity, count in analysis_result.get("vulnerabilities_by_severity", {}).items():
+                metrics[f"severity_{severity}"] = count
+
+        return metrics
+
+    def compare_results(
+        self, baseline: Dict[str, Any], current: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Compare two security analysis results."""
+        comparison = {
+            "risk_change": 0,
+            "vulnerability_changes": {},
+            "new_vulnerabilities": [],
+            "resolved_vulnerabilities": [],
+            "improvements": [],
+            "regressions": [],
+        }
+
+        # Compare risk scores
+        baseline_risk = baseline.get("risk_score", 0)
+        current_risk = current.get("risk_score", 0)
+        comparison["risk_change"] = current_risk - baseline_risk
+
+        # Compare vulnerability counts
+        baseline_metrics = self.extract_metrics(baseline)
+        current_metrics = self.extract_metrics(current)
+
+        for key in ["severity_critical", "severity_high", "severity_medium", "severity_low"]:
+            baseline_count = baseline_metrics.get(key, 0)
+            current_count = current_metrics.get(key, 0)
+
+            if baseline_count != current_count:
+                severity = key.replace("severity_", "")
+                comparison["vulnerability_changes"][severity] = {
+                    "baseline": baseline_count,
+                    "current": current_count,
+                    "change": current_count - baseline_count,
+                }
+
+        # Determine improvements vs regressions
+        if comparison["risk_change"] < 0:
+            comparison["improvements"].append(
+                f"Risk score improved by {abs(comparison['risk_change']):.2f} points"
+            )
+        elif comparison["risk_change"] > 0:
+            comparison["regressions"].append(
+                f"Risk score increased by {comparison['risk_change']:.2f} points"
+            )
+
+        total_baseline = baseline_metrics.get("total_vulnerabilities", 0)
+        total_current = current_metrics.get("total_vulnerabilities", 0)
+
+        if total_current < total_baseline:
+            comparison["improvements"].append(
+                f"Reduced vulnerabilities from {total_baseline} to {total_current}"
+            )
+        elif total_current > total_baseline:
+            comparison["regressions"].append(
+                f"Vulnerabilities increased from {total_baseline} to {total_current}"
+            )
+
+        return comparison
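
For orientation, the sketch below shows one way the new SecurityAnalyzerStrategy might be driven once 4.4.0 is installed. It is illustrative only: the import path is assumed from the file location listed above (the class may also be re-exported elsewhere in the unified services package), and the result keys used here (risk_score, recommendations) follow the code in this diff rather than any separately published documentation.

from pathlib import Path

from claude_mpm.services.unified.analyzer_strategies.security_analyzer import (
    SecurityAnalyzerStrategy,
)

analyzer = SecurityAnalyzerStrategy()

# validate_input() returns a list of error strings; an empty list means the target is usable.
target = Path("src")
errors = analyzer.validate_input(target)
if errors:
    print("cannot analyze:", errors)
else:
    # A directory target yields per-file findings plus an aggregate 0-100 risk score
    # and plain-text recommendations; a single-file target omits "recommendations".
    report = analyzer.analyze(target)
    print("risk score:", report["risk_score"])
    for rec in report.get("recommendations", []):
        print("-", rec)

Splitting the call into validate_input() followed by analyze() mirrors the separation the class itself draws between input validation and scanning.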