devguard 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devguard/INTEGRATION_SUMMARY.md +121 -0
- devguard/__init__.py +3 -0
- devguard/__main__.py +6 -0
- devguard/checkers/__init__.py +41 -0
- devguard/checkers/api_usage.py +523 -0
- devguard/checkers/aws_cost.py +331 -0
- devguard/checkers/aws_iam.py +284 -0
- devguard/checkers/base.py +25 -0
- devguard/checkers/container.py +137 -0
- devguard/checkers/domain.py +189 -0
- devguard/checkers/firecrawl.py +117 -0
- devguard/checkers/fly.py +225 -0
- devguard/checkers/github.py +210 -0
- devguard/checkers/npm.py +327 -0
- devguard/checkers/npm_security.py +244 -0
- devguard/checkers/redteam.py +290 -0
- devguard/checkers/secret.py +279 -0
- devguard/checkers/swarm.py +376 -0
- devguard/checkers/tailscale.py +143 -0
- devguard/checkers/tailsnitch.py +303 -0
- devguard/checkers/tavily.py +179 -0
- devguard/checkers/vercel.py +192 -0
- devguard/cli.py +1510 -0
- devguard/cli_helpers.py +189 -0
- devguard/config.py +249 -0
- devguard/core.py +293 -0
- devguard/dashboard.py +715 -0
- devguard/discovery.py +363 -0
- devguard/http_client.py +142 -0
- devguard/llm_service.py +481 -0
- devguard/mcp_server.py +259 -0
- devguard/metrics.py +144 -0
- devguard/models.py +208 -0
- devguard/reporting.py +1571 -0
- devguard/sarif.py +295 -0
- devguard/scripts/ANALYSIS_SUMMARY.md +141 -0
- devguard/scripts/README.md +221 -0
- devguard/scripts/auto_fix_recommendations.py +145 -0
- devguard/scripts/generate_npmignore.py +175 -0
- devguard/scripts/generate_security_report.py +324 -0
- devguard/scripts/prepublish_check.sh +29 -0
- devguard/scripts/redteam_npm_packages.py +1262 -0
- devguard/scripts/review_all_repos.py +300 -0
- devguard/spec.py +617 -0
- devguard/sweeps/__init__.py +23 -0
- devguard/sweeps/ai_editor_config_audit.py +697 -0
- devguard/sweeps/cargo_publish_audit.py +655 -0
- devguard/sweeps/dependency_audit.py +419 -0
- devguard/sweeps/gitignore_audit.py +336 -0
- devguard/sweeps/local_dev.py +260 -0
- devguard/sweeps/local_dirty_worktree_secrets.py +521 -0
- devguard/sweeps/project_flaudit.py +636 -0
- devguard/sweeps/public_github_secrets.py +680 -0
- devguard/sweeps/publish_audit.py +478 -0
- devguard/sweeps/ssh_key_audit.py +327 -0
- devguard/utils.py +174 -0
- devguard-0.2.0.dist-info/METADATA +225 -0
- devguard-0.2.0.dist-info/RECORD +60 -0
- devguard-0.2.0.dist-info/WHEEL +4 -0
- devguard-0.2.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,244 @@
|
|
|
1
|
+
"""Deep npm package security analysis checker."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import tempfile
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from devguard.checkers.base import BaseChecker
|
|
8
|
+
from devguard.http_client import create_client
|
|
9
|
+
from devguard.models import CheckResult, Severity, Vulnerability
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
# Import analysis functions from the red team script. These helpers live in
# devguard/scripts and may be missing in stripped-down installs, so the
# import is optional: on ImportError every name is bound to None, and
# NpmSecurityChecker.check() tests for that before doing any analysis.
try:
    from devguard.scripts.redteam_npm_packages import (
        analyze_package_contents,
        check_dependency_vulnerabilities,
        download_package_tarball,
        extract_tarball,
        fetch_package_info,
    )
except ImportError:
    logger.warning("npm security analysis functions not available")
    # None sentinels signal "deep analysis unavailable" to the checker.
    analyze_package_contents = None
    check_dependency_vulnerabilities = None
    download_package_tarball = None
    extract_tarball = None
    fetch_package_info = None
+
|
|
30
|
+
|
|
31
|
+
class NpmSecurityChecker(BaseChecker):
    """Deep security analysis of published npm packages.

    For each monitored package this checker downloads the latest published
    tarball, scans its contents for exposed secrets, sensitive files,
    obfuscated code, published git history, and suspicious install scripts,
    and checks its dependency tree for known advisories.
    """

    check_type = "npm_security"

    async def check(self) -> CheckResult:
        """Run deep security analysis on all configured npm packages.

        Returns:
            CheckResult whose ``success`` flag is True only when every
            configured package was analyzed without error. When no packages
            are configured or the optional analysis helpers are missing,
            an explanatory message is placed in ``errors``.
        """
        vulnerabilities: list[Vulnerability] = []
        errors: list[str] = []

        if not self.settings.npm_packages_to_monitor:
            # Nothing configured is not a failure; report it informationally.
            return CheckResult(
                check_type=self.check_type,
                success=True,
                vulnerabilities=[],
                errors=["No npm packages configured for deep security analysis"],
            )

        if not analyze_package_contents:
            # The optional redteam_npm_packages helpers failed to import
            # (see the module-level try/except), so deep analysis cannot run.
            return CheckResult(
                check_type=self.check_type,
                success=False,
                vulnerabilities=[],
                errors=["npm security analysis functions not available"],
            )

        for package in self.settings.npm_packages_to_monitor:
            try:
                pkg_vulns = await self._analyze_package_security(package)
                vulnerabilities.extend(pkg_vulns)
            except Exception as e:
                # One failing package must not abort the whole sweep.
                error_msg = f"Error analyzing package {package}: {str(e)}"
                errors.append(error_msg)
                logger.warning(error_msg)

        return CheckResult(
            check_type=self.check_type,
            success=len(errors) == 0,
            vulnerabilities=vulnerabilities,
            errors=errors,
        )

    @staticmethod
    def _version_sort_key(version: str) -> tuple[int, ...]:
        """Return a numeric sort key for an npm version string.

        Strips any prerelease/build suffix (everything after the first
        ``-``) and compares the dotted components as integers so that
        e.g. ``10.0.0`` ranks above ``9.0.0``. A plain string ``max()``
        would order these lexicographically and pick the wrong one.
        Non-numeric components fall back to 0.
        """
        parts: list[int] = []
        for piece in version.split("-", 1)[0].split("."):
            try:
                parts.append(int(piece))
            except ValueError:
                parts.append(0)
        return tuple(parts)

    async def _analyze_package_security(self, package: str) -> list[Vulnerability]:
        """Download *package* from the registry and analyze it in depth.

        Args:
            package: npm package name to analyze.

        Returns:
            Vulnerability records for everything found in the package.

        Raises:
            Exception: any analysis error is logged and re-raised so that
                ``check()`` can record it per package.
        """
        vulnerabilities: list[Vulnerability] = []

        try:
            async with create_client() as client:
                # Get package metadata from the registry.
                package_info = await fetch_package_info(client, package)

                # Prefer the "latest" dist-tag; otherwise fall back to the
                # highest published version number.
                dist_tags = package_info.get("dist-tags", {})
                version = dist_tags.get("latest")
                if not version:
                    versions = package_info.get("versions", {})
                    if versions:
                        # Numeric comparison — bare max() on strings would
                        # rank "9.0.0" above "10.0.0".
                        version = max(versions.keys(), key=self._version_sort_key)

                if not version:
                    logger.warning(f"Could not determine version for {package}")
                    return vulnerabilities

                # Download the published tarball for that version.
                tarball_data = await download_package_tarball(client, package, version)

                # Check the dependency tree for known advisories.
                dep_vulns = await check_dependency_vulnerabilities(client, package, version)

                # Extract and analyze the contents in a throwaway directory.
                with tempfile.TemporaryDirectory() as tmpdir:
                    extract_dir = Path(tmpdir)
                    extract_tarball(tarball_data, extract_dir)

                    # npm tarballs conventionally unpack into "package/";
                    # fall back to the extraction root otherwise.
                    package_dir = extract_dir / "package"
                    if not package_dir.exists():
                        package_dir = extract_dir

                    findings = analyze_package_contents(package_dir)

                    # Convert raw findings into Vulnerability records.
                    vulnerabilities.extend(
                        self._convert_findings_to_vulnerabilities(
                            package, version, findings, dep_vulns
                        )
                    )

        except Exception as e:
            logger.error(f"Error in deep security analysis for {package}: {e}")
            raise

        return vulnerabilities

    def _convert_findings_to_vulnerabilities(
        self,
        package: str,
        version: str,
        findings: dict,
        dep_vulns: list[dict],
    ) -> list[Vulnerability]:
        """Convert security findings to Guardian Vulnerability objects.

        Args:
            package: package name the findings belong to.
            version: analyzed package version.
            findings: dict produced by ``analyze_package_contents`` —
                expected keys include "secrets", "sensitive_files",
                "obfuscated_code", "git_history", "npmignore_missing",
                and "package_json_issues".
            dep_vulns: advisory dicts from
                ``check_dependency_vulnerabilities``.
        """
        vulnerabilities: list[Vulnerability] = []

        # Exposed secrets — severity comes from the finding itself.
        for secret in findings.get("secrets", []):
            severity = self._map_severity(secret.get("severity", "medium"))
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=severity,
                    summary=f"Exposed secret: {secret.get('type', 'Unknown')}",
                    description=f"Secret found in {secret.get('file', 'unknown')} at line {secret.get('line', '?')}: {secret.get('match', '')[:100]}",
                    source="npm_security",
                )
            )

        # Files that should never have been published (always HIGH).
        for file_path in findings.get("sensitive_files", []):
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=Severity.HIGH,
                    summary=f"Sensitive file published: {file_path}",
                    description=f"Package contains sensitive file that should not be published: {file_path}",
                    source="npm_security",
                )
            )

        # Obfuscated code patterns — severity from the finding.
        for obf in findings.get("obfuscated_code", []):
            severity = self._map_severity(obf.get("severity", "low"))
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=severity,
                    summary=f"Obfuscated code detected: {obf.get('description', 'Unknown pattern')}",
                    description=f"Obfuscated code pattern found in package: {obf.get('match', '')[:100]}",
                    source="npm_security",
                )
            )

        # A published .git directory leaks full commit history — critical.
        if findings.get("git_history"):
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=Severity.CRITICAL,
                    summary="Git history published in package",
                    description="Package contains .git directory with full commit history",
                    source="npm_security",
                )
            )

        # Missing .npmignore is a warning (medium severity).
        if findings.get("npmignore_missing"):
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=Severity.MEDIUM,
                    summary="Missing .npmignore file",
                    description="Package lacks .npmignore file, increasing risk of publishing sensitive files",
                    source="npm_security",
                )
            )

        # Suspicious install scripts (e.g. preinstall/postinstall abuse).
        pkg_issues = findings.get("package_json_issues", {})
        for script in pkg_issues.get("suspicious_scripts", []):
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=Severity.HIGH,
                    summary=f"Suspicious install script: {script.get('script', 'unknown')}",
                    description=f"Install script contains potentially dangerous operations: {script.get('reason', '')}",
                    source="npm_security",
                )
            )

        # Known advisories against the package's dependencies.
        for dep_vuln in dep_vulns:
            severity = self._map_severity(dep_vuln.get("severity", "medium"))
            vulnerabilities.append(
                Vulnerability(
                    package_name=package,
                    package_version=version,
                    severity=severity,
                    summary=dep_vuln.get("title", "Dependency vulnerability"),
                    description=dep_vuln.get("overview", ""),
                    advisory_id=dep_vuln.get("id"),
                    cve_id=dep_vuln.get("cves", [None])[0] if dep_vuln.get("cves") else None,
                    source="npm_security",
                )
            )

        return vulnerabilities

    def _map_severity(self, severity_str: str) -> Severity:
        """Map a free-form severity string to the Severity enum.

        Accepts common aliases ("crit", "h", "med", "moderate"); anything
        unrecognized defaults to LOW rather than raising.
        """
        severity_lower = severity_str.lower()
        if severity_lower in ("critical", "crit"):
            return Severity.CRITICAL
        elif severity_lower in ("high", "h"):
            return Severity.HIGH
        elif severity_lower in ("medium", "med", "moderate"):
            return Severity.MEDIUM
        else:
            return Severity.LOW
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
"""Red team security testing for deployments."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
|
|
6
|
+
import httpx
|
|
7
|
+
|
|
8
|
+
from devguard.checkers.base import BaseChecker
|
|
9
|
+
from devguard.http_client import create_client
|
|
10
|
+
from devguard.models import CheckResult, Severity, Vulnerability
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class RedTeamChecker(BaseChecker):
    """Red team security testing for deployment endpoints.

    Collects live deployment URLs from other checkers' results and probes
    each one for missing security headers, exposed admin/management paths,
    permissive CORS, and information disclosure in error responses.
    """

    check_type = "redteam"

    def __init__(self, settings):
        """Initialize red team checker."""
        super().__init__(settings)
        # Endpoint dicts with "url", "platform", "project" keys; rebuilt
        # from deployment results on every check() run.
        self.endpoints_to_test: list[dict[str, str]] = []

    async def check(self, deployment_results: list | None = None) -> CheckResult:
        """Run red team security tests on endpoints.

        Args:
            deployment_results: Optional list of CheckResult objects from deployment
                checkers (vercel, fly) to extract endpoints from.
        """
        vulnerabilities: list[Vulnerability] = []
        errors: list[str] = []

        # Reset endpoints for each check run so stale URLs are not retested.
        self.endpoints_to_test = []

        # Collect endpoints from deployment results.
        self._collect_endpoints_from_results(deployment_results or [])

        if not self.endpoints_to_test:
            return CheckResult(
                check_type=self.check_type,
                success=True,
                vulnerabilities=[],
                errors=["No endpoints to test"],
            )

        async with create_client() as client:
            for endpoint_info in self.endpoints_to_test:
                url = endpoint_info["url"]
                platform = endpoint_info.get("platform", "unknown")

                try:
                    findings = await self._test_endpoint(client, url, platform)
                    vulnerabilities.extend(findings)
                except Exception as e:
                    # Keep testing the remaining endpoints.
                    errors.append(f"Error testing {url}: {str(e)}")
                    logger.warning(f"Error testing endpoint {url}: {e}")

        return CheckResult(
            check_type=self.check_type,
            success=len(errors) == 0,
            vulnerabilities=vulnerabilities,
            errors=errors,
        )

    def _collect_endpoints_from_results(self, deployment_results: list) -> None:
        """Collect testable endpoints from deployment check results."""
        for check_result in deployment_results:
            for deployment in check_result.deployments:
                if deployment.url:
                    self.endpoints_to_test.append(
                        {
                            "url": deployment.url,
                            "platform": deployment.platform,
                            "project": deployment.project_name,
                        }
                    )

    async def _test_endpoint(
        self, client: httpx.AsyncClient, url: str, platform: str
    ) -> list[Vulnerability]:
        """Test a single endpoint for security issues.

        Runs four probes: security headers, exposed admin/management
        paths, CORS configuration, and error-message disclosure. Probe
        failures (timeouts, unreachable paths) are swallowed so a flaky
        endpoint does not abort the remaining tests.
        """
        findings: list[Vulnerability] = []

        # Test 1: Check for security headers (HEAD keeps the probe cheap).
        try:
            response = await client.head(url, timeout=10.0, follow_redirects=True)
            header_issues = self._check_security_headers(response, url, platform)
            findings.extend(header_issues)
        except Exception as e:
            logger.debug(f"Error checking headers for {url}: {e}")

        # Test 2: Check for exposed admin/management endpoints.
        admin_paths = [
            "/admin",
            "/api/admin",
            "/management",
            "/.env",
            "/.git",
            "/.well-known",
            "/debug",
            "/health",
            "/metrics",
            "/status",
            "/api/health",
            "/api/status",
        ]

        for path in admin_paths:
            try:
                test_url = f"{url.rstrip('/')}{path}"
                # No redirect following: a redirect to a login page is fine.
                response = await client.get(test_url, timeout=5.0, follow_redirects=False)
                if response.status_code == 200:
                    # Only flag the path if the body actually looks sensitive.
                    content_type = response.headers.get("content-type", "").lower()
                    if "json" in content_type or "text" in content_type:
                        content = response.text[:500]  # Sample first 500 chars
                        if self._is_sensitive_content(content):
                            findings.append(
                                Vulnerability(
                                    package_name=platform,
                                    package_version="",
                                    severity=Severity.HIGH,
                                    summary=f"Exposed endpoint: {path}",
                                    description=f"Endpoint {test_url} is publicly accessible and may expose sensitive information",
                                    source="redteam",
                                )
                            )
            except Exception:
                pass  # Endpoint doesn't exist or timed out

        # Test 3: Check for CORS misconfiguration via a hostile Origin.
        try:
            cors_response = await client.options(
                url,
                headers={"Origin": "https://evil.com"},
                timeout=5.0,
            )
            cors_issues = self._check_cors(cors_response, url, platform)
            findings.extend(cors_issues)
        except Exception:
            pass

        # Test 4: Check for information disclosure in error messages.
        try:
            # A timestamped path is guaranteed not to exist, forcing an error.
            error_response = await client.get(
                f"{url}/nonexistent-path-{datetime.now().timestamp()}",
                timeout=5.0,
            )
            if error_response.status_code >= 400:
                error_issues = self._check_error_disclosure(error_response, url, platform)
                findings.extend(error_issues)
        except Exception:
            pass

        return findings

    def _check_security_headers(
        self, response: httpx.Response, url: str, platform: str
    ) -> list[Vulnerability]:
        """Check for missing or misconfigured security headers.

        Header values are compared case-insensitively, and headers with
        more than one safe setting accept any of them — in particular
        ``X-Frame-Options: SAMEORIGIN`` is as valid as ``DENY`` (both
        prevent third-party framing), and ``X-XSS-Protection: 0`` is the
        current recommendation alongside the legacy ``1; mode=block``.
        """
        findings: list[Vulnerability] = []
        headers = response.headers

        # Header name -> tuple of acceptable values (case-insensitive),
        # or None when any non-empty value counts as configured.
        security_headers = {
            "X-Content-Type-Options": ("nosniff",),
            "X-Frame-Options": ("DENY", "SAMEORIGIN"),
            "X-XSS-Protection": ("1; mode=block", "0"),
            "Strict-Transport-Security": None,  # Any value is good
            "Content-Security-Policy": None,  # Any value is good
        }

        missing_headers = []
        for header, accepted_values in security_headers.items():
            if header not in headers:
                missing_headers.append(header)
            elif accepted_values is not None:
                value = headers[header].strip().lower()
                if value not in tuple(v.lower() for v in accepted_values):
                    missing_headers.append(f"{header} (incorrect value)")

        if missing_headers:
            # Escalate when more than two protections are absent.
            severity = Severity.MEDIUM if len(missing_headers) <= 2 else Severity.HIGH
            findings.append(
                Vulnerability(
                    package_name=platform,
                    package_version="",
                    severity=severity,
                    summary=f"Missing security headers: {', '.join(missing_headers)}",
                    description=f"Endpoint {url} is missing important security headers",
                    source="redteam",
                )
            )

        return findings

    def _check_cors(self, response: httpx.Response, url: str, platform: str) -> list[Vulnerability]:
        """Check for CORS misconfiguration in a preflight response."""
        findings: list[Vulnerability] = []
        headers = response.headers

        acao = headers.get("Access-Control-Allow-Origin", "")
        acac = headers.get("Access-Control-Allow-Credentials", "")

        # Wildcard origin combined with credentials is the worst case.
        if acao == "*" and acac.lower() == "true":
            findings.append(
                Vulnerability(
                    package_name=platform,
                    package_version="",
                    severity=Severity.HIGH,
                    summary="Overly permissive CORS configuration",
                    description=f"Endpoint {url} allows credentials with wildcard origin (*)",
                    source="redteam",
                )
            )
        elif acao == "*":
            findings.append(
                Vulnerability(
                    package_name=platform,
                    package_version="",
                    severity=Severity.MEDIUM,
                    summary="Permissive CORS configuration",
                    description=f"Endpoint {url} allows all origins (*)",
                    source="redteam",
                )
            )

        return findings

    def _check_error_disclosure(
        self, response: httpx.Response, url: str, platform: str
    ) -> list[Vulnerability]:
        """Check for information disclosure in error messages."""
        findings: list[Vulnerability] = []
        text = response.text.lower()

        # Sensitive patterns that shouldn't be exposed in error bodies.
        sensitive_patterns = [
            "stack trace",
            "exception",
            "error at",
            "file://",
            "database",
            "sql",
            "password",
            "secret",
            "api key",
            "token",
            "aws",
            "access key",
        ]

        found_patterns = [p for p in sensitive_patterns if p in text]

        if found_patterns:
            findings.append(
                Vulnerability(
                    package_name=platform,
                    package_version="",
                    severity=Severity.MEDIUM,
                    summary="Information disclosure in error messages",
                    description=f"Endpoint {url} exposes sensitive information in error responses: {', '.join(found_patterns[:3])}",
                    source="redteam",
                )
            )

        return findings

    def _is_sensitive_content(self, content: str) -> bool:
        """Heuristically decide whether *content* looks sensitive.

        Case-insensitive substring match against common credential and
        infrastructure keywords; prone to false positives by design (it
        gates which exposed paths get reported, not what gets blocked).
        """
        content_lower = content.lower()

        # Patterns indicating sensitive data.
        sensitive_indicators = [
            "password",
            "secret",
            "api_key",
            "token",
            "private",
            "database",
            "connection",
            "aws",
            "access",
            "credential",
        ]

        return any(indicator in content_lower for indicator in sensitive_indicators)
|