aiptx-2.0.7-py3-none-any.whl
This diff shows the contents of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package exactly as it appears in its public registry.
- aipt_v2/__init__.py +110 -0
- aipt_v2/__main__.py +24 -0
- aipt_v2/agents/AIPTxAgent/__init__.py +10 -0
- aipt_v2/agents/AIPTxAgent/aiptx_agent.py +211 -0
- aipt_v2/agents/__init__.py +46 -0
- aipt_v2/agents/base.py +520 -0
- aipt_v2/agents/exploit_agent.py +688 -0
- aipt_v2/agents/ptt.py +406 -0
- aipt_v2/agents/state.py +168 -0
- aipt_v2/app.py +957 -0
- aipt_v2/browser/__init__.py +31 -0
- aipt_v2/browser/automation.py +458 -0
- aipt_v2/browser/crawler.py +453 -0
- aipt_v2/cli.py +2933 -0
- aipt_v2/compliance/__init__.py +71 -0
- aipt_v2/compliance/compliance_report.py +449 -0
- aipt_v2/compliance/framework_mapper.py +424 -0
- aipt_v2/compliance/nist_mapping.py +345 -0
- aipt_v2/compliance/owasp_mapping.py +330 -0
- aipt_v2/compliance/pci_mapping.py +297 -0
- aipt_v2/config.py +341 -0
- aipt_v2/core/__init__.py +43 -0
- aipt_v2/core/agent.py +630 -0
- aipt_v2/core/llm.py +395 -0
- aipt_v2/core/memory.py +305 -0
- aipt_v2/core/ptt.py +329 -0
- aipt_v2/database/__init__.py +14 -0
- aipt_v2/database/models.py +232 -0
- aipt_v2/database/repository.py +384 -0
- aipt_v2/docker/__init__.py +23 -0
- aipt_v2/docker/builder.py +260 -0
- aipt_v2/docker/manager.py +222 -0
- aipt_v2/docker/sandbox.py +371 -0
- aipt_v2/evasion/__init__.py +58 -0
- aipt_v2/evasion/request_obfuscator.py +272 -0
- aipt_v2/evasion/tls_fingerprint.py +285 -0
- aipt_v2/evasion/ua_rotator.py +301 -0
- aipt_v2/evasion/waf_bypass.py +439 -0
- aipt_v2/execution/__init__.py +23 -0
- aipt_v2/execution/executor.py +302 -0
- aipt_v2/execution/parser.py +544 -0
- aipt_v2/execution/terminal.py +337 -0
- aipt_v2/health.py +437 -0
- aipt_v2/intelligence/__init__.py +194 -0
- aipt_v2/intelligence/adaptation.py +474 -0
- aipt_v2/intelligence/auth.py +520 -0
- aipt_v2/intelligence/chaining.py +775 -0
- aipt_v2/intelligence/correlation.py +536 -0
- aipt_v2/intelligence/cve_aipt.py +334 -0
- aipt_v2/intelligence/cve_info.py +1111 -0
- aipt_v2/intelligence/knowledge_graph.py +590 -0
- aipt_v2/intelligence/learning.py +626 -0
- aipt_v2/intelligence/llm_analyzer.py +502 -0
- aipt_v2/intelligence/llm_tool_selector.py +518 -0
- aipt_v2/intelligence/payload_generator.py +562 -0
- aipt_v2/intelligence/rag.py +239 -0
- aipt_v2/intelligence/scope.py +442 -0
- aipt_v2/intelligence/searchers/__init__.py +5 -0
- aipt_v2/intelligence/searchers/exploitdb_searcher.py +523 -0
- aipt_v2/intelligence/searchers/github_searcher.py +467 -0
- aipt_v2/intelligence/searchers/google_searcher.py +281 -0
- aipt_v2/intelligence/tools.json +443 -0
- aipt_v2/intelligence/triage.py +670 -0
- aipt_v2/interactive_shell.py +559 -0
- aipt_v2/interface/__init__.py +5 -0
- aipt_v2/interface/cli.py +230 -0
- aipt_v2/interface/main.py +501 -0
- aipt_v2/interface/tui.py +1276 -0
- aipt_v2/interface/utils.py +583 -0
- aipt_v2/llm/__init__.py +39 -0
- aipt_v2/llm/config.py +26 -0
- aipt_v2/llm/llm.py +514 -0
- aipt_v2/llm/memory.py +214 -0
- aipt_v2/llm/request_queue.py +89 -0
- aipt_v2/llm/utils.py +89 -0
- aipt_v2/local_tool_installer.py +1467 -0
- aipt_v2/models/__init__.py +15 -0
- aipt_v2/models/findings.py +295 -0
- aipt_v2/models/phase_result.py +224 -0
- aipt_v2/models/scan_config.py +207 -0
- aipt_v2/monitoring/grafana/dashboards/aipt-dashboard.json +355 -0
- aipt_v2/monitoring/grafana/dashboards/default.yml +17 -0
- aipt_v2/monitoring/grafana/datasources/prometheus.yml +17 -0
- aipt_v2/monitoring/prometheus.yml +60 -0
- aipt_v2/orchestration/__init__.py +52 -0
- aipt_v2/orchestration/pipeline.py +398 -0
- aipt_v2/orchestration/progress.py +300 -0
- aipt_v2/orchestration/scheduler.py +296 -0
- aipt_v2/orchestrator.py +2427 -0
- aipt_v2/payloads/__init__.py +27 -0
- aipt_v2/payloads/cmdi.py +150 -0
- aipt_v2/payloads/sqli.py +263 -0
- aipt_v2/payloads/ssrf.py +204 -0
- aipt_v2/payloads/templates.py +222 -0
- aipt_v2/payloads/traversal.py +166 -0
- aipt_v2/payloads/xss.py +204 -0
- aipt_v2/prompts/__init__.py +60 -0
- aipt_v2/proxy/__init__.py +29 -0
- aipt_v2/proxy/history.py +352 -0
- aipt_v2/proxy/interceptor.py +452 -0
- aipt_v2/recon/__init__.py +44 -0
- aipt_v2/recon/dns.py +241 -0
- aipt_v2/recon/osint.py +367 -0
- aipt_v2/recon/subdomain.py +372 -0
- aipt_v2/recon/tech_detect.py +311 -0
- aipt_v2/reports/__init__.py +17 -0
- aipt_v2/reports/generator.py +313 -0
- aipt_v2/reports/html_report.py +378 -0
- aipt_v2/runtime/__init__.py +53 -0
- aipt_v2/runtime/base.py +30 -0
- aipt_v2/runtime/docker.py +401 -0
- aipt_v2/runtime/local.py +346 -0
- aipt_v2/runtime/tool_server.py +205 -0
- aipt_v2/runtime/vps.py +830 -0
- aipt_v2/scanners/__init__.py +28 -0
- aipt_v2/scanners/base.py +273 -0
- aipt_v2/scanners/nikto.py +244 -0
- aipt_v2/scanners/nmap.py +402 -0
- aipt_v2/scanners/nuclei.py +273 -0
- aipt_v2/scanners/web.py +454 -0
- aipt_v2/scripts/security_audit.py +366 -0
- aipt_v2/setup_wizard.py +941 -0
- aipt_v2/skills/__init__.py +80 -0
- aipt_v2/skills/agents/__init__.py +14 -0
- aipt_v2/skills/agents/api_tester.py +706 -0
- aipt_v2/skills/agents/base.py +477 -0
- aipt_v2/skills/agents/code_review.py +459 -0
- aipt_v2/skills/agents/security_agent.py +336 -0
- aipt_v2/skills/agents/web_pentest.py +818 -0
- aipt_v2/skills/prompts/__init__.py +647 -0
- aipt_v2/system_detector.py +539 -0
- aipt_v2/telemetry/__init__.py +7 -0
- aipt_v2/telemetry/tracer.py +347 -0
- aipt_v2/terminal/__init__.py +28 -0
- aipt_v2/terminal/executor.py +400 -0
- aipt_v2/terminal/sandbox.py +350 -0
- aipt_v2/tools/__init__.py +44 -0
- aipt_v2/tools/active_directory/__init__.py +78 -0
- aipt_v2/tools/active_directory/ad_config.py +238 -0
- aipt_v2/tools/active_directory/bloodhound_wrapper.py +447 -0
- aipt_v2/tools/active_directory/kerberos_attacks.py +430 -0
- aipt_v2/tools/active_directory/ldap_enum.py +533 -0
- aipt_v2/tools/active_directory/smb_attacks.py +505 -0
- aipt_v2/tools/agents_graph/__init__.py +19 -0
- aipt_v2/tools/agents_graph/agents_graph_actions.py +69 -0
- aipt_v2/tools/api_security/__init__.py +76 -0
- aipt_v2/tools/api_security/api_discovery.py +608 -0
- aipt_v2/tools/api_security/graphql_scanner.py +622 -0
- aipt_v2/tools/api_security/jwt_analyzer.py +577 -0
- aipt_v2/tools/api_security/openapi_fuzzer.py +761 -0
- aipt_v2/tools/browser/__init__.py +5 -0
- aipt_v2/tools/browser/browser_actions.py +238 -0
- aipt_v2/tools/browser/browser_instance.py +535 -0
- aipt_v2/tools/browser/tab_manager.py +344 -0
- aipt_v2/tools/cloud/__init__.py +70 -0
- aipt_v2/tools/cloud/cloud_config.py +273 -0
- aipt_v2/tools/cloud/cloud_scanner.py +639 -0
- aipt_v2/tools/cloud/prowler_tool.py +571 -0
- aipt_v2/tools/cloud/scoutsuite_tool.py +359 -0
- aipt_v2/tools/executor.py +307 -0
- aipt_v2/tools/parser.py +408 -0
- aipt_v2/tools/proxy/__init__.py +5 -0
- aipt_v2/tools/proxy/proxy_actions.py +103 -0
- aipt_v2/tools/proxy/proxy_manager.py +789 -0
- aipt_v2/tools/registry.py +196 -0
- aipt_v2/tools/scanners/__init__.py +343 -0
- aipt_v2/tools/scanners/acunetix_tool.py +712 -0
- aipt_v2/tools/scanners/burp_tool.py +631 -0
- aipt_v2/tools/scanners/config.py +156 -0
- aipt_v2/tools/scanners/nessus_tool.py +588 -0
- aipt_v2/tools/scanners/zap_tool.py +612 -0
- aipt_v2/tools/terminal/__init__.py +5 -0
- aipt_v2/tools/terminal/terminal_actions.py +37 -0
- aipt_v2/tools/terminal/terminal_manager.py +153 -0
- aipt_v2/tools/terminal/terminal_session.py +449 -0
- aipt_v2/tools/tool_processing.py +108 -0
- aipt_v2/utils/__init__.py +17 -0
- aipt_v2/utils/logging.py +202 -0
- aipt_v2/utils/model_manager.py +187 -0
- aipt_v2/utils/searchers/__init__.py +269 -0
- aipt_v2/verify_install.py +793 -0
- aiptx-2.0.7.dist-info/METADATA +345 -0
- aiptx-2.0.7.dist-info/RECORD +187 -0
- aiptx-2.0.7.dist-info/WHEEL +5 -0
- aiptx-2.0.7.dist-info/entry_points.txt +7 -0
- aiptx-2.0.7.dist-info/licenses/LICENSE +21 -0
- aiptx-2.0.7.dist-info/top_level.txt +1 -0
aipt_v2/intelligence/correlation.py
@@ -0,0 +1,536 @@
"""
AIPT Cross-Target Correlation Engine

Finds patterns and insights across multiple penetration tests:
- Identifies common vulnerabilities across targets
- Detects systemic issues in an organization
- Provides portfolio-level risk assessment
- Tracks vulnerability trends over time

This provides strategic insights beyond individual target assessments.
"""
from __future__ import annotations

import json
import logging
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Optional
from collections import defaultdict

from aipt_v2.models.findings import Finding, Severity, VulnerabilityType

logger = logging.getLogger(__name__)


@dataclass
class TargetSummary:
    """Summary of findings for a single target."""
    target: str
    total_findings: int
    critical_count: int
    high_count: int
    medium_count: int
    low_count: int
    top_vuln_types: list[str]
    scan_date: datetime
    risk_score: float = 0.0

    def to_dict(self) -> dict[str, Any]:
        return {
            "target": self.target,
            "total_findings": self.total_findings,
            "critical": self.critical_count,
            "high": self.high_count,
            "medium": self.medium_count,
            "low": self.low_count,
            "top_vuln_types": self.top_vuln_types,
            "scan_date": self.scan_date.isoformat(),
            "risk_score": self.risk_score,
        }


@dataclass
class CommonVulnerability:
    """A vulnerability type found across multiple targets."""
    vuln_type: str
    occurrence_count: int
    affected_targets: list[str]
    average_severity: str
    is_systemic: bool
    remediation_priority: int

    def to_dict(self) -> dict[str, Any]:
        return {
            "vuln_type": self.vuln_type,
            "occurrence_count": self.occurrence_count,
            "affected_targets": self.affected_targets,
            # NOTE: despite the key name, this is a count of affected targets,
            # not a percentage; the total target count is not known at this level.
            "percentage_affected": len(self.affected_targets),
            "average_severity": self.average_severity,
            "is_systemic": self.is_systemic,
            "remediation_priority": self.remediation_priority,
        }


@dataclass
class SystemicIssue:
    """A systemic issue identified across the portfolio."""
    issue_type: str
    description: str
    affected_percentage: float
    affected_targets: list[str]
    root_cause_hypothesis: str
    remediation_recommendation: str
    priority: int  # 1-5, 1 being highest

    def to_dict(self) -> dict[str, Any]:
        return {
            "issue_type": self.issue_type,
            "description": self.description,
            "affected_percentage": self.affected_percentage,
            "affected_targets": self.affected_targets,
            "root_cause_hypothesis": self.root_cause_hypothesis,
            "remediation_recommendation": self.remediation_recommendation,
            "priority": self.priority,
        }


@dataclass
class PortfolioReport:
    """Comprehensive portfolio analysis report."""
    analyzed_at: datetime
    total_targets: int
    total_findings: int

    # Summaries
    target_summaries: list[TargetSummary]
    common_vulnerabilities: list[CommonVulnerability]
    systemic_issues: list[SystemicIssue]

    # Risk metrics
    overall_risk_score: float
    highest_risk_target: str
    lowest_risk_target: str

    # Trends
    vuln_type_distribution: dict[str, int]
    severity_distribution: dict[str, int]

    # Recommendations
    strategic_recommendations: list[str]
    quick_wins: list[str]

    def to_dict(self) -> dict[str, Any]:
        return {
            "analyzed_at": self.analyzed_at.isoformat(),
            "total_targets": self.total_targets,
            "total_findings": self.total_findings,
            "target_summaries": [s.to_dict() for s in self.target_summaries],
            "common_vulnerabilities": [v.to_dict() for v in self.common_vulnerabilities],
            "systemic_issues": [i.to_dict() for i in self.systemic_issues],
            "overall_risk_score": self.overall_risk_score,
            "highest_risk_target": self.highest_risk_target,
            "lowest_risk_target": self.lowest_risk_target,
            "vuln_type_distribution": self.vuln_type_distribution,
            "severity_distribution": self.severity_distribution,
            "strategic_recommendations": self.strategic_recommendations,
            "quick_wins": self.quick_wins,
        }

    def to_executive_summary(self) -> str:
        """Generate executive summary text."""
        lines = [
            f"# Portfolio Security Assessment",
            f"",
            f"**Analysis Date:** {self.analyzed_at.strftime('%Y-%m-%d')}",
            f"**Targets Analyzed:** {self.total_targets}",
            f"**Total Findings:** {self.total_findings}",
            f"**Overall Risk Score:** {self.overall_risk_score:.1f}/100",
            f"",
            f"## Key Findings",
            f"",
        ]

        # Add systemic issues
        if self.systemic_issues:
            lines.append("### Systemic Issues Identified")
            for issue in self.systemic_issues[:3]:
                lines.append(f"- **{issue.issue_type}**: {issue.description}")
                lines.append(f" - Affects {issue.affected_percentage:.0f}% of targets")
            lines.append("")

        # Add common vulnerabilities
        if self.common_vulnerabilities:
            lines.append("### Most Common Vulnerabilities")
            for vuln in self.common_vulnerabilities[:5]:
                lines.append(f"- {vuln.vuln_type}: {vuln.occurrence_count} occurrences across {len(vuln.affected_targets)} targets")
            lines.append("")

        # Add recommendations
        lines.append("## Strategic Recommendations")
        for i, rec in enumerate(self.strategic_recommendations[:5], 1):
            lines.append(f"{i}. {rec}")

        return "\n".join(lines)


class CrossTargetAnalyzer:
    """
    Analyzes findings across multiple targets for patterns and insights.

    Identifies:
    - Common vulnerabilities across targets
    - Systemic issues suggesting organizational problems
    - Risk distribution and prioritization
    - Strategic remediation recommendations

    Example:
        analyzer = CrossTargetAnalyzer()

        # Add findings from multiple targets
        analyzer.add_target_findings("app1.example.com", findings1)
        analyzer.add_target_findings("app2.example.com", findings2)
        analyzer.add_target_findings("api.example.com", findings3)

        # Generate portfolio report
        report = analyzer.analyze_portfolio()
        print(report.to_executive_summary())
    """

    def __init__(self):
        self.target_findings: dict[str, list[Finding]] = {}
        self.scan_dates: dict[str, datetime] = {}

    def add_target_findings(
        self,
        target: str,
        findings: list[Finding],
        scan_date: Optional[datetime] = None,
    ):
        """
        Add findings from a target.

        Args:
            target: Target identifier (domain, IP, app name)
            findings: List of findings from this target
            scan_date: When the scan was performed
        """
        self.target_findings[target] = findings
        self.scan_dates[target] = scan_date or datetime.utcnow()

        logger.debug(f"Added {len(findings)} findings for target: {target}")

    def analyze_portfolio(self) -> PortfolioReport:
        """
        Analyze all targets and generate a portfolio report.

        Returns:
            PortfolioReport with comprehensive analysis
        """
        if not self.target_findings:
            return self._empty_report()

        # Generate target summaries
        target_summaries = [
            self._summarize_target(target, findings)
            for target, findings in self.target_findings.items()
        ]

        # Find common vulnerabilities
        common_vulns = self._find_common_vulnerabilities()

        # Identify systemic issues
        systemic_issues = self._identify_systemic_issues(common_vulns)

        # Calculate distributions
        vuln_distribution = self._calculate_vuln_distribution()
        severity_distribution = self._calculate_severity_distribution()

        # Calculate risk scores
        for summary in target_summaries:
            summary.risk_score = self._calculate_risk_score(summary)

        target_summaries.sort(key=lambda s: s.risk_score, reverse=True)
        overall_risk = sum(s.risk_score for s in target_summaries) / len(target_summaries)

        # Generate recommendations
        recommendations = self._generate_recommendations(common_vulns, systemic_issues)
        quick_wins = self._identify_quick_wins()

        return PortfolioReport(
            analyzed_at=datetime.utcnow(),
            total_targets=len(self.target_findings),
            total_findings=sum(len(f) for f in self.target_findings.values()),
            target_summaries=target_summaries,
            common_vulnerabilities=common_vulns,
            systemic_issues=systemic_issues,
            overall_risk_score=overall_risk,
            highest_risk_target=target_summaries[0].target if target_summaries else "",
            lowest_risk_target=target_summaries[-1].target if target_summaries else "",
            vuln_type_distribution=vuln_distribution,
            severity_distribution=severity_distribution,
            strategic_recommendations=recommendations,
            quick_wins=quick_wins,
        )

    def _summarize_target(self, target: str, findings: list[Finding]) -> TargetSummary:
        """Create a summary for a single target."""
        severity_counts = defaultdict(int)
        vuln_type_counts = defaultdict(int)

        for f in findings:
            severity_counts[f.severity.value] += 1
            vuln_type_counts[f.vuln_type.value] += 1

        # Get top vuln types
        sorted_types = sorted(vuln_type_counts.items(), key=lambda x: x[1], reverse=True)
        top_types = [t[0] for t in sorted_types[:5]]

        return TargetSummary(
            target=target,
            total_findings=len(findings),
            critical_count=severity_counts.get("critical", 0),
            high_count=severity_counts.get("high", 0),
            medium_count=severity_counts.get("medium", 0),
            low_count=severity_counts.get("low", 0),
            top_vuln_types=top_types,
            scan_date=self.scan_dates.get(target, datetime.utcnow()),
        )

    def _find_common_vulnerabilities(self) -> list[CommonVulnerability]:
        """Find vulnerabilities that appear across multiple targets."""
        # Count occurrences of each vuln type per target
        vuln_by_target: dict[str, set[str]] = defaultdict(set)
        vuln_severity: dict[str, list[Severity]] = defaultdict(list)

        for target, findings in self.target_findings.items():
            for f in findings:
                vuln_type = f.vuln_type.value
                vuln_by_target[vuln_type].add(target)
                vuln_severity[vuln_type].append(f.severity)

        # Build common vulnerability list
        common = []
        total_targets = len(self.target_findings)

        for vuln_type, affected_targets in vuln_by_target.items():
            if len(affected_targets) < 2:
                continue  # Only include if affects multiple targets

            # Calculate average severity
            severities = vuln_severity[vuln_type]
            severity_values = {"critical": 4, "high": 3, "medium": 2, "low": 1, "info": 0}
            avg_value = sum(severity_values.get(s.value, 0) for s in severities) / len(severities)
            avg_severity = "critical" if avg_value >= 3.5 else "high" if avg_value >= 2.5 else "medium" if avg_value >= 1.5 else "low"

            # Determine if systemic (affects >50% of targets)
            is_systemic = len(affected_targets) >= total_targets * 0.5

            # Calculate remediation priority
            priority = 1 if is_systemic and avg_value >= 3 else 2 if avg_value >= 3 else 3 if is_systemic else 4

            common.append(CommonVulnerability(
                vuln_type=vuln_type,
                occurrence_count=len(severities),
                affected_targets=list(affected_targets),
                average_severity=avg_severity,
                is_systemic=is_systemic,
                remediation_priority=priority,
            ))

        # Sort by priority
        common.sort(key=lambda c: (c.remediation_priority, -c.occurrence_count))

        return common

    def _identify_systemic_issues(
        self,
        common_vulns: list[CommonVulnerability],
    ) -> list[SystemicIssue]:
        """Identify systemic issues from common vulnerabilities."""
        issues = []
        total_targets = len(self.target_findings)

        # Check for input validation issues
        input_vulns = ["sql_injection", "xss_reflected", "xss_stored", "command_injection"]
        input_affected = set()
        for cv in common_vulns:
            if cv.vuln_type in input_vulns:
                input_affected.update(cv.affected_targets)

        if len(input_affected) >= total_targets * 0.5:
            issues.append(SystemicIssue(
                issue_type="Input Validation",
                description="Widespread input validation failures across the portfolio",
                affected_percentage=(len(input_affected) / total_targets) * 100,
                affected_targets=list(input_affected),
                root_cause_hypothesis="Lack of centralized input validation framework or developer training",
                remediation_recommendation="Implement organization-wide input validation library and secure coding training",
                priority=1,
            ))

        # Check for authentication issues
        auth_vulns = ["auth_bypass", "weak_password", "session_fixation", "idor"]
        auth_affected = set()
        for cv in common_vulns:
            if cv.vuln_type in auth_vulns:
                auth_affected.update(cv.affected_targets)

        if len(auth_affected) >= total_targets * 0.3:
            issues.append(SystemicIssue(
                issue_type="Authentication & Authorization",
                description="Repeated authentication and authorization weaknesses",
                affected_percentage=(len(auth_affected) / total_targets) * 100,
                affected_targets=list(auth_affected),
                root_cause_hypothesis="Inconsistent identity management practices or outdated authentication frameworks",
                remediation_recommendation="Standardize on a secure authentication framework and implement centralized authorization",
                priority=1,
            ))

        # Check for configuration issues
        config_vulns = ["misconfiguration", "default_credentials", "information_disclosure"]
        config_affected = set()
        for cv in common_vulns:
            if cv.vuln_type in config_vulns:
                config_affected.update(cv.affected_targets)

        if len(config_affected) >= total_targets * 0.4:
            issues.append(SystemicIssue(
                issue_type="Security Configuration",
                description="Widespread security misconfiguration issues",
                affected_percentage=(len(config_affected) / total_targets) * 100,
                affected_targets=list(config_affected),
                root_cause_hypothesis="Lack of security hardening standards or deployment automation",
                remediation_recommendation="Implement security configuration baselines and automated compliance checking",
                priority=2,
            ))

        # Check for crypto issues
        crypto_vulns = ["weak_crypto", "sensitive_data_exposure"]
        crypto_affected = set()
        for cv in common_vulns:
            if cv.vuln_type in crypto_vulns:
                crypto_affected.update(cv.affected_targets)

        if len(crypto_affected) >= total_targets * 0.3:
            issues.append(SystemicIssue(
                issue_type="Cryptographic Practices",
                description="Weak cryptography and data protection practices",
                affected_percentage=(len(crypto_affected) / total_targets) * 100,
                affected_targets=list(crypto_affected),
                root_cause_hypothesis="Outdated crypto libraries or lack of data classification",
                remediation_recommendation="Update cryptographic libraries and implement data classification program",
                priority=2,
            ))

        return issues

    def _calculate_vuln_distribution(self) -> dict[str, int]:
        """Calculate vulnerability type distribution."""
        distribution = defaultdict(int)
        for findings in self.target_findings.values():
            for f in findings:
                distribution[f.vuln_type.value] += 1
        return dict(sorted(distribution.items(), key=lambda x: x[1], reverse=True))

    def _calculate_severity_distribution(self) -> dict[str, int]:
        """Calculate severity distribution."""
        distribution = defaultdict(int)
        for findings in self.target_findings.values():
            for f in findings:
                distribution[f.severity.value] += 1
        return dict(distribution)

    def _calculate_risk_score(self, summary: TargetSummary) -> float:
        """Calculate risk score for a target (0-100)."""
        # Weight by severity
        weights = {"critical": 40, "high": 25, "medium": 10, "low": 3}

        raw_score = (
            summary.critical_count * weights["critical"] +
            summary.high_count * weights["high"] +
            summary.medium_count * weights["medium"] +
            summary.low_count * weights["low"]
        )

        # Normalize to 0-100 (cap at 100)
        return min(100, raw_score)

    def _generate_recommendations(
        self,
        common_vulns: list[CommonVulnerability],
        systemic_issues: list[SystemicIssue],
    ) -> list[str]:
        """Generate strategic recommendations."""
        recommendations = []

        # Add recommendations for systemic issues
        for issue in systemic_issues[:3]:
            recommendations.append(issue.remediation_recommendation)

        # Add recommendations for common vulnerabilities
        vuln_recommendations = {
            "sql_injection": "Implement parameterized queries and ORM across all applications",
            "xss_reflected": "Deploy Content Security Policy headers and output encoding libraries",
            "xss_stored": "Implement strict input sanitization and output encoding",
            "idor": "Implement centralized authorization checks on all object access",
            "auth_bypass": "Review and strengthen authentication mechanisms organization-wide",
            "misconfiguration": "Develop security configuration baselines for all platforms",
            "default_credentials": "Establish credential rotation and default password policies",
        }

        for cv in common_vulns[:5]:
            if cv.vuln_type in vuln_recommendations:
                rec = vuln_recommendations[cv.vuln_type]
                if rec not in recommendations:
                    recommendations.append(rec)

        return recommendations[:10]

    def _identify_quick_wins(self) -> list[str]:
        """Identify quick wins across the portfolio."""
        quick_wins = []

        # Check for easy-to-fix issues
        for target, findings in self.target_findings.items():
            for f in findings:
                if f.vuln_type in [VulnerabilityType.DEFAULT_CREDENTIALS, VulnerabilityType.MISCONFIGURATION]:
                    quick_wins.append(f"Fix {f.vuln_type.value} on {target}: {f.title}")

        # Deduplicate and limit
        seen = set()
        unique_wins = []
        for win in quick_wins:
            if win not in seen:
                seen.add(win)
                unique_wins.append(win)

        return unique_wins[:10]

    def _empty_report(self) -> PortfolioReport:
        """Return an empty portfolio report."""
        return PortfolioReport(
            analyzed_at=datetime.utcnow(),
            total_targets=0,
            total_findings=0,
            target_summaries=[],
            common_vulnerabilities=[],
            systemic_issues=[],
            overall_risk_score=0,
            highest_risk_target="",
            lowest_risk_target="",
            vuln_type_distribution={},
            severity_distribution={},
            strategic_recommendations=[],
            quick_wins=[],
        )

    def export_to_json(self) -> str:
        """Export analysis to JSON."""
        report = self.analyze_portfolio()
        return json.dumps(report.to_dict(), indent=2, default=str)

    def clear(self):
        """Clear all target data."""
        self.target_findings.clear()
        self.scan_dates.clear()
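To make the scoring logic above concrete, the following is a minimal standalone sketch, not part of the aiptx wheel, that reproduces the severity weighting used by CrossTargetAnalyzer._calculate_risk_score and the 50% "systemic" threshold used by _find_common_vulnerabilities. The weights (critical 40, high 25, medium 10, low 3, capped at 100) come from the source above; all finding counts and target numbers in the example are made-up illustrative values.

# Standalone illustration (not part of the package): reproduces the
# severity-weighted risk score and the "systemic" threshold documented above.
# All numbers used in the example run are hypothetical.

SEVERITY_WEIGHTS = {"critical": 40, "high": 25, "medium": 10, "low": 3}


def risk_score(critical: int, high: int, medium: int, low: int) -> float:
    """Weighted sum of finding counts, capped at 100 (same formula as _calculate_risk_score)."""
    raw = (
        critical * SEVERITY_WEIGHTS["critical"]
        + high * SEVERITY_WEIGHTS["high"]
        + medium * SEVERITY_WEIGHTS["medium"]
        + low * SEVERITY_WEIGHTS["low"]
    )
    return min(100, raw)


def is_systemic(affected_targets: int, total_targets: int) -> bool:
    """A vulnerability type is flagged systemic when it affects at least half the portfolio."""
    return affected_targets >= total_targets * 0.5


if __name__ == "__main__":
    print(risk_score(1, 0, 0, 0))  # 40: a single critical finding
    print(risk_score(2, 1, 0, 0))  # 100: raw score 105, capped at 100
    print(risk_score(0, 1, 2, 3))  # 54: 25 + 20 + 9
    print(is_systemic(3, 6))       # True: 3 of 6 targets affected
    print(is_systemic(2, 7))       # False: 2 of 7 is below the 50% threshold

Note that the cap means a handful of critical findings saturates a target's score, so ranking among heavily affected targets comes from the sort in analyze_portfolio rather than from score differences.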