aiptx-2.0.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aipt_v2/__init__.py +110 -0
- aipt_v2/__main__.py +24 -0
- aipt_v2/agents/AIPTxAgent/__init__.py +10 -0
- aipt_v2/agents/AIPTxAgent/aiptx_agent.py +211 -0
- aipt_v2/agents/__init__.py +46 -0
- aipt_v2/agents/base.py +520 -0
- aipt_v2/agents/exploit_agent.py +688 -0
- aipt_v2/agents/ptt.py +406 -0
- aipt_v2/agents/state.py +168 -0
- aipt_v2/app.py +957 -0
- aipt_v2/browser/__init__.py +31 -0
- aipt_v2/browser/automation.py +458 -0
- aipt_v2/browser/crawler.py +453 -0
- aipt_v2/cli.py +2933 -0
- aipt_v2/compliance/__init__.py +71 -0
- aipt_v2/compliance/compliance_report.py +449 -0
- aipt_v2/compliance/framework_mapper.py +424 -0
- aipt_v2/compliance/nist_mapping.py +345 -0
- aipt_v2/compliance/owasp_mapping.py +330 -0
- aipt_v2/compliance/pci_mapping.py +297 -0
- aipt_v2/config.py +341 -0
- aipt_v2/core/__init__.py +43 -0
- aipt_v2/core/agent.py +630 -0
- aipt_v2/core/llm.py +395 -0
- aipt_v2/core/memory.py +305 -0
- aipt_v2/core/ptt.py +329 -0
- aipt_v2/database/__init__.py +14 -0
- aipt_v2/database/models.py +232 -0
- aipt_v2/database/repository.py +384 -0
- aipt_v2/docker/__init__.py +23 -0
- aipt_v2/docker/builder.py +260 -0
- aipt_v2/docker/manager.py +222 -0
- aipt_v2/docker/sandbox.py +371 -0
- aipt_v2/evasion/__init__.py +58 -0
- aipt_v2/evasion/request_obfuscator.py +272 -0
- aipt_v2/evasion/tls_fingerprint.py +285 -0
- aipt_v2/evasion/ua_rotator.py +301 -0
- aipt_v2/evasion/waf_bypass.py +439 -0
- aipt_v2/execution/__init__.py +23 -0
- aipt_v2/execution/executor.py +302 -0
- aipt_v2/execution/parser.py +544 -0
- aipt_v2/execution/terminal.py +337 -0
- aipt_v2/health.py +437 -0
- aipt_v2/intelligence/__init__.py +194 -0
- aipt_v2/intelligence/adaptation.py +474 -0
- aipt_v2/intelligence/auth.py +520 -0
- aipt_v2/intelligence/chaining.py +775 -0
- aipt_v2/intelligence/correlation.py +536 -0
- aipt_v2/intelligence/cve_aipt.py +334 -0
- aipt_v2/intelligence/cve_info.py +1111 -0
- aipt_v2/intelligence/knowledge_graph.py +590 -0
- aipt_v2/intelligence/learning.py +626 -0
- aipt_v2/intelligence/llm_analyzer.py +502 -0
- aipt_v2/intelligence/llm_tool_selector.py +518 -0
- aipt_v2/intelligence/payload_generator.py +562 -0
- aipt_v2/intelligence/rag.py +239 -0
- aipt_v2/intelligence/scope.py +442 -0
- aipt_v2/intelligence/searchers/__init__.py +5 -0
- aipt_v2/intelligence/searchers/exploitdb_searcher.py +523 -0
- aipt_v2/intelligence/searchers/github_searcher.py +467 -0
- aipt_v2/intelligence/searchers/google_searcher.py +281 -0
- aipt_v2/intelligence/tools.json +443 -0
- aipt_v2/intelligence/triage.py +670 -0
- aipt_v2/interactive_shell.py +559 -0
- aipt_v2/interface/__init__.py +5 -0
- aipt_v2/interface/cli.py +230 -0
- aipt_v2/interface/main.py +501 -0
- aipt_v2/interface/tui.py +1276 -0
- aipt_v2/interface/utils.py +583 -0
- aipt_v2/llm/__init__.py +39 -0
- aipt_v2/llm/config.py +26 -0
- aipt_v2/llm/llm.py +514 -0
- aipt_v2/llm/memory.py +214 -0
- aipt_v2/llm/request_queue.py +89 -0
- aipt_v2/llm/utils.py +89 -0
- aipt_v2/local_tool_installer.py +1467 -0
- aipt_v2/models/__init__.py +15 -0
- aipt_v2/models/findings.py +295 -0
- aipt_v2/models/phase_result.py +224 -0
- aipt_v2/models/scan_config.py +207 -0
- aipt_v2/monitoring/grafana/dashboards/aipt-dashboard.json +355 -0
- aipt_v2/monitoring/grafana/dashboards/default.yml +17 -0
- aipt_v2/monitoring/grafana/datasources/prometheus.yml +17 -0
- aipt_v2/monitoring/prometheus.yml +60 -0
- aipt_v2/orchestration/__init__.py +52 -0
- aipt_v2/orchestration/pipeline.py +398 -0
- aipt_v2/orchestration/progress.py +300 -0
- aipt_v2/orchestration/scheduler.py +296 -0
- aipt_v2/orchestrator.py +2427 -0
- aipt_v2/payloads/__init__.py +27 -0
- aipt_v2/payloads/cmdi.py +150 -0
- aipt_v2/payloads/sqli.py +263 -0
- aipt_v2/payloads/ssrf.py +204 -0
- aipt_v2/payloads/templates.py +222 -0
- aipt_v2/payloads/traversal.py +166 -0
- aipt_v2/payloads/xss.py +204 -0
- aipt_v2/prompts/__init__.py +60 -0
- aipt_v2/proxy/__init__.py +29 -0
- aipt_v2/proxy/history.py +352 -0
- aipt_v2/proxy/interceptor.py +452 -0
- aipt_v2/recon/__init__.py +44 -0
- aipt_v2/recon/dns.py +241 -0
- aipt_v2/recon/osint.py +367 -0
- aipt_v2/recon/subdomain.py +372 -0
- aipt_v2/recon/tech_detect.py +311 -0
- aipt_v2/reports/__init__.py +17 -0
- aipt_v2/reports/generator.py +313 -0
- aipt_v2/reports/html_report.py +378 -0
- aipt_v2/runtime/__init__.py +53 -0
- aipt_v2/runtime/base.py +30 -0
- aipt_v2/runtime/docker.py +401 -0
- aipt_v2/runtime/local.py +346 -0
- aipt_v2/runtime/tool_server.py +205 -0
- aipt_v2/runtime/vps.py +830 -0
- aipt_v2/scanners/__init__.py +28 -0
- aipt_v2/scanners/base.py +273 -0
- aipt_v2/scanners/nikto.py +244 -0
- aipt_v2/scanners/nmap.py +402 -0
- aipt_v2/scanners/nuclei.py +273 -0
- aipt_v2/scanners/web.py +454 -0
- aipt_v2/scripts/security_audit.py +366 -0
- aipt_v2/setup_wizard.py +941 -0
- aipt_v2/skills/__init__.py +80 -0
- aipt_v2/skills/agents/__init__.py +14 -0
- aipt_v2/skills/agents/api_tester.py +706 -0
- aipt_v2/skills/agents/base.py +477 -0
- aipt_v2/skills/agents/code_review.py +459 -0
- aipt_v2/skills/agents/security_agent.py +336 -0
- aipt_v2/skills/agents/web_pentest.py +818 -0
- aipt_v2/skills/prompts/__init__.py +647 -0
- aipt_v2/system_detector.py +539 -0
- aipt_v2/telemetry/__init__.py +7 -0
- aipt_v2/telemetry/tracer.py +347 -0
- aipt_v2/terminal/__init__.py +28 -0
- aipt_v2/terminal/executor.py +400 -0
- aipt_v2/terminal/sandbox.py +350 -0
- aipt_v2/tools/__init__.py +44 -0
- aipt_v2/tools/active_directory/__init__.py +78 -0
- aipt_v2/tools/active_directory/ad_config.py +238 -0
- aipt_v2/tools/active_directory/bloodhound_wrapper.py +447 -0
- aipt_v2/tools/active_directory/kerberos_attacks.py +430 -0
- aipt_v2/tools/active_directory/ldap_enum.py +533 -0
- aipt_v2/tools/active_directory/smb_attacks.py +505 -0
- aipt_v2/tools/agents_graph/__init__.py +19 -0
- aipt_v2/tools/agents_graph/agents_graph_actions.py +69 -0
- aipt_v2/tools/api_security/__init__.py +76 -0
- aipt_v2/tools/api_security/api_discovery.py +608 -0
- aipt_v2/tools/api_security/graphql_scanner.py +622 -0
- aipt_v2/tools/api_security/jwt_analyzer.py +577 -0
- aipt_v2/tools/api_security/openapi_fuzzer.py +761 -0
- aipt_v2/tools/browser/__init__.py +5 -0
- aipt_v2/tools/browser/browser_actions.py +238 -0
- aipt_v2/tools/browser/browser_instance.py +535 -0
- aipt_v2/tools/browser/tab_manager.py +344 -0
- aipt_v2/tools/cloud/__init__.py +70 -0
- aipt_v2/tools/cloud/cloud_config.py +273 -0
- aipt_v2/tools/cloud/cloud_scanner.py +639 -0
- aipt_v2/tools/cloud/prowler_tool.py +571 -0
- aipt_v2/tools/cloud/scoutsuite_tool.py +359 -0
- aipt_v2/tools/executor.py +307 -0
- aipt_v2/tools/parser.py +408 -0
- aipt_v2/tools/proxy/__init__.py +5 -0
- aipt_v2/tools/proxy/proxy_actions.py +103 -0
- aipt_v2/tools/proxy/proxy_manager.py +789 -0
- aipt_v2/tools/registry.py +196 -0
- aipt_v2/tools/scanners/__init__.py +343 -0
- aipt_v2/tools/scanners/acunetix_tool.py +712 -0
- aipt_v2/tools/scanners/burp_tool.py +631 -0
- aipt_v2/tools/scanners/config.py +156 -0
- aipt_v2/tools/scanners/nessus_tool.py +588 -0
- aipt_v2/tools/scanners/zap_tool.py +612 -0
- aipt_v2/tools/terminal/__init__.py +5 -0
- aipt_v2/tools/terminal/terminal_actions.py +37 -0
- aipt_v2/tools/terminal/terminal_manager.py +153 -0
- aipt_v2/tools/terminal/terminal_session.py +449 -0
- aipt_v2/tools/tool_processing.py +108 -0
- aipt_v2/utils/__init__.py +17 -0
- aipt_v2/utils/logging.py +202 -0
- aipt_v2/utils/model_manager.py +187 -0
- aipt_v2/utils/searchers/__init__.py +269 -0
- aipt_v2/verify_install.py +793 -0
- aiptx-2.0.7.dist-info/METADATA +345 -0
- aiptx-2.0.7.dist-info/RECORD +187 -0
- aiptx-2.0.7.dist-info/WHEEL +5 -0
- aiptx-2.0.7.dist-info/entry_points.txt +7 -0
- aiptx-2.0.7.dist-info/licenses/LICENSE +21 -0
- aiptx-2.0.7.dist-info/top_level.txt +1 -0
aipt_v2/intelligence/knowledge_graph.py

@@ -0,0 +1,590 @@
"""
AIPT Security Knowledge Graph

Graph-based storage and analysis of security findings:
- Stores findings as nodes with relationships
- Finds attack paths through graph traversal
- Correlates findings across multiple scans
- Maps to MITRE ATT&CK techniques

This provides a structured way to understand relationships between findings.
"""
from __future__ import annotations

import json
import logging
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Optional
from collections import defaultdict

from aipt_v2.models.findings import Finding, Severity, VulnerabilityType

logger = logging.getLogger(__name__)


# MITRE ATT&CK technique mapping
VULN_TO_MITRE = {
    VulnerabilityType.SQL_INJECTION: {
        "technique_id": "T1190",
        "technique_name": "Exploit Public-Facing Application",
        "tactic": "Initial Access",
    },
    VulnerabilityType.COMMAND_INJECTION: {
        "technique_id": "T1059",
        "technique_name": "Command and Scripting Interpreter",
        "tactic": "Execution",
    },
    VulnerabilityType.RCE: {
        "technique_id": "T1203",
        "technique_name": "Exploitation for Client Execution",
        "tactic": "Execution",
    },
    VulnerabilityType.AUTH_BYPASS: {
        "technique_id": "T1078",
        "technique_name": "Valid Accounts",
        "tactic": "Persistence",
    },
    VulnerabilityType.SSRF: {
        "technique_id": "T1090",
        "technique_name": "Proxy",
        "tactic": "Command and Control",
    },
    VulnerabilityType.XSS_STORED: {
        "technique_id": "T1189",
        "technique_name": "Drive-by Compromise",
        "tactic": "Initial Access",
    },
    VulnerabilityType.IDOR: {
        "technique_id": "T1530",
        "technique_name": "Data from Cloud Storage Object",
        "tactic": "Collection",
    },
    VulnerabilityType.FILE_INCLUSION: {
        "technique_id": "T1005",
        "technique_name": "Data from Local System",
        "tactic": "Collection",
    },
    VulnerabilityType.PRIVILEGE_ESCALATION: {
        "technique_id": "T1068",
        "technique_name": "Exploitation for Privilege Escalation",
        "tactic": "Privilege Escalation",
    },
}


@dataclass
class GraphNode:
    """A node in the security knowledge graph."""
    node_id: str
    node_type: str  # "finding", "target", "technique", "asset"
    data: dict[str, Any]
    metadata: dict[str, Any] = field(default_factory=dict)
    created_at: datetime = field(default_factory=datetime.utcnow)


@dataclass
class GraphEdge:
    """An edge connecting two nodes in the graph."""
    source_id: str
    target_id: str
    relation: str  # "has_vulnerability", "leads_to", "uses_technique", etc.
    weight: float = 1.0
    metadata: dict[str, Any] = field(default_factory=dict)


@dataclass
class AttackPath:
    """A path through the graph representing an attack chain."""
    nodes: list[GraphNode]
    edges: list[GraphEdge]
    total_weight: float
    start_node: str
    end_node: str

    @property
    def length(self) -> int:
        return len(self.nodes)

    def to_dict(self) -> dict[str, Any]:
        return {
            "nodes": [n.node_id for n in self.nodes],
            "edges": [(e.source_id, e.relation, e.target_id) for e in self.edges],
            "total_weight": self.total_weight,
            "length": self.length,
        }


class SecurityKnowledgeGraph:
    """
    Graph-based storage and analysis for security findings.

    Provides graph operations for:
    - Storing findings with relationships
    - Finding attack paths
    - Correlating findings
    - Mapping to MITRE ATT&CK

    Example:
        graph = SecurityKnowledgeGraph()

        # Add findings
        for finding in findings:
            graph.add_finding(finding)

        # Find attack paths to RCE
        paths = graph.find_attack_paths(
            goal_type="rce",
            max_depth=5
        )

        # Get MITRE mapping
        mitre_map = graph.get_mitre_coverage()
    """

    def __init__(self):
        self.nodes: dict[str, GraphNode] = {}
        self.edges: list[GraphEdge] = []
        self._adjacency: dict[str, list[tuple[str, str]]] = defaultdict(list)
        self._reverse_adjacency: dict[str, list[tuple[str, str]]] = defaultdict(list)

    def add_finding(self, finding: Finding, target: str = None):
        """
        Add a finding to the graph.

        Creates nodes for the finding, its target, and MITRE technique,
        with appropriate edges connecting them.

        Args:
            finding: The finding to add
            target: Optional target identifier
        """
        # Create finding node
        finding_id = f"finding:{finding.fingerprint}"
        finding_node = GraphNode(
            node_id=finding_id,
            node_type="finding",
            data=finding.to_dict(),
            metadata={
                "severity": finding.severity.value,
                "vuln_type": finding.vuln_type.value,
                "confirmed": finding.confirmed,
            },
        )
        self._add_node(finding_node)

        # Create or link to target node
        target_id = target or self._extract_host(finding.url)
        if target_id:
            target_node_id = f"target:{target_id}"
            if target_node_id not in self.nodes:
                target_node = GraphNode(
                    node_id=target_node_id,
                    node_type="target",
                    data={"host": target_id},
                )
                self._add_node(target_node)

            # Connect finding to target
            self._add_edge(GraphEdge(
                source_id=target_node_id,
                target_id=finding_id,
                relation="has_vulnerability",
                weight=self._severity_to_weight(finding.severity),
            ))

        # Create MITRE technique node if mapped
        mitre_info = VULN_TO_MITRE.get(finding.vuln_type)
        if mitre_info:
            technique_id = f"technique:{mitre_info['technique_id']}"
            if technique_id not in self.nodes:
                technique_node = GraphNode(
                    node_id=technique_id,
                    node_type="technique",
                    data=mitre_info,
                )
                self._add_node(technique_node)

            # Connect finding to technique
            self._add_edge(GraphEdge(
                source_id=finding_id,
                target_id=technique_id,
                relation="uses_technique",
            ))

        # Add potential chain edges based on vulnerability type
        self._add_chain_edges(finding, finding_id)

        logger.debug(f"Added finding to graph: {finding.title}")

    def _add_chain_edges(self, finding: Finding, finding_id: str):
        """Add edges for potential vulnerability chains."""
        # Define what each vuln type can lead to
        leads_to = {
            VulnerabilityType.SSRF: [VulnerabilityType.RCE, VulnerabilityType.SQL_INJECTION],
            VulnerabilityType.SQL_INJECTION: [VulnerabilityType.AUTH_BYPASS, VulnerabilityType.RCE],
            VulnerabilityType.XSS_STORED: [VulnerabilityType.AUTH_BYPASS],
            VulnerabilityType.FILE_INCLUSION: [VulnerabilityType.RCE],
            VulnerabilityType.AUTH_BYPASS: [VulnerabilityType.PRIVILEGE_ESCALATION],
            VulnerabilityType.IDOR: [VulnerabilityType.PRIVILEGE_ESCALATION],
        }

        targets = leads_to.get(finding.vuln_type, [])

        # Find existing findings that this could chain to
        for node_id, node in self.nodes.items():
            if node.node_type != "finding":
                continue

            node_vuln_type = node.metadata.get("vuln_type")
            if node_vuln_type and VulnerabilityType(node_vuln_type) in targets:
                # Check if same target
                if self._same_target(finding, node):
                    self._add_edge(GraphEdge(
                        source_id=finding_id,
                        target_id=node_id,
                        relation="leads_to",
                        weight=0.8,
                    ))

    def _same_target(self, finding: Finding, node: GraphNode) -> bool:
        """Check if finding and node are for the same target."""
        try:
            node_url = node.data.get("url", "")
            return self._extract_host(finding.url) == self._extract_host(node_url)
        except Exception:
            return False

    def add_asset(self, asset_id: str, asset_type: str, metadata: dict = None):
        """
        Add an asset node (subdomain, IP, service, etc.)

        Args:
            asset_id: Unique identifier for the asset
            asset_type: Type of asset (subdomain, ip, service, port)
            metadata: Additional metadata
        """
        node_id = f"asset:{asset_id}"
        node = GraphNode(
            node_id=node_id,
            node_type="asset",
            data={
                "asset_id": asset_id,
                "asset_type": asset_type,
            },
            metadata=metadata or {},
        )
        self._add_node(node)

    def link_finding_to_asset(self, finding: Finding, asset_id: str):
        """Link a finding to a discovered asset."""
        finding_id = f"finding:{finding.fingerprint}"
        asset_node_id = f"asset:{asset_id}"

        if finding_id in self.nodes and asset_node_id in self.nodes:
            self._add_edge(GraphEdge(
                source_id=asset_node_id,
                target_id=finding_id,
                relation="has_vulnerability",
            ))

    def find_attack_paths(
        self,
        start_type: str = None,
        goal_type: str = "rce",
        max_depth: int = 5,
    ) -> list[AttackPath]:
        """
        Find attack paths through the graph.

        Args:
            start_type: Starting vulnerability type (None for all entry points)
            goal_type: Goal vulnerability type to reach
            max_depth: Maximum path depth

        Returns:
            List of AttackPath objects representing possible attack chains
        """
        paths = []

        # Find all potential starting nodes
        start_nodes = []
        for node_id, node in self.nodes.items():
            if node.node_type != "finding":
                continue

            if start_type:
                if node.metadata.get("vuln_type") == start_type:
                    start_nodes.append(node_id)
            else:
                # Use entry-point vulnerabilities as starts
                entry_types = ["sql_injection", "xss_reflected", "ssrf", "command_injection"]
                if node.metadata.get("vuln_type") in entry_types:
                    start_nodes.append(node_id)

        # Find goal nodes
        goal_nodes = []
        for node_id, node in self.nodes.items():
            if node.node_type == "finding" and node.metadata.get("vuln_type") == goal_type:
                goal_nodes.append(node_id)

        # BFS/DFS to find paths
        for start in start_nodes:
            for goal in goal_nodes:
                if start == goal:
                    continue
                found_paths = self._find_paths_bfs(start, goal, max_depth)
                paths.extend(found_paths)

        # Sort by total weight (lower is better - shorter/simpler paths)
        paths.sort(key=lambda p: p.total_weight)

        return paths[:10]  # Return top 10 paths

    def _find_paths_bfs(
        self,
        start: str,
        goal: str,
        max_depth: int,
    ) -> list[AttackPath]:
        """Find paths using BFS."""
        paths = []
        queue = [(start, [start], [], 0.0)]  # (current, path, edges, weight)
        visited_paths = set()

        while queue:
            current, path, edges, weight = queue.pop(0)

            if len(path) > max_depth:
                continue

            if current == goal:
                path_key = tuple(path)
                if path_key not in visited_paths:
                    visited_paths.add(path_key)
                    paths.append(AttackPath(
                        nodes=[self.nodes[n] for n in path],
                        edges=edges,
                        total_weight=weight,
                        start_node=start,
                        end_node=goal,
                    ))
                continue

            for neighbor, relation in self._adjacency.get(current, []):
                if neighbor not in path:  # Avoid cycles
                    edge = self._find_edge(current, neighbor, relation)
                    new_weight = weight + (edge.weight if edge else 1.0)
                    queue.append((
                        neighbor,
                        path + [neighbor],
                        edges + [edge] if edge else edges,
                        new_weight,
                    ))

        return paths

    def get_mitre_coverage(self) -> dict[str, Any]:
        """
        Get MITRE ATT&CK coverage based on findings.

        Returns:
            Dictionary with tactics and techniques covered
        """
        coverage = {
            "tactics": defaultdict(list),
            "techniques": [],
            "total_techniques": 0,
        }

        technique_nodes = [n for n in self.nodes.values() if n.node_type == "technique"]

        for node in technique_nodes:
            technique_id = node.data.get("technique_id")
            technique_name = node.data.get("technique_name")
            tactic = node.data.get("tactic")

            if technique_id:
                coverage["techniques"].append({
                    "id": technique_id,
                    "name": technique_name,
                    "tactic": tactic,
                })
                coverage["tactics"][tactic].append(technique_id)

        coverage["total_techniques"] = len(coverage["techniques"])

        return dict(coverage)

    def get_finding_relationships(self, finding: Finding) -> dict[str, Any]:
        """
        Get all relationships for a specific finding.

        Args:
            finding: The finding to analyze

        Returns:
            Dictionary of relationships
        """
        finding_id = f"finding:{finding.fingerprint}"

        if finding_id not in self.nodes:
            return {"error": "Finding not in graph"}

        # Get outgoing edges
        outgoing = []
        for neighbor, relation in self._adjacency.get(finding_id, []):
            outgoing.append({
                "target": neighbor,
                "relation": relation,
                "target_type": self.nodes[neighbor].node_type if neighbor in self.nodes else "unknown",
            })

        # Get incoming edges
        incoming = []
        for neighbor, relation in self._reverse_adjacency.get(finding_id, []):
            incoming.append({
                "source": neighbor,
                "relation": relation,
                "source_type": self.nodes[neighbor].node_type if neighbor in self.nodes else "unknown",
            })

        return {
            "finding_id": finding_id,
            "outgoing": outgoing,
            "incoming": incoming,
            "mitre_technique": self._get_mitre_for_finding(finding_id),
        }

    def _get_mitre_for_finding(self, finding_id: str) -> Optional[dict]:
        """Get MITRE technique for a finding."""
        for neighbor, relation in self._adjacency.get(finding_id, []):
            if relation == "uses_technique" and neighbor in self.nodes:
                return self.nodes[neighbor].data
        return None

    def get_statistics(self) -> dict[str, Any]:
        """Get graph statistics."""
        finding_nodes = [n for n in self.nodes.values() if n.node_type == "finding"]
        target_nodes = [n for n in self.nodes.values() if n.node_type == "target"]

        # Count by severity
        by_severity = defaultdict(int)
        for node in finding_nodes:
            sev = node.metadata.get("severity", "unknown")
            by_severity[sev] += 1

        # Count by vuln type
        by_type = defaultdict(int)
        for node in finding_nodes:
            vtype = node.metadata.get("vuln_type", "unknown")
            by_type[vtype] += 1

        return {
            "total_nodes": len(self.nodes),
            "total_edges": len(self.edges),
            "findings": len(finding_nodes),
            "targets": len(target_nodes),
            "by_severity": dict(by_severity),
            "by_vuln_type": dict(by_type),
            "mitre_coverage": self.get_mitre_coverage(),
        }

    def export_to_json(self) -> str:
        """Export graph to JSON format."""
        data = {
            "nodes": [
                {
                    "id": node.node_id,
                    "type": node.node_type,
                    "data": node.data,
                    "metadata": node.metadata,
                }
                for node in self.nodes.values()
            ],
            "edges": [
                {
                    "source": edge.source_id,
                    "target": edge.target_id,
                    "relation": edge.relation,
                    "weight": edge.weight,
                }
                for edge in self.edges
            ],
        }
        return json.dumps(data, indent=2, default=str)

    def import_from_json(self, json_str: str):
        """Import graph from JSON format."""
        data = json.loads(json_str)

        for node_data in data.get("nodes", []):
            node = GraphNode(
                node_id=node_data["id"],
                node_type=node_data["type"],
                data=node_data.get("data", {}),
                metadata=node_data.get("metadata", {}),
            )
            self._add_node(node)

        for edge_data in data.get("edges", []):
            edge = GraphEdge(
                source_id=edge_data["source"],
                target_id=edge_data["target"],
                relation=edge_data["relation"],
                weight=edge_data.get("weight", 1.0),
            )
            self._add_edge(edge)

    def _add_node(self, node: GraphNode):
        """Add a node to the graph."""
        self.nodes[node.node_id] = node

    def _add_edge(self, edge: GraphEdge):
        """Add an edge to the graph."""
        # Avoid duplicate edges
        for existing in self.edges:
            if (existing.source_id == edge.source_id and
                existing.target_id == edge.target_id and
                existing.relation == edge.relation):
                return

        self.edges.append(edge)
        self._adjacency[edge.source_id].append((edge.target_id, edge.relation))
        self._reverse_adjacency[edge.target_id].append((edge.source_id, edge.relation))

    def _find_edge(self, source: str, target: str, relation: str) -> Optional[GraphEdge]:
        """Find an edge by source, target, and relation."""
        for edge in self.edges:
            if (edge.source_id == source and
                edge.target_id == target and
                edge.relation == relation):
                return edge
        return None

    def _extract_host(self, url: str) -> str:
        """Extract host from URL."""
        try:
            from urllib.parse import urlparse
            parsed = urlparse(url)
            return parsed.netloc or parsed.path.split("/")[0]
        except Exception:
            return url

    def _severity_to_weight(self, severity: Severity) -> float:
        """Convert severity to edge weight (lower = more important)."""
        weights = {
            Severity.CRITICAL: 0.2,
            Severity.HIGH: 0.4,
            Severity.MEDIUM: 0.6,
            Severity.LOW: 0.8,
            Severity.INFO: 1.0,
        }
        return weights.get(severity, 0.5)

    def clear(self):
        """Clear all data from the graph."""
        self.nodes.clear()
        self.edges.clear()
        self._adjacency.clear()
        self._reverse_adjacency.clear()
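
Below is a minimal usage sketch of the module added by this diff. It is an illustration only, limited to the API visible above (SecurityKnowledgeGraph, add_asset, find_attack_paths, get_statistics, export_to_json, import_from_json); the hostnames and metadata values are hypothetical placeholders, and real findings would come from aipt_v2.models.findings.Finding instances produced elsewhere in the package, whose construction is not shown in this file.

    from aipt_v2.intelligence.knowledge_graph import SecurityKnowledgeGraph

    graph = SecurityKnowledgeGraph()

    # Register discovered assets as nodes (asset_type is a free-form string such as
    # "subdomain", "ip", "service", or "port" per the add_asset docstring).
    graph.add_asset("api.example.com", "subdomain", metadata={"source": "recon"})
    graph.add_asset("203.0.113.10", "ip")

    # Findings are added with graph.add_finding(finding), where finding is an
    # aipt_v2.models.findings.Finding produced by a scanner; omitted here.

    # Search for attack chains ending in RCE (returns [] if no findings are loaded).
    paths = graph.find_attack_paths(goal_type="rce", max_depth=5)
    for path in paths:
        print(path.to_dict())

    # Summary statistics include severity counts and MITRE ATT&CK coverage.
    stats = graph.get_statistics()
    print(stats["total_nodes"], stats["by_severity"], stats["mitre_coverage"]["total_techniques"])

    # Persist and restore the graph between scans via JSON.
    snapshot = graph.export_to_json()
    restored = SecurityKnowledgeGraph()
    restored.import_from_json(snapshot)

As written, the graph is held entirely in memory and serialized to plain JSON, so snapshots can be carried between scans without an external graph database.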