aiptx 2.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aipt_v2/__init__.py +110 -0
- aipt_v2/__main__.py +24 -0
- aipt_v2/agents/AIPTxAgent/__init__.py +10 -0
- aipt_v2/agents/AIPTxAgent/aiptx_agent.py +211 -0
- aipt_v2/agents/__init__.py +46 -0
- aipt_v2/agents/base.py +520 -0
- aipt_v2/agents/exploit_agent.py +688 -0
- aipt_v2/agents/ptt.py +406 -0
- aipt_v2/agents/state.py +168 -0
- aipt_v2/app.py +957 -0
- aipt_v2/browser/__init__.py +31 -0
- aipt_v2/browser/automation.py +458 -0
- aipt_v2/browser/crawler.py +453 -0
- aipt_v2/cli.py +2933 -0
- aipt_v2/compliance/__init__.py +71 -0
- aipt_v2/compliance/compliance_report.py +449 -0
- aipt_v2/compliance/framework_mapper.py +424 -0
- aipt_v2/compliance/nist_mapping.py +345 -0
- aipt_v2/compliance/owasp_mapping.py +330 -0
- aipt_v2/compliance/pci_mapping.py +297 -0
- aipt_v2/config.py +341 -0
- aipt_v2/core/__init__.py +43 -0
- aipt_v2/core/agent.py +630 -0
- aipt_v2/core/llm.py +395 -0
- aipt_v2/core/memory.py +305 -0
- aipt_v2/core/ptt.py +329 -0
- aipt_v2/database/__init__.py +14 -0
- aipt_v2/database/models.py +232 -0
- aipt_v2/database/repository.py +384 -0
- aipt_v2/docker/__init__.py +23 -0
- aipt_v2/docker/builder.py +260 -0
- aipt_v2/docker/manager.py +222 -0
- aipt_v2/docker/sandbox.py +371 -0
- aipt_v2/evasion/__init__.py +58 -0
- aipt_v2/evasion/request_obfuscator.py +272 -0
- aipt_v2/evasion/tls_fingerprint.py +285 -0
- aipt_v2/evasion/ua_rotator.py +301 -0
- aipt_v2/evasion/waf_bypass.py +439 -0
- aipt_v2/execution/__init__.py +23 -0
- aipt_v2/execution/executor.py +302 -0
- aipt_v2/execution/parser.py +544 -0
- aipt_v2/execution/terminal.py +337 -0
- aipt_v2/health.py +437 -0
- aipt_v2/intelligence/__init__.py +194 -0
- aipt_v2/intelligence/adaptation.py +474 -0
- aipt_v2/intelligence/auth.py +520 -0
- aipt_v2/intelligence/chaining.py +775 -0
- aipt_v2/intelligence/correlation.py +536 -0
- aipt_v2/intelligence/cve_aipt.py +334 -0
- aipt_v2/intelligence/cve_info.py +1111 -0
- aipt_v2/intelligence/knowledge_graph.py +590 -0
- aipt_v2/intelligence/learning.py +626 -0
- aipt_v2/intelligence/llm_analyzer.py +502 -0
- aipt_v2/intelligence/llm_tool_selector.py +518 -0
- aipt_v2/intelligence/payload_generator.py +562 -0
- aipt_v2/intelligence/rag.py +239 -0
- aipt_v2/intelligence/scope.py +442 -0
- aipt_v2/intelligence/searchers/__init__.py +5 -0
- aipt_v2/intelligence/searchers/exploitdb_searcher.py +523 -0
- aipt_v2/intelligence/searchers/github_searcher.py +467 -0
- aipt_v2/intelligence/searchers/google_searcher.py +281 -0
- aipt_v2/intelligence/tools.json +443 -0
- aipt_v2/intelligence/triage.py +670 -0
- aipt_v2/interactive_shell.py +559 -0
- aipt_v2/interface/__init__.py +5 -0
- aipt_v2/interface/cli.py +230 -0
- aipt_v2/interface/main.py +501 -0
- aipt_v2/interface/tui.py +1276 -0
- aipt_v2/interface/utils.py +583 -0
- aipt_v2/llm/__init__.py +39 -0
- aipt_v2/llm/config.py +26 -0
- aipt_v2/llm/llm.py +514 -0
- aipt_v2/llm/memory.py +214 -0
- aipt_v2/llm/request_queue.py +89 -0
- aipt_v2/llm/utils.py +89 -0
- aipt_v2/local_tool_installer.py +1467 -0
- aipt_v2/models/__init__.py +15 -0
- aipt_v2/models/findings.py +295 -0
- aipt_v2/models/phase_result.py +224 -0
- aipt_v2/models/scan_config.py +207 -0
- aipt_v2/monitoring/grafana/dashboards/aipt-dashboard.json +355 -0
- aipt_v2/monitoring/grafana/dashboards/default.yml +17 -0
- aipt_v2/monitoring/grafana/datasources/prometheus.yml +17 -0
- aipt_v2/monitoring/prometheus.yml +60 -0
- aipt_v2/orchestration/__init__.py +52 -0
- aipt_v2/orchestration/pipeline.py +398 -0
- aipt_v2/orchestration/progress.py +300 -0
- aipt_v2/orchestration/scheduler.py +296 -0
- aipt_v2/orchestrator.py +2427 -0
- aipt_v2/payloads/__init__.py +27 -0
- aipt_v2/payloads/cmdi.py +150 -0
- aipt_v2/payloads/sqli.py +263 -0
- aipt_v2/payloads/ssrf.py +204 -0
- aipt_v2/payloads/templates.py +222 -0
- aipt_v2/payloads/traversal.py +166 -0
- aipt_v2/payloads/xss.py +204 -0
- aipt_v2/prompts/__init__.py +60 -0
- aipt_v2/proxy/__init__.py +29 -0
- aipt_v2/proxy/history.py +352 -0
- aipt_v2/proxy/interceptor.py +452 -0
- aipt_v2/recon/__init__.py +44 -0
- aipt_v2/recon/dns.py +241 -0
- aipt_v2/recon/osint.py +367 -0
- aipt_v2/recon/subdomain.py +372 -0
- aipt_v2/recon/tech_detect.py +311 -0
- aipt_v2/reports/__init__.py +17 -0
- aipt_v2/reports/generator.py +313 -0
- aipt_v2/reports/html_report.py +378 -0
- aipt_v2/runtime/__init__.py +53 -0
- aipt_v2/runtime/base.py +30 -0
- aipt_v2/runtime/docker.py +401 -0
- aipt_v2/runtime/local.py +346 -0
- aipt_v2/runtime/tool_server.py +205 -0
- aipt_v2/runtime/vps.py +830 -0
- aipt_v2/scanners/__init__.py +28 -0
- aipt_v2/scanners/base.py +273 -0
- aipt_v2/scanners/nikto.py +244 -0
- aipt_v2/scanners/nmap.py +402 -0
- aipt_v2/scanners/nuclei.py +273 -0
- aipt_v2/scanners/web.py +454 -0
- aipt_v2/scripts/security_audit.py +366 -0
- aipt_v2/setup_wizard.py +941 -0
- aipt_v2/skills/__init__.py +80 -0
- aipt_v2/skills/agents/__init__.py +14 -0
- aipt_v2/skills/agents/api_tester.py +706 -0
- aipt_v2/skills/agents/base.py +477 -0
- aipt_v2/skills/agents/code_review.py +459 -0
- aipt_v2/skills/agents/security_agent.py +336 -0
- aipt_v2/skills/agents/web_pentest.py +818 -0
- aipt_v2/skills/prompts/__init__.py +647 -0
- aipt_v2/system_detector.py +539 -0
- aipt_v2/telemetry/__init__.py +7 -0
- aipt_v2/telemetry/tracer.py +347 -0
- aipt_v2/terminal/__init__.py +28 -0
- aipt_v2/terminal/executor.py +400 -0
- aipt_v2/terminal/sandbox.py +350 -0
- aipt_v2/tools/__init__.py +44 -0
- aipt_v2/tools/active_directory/__init__.py +78 -0
- aipt_v2/tools/active_directory/ad_config.py +238 -0
- aipt_v2/tools/active_directory/bloodhound_wrapper.py +447 -0
- aipt_v2/tools/active_directory/kerberos_attacks.py +430 -0
- aipt_v2/tools/active_directory/ldap_enum.py +533 -0
- aipt_v2/tools/active_directory/smb_attacks.py +505 -0
- aipt_v2/tools/agents_graph/__init__.py +19 -0
- aipt_v2/tools/agents_graph/agents_graph_actions.py +69 -0
- aipt_v2/tools/api_security/__init__.py +76 -0
- aipt_v2/tools/api_security/api_discovery.py +608 -0
- aipt_v2/tools/api_security/graphql_scanner.py +622 -0
- aipt_v2/tools/api_security/jwt_analyzer.py +577 -0
- aipt_v2/tools/api_security/openapi_fuzzer.py +761 -0
- aipt_v2/tools/browser/__init__.py +5 -0
- aipt_v2/tools/browser/browser_actions.py +238 -0
- aipt_v2/tools/browser/browser_instance.py +535 -0
- aipt_v2/tools/browser/tab_manager.py +344 -0
- aipt_v2/tools/cloud/__init__.py +70 -0
- aipt_v2/tools/cloud/cloud_config.py +273 -0
- aipt_v2/tools/cloud/cloud_scanner.py +639 -0
- aipt_v2/tools/cloud/prowler_tool.py +571 -0
- aipt_v2/tools/cloud/scoutsuite_tool.py +359 -0
- aipt_v2/tools/executor.py +307 -0
- aipt_v2/tools/parser.py +408 -0
- aipt_v2/tools/proxy/__init__.py +5 -0
- aipt_v2/tools/proxy/proxy_actions.py +103 -0
- aipt_v2/tools/proxy/proxy_manager.py +789 -0
- aipt_v2/tools/registry.py +196 -0
- aipt_v2/tools/scanners/__init__.py +343 -0
- aipt_v2/tools/scanners/acunetix_tool.py +712 -0
- aipt_v2/tools/scanners/burp_tool.py +631 -0
- aipt_v2/tools/scanners/config.py +156 -0
- aipt_v2/tools/scanners/nessus_tool.py +588 -0
- aipt_v2/tools/scanners/zap_tool.py +612 -0
- aipt_v2/tools/terminal/__init__.py +5 -0
- aipt_v2/tools/terminal/terminal_actions.py +37 -0
- aipt_v2/tools/terminal/terminal_manager.py +153 -0
- aipt_v2/tools/terminal/terminal_session.py +449 -0
- aipt_v2/tools/tool_processing.py +108 -0
- aipt_v2/utils/__init__.py +17 -0
- aipt_v2/utils/logging.py +202 -0
- aipt_v2/utils/model_manager.py +187 -0
- aipt_v2/utils/searchers/__init__.py +269 -0
- aipt_v2/verify_install.py +793 -0
- aiptx-2.0.7.dist-info/METADATA +345 -0
- aiptx-2.0.7.dist-info/RECORD +187 -0
- aiptx-2.0.7.dist-info/WHEEL +5 -0
- aiptx-2.0.7.dist-info/entry_points.txt +7 -0
- aiptx-2.0.7.dist-info/licenses/LICENSE +21 -0
- aiptx-2.0.7.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""AIPT Data Models"""
|
|
2
|
+
|
|
3
|
+
from .findings import Finding, Severity, VulnerabilityType
|
|
4
|
+
from .scan_config import ScanConfig, ScanMode
|
|
5
|
+
from .phase_result import PhaseResult, Phase
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"Finding",
|
|
9
|
+
"Severity",
|
|
10
|
+
"VulnerabilityType",
|
|
11
|
+
"ScanConfig",
|
|
12
|
+
"ScanMode",
|
|
13
|
+
"PhaseResult",
|
|
14
|
+
"Phase",
|
|
15
|
+
]
|
|
@@ -0,0 +1,295 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AIPT Finding Model - Unified vulnerability representation
|
|
3
|
+
|
|
4
|
+
This model represents vulnerabilities discovered by ANY tool in the pipeline:
|
|
5
|
+
- Traditional scanners (Acunetix, Burp, Nuclei, ZAP)
|
|
6
|
+
- AI-autonomous agents (Strix)
|
|
7
|
+
- Manual exploitation attempts
|
|
8
|
+
"""
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from enum import Enum
|
|
14
|
+
from typing import Any
|
|
15
|
+
import hashlib
|
|
16
|
+
import json
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class Severity(Enum):
    """CVSS-aligned severity levels.

    Members order from most to least severe; rich comparison (``__lt__``)
    orders them INFO < LOW < MEDIUM < HIGH < CRITICAL so ``max()`` /
    ``sorted()`` pick the more severe level.
    """
    CRITICAL = "critical"  # CVSS 9.0-10.0
    HIGH = "high"          # CVSS 7.0-8.9
    MEDIUM = "medium"      # CVSS 4.0-6.9
    LOW = "low"            # CVSS 0.1-3.9
    INFO = "info"          # CVSS 0.0 / Informational

    @classmethod
    def from_cvss(cls, score: float) -> "Severity":
        """Convert a CVSS base score to a severity level.

        Scores <= 0 (including negative inputs) map to INFO.
        """
        if score >= 9.0:
            return cls.CRITICAL
        elif score >= 7.0:
            return cls.HIGH
        elif score >= 4.0:
            return cls.MEDIUM
        elif score > 0:
            return cls.LOW
        return cls.INFO

    def __lt__(self, other: object) -> bool:
        """Compare severities by rank (INFO lowest, CRITICAL highest).

        BUG FIX: the original called ``order.index(other)`` unconditionally,
        so comparing against a non-Severity raised ``ValueError`` instead of
        the conventional ``TypeError``. Returning ``NotImplemented`` lets
        Python try the reflected operation / raise ``TypeError`` itself.
        """
        if not isinstance(other, Severity):
            return NotImplemented
        # Ascending rank; a tuple literal so it is not rebuilt element-by-element
        # into enum members (plain attribute assignment is not allowed in Enum).
        ranking = (Severity.INFO, Severity.LOW, Severity.MEDIUM,
                   Severity.HIGH, Severity.CRITICAL)
        return ranking.index(self) < ranking.index(other)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class VulnerabilityType(Enum):
    """OWASP Top 10 (2021) aligned vulnerability categories.

    The string values are serialized into reports (``Finding.to_dict``) and
    are part of the deduplication fingerprint, so they must remain stable.
    """
    # A01:2021 - Broken Access Control
    IDOR = "idor"
    BROKEN_ACCESS_CONTROL = "broken_access_control"
    PRIVILEGE_ESCALATION = "privilege_escalation"

    # A02:2021 - Cryptographic Failures
    WEAK_CRYPTO = "weak_crypto"
    SENSITIVE_DATA_EXPOSURE = "sensitive_data_exposure"

    # A03:2021 - Injection
    SQL_INJECTION = "sql_injection"
    COMMAND_INJECTION = "command_injection"
    LDAP_INJECTION = "ldap_injection"
    XPATH_INJECTION = "xpath_injection"
    NOSQL_INJECTION = "nosql_injection"

    # A04:2021 - Insecure Design
    BUSINESS_LOGIC_FLAW = "business_logic_flaw"

    # A05:2021 - Security Misconfiguration
    MISCONFIGURATION = "misconfiguration"
    DEFAULT_CREDENTIALS = "default_credentials"
    DIRECTORY_LISTING = "directory_listing"

    # A06:2021 - Vulnerable Components
    OUTDATED_COMPONENT = "outdated_component"
    KNOWN_CVE = "known_cve"

    # A07:2021 - Authentication Failures
    AUTH_BYPASS = "auth_bypass"
    WEAK_PASSWORD = "weak_password"
    SESSION_FIXATION = "session_fixation"

    # A08:2021 - Software Integrity Failures
    INSECURE_DESERIALIZATION = "insecure_deserialization"

    # A09:2021 - Logging & Monitoring Failures
    INSUFFICIENT_LOGGING = "insufficient_logging"

    # A10:2021 - SSRF
    SSRF = "ssrf"

    # Cross-Site Scripting (separate category)
    XSS_REFLECTED = "xss_reflected"
    XSS_STORED = "xss_stored"
    XSS_DOM = "xss_dom"

    # Other
    OPEN_REDIRECT = "open_redirect"
    FILE_INCLUSION = "file_inclusion"
    FILE_UPLOAD = "file_upload"
    XXE = "xxe"
    CORS_MISCONFIGURATION = "cors_misconfiguration"
    CSRF = "csrf"
    INFORMATION_DISCLOSURE = "information_disclosure"
    RCE = "rce"

    # Catch-all; Finding.from_dict falls back to this for unknown inputs
    OTHER = "other"
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
@dataclass
class Finding:
    """
    Unified vulnerability finding from any source.

    This is the core data structure that normalizes findings from:
    - Acunetix (JSON API responses)
    - Burp Suite (XML/JSON exports)
    - Nuclei (JSON output)
    - ZAP (JSON API responses)
    - Strix (AI agent reports)

    Findings are deduplicated via :attr:`fingerprint`, a hash of the
    normalized URL, parameter, and vulnerability type.
    """

    # Core identification
    title: str
    severity: Severity
    vuln_type: VulnerabilityType

    # Location
    url: str
    parameter: str | None = None
    method: str = "GET"

    # Evidence
    description: str = ""
    evidence: str = ""
    request: str | None = None
    response: str | None = None

    # Source tracking
    source: str = "unknown"  # acunetix, burp, nuclei, zap, aipt, manual
    source_id: str | None = None  # Original ID from source scanner

    # Validation
    confirmed: bool = False
    exploited: bool = False
    poc_command: str | None = None

    # Metadata
    cvss_score: float | None = None
    cwe_id: str | None = None
    cve_ids: list[str] = field(default_factory=list)
    references: list[str] = field(default_factory=list)

    # Remediation
    remediation: str = ""

    # Timestamps
    # NOTE(review): naive UTC timestamp; datetime.utcnow() is deprecated since
    # Python 3.12. Switching to datetime.now(timezone.utc) would change the
    # isoformat() output consumed by to_dict/from_dict, so it is kept as-is.
    discovered_at: datetime = field(default_factory=datetime.utcnow)

    # AI-specific fields (for Strix findings)
    ai_reasoning: str | None = None
    ai_confidence: float | None = None  # 0.0 to 1.0

    def __post_init__(self):
        """Generate unique fingerprint for deduplication.

        The fingerprint is computed once at construction; mutating url,
        parameter, or vuln_type afterwards will NOT refresh it.
        """
        self._fingerprint = self._generate_fingerprint()

    def _generate_fingerprint(self) -> str:
        """
        Generate a unique fingerprint for finding deduplication.

        Two findings are considered duplicates if they have the same:
        - URL (normalized: trailing slashes stripped, lowercased)
        - Parameter
        - Vulnerability type
        """
        normalized_url = self.url.rstrip("/").lower()
        # A None parameter serializes as the literal "None", which is fine for
        # hashing purposes — it just needs to be stable.
        data = f"{normalized_url}:{self.parameter}:{self.vuln_type.value}"
        return hashlib.sha256(data.encode()).hexdigest()[:16]

    @property
    def fingerprint(self) -> str:
        """Stable 16-hex-char deduplication key (see _generate_fingerprint)."""
        return self._fingerprint

    def is_duplicate_of(self, other: "Finding") -> bool:
        """Check if this finding is a duplicate of another"""
        return self.fingerprint == other.fingerprint

    def merge_with(self, other: "Finding") -> "Finding":
        """
        Merge two duplicate findings, keeping the best evidence from both.
        Prefers confirmed/exploited findings, higher confidence, more details.
        """
        # Prefer the confirmed/exploited finding as the base record.
        if other.confirmed and not self.confirmed:
            base, supplement = other, self
        elif other.exploited and not self.exploited:
            base, supplement = other, self
        else:
            base, supplement = self, other

        # Merge evidence (append supplement's evidence if it adds anything new)
        merged_evidence = base.evidence
        if supplement.evidence and supplement.evidence not in merged_evidence:
            merged_evidence = f"{merged_evidence}\n\n--- Additional Evidence ---\n{supplement.evidence}"

        # Merge sources
        sources = {base.source, supplement.source}
        merged_source = ", ".join(sorted(sources))

        # Take highest confidence (0-valued maxima collapse to None)
        confidence = max(
            base.ai_confidence or 0,
            supplement.ai_confidence or 0,
        ) or None

        return Finding(
            title=base.title,
            # BUG FIX: original compared base vs. *other*; when base IS other
            # (the confirmed/exploited branch) self's higher severity was lost.
            # base + supplement always covers both self and other.
            severity=max(base.severity, supplement.severity),
            vuln_type=base.vuln_type,
            url=base.url,
            parameter=base.parameter,
            method=base.method,
            description=base.description or supplement.description,
            evidence=merged_evidence,
            request=base.request or supplement.request,
            response=base.response or supplement.response,
            source=merged_source,
            # BUG FIX: source_id was silently dropped by the merge.
            source_id=base.source_id or supplement.source_id,
            confirmed=base.confirmed or supplement.confirmed,
            exploited=base.exploited or supplement.exploited,
            poc_command=base.poc_command or supplement.poc_command,
            cvss_score=base.cvss_score or supplement.cvss_score,
            cwe_id=base.cwe_id or supplement.cwe_id,
            cve_ids=list(set(base.cve_ids + supplement.cve_ids)),
            references=list(set(base.references + supplement.references)),
            remediation=base.remediation or supplement.remediation,
            # BUG FIX: merge previously reset discovery time to "now";
            # keep the earliest of the two original discovery timestamps.
            discovered_at=min(base.discovered_at, supplement.discovered_at),
            ai_reasoning=base.ai_reasoning or supplement.ai_reasoning,
            ai_confidence=confidence,
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization"""
        return {
            "fingerprint": self.fingerprint,
            "title": self.title,
            "severity": self.severity.value,
            "vuln_type": self.vuln_type.value,
            "url": self.url,
            "parameter": self.parameter,
            "method": self.method,
            "description": self.description,
            "evidence": self.evidence,
            "request": self.request,
            "response": self.response,
            "source": self.source,
            "source_id": self.source_id,
            "confirmed": self.confirmed,
            "exploited": self.exploited,
            "poc_command": self.poc_command,
            "cvss_score": self.cvss_score,
            "cwe_id": self.cwe_id,
            "cve_ids": self.cve_ids,
            "references": self.references,
            "remediation": self.remediation,
            "discovered_at": self.discovered_at.isoformat(),
            "ai_reasoning": self.ai_reasoning,
            "ai_confidence": self.ai_confidence,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "Finding":
        """Create Finding from dictionary (inverse of to_dict).

        Raises KeyError if "title", "severity", or "url" is missing, and
        ValueError for an unrecognized severity value; unknown vuln_type
        values fall back to VulnerabilityType.OTHER via the "other" default.
        """
        return cls(
            title=data["title"],
            severity=Severity(data["severity"]),
            vuln_type=VulnerabilityType(data.get("vuln_type", "other")),
            url=data["url"],
            parameter=data.get("parameter"),
            method=data.get("method", "GET"),
            description=data.get("description", ""),
            evidence=data.get("evidence", ""),
            request=data.get("request"),
            response=data.get("response"),
            source=data.get("source", "unknown"),
            source_id=data.get("source_id"),
            confirmed=data.get("confirmed", False),
            exploited=data.get("exploited", False),
            poc_command=data.get("poc_command"),
            cvss_score=data.get("cvss_score"),
            cwe_id=data.get("cwe_id"),
            cve_ids=data.get("cve_ids", []),
            references=data.get("references", []),
            remediation=data.get("remediation", ""),
            discovered_at=datetime.fromisoformat(data["discovered_at"]) if "discovered_at" in data else datetime.utcnow(),
            ai_reasoning=data.get("ai_reasoning"),
            ai_confidence=data.get("ai_confidence"),
        )
|
|
@@ -0,0 +1,224 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AIPT Phase Result Model
|
|
3
|
+
|
|
4
|
+
Tracks results and status for each phase of the scanning pipeline.
|
|
5
|
+
"""
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from enum import Enum
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from .findings import Finding
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class Phase(Enum):
    """
    AIPT Pipeline Phases

    The pipeline executes in order:
    1. RECON - Asset discovery and reconnaissance
    2. SCAN - Traditional vulnerability scanning (Acunetix, Burp, Nuclei, ZAP)
    3. AI_PENTEST - AI-autonomous penetration testing (Strix)
    4. EXPLOIT - Exploitation and validation of findings
    5. REPORT - Report generation and delivery

    Values are serialized into reports (see PipelineResult.to_dict), so they
    must stay stable.
    """
    RECON = "recon"
    SCAN = "scan"
    AI_PENTEST = "ai_pentest"  # NEW: Strix integration
    EXPLOIT = "exploit"
    REPORT = "report"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class PhaseStatus(Enum):
    """Status of a pipeline phase (transitions driven by PhaseResult methods)."""
    PENDING = "pending"        # initial state (PhaseResult default)
    RUNNING = "running"        # set by PhaseResult.start()
    COMPLETED = "completed"    # set by PhaseResult.complete()
    FAILED = "failed"          # set by PhaseResult.fail()
    SKIPPED = "skipped"        # set by PhaseResult.skip()
    TIMEOUT = "timeout"        # presumably set by the pipeline runner — no setter in this module
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass
class PhaseResult:
    """
    Result of a single pipeline phase.

    Contains all findings, errors, and metadata from phase execution.
    Lifecycle: construct with a Phase, then call start() followed by exactly
    one of complete(), fail(), or skip().
    """

    phase: Phase
    status: PhaseStatus = PhaseStatus.PENDING

    # Findings discovered in this phase
    findings: list[Finding] = field(default_factory=list)

    # Timing (None until start()/complete()/fail()/skip() are called)
    started_at: datetime | None = None
    completed_at: datetime | None = None

    # Error tracking
    errors: list[str] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)

    # Phase-specific data (also carries "skip_reason" after skip())
    metadata: dict[str, Any] = field(default_factory=dict)

    # Scanner results (for SCAN phase)
    scanner_results: dict[str, Any] = field(default_factory=dict)

    # AI agent traces (for AI_PENTEST phase)
    agent_traces: list[dict[str, Any]] = field(default_factory=list)

    def start(self) -> None:
        """Mark phase as started"""
        self.status = PhaseStatus.RUNNING
        self.started_at = datetime.utcnow()

    def complete(self) -> None:
        """Mark phase as completed"""
        self.status = PhaseStatus.COMPLETED
        self.completed_at = datetime.utcnow()

    def fail(self, error: str) -> None:
        """Mark phase as failed and record the error message"""
        self.status = PhaseStatus.FAILED
        self.completed_at = datetime.utcnow()
        self.errors.append(error)

    def skip(self, reason: str) -> None:
        """Mark phase as skipped and record the reason in metadata"""
        self.status = PhaseStatus.SKIPPED
        self.completed_at = datetime.utcnow()
        self.metadata["skip_reason"] = reason

    def add_finding(self, finding: Finding) -> None:
        """Add a finding to this phase"""
        self.findings.append(finding)

    def add_findings(self, findings: list[Finding]) -> None:
        """Add multiple findings"""
        self.findings.extend(findings)

    @property
    def duration_seconds(self) -> float | None:
        """Phase duration in seconds, or None if not both started and finished"""
        if self.started_at and self.completed_at:
            return (self.completed_at - self.started_at).total_seconds()
        return None

    @property
    def finding_counts(self) -> dict[str, int]:
        """Get finding counts keyed by severity value (all severities present)"""
        # Local import mirrors the module's style of deferring Severity lookup.
        from .findings import Severity
        counts = {s.value: 0 for s in Severity}
        for finding in self.findings:
            counts[finding.severity.value] += 1
        return counts

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization"""
        return {
            "phase": self.phase.value,
            "status": self.status.value,
            "findings": [f.to_dict() for f in self.findings],
            "finding_counts": self.finding_counts,
            "started_at": self.started_at.isoformat() if self.started_at else None,
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
            "duration_seconds": self.duration_seconds,
            "errors": self.errors,
            "warnings": self.warnings,
            "metadata": self.metadata,
            # BUG FIX: scanner_results and agent_traces were declared fields but
            # never serialized, so SCAN / AI_PENTEST payloads were lost on export.
            "scanner_results": self.scanner_results,
            "agent_traces": self.agent_traces,
        }
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
@dataclass
class PipelineResult:
    """
    Complete result of an AIPT scan pipeline.

    Aggregates results from all phases; get_all_findings() performs
    fingerprint-based deduplication across phases on demand.
    """

    scan_id: str
    target: str
    started_at: datetime = field(default_factory=datetime.utcnow)
    # NOTE(review): nothing in this class sets completed_at — the pipeline
    # runner is expected to assign it; verify against callers.
    completed_at: datetime | None = None

    # Phase results keyed by phase
    phases: dict[Phase, PhaseResult] = field(default_factory=dict)

    # NOTE(review): legacy field — never read or written by this class; kept
    # only so the generated __init__ signature stays backward-compatible.
    _all_findings: list[Finding] = field(default_factory=list)

    def add_phase_result(self, result: PhaseResult) -> None:
        """Record a phase result (replaces any previous result for that phase).

        Deduplication/merging happens lazily in get_all_findings(), not here.
        """
        self.phases[result.phase] = result

    def get_all_findings(self, deduplicate: bool = True) -> list[Finding]:
        """
        Get all findings across all phases.

        If deduplicate=True, findings sharing a fingerprint are merged via
        Finding.merge_with, so evidence from different sources is combined.
        """
        all_findings: list[Finding] = []
        for phase_result in self.phases.values():
            all_findings.extend(phase_result.findings)

        if not deduplicate:
            return all_findings

        # Deduplicate by fingerprint, merging duplicates as they are seen.
        unique_findings: dict[str, Finding] = {}
        for finding in all_findings:
            existing = unique_findings.get(finding.fingerprint)
            if existing is not None:
                unique_findings[finding.fingerprint] = existing.merge_with(finding)
            else:
                unique_findings[finding.fingerprint] = finding

        return list(unique_findings.values())

    def get_findings_by_severity(self) -> dict[str, list[Finding]]:
        """Group deduplicated findings by severity value (all severities present)"""
        from .findings import Severity
        grouped = {s.value: [] for s in Severity}
        for finding in self.get_all_findings():
            grouped[finding.severity.value].append(finding)
        return grouped

    def get_summary(self) -> dict[str, Any]:
        """Get executive summary of the scan"""
        from .findings import Severity

        findings = self.get_all_findings()

        # Single pass over the (possibly merged) findings instead of one
        # list-comprehension scan per statistic as before.
        severity_counts = {s.value: 0 for s in Severity}
        confirmed = exploited = ai_found = 0
        for f in findings:
            severity_counts[f.severity.value] += 1
            if f.confirmed:
                confirmed += 1
            if f.exploited:
                exploited += 1
            if f.source == "aipt":
                ai_found += 1

        statuses = [p.status for p in self.phases.values()]

        return {
            "scan_id": self.scan_id,
            "target": self.target,
            "total_findings": len(findings),
            "critical": severity_counts["critical"],
            "high": severity_counts["high"],
            "medium": severity_counts["medium"],
            "low": severity_counts["low"],
            "info": severity_counts["info"],
            "confirmed_findings": confirmed,
            "exploited_findings": exploited,
            "ai_findings": ai_found,
            "phases_completed": sum(1 for s in statuses if s == PhaseStatus.COMPLETED),
            "phases_failed": sum(1 for s in statuses if s == PhaseStatus.FAILED),
        }

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization"""
        return {
            "scan_id": self.scan_id,
            "target": self.target,
            "started_at": self.started_at.isoformat(),
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
            "summary": self.get_summary(),
            "phases": {p.value: r.to_dict() for p, r in self.phases.items()},
            "all_findings": [f.to_dict() for f in self.get_all_findings()],
        }
|