opencode-api-security-testing 2.0.0 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +30 -24
- package/SKILL.md +1797 -0
- package/core/advanced_recon.py +788 -0
- package/core/agentic_analyzer.py +445 -0
- package/core/analyzers/api_parser.py +210 -0
- package/core/analyzers/response_analyzer.py +212 -0
- package/core/analyzers/sensitive_finder.py +184 -0
- package/core/api_fuzzer.py +422 -0
- package/core/api_interceptor.py +525 -0
- package/core/api_parser.py +955 -0
- package/core/browser_tester.py +479 -0
- package/core/cloud_storage_tester.py +1330 -0
- package/core/collectors/__init__.py +23 -0
- package/core/collectors/api_path_finder.py +300 -0
- package/core/collectors/browser_collect.py +645 -0
- package/core/collectors/browser_collector.py +411 -0
- package/core/collectors/http_client.py +111 -0
- package/core/collectors/js_collector.py +490 -0
- package/core/collectors/js_parser.py +780 -0
- package/core/collectors/url_collector.py +319 -0
- package/core/context_manager.py +682 -0
- package/core/deep_api_tester_v35.py +844 -0
- package/core/deep_api_tester_v55.py +366 -0
- package/core/dynamic_api_analyzer.py +532 -0
- package/core/http_client.py +179 -0
- package/core/models.py +296 -0
- package/core/orchestrator.py +890 -0
- package/core/prerequisite.py +227 -0
- package/core/reasoning_engine.py +1042 -0
- package/core/response_classifier.py +606 -0
- package/core/runner.py +938 -0
- package/core/scan_engine.py +599 -0
- package/core/skill_executor.py +435 -0
- package/core/skill_executor_v2.py +670 -0
- package/core/skill_executor_v3.py +704 -0
- package/core/smart_analyzer.py +687 -0
- package/core/strategy_pool.py +707 -0
- package/core/testers/auth_tester.py +264 -0
- package/core/testers/idor_tester.py +200 -0
- package/core/testers/sqli_tester.py +211 -0
- package/core/testing_loop.py +655 -0
- package/core/utils/base_path_dict.py +255 -0
- package/core/utils/payload_lib.py +167 -0
- package/core/utils/ssrf_detector.py +220 -0
- package/core/verifiers/vuln_verifier.py +536 -0
- package/package.json +17 -13
- package/references/asset-discovery.md +119 -612
- package/references/graphql-guidance.md +65 -641
- package/references/intake.md +84 -0
- package/references/report-template.md +131 -38
- package/references/rest-guidance.md +55 -526
- package/references/severity-model.md +52 -264
- package/references/test-matrix.md +65 -263
- package/references/validation.md +53 -400
- package/scripts/postinstall.js +46 -0
- package/src/index.ts +259 -275
- package/agents/cyber-supervisor.md +0 -55
- package/agents/probing-miner.md +0 -42
- package/agents/resource-specialist.md +0 -31
- package/commands/api-security-testing-scan.md +0 -59
- package/commands/api-security-testing-test.md +0 -49
- package/commands/api-security-testing.md +0 -72
- package/tsconfig.json +0 -17
|
@@ -0,0 +1,445 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Agentic Security Analyzer - 智能安全分析器
|
|
4
|
+
不是纯脚本,而是有理解能力的 agent
|
|
5
|
+
|
|
6
|
+
核心思维:
|
|
7
|
+
1. 观察现象 -> 2. 理解原因 -> 3. 推断本质 -> 4. 调整策略
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
import json
|
|
12
|
+
from typing import Dict, List, Set, Tuple, Optional, Any
|
|
13
|
+
from dataclasses import dataclass, field
|
|
14
|
+
from enum import Enum
|
|
15
|
+
import requests
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class UnderstandingLevel(Enum):
    """Depth of understanding reached by the analyzer, from raw observation
    to strategy adjustment."""
    SURFACE = "surface"      # surface phenomena (plain-script level)
    CONTEXT = "context"      # contextual understanding (agent level)
    CAUSAL = "causal"        # causal reasoning (advanced agent)
    STRATEGIC = "strategic"  # strategic adjustment (expert level)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class Finding:
    """One analysis finding: an observation plus its causal interpretation."""
    what: str          # what was observed
    so_what: str       # what it means (the key insight!)
    why: str           # why it happens
    implication: str   # impact on further testing
    strategy: str      # adjusted testing strategy
    confidence: float  # confidence score, 0-1
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class AnalysisResult:
    """Aggregated outcome of one analysis run."""
    target: str
    understanding_level: UnderstandingLevel

    # Raw phenomena (the URLs that were probed)
    observations: List[str] = field(default_factory=list)

    # Findings (causal interpretations of the observations)
    findings: List[Finding] = field(default_factory=list)

    # Inferences drawn from the findings
    inferences: List[str] = field(default_factory=list)

    # Final conclusion
    conclusion: str = ""

    # Adjusted testing strategy
    adjusted_strategy: List[str] = field(default_factory=list)

    # Endpoints that can be tested directly
    testable_endpoints: List[Dict] = field(default_factory=list)

    # Endpoints that cannot be reached (internal network / elsewhere)
    unreachable_endpoints: List[Dict] = field(default_factory=list)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class AgenticAnalyzer:
    """
    Intelligent analyzer.

    Instead of simply labelling each response, it:
    1. observes multiple phenomena
    2. looks for repeating patterns
    3. infers the underlying cause
    4. adjusts the testing strategy
    """

    # Fixed byte size of the known SPA fallback page for this target;
    # HTML responses of exactly this length are counted as a repeating
    # pattern in observe().  (Was an inline magic number.)
    _SPA_FALLBACK_LENGTH = 678

    def __init__(self, session: requests.Session = None):
        self.session = session or requests.Session()
        self.observations: List[Dict] = []  # raw per-URL observations
        self.patterns: Dict[str, int] = {}  # pattern key -> occurrence count

    def observe(self, url: str, response: requests.Response) -> Dict:
        """Record one response and return the observation dict for it."""
        obs = {
            'url': url,
            'status': response.status_code,
            'content_type': response.headers.get('Content-Type', ''),
            'length': len(response.content),
            'is_html': '<!doctype' in response.text.lower() or '<html' in response.text.lower(),
            'is_json': self._is_json(response.text),
            'content_preview': response.text[:200],
        }

        # SPA characteristics (webpack chunks, #app/#root mount nodes, ...)
        obs['spa_indicators'] = self._detect_spa_indicators(response.text)

        # API characteristics (swagger/openapi keywords, /api/ paths, ...)
        obs['api_indicators'] = self._detect_api_indicators(response.text)

        self.observations.append(obs)

        # Track the "every path returns the same fixed-size HTML" pattern.
        if obs['is_html'] and obs['length'] == self._SPA_FALLBACK_LENGTH:
            key = f"spa_fallback_{obs['length']}"
            self.patterns[key] = self.patterns.get(key, 0) + 1

        return obs

    def _is_json(self, content: str) -> bool:
        """Return True if *content* parses as JSON."""
        try:
            json.loads(content)
            return True
        except (ValueError, TypeError):
            # ValueError covers json.JSONDecodeError; TypeError covers
            # non-string input.  (Was a bare ``except``.)
            return False

    def _detect_spa_indicators(self, content: str) -> List[str]:
        """Collect markers suggesting the page is a single-page-app shell."""
        markers = [
            ('chunk-vendors', 'webpack_chunk_vendors'),
            ('<div id="app">', 'div_id_app'),
            ('<div id="root">', 'div_id_root'),
            ('<noscript>', 'noscript_tag'),
            ('vue', 'vue_keyword'),
            ('react', 'react_keyword'),
        ]
        content_lower = content.lower()
        return [label for needle, label in markers if needle in content_lower]

    def _detect_api_indicators(self, content: str) -> List[str]:
        """Collect markers suggesting API documentation or API traffic."""
        checks = [
            ('swagger_keyword', ('"swagger"', "'swagger'")),
            ('openapi_keyword', ('"openapi"', "'openapi'")),
            ('paths_keyword', ('"paths"',)),
            ('api_path', ('/api/',)),
            ('graphql_schema', ('__schema',)),
        ]
        content_lower = content.lower()
        return [label for label, needles in checks
                if any(n in content_lower for n in needles)]

    def reason(self) -> AnalysisResult:
        """
        Causal reasoning over the collected observations.

        From observation to inference:
        1. Do all paths return HTML of exactly the same size?
        2. SPA characteristics even though a JSON file was requested?
        3. Which backend addresses do the JS bundles point at?
           (handled separately by from_js_analysis)
        """
        result = AnalysisResult(
            target="current_target",
            understanding_level=UnderstandingLevel.SURFACE
        )

        # Collect the probed URLs as raw observations.
        result.observations = [o['url'] for o in self.observations]

        # === Causal reasoning ===

        # Pattern 1: every HTML response has exactly the same byte length.
        html_observations = [o for o in self.observations if o.get('is_html')]
        if len(html_observations) >= 3:
            lengths = set(o['length'] for o in html_observations)
            if len(lengths) == 1:
                length = next(iter(lengths))

                finding = Finding(
                    what=f"所有 {len(html_observations)} 个不同路径都返回完全相同大小的 HTML ({length} 字节)",
                    so_what="这是典型的 SPA (Vue.js/React) fallback 行为",
                    why="前端服务器(Nginx)配置了 catch-all 路由,将所有请求都路由到 index.html",
                    implication="后端 API 不在当前服务器,可能在内网 (如 118.31.34.105:8081) 或其他地址",
                    strategy="1. 从 JS 中提取后端 API 地址 2. 尝试不同端口/路径探测 3. 如内网地址需要代理访问",
                    confidence=0.95
                )
                result.findings.append(finding)
                result.understanding_level = UnderstandingLevel.CAUSAL

        # Pattern 2: JSON/doc URLs were requested but HTML came back.
        json_requests = [
            o for o in self.observations
            if ('.json' in o['url'] or 'swagger' in o['url'] or 'api-docs' in o['url'])
            and not o.get('is_json')
        ]

        if len(json_requests) >= 2:
            finding = Finding(
                what=f"请求了 {len(json_requests)} 个 JSON/YAML 文件,但全部返回 HTML",
                so_what="这些路径在服务端不存在,是前端在模拟",
                why="后端 API 服务器与前端分离,SPA fallback 导致请求被发到前端",
                implication="无法通过前端服务器访问真正的 API 文档和服务",
                strategy="1. 识别后端真实地址(从 JS 或网络请求中)2. 直接测试后端地址 3. 使用代理工具(如 Burp)监控真实请求",
                confidence=0.9
            )
            result.findings.append(finding)

        # NOTE: the previous "pattern 3" loop here iterated observations with
        # api_indicators and did nothing (``pass``); backend-address reasoning
        # lives in from_js_analysis(), so the dead loop was removed.

        # === Conclusion ===
        if result.findings:
            result.conclusion = self._generate_conclusion(result.findings)
            result.understanding_level = UnderstandingLevel.CAUSAL

        # Fold each finding's strategy into the adjusted plan.
        for finding in result.findings:
            result.adjusted_strategy.append(finding.strategy)

        return result

    def _generate_conclusion(self, findings: List[Finding]) -> str:
        """Merge findings into one summary string, preferring high-confidence
        findings and falling back to medium-confidence ones."""
        if not findings:
            return "未发现明显问题"

        conclusions = [f"[高置信度] {f.so_what}" for f in findings if f.confidence >= 0.9]
        if not conclusions:
            conclusions = [f"[中置信度] {f.so_what}" for f in findings]

        return "; ".join(conclusions)

    def from_js_analysis(self, js_findings: Dict) -> AnalysisResult:
        """
        Reason over the results of JS bundle analysis.

        Args:
            js_findings: {
                'base_urls': [...],
                'api_paths': [...],
                'sensitive_data': [...],
                'backend_indicators': [...]
            }
        """
        result = AnalysisResult(
            target="discovered_from_js",
            understanding_level=UnderstandingLevel.CONTEXT
        )

        base_urls = js_findings.get('base_urls', [])

        # Split discovered base URLs into internal vs externally reachable.
        internal_ips = [u for u in base_urls if self._is_internal_ip(u)]
        external_apis = [u for u in base_urls if not self._is_internal_ip(u)]

        if internal_ips:
            finding = Finding(
                what=f"从 JS 中发现 {len(internal_ips)} 个后端地址: {internal_ips}",
                so_what="后端 API 在内网环境,前端无法直接访问",
                why="系统采用前后端分离架构,后端部署在内网",
                implication="无法从外部直接测试后端 API,需要通过代理或内网访问",
                strategy="1. 标记内网地址 2. 建议通过代理工具访问 3. 或寻找外网暴露的测试环境",
                confidence=0.95
            )
            result.findings.append(finding)
            result.unreachable_endpoints = [{'url': ip, 'reason': '内网地址'} for ip in internal_ips]

        if external_apis:
            result.testable_endpoints = [{'url': url, 'accessible': True} for url in external_apis]

        if result.findings:
            result.conclusion = self._generate_conclusion(result.findings)
            result.adjusted_strategy = [f.strategy for f in result.findings]

        return result

    def _is_internal_ip(self, url: str) -> bool:
        """Return True when *url* points at a non-routable / internal host."""
        internal_patterns = [
            r'10\.\d+\.\d+\.\d+',
            r'172\.(1[6-9]|2\d|3[01])\.\d+\.\d+',
            r'192\.168\.\d+\.\d+',
            r'127\.\d+\.\d+\.\d+',
            r'localhost',
            r'.*\.local',
            r'118\.31\.34\.105',  # known internal backend for this engagement
        ]

        return any(re.search(pattern, url) for pattern in internal_patterns)
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def analyze_with_understanding(target_url: str, session: Optional[requests.Session] = None) -> AnalysisResult:
    """
    Analysis with understanding.

    Does not simply return "SPA_FALLBACK" — instead it asks: why is it an
    SPA fallback?  What does that imply?  How should the strategy change?

    Three phases: probe a set of URLs (observation), run causal reasoning
    over the responses, then mine the page's JS bundles for backend base
    URLs and fold those findings into the result.
    """
    session = session or requests.Session()
    analyzer = AgenticAnalyzer(session)

    print("[*] Phase 1: Observation - 观察多个响应")

    # Probe several different URLs: app routes plus common API-doc paths.
    test_urls = [
        f"{target_url}/login",
        f"{target_url}/admin",
        f"{target_url}/system/swagger.json",
        f"{target_url}/api/v3/api-docs",
        f"{target_url}/api-docs",
        f"{target_url}/swagger.json",
    ]

    for url in test_urls:
        try:
            resp = session.get(url, timeout=5, allow_redirects=True)
            obs = analyzer.observe(url, resp)

            # Print a one-line classification per probe for operator feedback.
            if obs['is_html']:
                print(f"  [HTML {resp.status_code}] {url}")
                if obs['spa_indicators']:
                    print(f"      SPA: {', '.join(obs['spa_indicators'][:3])}")
            elif obs['is_json']:
                print(f"  [JSON {resp.status_code}] {url}")
            else:
                print(f"  [{resp.status_code}] {url}")

        except Exception as e:
            print(f"  [ERR] {url}: {e}")

    print("\n[*] Phase 2: Reasoning - 因果推理")

    # Causal reasoning over the collected observations.
    result = analyzer.reason()

    # Mine JS bundles for backend base URLs.
    print("\n[*] Phase 3: JS Analysis - 分析 JS 中的后端信息")

    try:
        resp = session.get(target_url, timeout=10)
        # Pull <script src="..."> references out of the landing page.
        js_urls = re.findall(r'<script[^>]+src=["\']([^"\']+\.js[^"\']*)["\']', resp.text)

        js_base_urls = set()
        # Only fetch the first three bundles to bound the work.
        for js_url in js_urls[:3]:
            if not js_url.startswith('http'):
                js_url = target_url.rstrip('/') + '/' + js_url

            try:
                js_resp = session.get(js_url, timeout=10)
                content = js_resp.text

                # Extract baseURL / apiUrl assignments from the bundle.
                base_matches = re.findall(r'(?:baseURL|apiUrl)\s*[:=]\s*["\']([^"\']+)["\']', content)
                js_base_urls.update(base_matches)

            except:
                # Best effort: a bundle that fails to download is skipped.
                pass

        if js_base_urls:
            print(f"  [*] Found base URLs in JS: {js_base_urls}")

            js_findings = {
                'base_urls': list(js_base_urls),
                'api_paths': [],
                'sensitive_data': [],
            }

            js_result = analyzer.from_js_analysis(js_findings)

            # Merge the JS-derived findings into the main result.
            result.findings.extend(js_result.findings)
            result.unreachable_endpoints.extend(js_result.unreachable_endpoints)
            result.testable_endpoints.extend(js_result.testable_endpoints)

            if js_result.findings:
                result.conclusion = js_result.conclusion
                result.adjusted_strategy = js_result.adjusted_strategy
                result.understanding_level = UnderstandingLevel.CAUSAL

    except Exception as e:
        print(f"  [!] JS analysis error: {e}")

    return result
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
if __name__ == "__main__":
    # CLI entry point: single optional argument is the target base URL.
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else "http://49.65.100.160:6004"

    result = analyze_with_understanding(target)

    # Pretty-print the full analysis report.
    print("\n" + "=" * 70)
    print("  Agentic Analysis Results")
    print("=" * 70)

    print(f"\n[*] Understanding Level: {result.understanding_level.value}")

    if result.findings:
        print(f"\n[*] Findings (因果分析):")
        for i, f in enumerate(result.findings, 1):
            print(f"\n  [{i}] CONFIDENCE: {f.confidence*100:.0f}%")
            print(f"      WHAT: {f.what}")
            print(f"      SO WHAT: {f.so_what}")
            print(f"      WHY: {f.why}")
            print(f"      IMPLICATION: {f.implication}")
            print(f"      STRATEGY: {f.strategy}")

    if result.conclusion:
        print(f"\n[*] CONCLUSION: {result.conclusion}")

    if result.adjusted_strategy:
        print(f"\n[*] ADJUSTED STRATEGY:")
        for s in result.adjusted_strategy:
            print(f"  - {s}")

    if result.unreachable_endpoints:
        print(f"\n[*] UNREACHABLE ENDPOINTS ({len(result.unreachable_endpoints)}):")
        for ep in result.unreachable_endpoints:
            print(f"  - {ep['url']} ({ep['reason']})")

    if result.testable_endpoints:
        print(f"\n[*] TESTABLE ENDPOINTS ({len(result.testable_endpoints)}):")
        for ep in result.testable_endpoints:
            print(f"  - {ep['url']}")
|
|
@@ -0,0 +1,210 @@
|
|
|
1
|
+
"""
|
|
2
|
+
API端点解析 - 解析API结构
|
|
3
|
+
输入: {apis, base_url}
|
|
4
|
+
输出: {endpoints, methods, parameters}
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import re
|
|
8
|
+
import json
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def api_parser(config):
    """
    Parse API endpoint structure from captured requests.

    Input:
        apis: Array<{method, url, post_data}> - list of API requests
        base_url?: string - base URL used to relativize paths

    Output:
        endpoints: string[] - endpoint paths (insertion order, de-duplicated)
        methods: object - HTTP methods seen per endpoint
        parameters: object - query parameters discovered per endpoint
    """
    base = config.get('base_url', '')

    result = {
        'endpoints': [],
        'methods': {},
        'parameters': {},
    }

    for entry in config.get('apis', []):
        raw_url = entry.get('url', '')

        # Reduce the URL to its API path; unparseable entries are dropped.
        endpoint_path = extract_path(raw_url, base)
        if not endpoint_path:
            continue

        # First sighting of this path: register it.
        if endpoint_path not in result['endpoints']:
            result['endpoints'].append(endpoint_path)
        verbs = result['methods'].setdefault(endpoint_path, [])

        # Record the HTTP method once per endpoint.
        verb = entry.get('method', 'GET')
        if verb not in verbs:
            verbs.append(verb)

        # Keep any query-string parameters found on the URL.
        query_params = extract_params(raw_url)
        if query_params:
            result['parameters'][endpoint_path] = query_params

    return result
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def extract_path(url, base_url=''):
    """Extract the API path portion of *url*.

    Strips the query string, removes the scheme+host prefix when *base_url*
    identifies it, then keeps only the API-looking part: everything from
    '/api/' onward, or a versioned segment like '/v1/users'.  Returns the
    (possibly unchanged) path string; '' for an empty URL.
    """
    # Drop the query string.
    path = url.split('?')[0]

    # If an absolute base_url is given, strip everything up to and
    # including its host so only the path portion remains.
    if base_url and base_url.startswith('http'):
        from urllib.parse import urlparse
        base_netloc = urlparse(base_url).netloc

        if base_netloc in path:
            idx = path.find(base_netloc)
            path = path[idx + len(base_netloc):]

    # Keep only the API path.
    if '/api/' in path:
        path = path[path.find('/api/'):]
    elif '/v' in path:  # was the redundant "'/v' in path and '/v' in path"
        # Versioned paths such as /v1/users, /v2/orders, ...
        match = re.search(r'/v\d+/\w+', path)
        if match:
            return match.group(0)

    return path
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def extract_params(url):
    """Collect the query-string parameters of *url* as a {name: value} dict.

    Values are kept raw (no URL decoding); a bare token without '=' maps
    to the empty string.  URLs without a query string yield {}.
    """
    if '?' not in url:
        return {}

    # Only the segment between the first and second '?' is considered.
    query = url.split('?')[1]

    parsed = {}
    for token in query.split('&'):
        # Split on the first '=' only, so values may themselves contain '='.
        name, sep, value = token.partition('=')
        parsed[name] = value if sep else ''

    return parsed
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def parse_swagger_json(swagger_content):
    """
    Parse a Swagger 2.x / OpenAPI 3.x document.

    Input:
        swagger_content: string - Swagger/OpenAPI JSON content

    Output:
        endpoints: Array<{path, method, parameters, summary}>; [] when the
        content cannot be parsed.

    Note: both Swagger 2.x and OpenAPI 3.x keep operations under "paths",
    so a single loop handles both.  (The original separate Swagger-2.x
    branch was unreachable: the first `'paths' in data` check already
    matched it.)
    """
    http_methods = {'GET', 'POST', 'PUT', 'DELETE', 'PATCH'}
    endpoints = []

    try:
        data = json.loads(swagger_content)

        for path, operations in data.get('paths', {}).items():
            for method, details in operations.items():
                # Skip non-operation keys such as path-level "parameters".
                if method.upper() not in http_methods:
                    continue
                params = details.get('parameters', [])
                endpoints.append({
                    'path': path,
                    'method': method.upper(),
                    'parameters': [p.get('name') for p in params],
                    'summary': details.get('summary', ''),
                })

    except Exception:
        # Malformed documents are swallowed (original best-effort behavior);
        # whatever was collected before the failure is returned.
        pass

    return endpoints
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def parse_postman_collection(collection_content):
    """
    Parse a Postman Collection export.

    Input:
        collection_content: string - Postman JSON content

    Output:
        endpoints: Array<{name, method, url, body}>; [] on parse failure.

    The original implementation handled exactly two nesting levels, so it
    skipped requests placed at the collection root and emitted bogus
    default entries for nested folders.  Postman collections nest
    arbitrarily, so folders (items carrying an "item" key) are now walked
    recursively; leaves are treated as requests.
    """
    endpoints = []

    def _walk(items, inherited_name):
        # Walk one level of the collection tree; folders recurse, leaves
        # become endpoints.  Non-dict entries are ignored.
        for item in items:
            if not isinstance(item, dict):
                continue
            if 'item' in item:
                # Folder: children inherit this folder's name as a fallback.
                _walk(item.get('item') or [], item.get('name', inherited_name))
            else:
                request = item.get('request', {})
                endpoints.append({
                    'name': item.get('name', inherited_name),
                    'method': request.get('method', 'GET'),
                    # Postman URLs may be plain strings or structured dicts.
                    'url': str(request.get('url', '')),
                    'body': request.get('body', {}),
                })

    try:
        data = json.loads(collection_content)
        _walk(data.get('item', []), '')
    except Exception:
        # Best effort: unparseable collections yield an empty list.
        pass

    return endpoints
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
if __name__ == '__main__':
    # Smoke test: parse two sample captured requests against a base URL.
    result = api_parser({
        'apis': [
            {'method': 'GET', 'url': 'https://api.example.com/api/user/info?id=1'},
            {'method': 'POST', 'url': 'https://api.example.com/api/user/login'},
        ],
        'base_url': 'https://api.example.com'
    })
    print(f"Endpoints: {result['endpoints']}")
    print(f"Methods: {result['methods']}")
|