repomap-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- repomap/__init__.py +320 -0
- repomap/ai.py +1108 -0
- repomap/check.py +1212 -0
- repomap/cli/__init__.py +3 -0
- repomap/cli/__main__.py +12 -0
- repomap/cli/cli.py +2475 -0
- repomap/core.py +730 -0
- repomap/lsp.py +753 -0
- repomap/parser.py +1697 -0
- repomap/ranking.py +639 -0
- repomap/resolver.py +906 -0
- repomap/toolkit.py +850 -0
- repomap/topic.py +600 -0
- repomap_cli-1.0.0.dist-info/METADATA +284 -0
- repomap_cli-1.0.0.dist-info/RECORD +18 -0
- repomap_cli-1.0.0.dist-info/WHEEL +4 -0
- repomap_cli-1.0.0.dist-info/entry_points.txt +2 -0
- repomap_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
repomap/ai.py
ADDED
|
@@ -0,0 +1,1108 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import PurePosixPath
|
|
4
|
+
from typing import TYPE_CHECKING, Any
|
|
5
|
+
|
|
6
|
+
if TYPE_CHECKING:
|
|
7
|
+
from .core import RepoMapEngine
|
|
8
|
+
|
|
9
|
+
from .topic import FileMatch, TestMatch, classify_file_role, get_co_change_neighbors
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Text markers used by the Markdown report renderers below.
# Keys match the risk / visibility / confidence levels produced by the
# engine's analysis; unknown keys fall back at the call sites
# (e.g. RISK_MARK.get(..., "[info]"), VISIBILITY_MARK.get(..., "[private]")).
RISK_MARK = {"high": "[high]", "medium": "[medium]", "low": "[low]"}
VISIBILITY_MARK = {"exported": "[exported]", "public": "[public]", "private": "[private]"}
CONFIDENCE_MARK = {"high": "HIGH", "medium": "MED", "low": "LOW"}
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _truncate_output(output: str, max_chars: int) -> str:
|
|
18
|
+
if max_chars <= 0 or len(output) <= max_chars:
|
|
19
|
+
return output
|
|
20
|
+
return output[:max_chars] + "\n\n…(超出字符限制,已截断)"
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _get_hot_files(project_root: str, days: int = 30) -> set[str]:
|
|
24
|
+
"""通过 git diff 获取近 N 天修改过的文件集合(路径相对于 project_root)。"""
|
|
25
|
+
import subprocess
|
|
26
|
+
from pathlib import Path
|
|
27
|
+
|
|
28
|
+
try:
|
|
29
|
+
# 获取 git root 用于路径转换
|
|
30
|
+
git_root = subprocess.run(
|
|
31
|
+
["git", "rev-parse", "--show-toplevel"],
|
|
32
|
+
cwd=project_root, capture_output=True, text=True, timeout=5,
|
|
33
|
+
).stdout.strip()
|
|
34
|
+
except Exception:
|
|
35
|
+
return set()
|
|
36
|
+
if not git_root:
|
|
37
|
+
return set()
|
|
38
|
+
|
|
39
|
+
try:
|
|
40
|
+
result = subprocess.run(
|
|
41
|
+
["git", "diff", "--name-only", f"HEAD@{{{days}.days ago}}", "HEAD", "--", "."],
|
|
42
|
+
cwd=project_root, capture_output=True, text=True, timeout=10,
|
|
43
|
+
)
|
|
44
|
+
except Exception:
|
|
45
|
+
return set()
|
|
46
|
+
if result.returncode != 0:
|
|
47
|
+
return set()
|
|
48
|
+
|
|
49
|
+
hot_files: set[str] = set()
|
|
50
|
+
if project_root.startswith(git_root):
|
|
51
|
+
rel = str(Path(project_root).relative_to(git_root))
|
|
52
|
+
prefix = f"{rel}/" if rel not in ("", ".") else ""
|
|
53
|
+
else:
|
|
54
|
+
prefix = ""
|
|
55
|
+
|
|
56
|
+
for line in result.stdout.strip().split("\n"):
|
|
57
|
+
path = line.strip()
|
|
58
|
+
if not path:
|
|
59
|
+
continue
|
|
60
|
+
# 去掉 git root 相对前缀,转为 project_root 相对路径
|
|
61
|
+
if prefix and path.startswith(prefix):
|
|
62
|
+
path = path[len(prefix):]
|
|
63
|
+
hot_files.add(path)
|
|
64
|
+
return hot_files
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _project_summary(engine: "RepoMapEngine", granularity: str) -> str:
    """Build a one-line project summary: languages, frameworks and project type.

    Returns an empty string when no known source language is detected.
    NOTE(review): *granularity* is currently unused in this function — confirm
    whether it was intended to scale the summary detail.
    """
    from .parser import EXT_TO_LANG

    # Language distribution, counted per file extension.
    lang_counts: dict[str, int] = {}
    for f in engine.graph.file_symbols:
        ext = PurePosixPath(f).suffix.lower()
        lang = EXT_TO_LANG.get(ext, "")
        if lang:
            lang_counts[lang] = lang_counts.get(lang, 0) + 1
    if not lang_counts:
        return ""
    top_langs = sorted(lang_counts.items(), key=lambda x: -x[1])[:3]
    # Display names for the internal language identifiers.
    lang_names = {
        "python": "Python", "javascript": "JS", "typescript": "TS", "tsx": "TSX",
        "go": "Go", "rust": "Rust", "c": "C", "cpp": "C++", "java": "Java",
        "kotlin": "Kotlin", "swift": "Swift", "c_sharp": "C#", "php": "PHP",
        "ruby": "Ruby", "html": "HTML", "css": "CSS", "json": "JSON",
    }
    lang_str = " + ".join(
        f"{lang_names.get(l, l)} ({c}f)" for l, c in top_langs
    )

    # Framework detection: route frameworks reported by the engine first.
    frameworks: list[str] = []
    routes = engine.list_routes()
    if routes:
        fw_set = {r.framework for r in routes if hasattr(r, "framework")}
        frameworks.extend(sorted(fw_set))
    # Then heuristics over the file list: substring checks on the joined
    # path string (so matches may come from any part of any path).
    file_set = set(engine.graph.file_symbols.keys())
    file_str = " ".join(file_set)
    if any(f.endswith(".rs") for f in file_set):
        if "axum" in file_str:
            frameworks.append("axum")
        if "tauri" in file_str or any("tauri" in f for f in file_set):
            frameworks.append("tauri")
        if "actix" in file_str:
            frameworks.append("actix-web")
    if any(f.endswith((".tsx", ".jsx")) for f in file_set):
        if "next.config" in file_str:
            frameworks.append("next.js")
        elif "vite.config" in file_str:
            frameworks.append("vite")
        else:
            frameworks.append("react")

    # Project type, inferred from entry points and detected routes.
    ptype = "应用"
    entries = engine.entry_points()
    if entries:
        entry_str = " ".join(entries).lower()
        if "main.rs" in entry_str or "main.go" in entry_str or "main.c" in entry_str:
            ptype = "二进制/CLI 应用"
        elif "lib.rs" in entry_str and "main.rs" not in entry_str:
            ptype = "库"
        elif any("server" in e for e in entries) or routes:
            ptype = "Web 服务"
    # A "tui" hint only refines an already-detected CLI binary.
    if "tui" in file_str or any("tui" in f.lower() for f in file_set):
        ptype = "TUI 应用" if ptype == "二进制/CLI 应用" else ptype

    parts = [f"**项目类型**: {ptype}"]
    parts.append(f"**语言**: {lang_str}")
    if frameworks:
        parts.append(f"**框架**: {', '.join(frameworks)}")
    return " | ".join(parts)
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def _auto_granularity(engine: "RepoMapEngine") -> str:
|
|
137
|
+
"""根据项目规模自动选择报告粒度。
|
|
138
|
+
|
|
139
|
+
- full: < 50 个文件 —— 完整报告
|
|
140
|
+
- medium: 50-300 个文件 —— 精简报告
|
|
141
|
+
- compact: > 300 个文件 —— 极简报告
|
|
142
|
+
"""
|
|
143
|
+
file_count = engine.scan_stats.processed_files
|
|
144
|
+
if file_count < 50:
|
|
145
|
+
return "full"
|
|
146
|
+
elif file_count <= 300:
|
|
147
|
+
return "medium"
|
|
148
|
+
else:
|
|
149
|
+
return "compact"
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def render_routes_report(engine: "RepoMapEngine") -> str:
    """Render the HTTP route table (used by the standalone command)."""
    detected = engine.list_routes()
    if detected:
        return _format_route_table(detected)
    return "未检测到 HTTP 路由定义。"
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def _render_route_section(engine: "RepoMapEngine") -> list[str]:
    """Render the API-route section for the overview report (compact form)."""
    detected = engine.list_routes()
    return _format_route_lines(detected, compact=True) if detected else []
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _format_route_lines(routes: list, compact: bool = False) -> list[str]:
|
|
169
|
+
"""格式化路由为 Markdown 行。"""
|
|
170
|
+
from collections import Counter
|
|
171
|
+
|
|
172
|
+
lines = ["## API 路由\n"]
|
|
173
|
+
method_order = {"GET": 0, "POST": 1, "PUT": 2, "PATCH": 3, "DELETE": 4, "HEAD": 5, "OPTIONS": 6, "USE": 7, "ALL": 8}
|
|
174
|
+
routes_sorted = sorted(routes, key=lambda r: (r.file, r.line))
|
|
175
|
+
|
|
176
|
+
if compact and len(routes) > 12:
|
|
177
|
+
# 压缩模式:按模块分组展示概览
|
|
178
|
+
by_file: dict[str, list] = {}
|
|
179
|
+
for r in routes_sorted:
|
|
180
|
+
by_file.setdefault(r.file, []).append(r)
|
|
181
|
+
for file, file_routes in list(by_file.items())[:6]:
|
|
182
|
+
methods = Counter(r.method for r in file_routes)
|
|
183
|
+
method_str = " ".join(f"{m}x{methods[m]}" for m in ("GET", "POST", "PUT", "DELETE", "PATCH") if methods[m])
|
|
184
|
+
lines.append(f"- `{file}` — {len(file_routes)} 个路由({method_str})")
|
|
185
|
+
if len(by_file) > 6:
|
|
186
|
+
lines.append(f"- …还有 {len(by_file) - 6} 个文件包含路由")
|
|
187
|
+
else:
|
|
188
|
+
if len(routes_sorted) > 20 and compact:
|
|
189
|
+
lines.append(f"> (共 {len(routes)} 个路由,以下展示 Top 20)\n")
|
|
190
|
+
lines.append("| Method | Path | Handler | File | Framework |")
|
|
191
|
+
lines.append("|--------|------|---------|------|-----------|")
|
|
192
|
+
for r in routes_sorted[:20]:
|
|
193
|
+
lines.append(f"| {r.method} | `{r.path}` | `{r.handler}` | `{r.file}:{r.line}` | {r.framework} |")
|
|
194
|
+
if len(routes_sorted) > 20 and compact:
|
|
195
|
+
lines.append(f"\n…还有 {len(routes_sorted) - 20} 个路由")
|
|
196
|
+
lines.append("")
|
|
197
|
+
return lines
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def _format_route_table(routes: list) -> str:
    """Render routes as a plain-text Markdown table (full, non-compact form)."""
    return "\n".join(_format_route_lines(routes, compact=False))
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
def _render_co_change_section(engine: "RepoMapEngine") -> list[str]:
    """Render the implicit-coupling section (file pairs that change together in git).

    Seeds from the eight highest-scoring non-test files and asks git history
    for their most frequent co-change neighbors. Returns [] when git is
    unavailable or no pair co-changed at least twice.
    """
    import subprocess
    from pathlib import Path

    project_root = str(engine.project_root)

    # Resolve the git toplevel so analysis paths (relative to project_root)
    # can be translated to git paths (relative to the repo root).
    try:
        toplevel = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            cwd=project_root,
            capture_output=True, text=True, timeout=5,
        )
    except Exception:
        return []
    # Fix: also bail out on a nonzero exit (previously only empty stdout was checked).
    if toplevel.returncode != 0:
        return []
    git_root = toplevel.stdout.strip()
    if not git_root:
        return []
    # Fix: use Path.relative_to rather than str.startswith so sibling
    # directories sharing a name prefix (e.g. /repo vs /repo2) are not conflated.
    try:
        rel = str(Path(project_root).relative_to(git_root))
        git_rel_prefix = rel if rel not in ("", ".") else ""
    except ValueError:
        git_rel_prefix = ""

    # Seed with the 8 highest analysis-score non-test files.
    analysis = engine.file_analysis()
    high_score_files = sorted(
        [item for item in analysis.values() if not item.get("is_test_file")],
        key=lambda item: -item.get("score", 0),
    )[:8]

    seen_pairs: set[tuple[str, str]] = set()
    pairs: list[tuple[str, str, int]] = []
    for entry in high_score_files:
        file_path = entry["file"]
        # Translate the analysis path (project_root-relative) to a git path.
        git_path = f"{git_rel_prefix}/{file_path}" if git_rel_prefix else file_path
        neighbors = get_co_change_neighbors(project_root, git_path, top_n=3)
        if not neighbors:
            continue
        # Translate git paths back to analysis paths for display.
        for neighbor_git_path, count in neighbors:
            display_a = file_path
            if git_rel_prefix and neighbor_git_path.startswith(git_rel_prefix + "/"):
                display_b = neighbor_git_path[len(git_rel_prefix) + 1:]
            else:
                display_b = neighbor_git_path
            key = tuple(sorted([display_a, display_b]))
            if key in seen_pairs:
                continue
            if count < 2:
                continue
            seen_pairs.add(key)
            pairs.append((display_a, display_b, count))
        # Stop collecting once enough pairs are gathered (only 10 are shown).
        if len(pairs) >= 10:
            break

    if not pairs:
        return []

    pairs.sort(key=lambda x: -x[2])
    lines = [
        "## 隐式耦合(Git 共变)\n",
        "> 以下文件在 git 历史中频繁一起修改,可能存在未在代码中显式声明的隐含关联。\n",
    ]
    for file_a, file_b, count in pairs[:10]:
        lines.append(f"- `{file_a}` ↔ `{file_b}` — 共变 {count} 次")
    lines.append("")
    return lines
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
def render_overview_report(engine: "RepoMapEngine", max_chars: int = 16000,
                           with_heat: bool = False,
                           with_co_change: bool = False,
                           granularity: str = "auto") -> str:
    """Render the full project-overview report as Markdown.

    *granularity* ("auto"/"full"/"medium"/"compact") controls how many items
    each section shows; "auto" resolves via :func:`_auto_granularity`.
    *with_heat* tags recently modified files with ``[HOT]``; *with_co_change*
    appends the git co-change section (off by default because it walks git
    history). The result is clamped to *max_chars*.
    """
    # Resolve granularity.
    if granularity == "auto":
        granularity = _auto_granularity(engine)

    # Per-granularity section limits: reading order, modules, hotspots,
    # summary files, symbols per summary file, supporting files.
    if granularity == "compact":
        reading_limit, module_limit, hotspot_limit, summary_files, summary_per_file, supporting_limit = 0, 5, 0, 3, 2, 3
    elif granularity == "medium":
        reading_limit, module_limit, hotspot_limit, summary_files, summary_per_file, supporting_limit = 5, 5, 5, 4, 3, 6
    else:  # full
        reading_limit, module_limit, hotspot_limit, summary_files, summary_per_file, supporting_limit = 8, 8, 10, 6, 4, 8

    lines: list[str] = []
    lines.append(f"# 项目地图 — {engine.project_root.name}")
    if granularity != "full":
        lines[-1] += f"({granularity} 模式)"
    lines[-1] += "\n"
    file_analysis = engine.file_analysis()
    semantic_symbol_total = round(sum(row.get("semantic_symbol_count", 0.0) for row in file_analysis.values()), 1)

    # Count dependency edges.
    edge_count = sum(len(v) for v in engine.graph.outgoing.values())
    # Number of import-resolution configs (reads the engine's private resolver).
    import_config_count = len(engine._resolver.import_configs) if engine._resolver else 0

    stats_line = (
        f"**文件数**: {engine.scan_stats.processed_files} "
        f"**符号数**: {len(engine.graph.symbols)} "
        f"**有效符号**: {semantic_symbol_total} "
        f"**依赖边**: {edge_count} "
        f"**过滤路径**: {engine.scan_stats.filtered_path_files} "
        f"**过滤大文件**: {engine.scan_stats.filtered_large_files}"
    )
    if import_config_count:
        stats_line += f" **解析配置**: {import_config_count}"
    lines.append(stats_line + "\n")

    if engine.scan_stats.truncated_files:
        lines.append(f"> `max_files` 截断了 {engine.scan_stats.truncated_files} 个候选文件\n")

    # One-line project summary (languages / frameworks / type).
    summary = _project_summary(engine, granularity)
    if summary:
        lines.append(f"> {summary}\n")

    # Heat: when enabled, mark files frequently modified in the last 30 days.
    hot_files: set[str] = set()
    if with_heat:
        hot_files = _get_hot_files(str(engine.project_root))

    suggestions = engine.suggested_reading_order(reading_limit)
    if suggestions:
        lines.append("## 推荐阅读顺序\n")
        for index, item in enumerate(suggestions, 1):
            hot_tag = " [HOT]" if item["file"] in hot_files else ""
            highlights = f";关键符号: {', '.join(item['top_symbols'])}" if item["top_symbols"] else ""
            # Prefer the semantic count when it exists and differs from the raw count.
            count_text = (
                f"有效符号 {item['semantic_symbol_count']}"
                if item.get("semantic_symbol_count") is not None
                and item.get("semantic_symbol_count") != item["symbol_count"]
                else f"符号数 {item['symbol_count']}"
            )
            if (
                item.get("semantic_symbol_count") is not None
                and item.get("semantic_symbol_count") != item["symbol_count"]
            ):
                count_text += f"(总符号 {item['symbol_count']})"
            lines.append(
                f"{index}. `{item['file']}`{hot_tag} — {item['reason']};"
                f"{count_text}{highlights}"
            )
        lines.append("")

    supporting_files = engine.supporting_files(supporting_limit)
    if supporting_files:
        lines.append("## 支撑文件(非符号图)\n")
        lines.append(
            "> 符号图优先覆盖源码;以下仅动态列出关键文档、脚本和配置,不能替代 AGENTS.md/CLAUDE.md 的人工上下文。\n"
        )
        for item in supporting_files:
            lines.append(
                f"- `{item['file']}` — {item['reason']}"
                f"({item['role']})"
            )
        lines.append("")

    modules = engine.module_summary(module_limit)
    if modules:
        lines.append("## 模块摘要\n")
        for module in modules:
            highlights = f";关键符号: {', '.join(module['highlights'])}" if module["highlights"] else ""
            # Same semantic-vs-raw count selection as the reading-order section.
            count_text = (
                f"有效符号 {module['semantic_symbol_count']}"
                if module.get("semantic_symbol_count") is not None
                and module.get("semantic_symbol_count") != module["symbol_count"]
                else f"{module['symbol_count']} 符号"
            )
            if (
                module.get("semantic_symbol_count") is not None
                and module.get("semantic_symbol_count") != module["symbol_count"]
            ):
                count_text += f"(总符号 {module['symbol_count']})"
            lines.append(
                f"- `{module['module']}` — {module['file_count']} 文件 / {count_text}"
                f";代表文件 `{module['representative_file']}`{highlights}"
            )
        lines.append("")

    entries = engine.entry_points()
    if entries:
        lines.append("## 入口点\n")
        for entry in entries[:6]:
            lines.append(f"- `{entry}`")
        lines.append("")

    # API route section.
    route_lines = _render_route_section(engine)
    if route_lines:
        lines.extend(route_lines)

    hotspots = engine.hotspots(hotspot_limit)
    if hotspots:
        lines.append("## 高密度文件(按有效符号密度,默认降低标签/配置噪音)\n")
        for hotspot in hotspots:
            count_text = (
                f"有效符号 {hotspot['semantic_symbol_count']}"
                if hotspot.get("semantic_symbol_count") is not None
                and hotspot.get("semantic_symbol_count") != hotspot["symbol_count"]
                else f"{hotspot['symbol_count']} 个符号"
            )
            if (
                hotspot.get("semantic_symbol_count") is not None
                and hotspot.get("semantic_symbol_count") != hotspot["symbol_count"]
            ):
                count_text += f"(总符号 {hotspot['symbol_count']})"
            lines.append(
                f"- {RISK_MARK.get(hotspot['risk'], '[info]')} `{hotspot['file']}`"
                f" — {count_text}"
            )
        lines.append("")

    summary_sections = engine.summary_symbols(summary_files, summary_per_file)
    if summary_sections:
        lines.append("## 关键实现符号\n")
        lines.append("> 这里优先展示更适合阅读和改动分析的实现符号,默认降低测试、HTML 标签、CSS selector、JSON key 等低语义噪音。\n")
        for section in summary_sections:
            lines.append(f"### `{section['file']}`\n")
            if section.get("reason"):
                lines.append(f"- 理由: {section['reason']}")
            for symbol_row in section["symbols"]:
                pagerank = symbol_row["pagerank"] * 1000
                visibility = VISIBILITY_MARK.get(symbol_row["visibility"], "[private]")
                signature = f" \n *`{symbol_row['signature']}`*" if symbol_row["signature"] else ""
                # Build an importance hint from fan-in/fan-out, falling back
                # to kind/visibility based labels for isolated symbols.
                importance_parts = []
                incoming = symbol_row.get("incoming_calls", 0)
                outgoing = symbol_row.get("outgoing_calls", 0)
                if incoming > 0:
                    importance_parts.append(f"← {incoming} callers")
                if outgoing > 0:
                    importance_parts.append(f"→ {outgoing} callees")
                if not importance_parts:
                    if symbol_row["kind"] == "class":
                        importance_parts.append("type definition")
                    elif symbol_row["visibility"] == "exported":
                        importance_parts.append("exported")
                    elif incoming == 0 and outgoing == 0:
                        if symbol_row.get("summary_score", 0) > 10:
                            importance_parts.append("high-importance leaf")
                        else:
                            importance_parts.append("leaf/entry")
                importance_hint = f" ({', '.join(importance_parts)})" if importance_parts else ""
                lines.append(
                    f"- {visibility} **{symbol_row['name']}** `({symbol_row['kind']})`"
                    f" L{symbol_row['line']} Score={symbol_row['summary_score']:.2f} PR={pagerank:.1f}{importance_hint}{signature}"
                )
            lines.append("")

    # Implicit coupling discovered via git co-change history. Off by default
    # so a plain overview does not trigger a heavy git-history walk.
    if with_co_change:
        co_change_lines = _render_co_change_section(engine)
        if co_change_lines:
            lines.extend(co_change_lines)

    # Quick Actions: copy-pasteable follow-up commands.
    lines.append("## Quick Actions\n")
    top_file = suggestions[0]["file"] if suggestions else ""
    top_symbol = suggestions[0]["top_symbols"][0] if suggestions and suggestions[0].get("top_symbols") else ""
    lines.append(f"- 查看入口文件详情: `repomap file-detail --project . --file-path {top_file or 'repomap_core.py'}`")
    if top_symbol:
        lines.append(f"- 查看核心符号调用链: `repomap call-chain --project . --symbol {top_symbol}`")
    lines.append("- 搜索特定主题: `repomap query --project . --query <keyword>`")
    lines.append("- 检查诊断: `repomap check --project .`")
    lines.append("- 完整验证: `repomap verify --project .`")
    lines.append("")

    return _truncate_output("\n".join(lines), max_chars)
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
def render_call_chain_report(engine: "RepoMapEngine", symbol_name: str, max_depth: int = 3) -> str:
    """Render a caller/callee report for the first symbol matching *symbol_name*.

    Shows up to 20 callers and 20 callees discovered by ``engine.call_chain``
    (direction "both", depth-limited by *max_depth*). Returns a notice string
    when the symbol is not found.
    """
    matches = engine.query_symbol(symbol_name)
    if not matches:
        return f"> 未找到符号 `{symbol_name}`"

    symbol = matches[0]
    chain = engine.call_chain(symbol.id, "both", max_depth)
    lines = [
        f"## 调用链 — `{symbol.name}`\n",
        f"- **类型**: {symbol.kind}",
        f"- **位置**: `{symbol.file}:{symbol.line}`",
        f"- **重要性**: PR={symbol.pagerank * 1000:.1f}",
    ]
    # Fix: only emit the signature bullet when one exists. The previous
    # version appended an empty placeholder string (and filtered on
    # `is not None`, which never removes ""), leaking a stray blank line.
    if symbol.signature:
        lines.append(f"- **签名**: `{symbol.signature}`")
    lines.append("")

    callers = chain["callers"]
    lines.append(f"### 被以下符号调用({len(callers)})\n")
    if callers:
        for caller in callers[:20]:
            lines.append(f"- `{caller.name}` ({caller.kind}) — `{caller.file}:{caller.line}`")
        if len(callers) > 20:
            lines.append(f"- …还有 {len(callers) - 20} 个")
    else:
        lines.append("- (无,可能是入口点)")

    callees = chain["callees"]
    lines.append(f"\n### 调用了以下符号({len(callees)})\n")
    if callees:
        for callee in callees[:20]:
            lines.append(f"- `{callee.name}` ({callee.kind}) — `{callee.file}:{callee.line}`")
        if len(callees) > 20:
            lines.append(f"- …还有 {len(callees) - 20} 个")
    else:
        lines.append("- (无,叶子函数)")

    return "\n".join(lines)
|
|
517
|
+
|
|
518
|
+
|
|
519
|
+
def render_file_detail_report(
    engine: "RepoMapEngine",
    file_path: str,
    max_symbols: int = 12,
    max_chars: int = 6000,
) -> str:
    """Render per-file detail: symbols in line order with signature/doc/caller hints.

    Falls back to substring matching when *file_path* is not an exact key and
    returns a notice string when no symbols are found. ``max_symbols <= 0``
    expands every symbol; output is clamped to *max_chars*.
    """
    symbol_ids = engine.graph.file_symbols.get(file_path, [])
    if not symbol_ids:
        # Fuzzy fallback: first known path containing the given fragment.
        matches = [path for path in engine.graph.file_symbols if file_path in path]
        if matches:
            file_path = matches[0]
            symbol_ids = engine.graph.file_symbols[file_path]
        else:
            return f"> 文件 `{file_path}` 未找到或无符号"

    analysis = engine.file_analysis().get(file_path, {})
    symbols = sorted(
        [engine.graph.symbols[symbol_id] for symbol_id in symbol_ids if symbol_id in engine.graph.symbols],
        key=lambda symbol: symbol.line,
    )
    visible_symbols = symbols if max_symbols <= 0 else symbols[:max_symbols]

    lines = [
        f"## 文件详情 — `{file_path}`\n",
        f"共 {len(symbols)} 个符号",
    ]
    if analysis:
        lines.append(
            f"跨文件关联 {analysis.get('neighbor_count', 0)} 个,"
            f"导出符号 {analysis.get('exported_count', 0)} 个\n"
        )
    else:
        lines.append("")

    if max_symbols > 0 and len(symbols) > len(visible_symbols):
        lines.append(f"默认仅展开前 {len(visible_symbols)} 个符号,剩余 {len(symbols) - len(visible_symbols)} 个可用 `--max-symbols` 查看。\n")

    for symbol in visible_symbols:
        pagerank = symbol.pagerank * 1000
        lines.append(f"- `{symbol.name}` ({symbol.kind}) — L{symbol.line} PR={pagerank:.1f}")
        if symbol.signature:
            lines.append(f" - sig: `{symbol.signature}`")
        if symbol.docstring:
            # Docstring preview is capped at 120 characters.
            lines.append(f" - doc: {symbol.docstring[:120]}")
        # Up to five direct callers, via incoming "call" edges only.
        callers = [
            engine.graph.symbols[edge.source].name
            for edge in engine.graph.incoming.get(symbol.id, [])
            if edge.kind == "call" and edge.source in engine.graph.symbols
        ][:5]
        if callers:
            lines.append(f" - called by: {', '.join(callers)}")
    lines.append("")
    return _truncate_output("\n".join(lines), max_chars)
|
|
572
|
+
|
|
573
|
+
|
|
574
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
575
|
+
# query 报告渲染
|
|
576
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
577
|
+
|
|
578
|
+
|
|
579
|
+
def render_query_report(
    engine: "RepoMapEngine",
    query: str,
    file_matches: list[FileMatch],
    tests: list[TestMatch],
    max_files: int,
    max_symbols: int,
    max_chars: int = 12000,
) -> str:
    """Render the topic-map report for *query* over pre-computed matches.

    Sections: header stats, summary, recommended reading order, core files
    (score >= 30, non-test), supporting files (score < 30), matched tests,
    key symbols, and follow-up commands. Output is clamped to *max_chars*.
    """
    lines: list[str] = []
    lines.append(f"# Topic Map — {query}\n")
    lines.append(f"Query: `{query}`")
    lines.append(f"Project: `{engine.project_root}`")
    lines.append(f"Files considered: {engine.scan_stats.processed_files}")
    lines.append(f"Matched files: {len(file_matches)}")

    # NOTE: counts every symbol in the top matched files, not only
    # query-matched symbols.
    sym_count = sum(
        len(engine.graph.file_symbols.get(m.path, []))
        for m in file_matches[:max_files]
    )
    lines.append(f"Matched symbols: {sym_count}\n")

    # Summary: which roles (besides "other") the topic spans.
    roles = set(m.role for m in file_matches if m.role != "other")
    role_hint = f"横跨 {'、'.join(sorted(roles))}" if roles else ""
    if role_hint:
        lines.append(f"## Summary\n{query} 主题{role_hint}。\n")

    # Recommended Reading Order.
    analysis = engine.file_analysis()
    reading_order = _build_query_reading_order(file_matches, analysis, max_files)
    if reading_order:
        lines.append("## Recommended Reading Order\n")
        for i, item in enumerate(reading_order, 1):
            lines.append(f"{i}. `{item['file']}` — {item['reason']}")
        lines.append("")

    # Core Files (non-test files only).
    core = [m for m in file_matches[:max_files] if m.score >= 30 and m.role != "test"]
    if core:
        lines.append("## Core Files\n")
        lines.append("| File | Role | Score | Why |")
        lines.append("| --- | --- | ---: | --- |")
        for m in core[:10]:
            why = "; ".join(m.reasons[:2]) if m.reasons else "-"
            lines.append(f"| `{m.path}` | {m.role} | {m.score:.0f} | {why} |")
        lines.append("")

    # Supporting Files (low-score matches, test files included).
    supporting = [m for m in file_matches[:max_files] if m.score < 30]
    if supporting:
        lines.append("## Supporting Files\n")
        for m in supporting[:10]:
            why = "; ".join(m.reasons[:2]) if m.reasons else "-"
            lines.append(f"- `{m.path}` ({m.role}, score={m.score:.0f}): {why}")
        lines.append("")

    # Tests covering the matched files.
    if tests:
        lines.append("## Tests\n")
        lines.append("| Test File | Covers | Confidence |")
        lines.append("| --- | --- | --- |")
        for t in tests[:15]:
            lines.append(f"| `{t.test_file}` | `{t.target_file}` | {t.confidence} |")
        lines.append("")

    # Key Symbols: up to 5 top-ranked symbols per file, max_symbols overall.
    symbols_shown = 0
    lines.append("## Key Symbols\n")
    lines.append("| Symbol | File | Line | Role |")
    lines.append("| --- | --- | ---: | --- |")
    for m in file_matches[:max_files]:
        if symbols_shown >= max_symbols:
            break
        ranked = _rank_symbols_for_file(engine, m.path)
        for sym in ranked[:5]:
            if symbols_shown >= max_symbols:
                break
            role_hint = classify_file_role(m.path, engine.graph)
            lines.append(f"| `{sym['name']}` | `{m.path}` | {sym['line']} | {role_hint} |")
            symbols_shown += 1
    lines.append("")

    # Related Commands: follow-ups seeded from the top match.
    if file_matches:
        top_file = file_matches[0].path
        top_symbols = _rank_symbols_for_file(engine, top_file)
        lines.append("## Related Commands\n")
        lines.append(f"- `repomap file-detail --project . --file-path {top_file}`")
        if top_symbols:
            lines.append(f"- `repomap refs --project . --symbol {top_symbols[0]['name']}`")
            lines.append(f"- `repomap call-chain --project . --symbol {top_symbols[0]['name']}`")

    return _truncate_output("\n".join(lines), max_chars)
|
|
673
|
+
|
|
674
|
+
|
|
675
|
+
def _build_query_reading_order(
|
|
676
|
+
file_matches: list[FileMatch],
|
|
677
|
+
analysis: dict,
|
|
678
|
+
max_files: int,
|
|
679
|
+
) -> list[dict[str, Any]]:
|
|
680
|
+
order: list[dict[str, Any]] = []
|
|
681
|
+
seen: set[str] = set()
|
|
682
|
+
|
|
683
|
+
# 入口点优先
|
|
684
|
+
for m in file_matches:
|
|
685
|
+
if m.path in seen:
|
|
686
|
+
continue
|
|
687
|
+
if any(m.path.endswith(suffix) for suffix in ["index.ts", "index.tsx", "main.ts", "main.py"]):
|
|
688
|
+
order.append({"file": m.path, "reason": "入口点/索引"})
|
|
689
|
+
seen.add(m.path)
|
|
690
|
+
|
|
691
|
+
# 高分数核心文件
|
|
692
|
+
for m in file_matches:
|
|
693
|
+
if m.path in seen:
|
|
694
|
+
continue
|
|
695
|
+
if m.score >= 60:
|
|
696
|
+
file_data = analysis.get(m.path, {})
|
|
697
|
+
neighbor_count = file_data.get("neighbor_count", 0)
|
|
698
|
+
reason = f"高分匹配 (score={m.score:.0f})"
|
|
699
|
+
if neighbor_count >= 3:
|
|
700
|
+
reason += ",跨模块枢纽"
|
|
701
|
+
order.append({"file": m.path, "reason": reason})
|
|
702
|
+
seen.add(m.path)
|
|
703
|
+
|
|
704
|
+
# 剩余匹配
|
|
705
|
+
for m in file_matches:
|
|
706
|
+
if m.path in seen:
|
|
707
|
+
continue
|
|
708
|
+
order.append({"file": m.path, "reason": f"相关匹配 (score={m.score:.0f})"})
|
|
709
|
+
seen.add(m.path)
|
|
710
|
+
if len(order) >= max_files:
|
|
711
|
+
break
|
|
712
|
+
|
|
713
|
+
return order[:max_files]
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def _rank_symbols_for_file(engine: "RepoMapEngine", file_path: str) -> list[dict[str, Any]]:
|
|
717
|
+
symbols = [
|
|
718
|
+
engine.graph.symbols[sid]
|
|
719
|
+
for sid in engine.graph.file_symbols.get(file_path, [])
|
|
720
|
+
if sid in engine.graph.symbols
|
|
721
|
+
]
|
|
722
|
+
ranked = sorted(
|
|
723
|
+
symbols,
|
|
724
|
+
key=lambda s: (-s.pagerank, s.line),
|
|
725
|
+
)
|
|
726
|
+
return [{"name": s.name, "kind": s.kind, "line": s.line, "pagerank": s.pagerank} for s in ranked]
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
730
|
+
# impact 报告渲染
|
|
731
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
732
|
+
|
|
733
|
+
|
|
734
|
+
def render_impact_report(
    engine: "RepoMapEngine",
    target_files: list[str],
    affected_files: list[tuple[str, str, str]],  # (file, why, confidence)
    tests: list[TestMatch],
    risk_level: str,
    risk_notes: list[str],
    max_chars: int = 8000,
    key_symbols: list[dict[str, Any]] | None = None,
    read_next: list[dict[str, str]] | None = None,
    lsp_hint: dict[str, Any] | None = None,
) -> str:
    """Render a Markdown impact-analysis report.

    Sections appear in a fixed order: input files, edit plan + checklist,
    key symbols, read-next list, likely affected files, impact areas,
    suggested tests, risk level/notes, and related commands. The rendered
    text is capped at *max_chars* via ``_truncate_output`` (``<= 0``
    disables truncation).

    Args:
        engine: Kept for signature parity with the other renderers; not
            referenced in this function body.
        target_files: Files the caller intends to edit.
        affected_files: ``(file, why, confidence)`` tuples of likely fallout;
            only the first 20 are rendered.
        tests: Test-file matches to suggest running.
        risk_level: Risk key (``"high"``/``"medium"``/``"low"``); unknown
            values are echoed verbatim in the heading.
        risk_notes: Free-form notes explaining the risk assessment.
        max_chars: Hard cap on the report length.
        key_symbols: Optional dicts with ``name``/``kind``/``file``/``line``/
            ``incomingCount``/``outgoingCount`` keys; first 12 are shown.
        read_next: Optional dicts with ``file``/``role``/``reason`` keys;
            first 10 are shown.
        lsp_hint: Optional dict; a truthy ``available`` adds an LSP tip.

    Returns:
        The report as one Markdown string.
    """
    lines: list[str] = []
    lines.append("# Impact Analysis\n")

    lines.append("## Input Files\n")
    for f in target_files:
        lines.append(f"- `{f}`")
    lines.append("")

    if key_symbols or read_next:
        lines.append("## Edit Plan\n")
        lines.append("- Start with target files, then inspect high-confidence affected files and suggested tests.")
        if key_symbols:
            lines.append("- Review key symbols before changing behavior or signatures.")
        if lsp_hint and lsp_hint.get("available"):
            lines.append("- Local LSP is available; use focused diagnostics or `refs --with-lsp` when exact evidence matters.")
        lines.append("")
    # Edit checklist: only entries relevant to the supplied inputs are added,
    # so the rendered checklist adapts to what this report actually contains.
    checklist: list[str] = []
    checklist.append("□ 阅读目标文件及 Read Next 中的高优先级文件")
    if key_symbols:
        checklist.append("□ 检查 Key Symbols 的调用链(repomap call-chain)确认影响范围")
    if affected_files:
        checklist.append("□ 逐个检查 Likely Affected Files 是否需要同步修改")
    if tests:
        checklist.append("□ 修改完成后运行 Suggested Tests 中的测试")
    checklist.append("□ 编辑完成后运行 repomap verify 做最终证据检查")
    # Two items above are unconditional, so this branch always executes; the
    # guard is kept for symmetry with the other optional sections.
    if checklist:
        lines.append("### Edit Checklist\n")
        for item in checklist:
            lines.append(item)
        lines.append("")

    if key_symbols:
        lines.append("## Key Symbols\n")
        lines.append("| Symbol | Kind | Location | Incoming | Outgoing |")
        lines.append("| --- | --- | --- | --- | --- |")
        for item in key_symbols[:12]:
            lines.append(
                f"| `{item['name']}` | {item['kind']} | `{item['file']}:{item['line']}` | {item['incomingCount']} | {item['outgoingCount']} |"
            )
        lines.append("")

    if read_next:
        lines.append("## Read Next\n")
        for item in read_next[:10]:
            lines.append(f"- `{item['file']}` ({item['role']}): {item['reason']}")
        lines.append("")

    if affected_files:
        lines.append("## Likely Affected Files\n")
        lines.append("| File | Why | Confidence |")
        lines.append("| --- | --- | --- |")
        for f, why, conf in affected_files[:20]:
            lines.append(f"| `{f}` | {why} | {conf} |")
        lines.append("")

    areas = _extract_impact_areas(target_files, affected_files)
    if areas:
        lines.append("## Impact Areas\n")
        for area in areas:
            lines.append(f"- {area}")
        lines.append("")

    if tests:
        lines.append("## Suggested Tests\n")
        for t in tests:
            lines.append(f"- `{t.test_file}` ({t.confidence} confidence: {t.reason})")
        lines.append("")

    # Fixed label map; an unknown risk_level falls back to the raw string.
    risk_icon = {"high": "HIGH", "medium": "MEDIUM", "low": "LOW"}
    lines.append(f"## Risk Level: {risk_icon.get(risk_level, risk_level)}\n")
    if risk_notes:
        lines.append("## Risk Notes\n")
        for note in risk_notes:
            lines.append(f"- {note}")
        lines.append("")

    # Related Commands: concrete follow-up CLI invocations for the reader.
    lines.append("## Related Commands\n")
    if target_files:
        lines.append(f"- 查看目标文件详情: `repomap file-detail --project . --file-path {target_files[0]}`")
    if affected_files:
        top_affected = affected_files[0][0]
        lines.append(f"- 检查首要受影响文件: `repomap file-detail --project . --file-path {top_affected}`")
    lines.append("- 验证变更: `repomap verify --project .`")
    lines.append("")

    return _truncate_output("\n".join(lines), max_chars)
|
|
834
|
+
|
|
835
|
+
|
|
836
|
+
def _extract_impact_areas(
|
|
837
|
+
target_files: list[str],
|
|
838
|
+
affected_files: list[tuple[str, str, str]],
|
|
839
|
+
) -> list[str]:
|
|
840
|
+
areas: set[str] = set()
|
|
841
|
+
all_files = target_files + [f for f, _, _ in affected_files]
|
|
842
|
+
for f in all_files:
|
|
843
|
+
parts = PurePosixPath(f).parts
|
|
844
|
+
if len(parts) >= 2:
|
|
845
|
+
top = parts[0] if parts[0] not in ("src", "app", "lib") else (
|
|
846
|
+
parts[1] if len(parts) >= 2 else parts[0]
|
|
847
|
+
)
|
|
848
|
+
areas.add(top)
|
|
849
|
+
return sorted(areas)[:8]
|
|
850
|
+
|
|
851
|
+
|
|
852
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
853
|
+
# diff-risk 报告渲染
|
|
854
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
855
|
+
|
|
856
|
+
|
|
857
|
+
|
|
858
|
+
|
|
859
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
860
|
+
# verify 报告渲染
|
|
861
|
+
# ═══════════════════════════════════════════════════════════════════════════════
|
|
862
|
+
|
|
863
|
+
|
|
864
|
+
def render_verify_report(payload: dict[str, Any], max_chars: int = 10000) -> str:
    """Render a Markdown verification report from a ``verify`` result payload.

    Reads ``payload["result"]`` (a nested dict of the verify pipeline's
    output) and emits sections for: overall status, changed files, risk
    summary, suggested tests, test-coverage gaps, static-check results, LSP
    diagnostics, graph diff / breaking changes, and a final evidence
    checklist. The rendered text is capped at *max_chars* via
    ``_truncate_output`` (``<= 0`` disables truncation).
    """
    result = payload.get("result", {})
    status = result.get("status", "unknown")
    # Known statuses map to fixed labels; unknown ones are upper-cased as-is.
    status_label = {"passed": "PASS", "warning": "WARNING", "failed": "FAILED"}.get(status, status.upper())
    lines: list[str] = ["# Verify Report\n"]

    lines.append("## Overall Status\n")
    lines.append(f"**{status_label}**")
    if status == "passed":
        lines.append("- Evidence looks sufficient for final handoff, assuming required project tests were actually run when needed.")
    elif status == "warning":
        lines.append("- Do not claim full confidence yet; review the warnings and missing evidence below.")
    else:
        lines.append("- Do not claim completion; at least one verification source failed.")
    lines.append("")

    changed_files = result.get("changedFiles", [])
    lines.append("## Changed Files\n")
    if changed_files:
        # Show at most 30 files, then a count of the remainder.
        for file_path in changed_files[:30]:
            lines.append(f"- `{file_path}` ({classify_file_role(file_path)})")
        if len(changed_files) > 30:
            lines.append(f"- …还有 {len(changed_files) - 30} 个")
    else:
        lines.append("- No changed files detected in the project.")
    lines.append("")

    risk = result.get("risk", {})
    lines.append("## Risk Summary\n")
    lines.append(f"- Level: **{str(risk.get('level', 'unknown')).upper()}**")
    for reason in risk.get("reasons", []):
        lines.append(f"- {reason}")
    for missing in risk.get("missingChecks", []):
        lines.append(f"- Missing evidence: {missing}")
    lines.append("")

    tests = result.get("tests", [])
    if tests:
        lines.append("## Suggested Tests\n")
        # Rebuild TestMatch objects from the JSON-ish dicts so the shared
        # command builder can be reused for this payload shape.
        for command in _test_commands_for_files([
            TestMatch(
                test_file=item.get("testFile", ""),
                target_file=item.get("targetFile", ""),
                confidence=item.get("confidence", ""),
                reason=item.get("reason", ""),
            )
            for item in tests
        ]):
            lines.append(f"- `{command}`")
        lines.append("")
    else:
        lines.append("## Suggested Tests\n")
        lines.append("- 未匹配到与变更文件相关的测试文件。")
        # NOTE(review): changedFiles is re-fetched here although it was read
        # above; harmless but redundant.
        changed_files = result.get("changedFiles", [])
        if changed_files:
            # Offer a generic whole-suite command based on the first
            # recognized file type among the first three changed files.
            test_hints: list[str] = []
            for f in changed_files[:3]:
                if f.endswith(".py"):
                    test_hints.append("python -m pytest")
                    break
                elif f.endswith((".ts", ".tsx", ".js", ".jsx")):
                    test_hints.append("npx vitest run")
                    break
                elif f.endswith(".go"):
                    test_hints.append("go test ./...")
                    break
                elif f.endswith(".rs"):
                    test_hints.append("cargo test")
                    break
            if test_hints:
                lines.append(f"- 建议手动运行: `{test_hints[0]}`")
            else:
                lines.append("- 建议手动运行项目测试套件。")
        lines.append("")

    untested = result.get("untestedSymbols", [])
    if untested:
        lines.append("## Test Coverage Gaps\n")
        lines.append("> 以下符号缺少测试覆盖,修改时需格外谨慎。\n")
        lines.append("| Symbol | Kind | File | Callers | Risk |")
        lines.append("|--------|------|------|:------:|:----:|")
        for item in untested[:15]:
            # assumes each entry carries a numeric "risk_score" key — direct
            # indexing raises KeyError otherwise; TODO confirm producer schema.
            risk_label = "HIGH" if item["risk_score"] >= 10 else "MEDIUM" if item["risk_score"] >= 5 else "LOW"
            lines.append(
                f"| `{item['symbol']}` | {item['kind']} | `{item['file']}:{item['line']}` "
                f"| {item['incoming_calls']} | {risk_label} |"
            )
        lines.append("")

    check = result.get("check", {})
    lines.append("## Check Result\n")
    lines.append(f"- Status: **{str(check.get('status', 'unknown')).upper()}**")
    summary = check.get("summary", {})
    if summary:
        lines.append(
            f"- Errors: {summary.get('total_errors', 0)} | Warnings: {summary.get('total_warnings', 0)} | Tool failures: {summary.get('tool_failures', 0)}"
        )
    for run in check.get("runs", [])[:8]:
        marker = "skipped" if run.get("skipped") else f"exit={run.get('exit_code')}"
        lines.append(f"- {run.get('tool')}: {marker}")
    lines.append("")

    lsp = result.get("lsp", {})
    lines.append("## LSP Diagnostics\n")
    lines.append(f"- Status: **{str(lsp.get('status', 'skipped')).upper()}**")
    if lsp.get("reason"):
        lines.append(f"- Reason: {lsp['reason']}")
    lsp_summary = lsp.get("summary", {})
    if lsp_summary:
        lines.append(
            f"- Errors: {lsp_summary.get('totalErrors', 0)} | Warnings: {lsp_summary.get('totalWarnings', 0)} | Failed runs: {lsp_summary.get('failedRuns', 0)} | Skipped runs: {lsp_summary.get('skippedRuns', 0)}"
        )
    lines.append("")

    graph_diff = result.get("graphDiff", {})
    breaking_changes = graph_diff.get("breakingChanges", [])
    if breaking_changes:
        lines.append("## Breaking Changes\n")
        for bc in breaking_changes[:10]:
            # NOTE(review): this icon map is loop-invariant and could be
            # hoisted above the loop; harmless as-is.
            risk_icon = {"HIGH": "🔴", "MEDIUM": "🟡", "LOW": "🟢"}
            lines.append(
                f"- {risk_icon.get(bc.get('risk', 'LOW'), '⚪')} "
                f"**{bc['name']}** `({bc.get('kind', '')})` in `{bc['file']}` "
                f"[{bc.get('risk', 'LOW')}]"
            )
            # Show old/new signatures only when a new one exists and differs.
            if bc.get("new_signature") and bc.get("old_signature") != bc.get("new_signature"):
                lines.append(f"  - 旧: `{bc.get('old_signature', '')}`")
                lines.append(f"  - 新: `{bc.get('new_signature', '')}`")
            if bc.get("affected_caller_count", 0) > 0:
                lines.append(f"  - {bc['affected_caller_count']} 个调用者受影响")
        lines.append("")

    lines.append("## Graph Diff\n")
    lines.append(f"- Status: **{str(graph_diff.get('status', 'skipped')).upper()}**")
    if graph_diff.get("reason"):
        lines.append(f"- Reason: {graph_diff['reason']}")
    if graph_diff.get("summary"):
        summary = graph_diff["summary"]
        lines.append(
            f"- Symbols +{summary.get('added', 0)} / -{summary.get('removed', 0)} / modified {summary.get('modified', 0)}; edges +{summary.get('edges_added', 0)} / -{summary.get('edges_removed', 0)}"
        )
    lines.append("")

    lines.append("## Final Evidence Checklist\n")
    if status == "passed":
        lines.append("- [x] Static diagnostics did not fail.")
        lines.append("- [x] Risk gate did not find high-risk or missing-check blockers.")
    else:
        # Non-passed reports get unchecked follow-up items.
        lines.append("- [ ] Review failed/warning sections before final handoff.")
        if tests:
            lines.append("- [ ] Run or explicitly account for the suggested tests above.")
        if lsp.get("status") == "skipped":
            lines.append("- [ ] LSP evidence was skipped; use `--with-lsp` if exact local diagnostics are needed.")
    return _truncate_output("\n".join(lines), max_chars)
|
|
1019
|
+
|
|
1020
|
+
|
|
1021
|
+
def render_diff_risk_report(
    engine: "RepoMapEngine",
    changed_files: list[str],
    affected_files: list[tuple[str, str, str]],
    tests: list[TestMatch],
    risk_level: str,
    risk_reasons: list[str],
    missing_checks: list[str],
    max_chars: int = 8000,
) -> str:
    """Render a Markdown diff-risk report.

    Sections are emitted in a fixed order: changed files (with roles from the
    symbol graph), changed areas, risk level, reasons, suggested test
    commands, manual verification steps, and potentially missing checks.
    The result is truncated to *max_chars* via ``_truncate_output``.
    """
    out: list[str] = ["# Diff Risk Report\n", "## Changed Files\n"]
    for path in changed_files:
        out.append(f"- `{path}` ({classify_file_role(path, engine.graph)})")
    out.append("")

    changed_areas = _extract_impact_areas(changed_files, affected_files)
    if changed_areas:
        out.append("## Changed Areas\n")
        out.extend(f"- {area}" for area in changed_areas)
        out.append("")

    out.append(f"## Risk Level\n{risk_level.upper()}\n")

    if risk_reasons:
        out.append("## Why\n")
        out.extend(f"- {reason}" for reason in risk_reasons)
        out.append("")

    if tests:
        out.append("## Suggested Tests\n")
        out.extend(f"- `{cmd}`" for cmd in _test_commands_for_files(tests))
        out.append("")

    manual_steps = _suggest_manual_verification(changed_files, risk_level)
    if manual_steps:
        out.append("## Manual Verification\n")
        out.extend(f"- {step}" for step in manual_steps)
        out.append("")

    if missing_checks:
        out.append("## Potentially Missing Checks\n")
        out.extend(f"- {item}" for item in missing_checks)
        out.append("")

    return _truncate_output("\n".join(out), max_chars)
|
|
1076
|
+
|
|
1077
|
+
|
|
1078
|
+
def _test_commands_for_files(tests: list[TestMatch]) -> list[str]:
|
|
1079
|
+
commands: list[str] = []
|
|
1080
|
+
seen: set[str] = set()
|
|
1081
|
+
for t in tests:
|
|
1082
|
+
if t.test_file not in seen:
|
|
1083
|
+
seen.add(t.test_file)
|
|
1084
|
+
if t.test_file.endswith((".ts", ".tsx", ".js", ".jsx")):
|
|
1085
|
+
commands.append(f"npx vitest run {t.test_file}")
|
|
1086
|
+
elif t.test_file.endswith(".py"):
|
|
1087
|
+
commands.append(f"python -m pytest {t.test_file} -v")
|
|
1088
|
+
elif t.test_file.endswith(".go"):
|
|
1089
|
+
commands.append(f"go test ./{PurePosixPath(t.test_file).parent}")
|
|
1090
|
+
elif t.test_file.endswith(".rs"):
|
|
1091
|
+
commands.append(f"cargo test -- {t.test_file}")
|
|
1092
|
+
else:
|
|
1093
|
+
commands.append(f"# run tests in {t.test_file}")
|
|
1094
|
+
return commands[:10]
|
|
1095
|
+
|
|
1096
|
+
|
|
1097
|
+
def _suggest_manual_verification(changed_files: list[str], risk_level: str) -> list[str]:
|
|
1098
|
+
items: list[str] = []
|
|
1099
|
+
all_paths = " ".join(changed_files).lower()
|
|
1100
|
+
if any(kw in all_paths for kw in ["terminal", "cli", "tui", "input"]):
|
|
1101
|
+
items.append("在终端中运行常用命令验证输入/输出正常")
|
|
1102
|
+
if any(kw in all_paths for kw in ["auth", "login", "token", "session"]):
|
|
1103
|
+
items.append("验证登录/登出流程正常")
|
|
1104
|
+
if any(kw in all_paths for kw in ["ui", "component", "page", "view"]):
|
|
1105
|
+
items.append("在浏览器中检查相关页面渲染和交互")
|
|
1106
|
+
if risk_level == "high":
|
|
1107
|
+
items.append("考虑在 staging 环境做一次完整的回归测试")
|
|
1108
|
+
return items[:5]
|