super-dev 2.0.9__tar.gz → 2.0.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {super_dev-2.0.9/super_dev.egg-info → super_dev-2.0.10}/PKG-INFO +19 -4
- {super_dev-2.0.9 → super_dev-2.0.10}/README.md +18 -3
- {super_dev-2.0.9 → super_dev-2.0.10}/pyproject.toml +1 -1
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/__init__.py +1 -1
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/analyzer/__init__.py +8 -2
- super_dev-2.0.10/super_dev/analyzer/impact.py +255 -0
- super_dev-2.0.10/super_dev/analyzer/repo_map.py +342 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/cli.py +488 -16
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/config/manager.py +2 -2
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/creator.py +4 -1
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/document_generator.py +13 -6
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/deployers/delivery.py +1 -1
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/engine.py +2 -1
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/proof_pack.py +179 -3
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/skills/manager.py +1 -1
- super_dev-2.0.10/super_dev/terminal.py +169 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/web/api.py +97 -4
- {super_dev-2.0.9 → super_dev-2.0.10/super_dev.egg-info}/PKG-INFO +19 -4
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev.egg-info/SOURCES.txt +3 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/LICENSE +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/setup.cfg +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/analyzer/analyzer.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/analyzer/detectors.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/analyzer/models.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/catalogs.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/config/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/config/frontend.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/frontend_builder.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/implementation_builder.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/prompt_generator.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/requirement_parser.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/spec_builder.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/creators/task_executor.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/deployers/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/deployers/cicd.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/deployers/migration.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/deployers/rehearsal.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/deployers/rehearsal_runner.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/aesthetics.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/charts.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/codegen.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/engine.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/generator.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/landing.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/tech_stack.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/tokens.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/ui_intelligence.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/design/ux_guide.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/exceptions.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/experts/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/experts/service.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/integrations/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/integrations/manager.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/contracts.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/experts.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/knowledge.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/quality.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/orchestrator/telemetry.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/policy/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/policy/manager.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/release_readiness.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/review_state.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/reviewers/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/reviewers/code_review.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/reviewers/quality_gate.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/reviewers/redteam.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/reviewers/ui_review.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/skills/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/specs/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/specs/generator.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/specs/manager.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/specs/models.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/specs/validator.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/utils/__init__.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev/utils/logger.py +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev.egg-info/dependency_links.txt +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev.egg-info/entry_points.txt +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev.egg-info/requires.txt +0 -0
- {super_dev-2.0.9 → super_dev-2.0.10}/super_dev.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: super-dev
|
|
3
|
-
Version: 2.0.
|
|
3
|
+
Version: 2.0.10
|
|
4
4
|
Summary: Super Dev - Pipeline AI Coding Assistant
|
|
5
5
|
Author-email: Excellent <11964948@qq.com>
|
|
6
6
|
License-Expression: MIT
|
|
@@ -60,7 +60,16 @@ Dynamic: license-file
|
|
|
60
60
|
|
|
61
61
|
## 版本
|
|
62
62
|
|
|
63
|
-
当前版本:`2.0.
|
|
63
|
+
当前版本:`2.0.10`
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## 演示视频
|
|
68
|
+
|
|
69
|
+
<video controls playsinline preload="metadata" src="https://shangyankeji.github.io/super-dev/demo.mp4" width="100%"></video>
|
|
70
|
+
|
|
71
|
+
- 在线播放:[观看演示视频](https://shangyankeji.github.io/super-dev/demo.mp4)
|
|
72
|
+
- 仓库文件:[demo.mp4](demo.mp4)
|
|
64
73
|
|
|
65
74
|
---
|
|
66
75
|
|
|
@@ -168,6 +177,9 @@ super-dev
|
|
|
168
177
|
```bash
|
|
169
178
|
super-dev integrate audit --auto --repair --force
|
|
170
179
|
super-dev integrate validate --auto
|
|
180
|
+
super-dev repo-map
|
|
181
|
+
super-dev impact "修改登录流程" --files services/auth.py
|
|
182
|
+
super-dev fix "修复登录接口 500 并补充回归验证"
|
|
171
183
|
super-dev release proof-pack
|
|
172
184
|
super-dev release readiness
|
|
173
185
|
super-dev review architecture --status revision_requested --comment "技术方案需要重构"
|
|
@@ -207,13 +219,13 @@ super-dev bootstrap --name my-project --platform web --frontend next --backend n
|
|
|
207
219
|
### 3. 指定版本安装
|
|
208
220
|
|
|
209
221
|
```bash
|
|
210
|
-
pip install super-dev==2.0.
|
|
222
|
+
pip install super-dev==2.0.10
|
|
211
223
|
```
|
|
212
224
|
|
|
213
225
|
### 4. GitHub 指定标签安装
|
|
214
226
|
|
|
215
227
|
```bash
|
|
216
|
-
pip install git+https://github.com/shangyankeji/super-dev.git@v2.0.
|
|
228
|
+
pip install git+https://github.com/shangyankeji/super-dev.git@v2.0.10
|
|
217
229
|
```
|
|
218
230
|
|
|
219
231
|
### 5. 源码开发安装
|
|
@@ -336,6 +348,9 @@ uv tool install super-dev
|
|
|
336
348
|
2. Claude Code / Gemini CLI / Kiro CLI / Qoder CLI / CodeBuddy CLI 等支持原生映射的宿主,可直接输入:`/super-dev 你的需求`。
|
|
337
349
|
3. Codex CLI、Kimi CLI 当前不使用 `/super-dev`;在宿主会话里输入 `super-dev: 你的需求`。
|
|
338
350
|
4. 宿主会先被约束执行“同类产品研究 -> 三文档 -> 等待用户确认 -> Spec -> 前端运行验证 -> 后端/测试/交付”,不会直接跳到写代码。
|
|
351
|
+
5. 如果是缺陷修复,优先使用 `super-dev fix "缺陷描述"`,走轻量 bugfix 路径,而不是完整功能开发路径。
|
|
352
|
+
6. 接手已有项目或复杂仓库时,优先执行 `super-dev repo-map` 生成代码库地图,再让宿主进入开发。
|
|
353
|
+
7. 如果准备重构、改接口、修登录流或修改关键状态流,先执行 `super-dev impact "变更描述" --files ...` 评估影响范围,再动手。
|
|
339
354
|
|
|
340
355
|
### 宿主如何理解 Super Dev
|
|
341
356
|
|
|
@@ -19,7 +19,16 @@
|
|
|
19
19
|
|
|
20
20
|
## 版本
|
|
21
21
|
|
|
22
|
-
当前版本:`2.0.
|
|
22
|
+
当前版本:`2.0.10`
|
|
23
|
+
|
|
24
|
+
---
|
|
25
|
+
|
|
26
|
+
## 演示视频
|
|
27
|
+
|
|
28
|
+
<video controls playsinline preload="metadata" src="https://shangyankeji.github.io/super-dev/demo.mp4" width="100%"></video>
|
|
29
|
+
|
|
30
|
+
- 在线播放:[观看演示视频](https://shangyankeji.github.io/super-dev/demo.mp4)
|
|
31
|
+
- 仓库文件:[demo.mp4](demo.mp4)
|
|
23
32
|
|
|
24
33
|
---
|
|
25
34
|
|
|
@@ -127,6 +136,9 @@ super-dev
|
|
|
127
136
|
```bash
|
|
128
137
|
super-dev integrate audit --auto --repair --force
|
|
129
138
|
super-dev integrate validate --auto
|
|
139
|
+
super-dev repo-map
|
|
140
|
+
super-dev impact "修改登录流程" --files services/auth.py
|
|
141
|
+
super-dev fix "修复登录接口 500 并补充回归验证"
|
|
130
142
|
super-dev release proof-pack
|
|
131
143
|
super-dev release readiness
|
|
132
144
|
super-dev review architecture --status revision_requested --comment "技术方案需要重构"
|
|
@@ -166,13 +178,13 @@ super-dev bootstrap --name my-project --platform web --frontend next --backend n
|
|
|
166
178
|
### 3. 指定版本安装
|
|
167
179
|
|
|
168
180
|
```bash
|
|
169
|
-
pip install super-dev==2.0.
|
|
181
|
+
pip install super-dev==2.0.10
|
|
170
182
|
```
|
|
171
183
|
|
|
172
184
|
### 4. GitHub 指定标签安装
|
|
173
185
|
|
|
174
186
|
```bash
|
|
175
|
-
pip install git+https://github.com/shangyankeji/super-dev.git@v2.0.
|
|
187
|
+
pip install git+https://github.com/shangyankeji/super-dev.git@v2.0.10
|
|
176
188
|
```
|
|
177
189
|
|
|
178
190
|
### 5. 源码开发安装
|
|
@@ -295,6 +307,9 @@ uv tool install super-dev
|
|
|
295
307
|
2. Claude Code / Gemini CLI / Kiro CLI / Qoder CLI / CodeBuddy CLI 等支持原生映射的宿主,可直接输入:`/super-dev 你的需求`。
|
|
296
308
|
3. Codex CLI、Kimi CLI 当前不使用 `/super-dev`;在宿主会话里输入 `super-dev: 你的需求`。
|
|
297
309
|
4. 宿主会先被约束执行“同类产品研究 -> 三文档 -> 等待用户确认 -> Spec -> 前端运行验证 -> 后端/测试/交付”,不会直接跳到写代码。
|
|
310
|
+
5. 如果是缺陷修复,优先使用 `super-dev fix "缺陷描述"`,走轻量 bugfix 路径,而不是完整功能开发路径。
|
|
311
|
+
6. 接手已有项目或复杂仓库时,优先执行 `super-dev repo-map` 生成代码库地图,再让宿主进入开发。
|
|
312
|
+
7. 如果准备重构、改接口、修登录流或修改关键状态流,先执行 `super-dev impact "变更描述" --files ...` 评估影响范围,再动手。
|
|
298
313
|
|
|
299
314
|
### 宿主如何理解 Super Dev
|
|
300
315
|
|
|
@@ -15,6 +15,8 @@ from .models import (
|
|
|
15
15
|
ProjectType,
|
|
16
16
|
TechStack,
|
|
17
17
|
)
|
|
18
|
+
from .repo_map import RepoMapBuilder, RepoMapItem, RepoMapReport
|
|
19
|
+
from .impact import ImpactAnalyzer, ImpactAnalysisReport, ImpactItem
|
|
18
20
|
|
|
19
21
|
__all__ = [
|
|
20
22
|
"ProjectAnalyzer",
|
|
@@ -27,8 +29,12 @@ __all__ = [
|
|
|
27
29
|
"TechStack",
|
|
28
30
|
"FrameworkType",
|
|
29
31
|
"ArchitecturePattern",
|
|
32
|
+
"RepoMapBuilder",
|
|
33
|
+
"RepoMapItem",
|
|
34
|
+
"RepoMapReport",
|
|
35
|
+
"ImpactAnalyzer",
|
|
36
|
+
"ImpactAnalysisReport",
|
|
37
|
+
"ImpactItem",
|
|
30
38
|
"detect_project_type",
|
|
31
39
|
"detect_tech_stack",
|
|
32
40
|
]
|
|
33
|
-
|
|
34
|
-
|
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from .repo_map import RepoMapBuilder, RepoMapItem
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _tokenize(text: str) -> set[str]:
|
|
13
|
+
return {token for token in re.split(r"[^a-zA-Z0-9_\-/]+", text.lower()) if token}
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class ImpactItem:
    """One artifact (module, entry point, or integration surface) that a
    proposed change is likely to touch, with the evidence for the match."""

    name: str        # display name of the artifact
    path: str        # repo-relative path of the artifact
    reason: str      # human-readable evidence for why it matched
    category: str    # "module" | "entry-point" | "integration-surface"
    confidence: float  # heuristic match strength in [0, 1]

    def to_dict(self) -> dict[str, Any]:
        """Serialize this item to a plain, JSON-compatible dict."""
        return dict(
            name=self.name,
            path=self.path,
            reason=self.reason,
            category=self.category,
            confidence=self.confidence,
        )
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class ImpactAnalysisReport:
    """Structured result of a change-impact analysis.

    Holds the change request (description + changed files), the inferred
    risk level, the matched modules / entry points / integration surfaces,
    plus regression focus and recommended steps, and renders them either
    as a JSON-friendly dict or as a Markdown document.
    """

    project_name: str
    project_path: str
    description: str
    files: list[str]
    risk_level: str
    summary: str
    affected_modules: list[ImpactItem] = field(default_factory=list)
    affected_entry_points: list[ImpactItem] = field(default_factory=list)
    affected_integration_surfaces: list[ImpactItem] = field(default_factory=list)
    regression_focus: list[str] = field(default_factory=list)
    recommended_steps: list[str] = field(default_factory=list)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the report to a plain, JSON-compatible dict."""

        def dump(items: list[ImpactItem]) -> list[dict[str, Any]]:
            # Each impact item knows how to serialize itself.
            return [entry.to_dict() for entry in items]

        return {
            "project_name": self.project_name,
            "project_path": self.project_path,
            "description": self.description,
            "files": self.files,
            "risk_level": self.risk_level,
            "summary": self.summary,
            "affected_modules": dump(self.affected_modules),
            "affected_entry_points": dump(self.affected_entry_points),
            "affected_integration_surfaces": dump(self.affected_integration_surfaces),
            "regression_focus": list(self.regression_focus),
            "recommended_steps": list(self.recommended_steps),
        }

    def to_markdown(self) -> str:
        """Render the report as a Markdown document."""
        out: list[str] = [
            "# Change Impact Analysis",
            "",
            f"- Project: `{self.project_name}`",
            f"- Path: `{self.project_path}`",
            f"- Risk Level: `{self.risk_level}`",
        ]
        # Description and file list are optional metadata bullets.
        if self.description:
            out.append(f"- Change: {self.description}")
        if self.files:
            out.append(f"- Files: {', '.join(f'`{f}`' for f in self.files)}")
        out += ["", self.summary, ""]
        self._append_items(out, "Affected Modules", self.affected_modules)
        self._append_items(out, "Affected Entry Points", self.affected_entry_points)
        self._append_items(out, "Affected Integration Surfaces", self.affected_integration_surfaces)
        out += ["", "## Regression Focus", ""]
        if self.regression_focus:
            out += [f"- {entry}" for entry in self.regression_focus]
        else:
            out.append("- No explicit regression focus was inferred.")
        out += ["", "## Recommended Steps", ""]
        out += [f"- {entry}" for entry in self.recommended_steps]
        out.append("")
        return "\n".join(out)

    @staticmethod
    def _append_items(lines: list[str], title: str, items: list[ImpactItem]) -> None:
        """Append a titled bullet section for *items* to *lines* in place."""
        lines += ["", f"## {title}", ""]
        if not items:
            lines.append("- None")
            return
        for entry in items:
            lines.append(f"- **{entry.name}**: `{entry.path}`")
            lines.append(f"  - {entry.reason} (confidence={entry.confidence:.2f})")
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class ImpactAnalyzer:
    """Heuristic change-impact analyzer built on top of the repo map.

    Scores repo-map modules, entry points, and integration surfaces
    against a change description and a list of changed files, then
    derives a risk level, regression focus, and recommended steps.
    Reports are written under ``<project>/output/``.
    """

    def __init__(self, project_dir: Path):
        # Resolve once so written report paths and path matching are stable.
        self.project_dir = Path(project_dir).resolve()
        self.output_dir = self.project_dir / "output"
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.project_name = self.project_dir.name
        self.repo_map_builder = RepoMapBuilder(self.project_dir)

    def build(self, description: str = "", files: list[str] | None = None) -> ImpactAnalysisReport:
        """Build an impact report for *description* and the changed *files*.

        Both inputs are optional; with neither, the report degrades to a
        low-signal summary over the repo map. Empty file entries are dropped.
        """
        files = [file for file in (files or []) if file]
        repo_map = self.repo_map_builder.build()
        description_tokens = _tokenize(description)
        # Normalize to forward slashes so Windows-style inputs match repo-map paths.
        normalized_files = [str(Path(file)).replace("\\", "/") for file in files]

        affected_modules = self._score_repo_items(repo_map.top_modules, normalized_files, description_tokens, "module")
        affected_entry_points = self._score_repo_items(repo_map.entry_points, normalized_files, description_tokens, "entry-point")
        affected_surfaces = self._score_repo_items(repo_map.integration_surfaces, normalized_files, description_tokens, "integration-surface")

        risk_level = self._risk_level(affected_modules, affected_entry_points, affected_surfaces)
        summary = self._summary(description, normalized_files, risk_level, affected_modules, affected_entry_points, affected_surfaces)
        regression_focus = self._regression_focus(affected_modules, affected_surfaces, normalized_files)
        recommended_steps = self._recommended_steps(risk_level, affected_modules, affected_surfaces, normalized_files)

        return ImpactAnalysisReport(
            project_name=self.project_name,
            project_path=str(self.project_dir),
            description=description,
            files=normalized_files,
            risk_level=risk_level,
            summary=summary,
            affected_modules=affected_modules,
            affected_entry_points=affected_entry_points,
            affected_integration_surfaces=affected_surfaces,
            regression_focus=regression_focus,
            recommended_steps=recommended_steps,
        )

    def write(self, report: ImpactAnalysisReport) -> dict[str, Path]:
        """Write *report* as Markdown + JSON under the output dir.

        Returns a mapping with the paths under keys ``markdown`` and ``json``.
        """
        md_path = self.output_dir / f"{self.project_name}-impact-analysis.md"
        json_path = self.output_dir / f"{self.project_name}-impact-analysis.json"
        md_path.write_text(report.to_markdown(), encoding="utf-8")
        json_path.write_text(json.dumps(report.to_dict(), ensure_ascii=False, indent=2), encoding="utf-8")
        return {"markdown": md_path, "json": json_path}

    def _score_repo_items(
        self,
        items: list[RepoMapItem],
        files: list[str],
        description_tokens: set[str],
        category: str,
    ) -> list[ImpactItem]:
        """Score repo-map *items* against changed files and description keywords.

        Path overlap with a changed file is the strongest signal (0.95),
        sharing a top-level module with a changed file is next (0.82), and
        keyword overlap with the description is capped at 0.75. Returns at
        most the six highest-confidence matches.
        """
        scored: list[ImpactItem] = []
        for item in items:
            confidence = 0.0
            reasons: list[str] = []
            item_path = item.path.replace("\\", "/").lower()
            item_tokens = _tokenize(f"{item.name} {item.path} {item.summary}")

            for file in files:
                file_lower = file.lower()
                top = file_lower.split("/")[0]
                if item_path == file_lower or item_path.startswith(file_lower) or file_lower.startswith(item_path):
                    confidence = max(confidence, 0.95)
                    reasons.append("direct file/path overlap")
                # Bug fix: the original also tested `top == item_path`, which
                # exactly duplicated `item_path == top` and was dead code.
                elif item_path == top or item_path.startswith(f"{top}/"):
                    confidence = max(confidence, 0.82)
                    reasons.append("same top-level module as changed file")

            overlap = description_tokens & item_tokens
            if overlap:
                # Keyword-only evidence is capped below path-based evidence.
                confidence = max(confidence, min(0.75, 0.45 + 0.08 * len(overlap)))
                reasons.append(f"keyword overlap: {', '.join(sorted(overlap)[:4])}")

            if confidence > 0:
                scored.append(
                    ImpactItem(
                        name=item.name,
                        path=item.path,
                        # dict.fromkeys dedupes while preserving insertion order.
                        reason="; ".join(dict.fromkeys(reasons)),
                        category=category,
                        confidence=round(confidence, 2),
                    )
                )

        scored.sort(key=lambda entry: entry.confidence, reverse=True)
        return scored[:6]

    @staticmethod
    def _risk_level(
        modules: list[ImpactItem],
        entry_points: list[ImpactItem],
        surfaces: list[ImpactItem],
    ) -> str:
        """Map weighted counts of high-confidence hits to low/medium/high.

        Integration surfaces weigh most (x3) since breaking them affects
        external callers; modules and entry points weigh x2.
        """
        score = 0
        score += sum(2 for item in modules if item.confidence >= 0.8)
        score += sum(2 for item in entry_points if item.confidence >= 0.7)
        score += sum(3 for item in surfaces if item.confidence >= 0.7)
        if score >= 8:
            return "high"
        if score >= 4:
            return "medium"
        return "low"

    @staticmethod
    def _summary(
        description: str,
        files: list[str],
        risk_level: str,
        modules: list[ImpactItem],
        entry_points: list[ImpactItem],
        surfaces: list[ImpactItem],
    ) -> str:
        """Compose a one-paragraph natural-language summary of the analysis."""
        # Fall back to the first changed files, then to a generic subject.
        subject = description or ("、".join(files[:3]) if files else "this change")
        return (
            f"The requested change `{subject}` is assessed as `{risk_level}` risk. "
            f"The strongest signals point to {len(modules)} affected modules, {len(entry_points)} entry points, "
            f"and {len(surfaces)} integration surfaces that should be reviewed before implementation."
        )

    @staticmethod
    def _regression_focus(modules: list[ImpactItem], surfaces: list[ImpactItem], files: list[str]) -> list[str]:
        """Infer regression-test focus areas from path keywords.

        Scans all matched paths plus the changed file list for domain
        keywords (auth, api, ui, db, ...) and emits one focus line per
        matched domain; falls back to a generic smoke-test suggestion.
        """
        focus: list[str] = []
        joined_paths = " ".join([item.path.lower() for item in modules + surfaces] + [file.lower() for file in files])
        if any(token in joined_paths for token in ["auth", "login", "session", "permission"]):
            focus.append("Authentication, session, and permission regression checks")
        if any(token in joined_paths for token in ["api", "controller", "route", "router"]):
            focus.append("API contract and route-level regression checks")
        if any(token in joined_paths for token in ["component", "ui", "page", "screen", "view"]):
            focus.append("Critical UI paths, navigation, and state transition checks")
        if any(token in joined_paths for token in ["db", "database", "repository", "model", "entity"]):
            focus.append("Data model, persistence, and migration regression checks")
        if not focus:
            focus.append("Smoke test the primary user flow and the modules most likely to be touched")
        return focus

    @staticmethod
    def _recommended_steps(
        risk_level: str,
        modules: list[ImpactItem],
        surfaces: list[ImpactItem],
        files: list[str],
    ) -> list[str]:
        """Derive an ordered checklist of next steps for the change.

        Always starts with repo-map review and scope limiting; adds
        risk-, surface-, and file-specific steps, and always ends with a
        re-validation step.
        """
        steps = [
            "Read the repo map first to confirm the likely entry points and module boundaries.",
            "Limit edits to the highest-confidence modules before expanding the scope.",
        ]
        if risk_level == "high":
            steps.append("Freeze the affected surface in PRD / Architecture / UIUX or patch docs before coding.")
        if surfaces:
            steps.append("Re-test the affected integration surfaces before declaring the change complete.")
        if files:
            steps.append("Use the changed file list as the minimum review set, then inspect adjacent modules only if impact expands.")
        steps.append("Rerun bugfix/runtime/quality validation after implementation.")
        return steps
|