spec-agent 1.0.3 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +42 -2
- package/USAGE_FROM_NPM.md +280 -0
- package/dist/commands/analyze.d.ts.map +1 -1
- package/dist/commands/analyze.js +14 -1
- package/dist/commands/analyze.js.map +1 -1
- package/dist/commands/execute.d.ts +15 -0
- package/dist/commands/execute.d.ts.map +1 -0
- package/dist/commands/execute.js +298 -0
- package/dist/commands/execute.js.map +1 -0
- package/dist/commands/handoff.d.ts +11 -0
- package/dist/commands/handoff.d.ts.map +1 -0
- package/dist/commands/handoff.js +320 -0
- package/dist/commands/handoff.js.map +1 -0
- package/dist/commands/learn.d.ts +7 -0
- package/dist/commands/learn.d.ts.map +1 -1
- package/dist/commands/learn.js +31 -4
- package/dist/commands/learn.js.map +1 -1
- package/dist/commands/pipeline.d.ts +1 -0
- package/dist/commands/pipeline.d.ts.map +1 -1
- package/dist/commands/pipeline.js +5 -0
- package/dist/commands/pipeline.js.map +1 -1
- package/dist/commands/scan.js +17 -15
- package/dist/commands/scan.js.map +1 -1
- package/dist/index.js +73 -2
- package/dist/index.js.map +1 -1
- package/dist/services/llm.d.ts +2 -2
- package/dist/services/llm.d.ts.map +1 -1
- package/dist/services/llm.js +8 -2
- package/dist/services/llm.js.map +1 -1
- package/package.json +4 -3
- package/spec-agent-implementation.md +46 -1
- package/src/commands/analyze.ts +14 -1
- package/src/commands/execute.ts +358 -0
- package/src/commands/handoff.ts +351 -0
- package/src/commands/learn.ts +41 -6
- package/src/commands/pipeline.ts +8 -0
- package/src/commands/scan.ts +18 -18
- package/src/index.ts +41 -2
- package/src/services/llm.ts +10 -2
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
import * as path from 'path';
|
|
2
|
+
import * as fs from 'fs-extra';
|
|
3
|
+
import { Command } from 'commander';
|
|
4
|
+
import { Logger } from '../utils/logger';
|
|
5
|
+
import { ensureDir, fileExists, readJson, writeJson } from '../utils/file';
|
|
6
|
+
import { DispatchPlan, SpecSummary, Task, TaskPlan } from '../types';
|
|
7
|
+
|
|
8
|
+
// CLI options for the `spec-agent handoff` command.
interface HandoffOptions {
  workspace: string; // workspace directory containing dispatch_plan.json / task_plan.json
  output: string; // output directory; absolute, or resolved relative to the workspace
  target: string; // target coding tool: cursor | qcoder | codebuddy | generic
  includeSummaries?: boolean; // when set, prompts list summaries/ as a required input
  dryRun?: boolean; // preview counts and paths without writing any files
}
|
|
15
|
+
|
|
16
|
+
// A task enriched with its dispatch assignment, as emitted into the handoff
// bundle JSON and the per-task/per-agent prompt files.
interface HandoffTask {
  id: string; // task id from task_plan.json
  name: string; // human-readable task name
  type: Task['type']; // task category (e.g. feature, component, page, api)
  priority: Task['priority'];
  dependencies: string[]; // ids of prerequisite tasks (empty when none)
  assignedAgent: string; // agent id from the dispatch plan
  assignedType: string; // agent-pool key the agent belongs to
  estimatedHours?: number;
  sourceChunks?: number[]; // spec-summary chunk indices backing this task, when traceable
}
|
|
27
|
+
|
|
28
|
+
export async function handoffCommand(options: HandoffOptions, command: Command): Promise<void> {
|
|
29
|
+
const logger = new Logger();
|
|
30
|
+
|
|
31
|
+
try {
|
|
32
|
+
const workspacePath = path.resolve(options.workspace);
|
|
33
|
+
const dispatchPath = path.join(workspacePath, 'dispatch_plan.json');
|
|
34
|
+
const taskPlanPath = path.join(workspacePath, 'task_plan.json');
|
|
35
|
+
const specPath = path.join(workspacePath, 'spec_summary.json');
|
|
36
|
+
|
|
37
|
+
if (!(await fileExists(dispatchPath))) {
|
|
38
|
+
logger.error(`dispatch_plan.json not found in workspace: ${workspacePath}`);
|
|
39
|
+
logger.info('Run spec-agent dispatch first.');
|
|
40
|
+
process.exit(1);
|
|
41
|
+
}
|
|
42
|
+
if (!(await fileExists(taskPlanPath))) {
|
|
43
|
+
logger.error(`task_plan.json not found in workspace: ${workspacePath}`);
|
|
44
|
+
logger.info('Run spec-agent plan first.');
|
|
45
|
+
process.exit(1);
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
const outputPath = path.isAbsolute(options.output)
|
|
49
|
+
? options.output
|
|
50
|
+
: path.join(workspacePath, options.output || 'handoff');
|
|
51
|
+
|
|
52
|
+
const dispatch = await readJson<DispatchPlan>(dispatchPath);
|
|
53
|
+
const taskPlan = await readJson<TaskPlan>(taskPlanPath);
|
|
54
|
+
const spec = (await fileExists(specPath)) ? await readJson<SpecSummary>(specPath) : null;
|
|
55
|
+
|
|
56
|
+
const taskById = new Map<string, Task>();
|
|
57
|
+
for (const group of taskPlan.parallelGroups) {
|
|
58
|
+
for (const task of group.tasks) {
|
|
59
|
+
taskById.set(task.id, task);
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
const sourceChunkMap = buildSourceChunkMap(spec);
|
|
64
|
+
const handoffTasks: HandoffTask[] = [];
|
|
65
|
+
const agentTaskMap = new Map<string, HandoffTask[]>();
|
|
66
|
+
|
|
67
|
+
for (const [agentType, agents] of Object.entries(dispatch.agentPools)) {
|
|
68
|
+
for (const agent of agents) {
|
|
69
|
+
const rows: HandoffTask[] = [];
|
|
70
|
+
for (const taskId of agent.assignedTasks) {
|
|
71
|
+
const task = taskById.get(taskId);
|
|
72
|
+
if (!task) continue;
|
|
73
|
+
const mapped: HandoffTask = {
|
|
74
|
+
id: task.id,
|
|
75
|
+
name: task.name,
|
|
76
|
+
type: task.type,
|
|
77
|
+
priority: task.priority,
|
|
78
|
+
dependencies: task.dependencies || [],
|
|
79
|
+
assignedAgent: agent.agentId,
|
|
80
|
+
assignedType: agentType,
|
|
81
|
+
estimatedHours: task.estimatedHours,
|
|
82
|
+
sourceChunks: guessSourceChunks(task, sourceChunkMap),
|
|
83
|
+
};
|
|
84
|
+
rows.push(mapped);
|
|
85
|
+
handoffTasks.push(mapped);
|
|
86
|
+
}
|
|
87
|
+
agentTaskMap.set(agent.agentId, rows);
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
if (options.dryRun) {
|
|
92
|
+
logger.info('Dry run mode - handoff plan:');
|
|
93
|
+
logger.info(` workspace: ${workspacePath}`);
|
|
94
|
+
logger.info(` target: ${normalizeTarget(options.target)}`);
|
|
95
|
+
logger.info(` agents: ${agentTaskMap.size}`);
|
|
96
|
+
logger.info(` tasks: ${handoffTasks.length}`);
|
|
97
|
+
logger.info(` output: ${outputPath}`);
|
|
98
|
+
return;
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
await ensureDir(outputPath);
|
|
102
|
+
const tasksDir = path.join(outputPath, 'tasks');
|
|
103
|
+
const agentsDir = path.join(outputPath, 'agents');
|
|
104
|
+
await ensureDir(tasksDir);
|
|
105
|
+
await ensureDir(agentsDir);
|
|
106
|
+
|
|
107
|
+
// Per-task handoff prompts.
|
|
108
|
+
for (const row of handoffTasks) {
|
|
109
|
+
const markdown = renderTaskPrompt({
|
|
110
|
+
row,
|
|
111
|
+
workspacePath,
|
|
112
|
+
target: normalizeTarget(options.target),
|
|
113
|
+
includeSummaries: Boolean(options.includeSummaries),
|
|
114
|
+
});
|
|
115
|
+
await fs.writeFile(path.join(tasksDir, `${row.id}.md`), markdown, 'utf-8');
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
// Per-agent task list.
|
|
119
|
+
for (const [agentId, rows] of agentTaskMap) {
|
|
120
|
+
const markdown = renderAgentPrompt({
|
|
121
|
+
agentId,
|
|
122
|
+
rows,
|
|
123
|
+
target: normalizeTarget(options.target),
|
|
124
|
+
});
|
|
125
|
+
await fs.writeFile(path.join(agentsDir, `${agentId}.md`), markdown, 'utf-8');
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
const bundlePath = path.join(outputPath, 'handoff_bundle.json');
|
|
129
|
+
await writeJson(bundlePath, {
|
|
130
|
+
version: '2.0.0-beta',
|
|
131
|
+
createdAt: new Date().toISOString(),
|
|
132
|
+
workspace: workspacePath,
|
|
133
|
+
target: normalizeTarget(options.target),
|
|
134
|
+
artifacts: {
|
|
135
|
+
specSummary: spec ? 'spec_summary.json' : null,
|
|
136
|
+
taskPlan: 'task_plan.json',
|
|
137
|
+
dispatchPlan: 'dispatch_plan.json',
|
|
138
|
+
summariesDir: options.includeSummaries ? 'summaries/' : null,
|
|
139
|
+
},
|
|
140
|
+
totals: {
|
|
141
|
+
agents: agentTaskMap.size,
|
|
142
|
+
tasks: handoffTasks.length,
|
|
143
|
+
},
|
|
144
|
+
tasks: handoffTasks,
|
|
145
|
+
unassigned: dispatch.unassigned,
|
|
146
|
+
});
|
|
147
|
+
|
|
148
|
+
const indexPath = path.join(outputPath, 'index.md');
|
|
149
|
+
await fs.writeFile(
|
|
150
|
+
indexPath,
|
|
151
|
+
renderIndexMarkdown({
|
|
152
|
+
target: normalizeTarget(options.target),
|
|
153
|
+
totalAgents: agentTaskMap.size,
|
|
154
|
+
totalTasks: handoffTasks.length,
|
|
155
|
+
includeSummaries: Boolean(options.includeSummaries),
|
|
156
|
+
}),
|
|
157
|
+
'utf-8'
|
|
158
|
+
);
|
|
159
|
+
|
|
160
|
+
logger.success('Handoff bundle generated');
|
|
161
|
+
logger.json({
|
|
162
|
+
status: 'success',
|
|
163
|
+
workspace: workspacePath,
|
|
164
|
+
target: normalizeTarget(options.target),
|
|
165
|
+
totalAgents: agentTaskMap.size,
|
|
166
|
+
totalTasks: handoffTasks.length,
|
|
167
|
+
outputPath,
|
|
168
|
+
bundlePath,
|
|
169
|
+
indexPath,
|
|
170
|
+
});
|
|
171
|
+
} catch (error) {
|
|
172
|
+
logger.error(`Handoff failed: ${error instanceof Error ? error.message : String(error)}`);
|
|
173
|
+
process.exit(1);
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
function normalizeTarget(target: string | undefined): string {
|
|
178
|
+
const value = (target || 'generic').toLowerCase();
|
|
179
|
+
if (['cursor', 'qcoder', 'codebuddy', 'generic'].includes(value)) {
|
|
180
|
+
return value;
|
|
181
|
+
}
|
|
182
|
+
return 'generic';
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
function buildSourceChunkMap(spec: SpecSummary | null): {
|
|
186
|
+
features: Map<string, number[]>;
|
|
187
|
+
pages: Map<string, number[]>;
|
|
188
|
+
apis: Map<string, number[]>;
|
|
189
|
+
} {
|
|
190
|
+
const features = new Map<string, number[]>();
|
|
191
|
+
const pages = new Map<string, number[]>();
|
|
192
|
+
const apis = new Map<string, number[]>();
|
|
193
|
+
if (!spec) return { features, pages, apis };
|
|
194
|
+
|
|
195
|
+
for (const item of spec.features || []) {
|
|
196
|
+
const key = normalizeName(item.name);
|
|
197
|
+
features.set(key, pushUnique(features.get(key), item.sourceChunk));
|
|
198
|
+
}
|
|
199
|
+
for (const item of spec.pages || []) {
|
|
200
|
+
const key = normalizeName(item.name);
|
|
201
|
+
pages.set(key, pushUnique(pages.get(key), item.sourceChunk));
|
|
202
|
+
}
|
|
203
|
+
for (const item of spec.apis || []) {
|
|
204
|
+
const key = normalizeName(`${item.method} ${item.path}`);
|
|
205
|
+
apis.set(key, pushUnique(apis.get(key), item.sourceChunk));
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
return { features, pages, apis };
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
function guessSourceChunks(
|
|
212
|
+
task: Task,
|
|
213
|
+
sourceMap: { features: Map<string, number[]>; pages: Map<string, number[]>; apis: Map<string, number[]> }
|
|
214
|
+
): number[] | undefined {
|
|
215
|
+
const key = normalizeName(task.name);
|
|
216
|
+
if (task.type === 'feature' || task.type === 'component') {
|
|
217
|
+
return sourceMap.features.get(key);
|
|
218
|
+
}
|
|
219
|
+
if (task.type === 'page') {
|
|
220
|
+
return sourceMap.pages.get(key);
|
|
221
|
+
}
|
|
222
|
+
if (task.type === 'api') {
|
|
223
|
+
return sourceMap.apis.get(key);
|
|
224
|
+
}
|
|
225
|
+
return undefined;
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
function renderTaskPrompt(input: {
|
|
229
|
+
row: HandoffTask;
|
|
230
|
+
workspacePath: string;
|
|
231
|
+
target: string;
|
|
232
|
+
includeSummaries: boolean;
|
|
233
|
+
}): string {
|
|
234
|
+
const { row, workspacePath, target, includeSummaries } = input;
|
|
235
|
+
const sourceChunkText = row.sourceChunks && row.sourceChunks.length > 0
|
|
236
|
+
? row.sourceChunks.map(i => `chunk_${i}_summary.json`).join(', ')
|
|
237
|
+
: '未定位到明确 source chunk,请从 spec_summary.json 补充判断';
|
|
238
|
+
|
|
239
|
+
return [
|
|
240
|
+
`# Task Handoff - ${row.id} ${row.name}`,
|
|
241
|
+
'',
|
|
242
|
+
`- Target Tool: ${target}`,
|
|
243
|
+
`- Assigned Agent: ${row.assignedAgent} (${row.assignedType})`,
|
|
244
|
+
`- Priority: ${row.priority}`,
|
|
245
|
+
`- Type: ${row.type}`,
|
|
246
|
+
`- Estimated Hours: ${row.estimatedHours ?? 'N/A'}`,
|
|
247
|
+
'',
|
|
248
|
+
'## Objective',
|
|
249
|
+
'',
|
|
250
|
+
`实现任务 **${row.id} ${row.name}**,仅处理当前任务与已满足依赖,不越界扩展。`,
|
|
251
|
+
'',
|
|
252
|
+
'## Dependencies',
|
|
253
|
+
'',
|
|
254
|
+
row.dependencies.length > 0 ? row.dependencies.map(dep => `- ${dep}`).join('\n') : '- 无',
|
|
255
|
+
'',
|
|
256
|
+
'## Required Inputs',
|
|
257
|
+
'',
|
|
258
|
+
'- `spec_summary.json`',
|
|
259
|
+
'- `task_plan.json`',
|
|
260
|
+
'- `dispatch_plan.json`',
|
|
261
|
+
includeSummaries ? '- `summaries/`' : '- (可选)`summaries/`',
|
|
262
|
+
'',
|
|
263
|
+
'## Suggested Evidence',
|
|
264
|
+
'',
|
|
265
|
+
`- ${sourceChunkText}`,
|
|
266
|
+
'',
|
|
267
|
+
'## Execution Constraints',
|
|
268
|
+
'',
|
|
269
|
+
'- 只改与该任务直接相关文件;避免大范围重构',
|
|
270
|
+
'- 明确列出修改文件与验证步骤',
|
|
271
|
+
'- 信息不足时先输出缺失项,再请求补充,不要臆造接口',
|
|
272
|
+
'',
|
|
273
|
+
'## Deliverables',
|
|
274
|
+
'',
|
|
275
|
+
'- 变更文件列表',
|
|
276
|
+
'- 关键实现说明(why + what)',
|
|
277
|
+
'- 可复现实测步骤(命令)',
|
|
278
|
+
'',
|
|
279
|
+
'## Workspace',
|
|
280
|
+
'',
|
|
281
|
+
`- Root: ${workspacePath}`,
|
|
282
|
+
].join('\n');
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
function renderAgentPrompt(input: { agentId: string; rows: HandoffTask[]; target: string }): string {
|
|
286
|
+
const { agentId, rows, target } = input;
|
|
287
|
+
const lines = rows.map(
|
|
288
|
+
row => `- ${row.id} | ${row.priority} | ${row.type} | deps: ${row.dependencies.join(', ') || '-'} | ${row.name}`
|
|
289
|
+
);
|
|
290
|
+
return [
|
|
291
|
+
`# Agent Handoff - ${agentId}`,
|
|
292
|
+
'',
|
|
293
|
+
`- Target Tool: ${target}`,
|
|
294
|
+
`- Total Tasks: ${rows.length}`,
|
|
295
|
+
'',
|
|
296
|
+
'## Task Queue',
|
|
297
|
+
'',
|
|
298
|
+
...(lines.length > 0 ? lines : ['- 无任务']),
|
|
299
|
+
'',
|
|
300
|
+
'## Recommended Order',
|
|
301
|
+
'',
|
|
302
|
+
'1. 先完成依赖少/无依赖任务',
|
|
303
|
+
'2. 优先处理 P0 -> P1 -> P2 -> P3',
|
|
304
|
+
'3. 每完成一项,回填任务状态与变更摘要',
|
|
305
|
+
].join('\n');
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
function renderIndexMarkdown(input: {
|
|
309
|
+
target: string;
|
|
310
|
+
totalAgents: number;
|
|
311
|
+
totalTasks: number;
|
|
312
|
+
includeSummaries: boolean;
|
|
313
|
+
}): string {
|
|
314
|
+
const { target, totalAgents, totalTasks, includeSummaries } = input;
|
|
315
|
+
return [
|
|
316
|
+
'# Handoff Bundle Index',
|
|
317
|
+
'',
|
|
318
|
+
`- Target: ${target}`,
|
|
319
|
+
`- Total Agents: ${totalAgents}`,
|
|
320
|
+
`- Total Tasks: ${totalTasks}`,
|
|
321
|
+
`- Include Summaries: ${includeSummaries ? 'yes' : 'no'}`,
|
|
322
|
+
'',
|
|
323
|
+
'## Files',
|
|
324
|
+
'',
|
|
325
|
+
'- `handoff_bundle.json`: 机器可读任务总览',
|
|
326
|
+
'- `agents/*.md`: 每个 agent 的任务队列',
|
|
327
|
+
'- `tasks/*.md`: 每个任务可直接投喂编码 Agent 的提示词模板',
|
|
328
|
+
'',
|
|
329
|
+
'## Quick Start',
|
|
330
|
+
'',
|
|
331
|
+
'1. 先打开 `agents/<AGENT_ID>.md` 确认任务顺序',
|
|
332
|
+
'2. 按顺序将 `tasks/<TASK_ID>.md` 投喂到目标工具执行',
|
|
333
|
+
'3. 每完成一个任务后人工回填状态到你的任务系统',
|
|
334
|
+
].join('\n');
|
|
335
|
+
}
|
|
336
|
+
|
|
337
|
+
function normalizeName(input: string): string {
|
|
338
|
+
return (input || '')
|
|
339
|
+
.toLowerCase()
|
|
340
|
+
.replace(/[^\u4e00-\u9fa5a-z0-9\s/:_-]/gi, ' ')
|
|
341
|
+
.replace(/\s+/g, ' ')
|
|
342
|
+
.trim();
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
function pushUnique(existing: number[] | undefined, value: number): number[] {
|
|
346
|
+
const base = existing ? existing.slice() : [];
|
|
347
|
+
if (!base.includes(value)) {
|
|
348
|
+
base.push(value);
|
|
349
|
+
}
|
|
350
|
+
return base;
|
|
351
|
+
}
|
package/src/commands/learn.ts
CHANGED
|
@@ -21,6 +21,11 @@ interface LearnOptions {
|
|
|
21
21
|
}
|
|
22
22
|
|
|
23
23
|
const PATTERNS_FILE = '.patterns.json';
|
|
24
|
+
type LearnPhase = 'summaries' | 'plan' | 'dispatch';
|
|
25
|
+
|
|
26
|
+
function isLearnPhase(phase?: string): phase is LearnPhase {
|
|
27
|
+
return phase === 'summaries' || phase === 'plan' || phase === 'dispatch';
|
|
28
|
+
}
|
|
24
29
|
|
|
25
30
|
export async function learnCommand(options: LearnOptions, command: Command): Promise<void> {
|
|
26
31
|
const logger = new Logger();
|
|
@@ -86,14 +91,17 @@ export async function learnCommand(options: LearnOptions, command: Command): Pro
|
|
|
86
91
|
|
|
87
92
|
// Handle automatic learning from phase results
|
|
88
93
|
if (options.from) {
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
+
if (!isLearnPhase(options.from)) {
|
|
95
|
+
logger.error(`Unknown phase: ${options.from}`);
|
|
96
|
+
return;
|
|
97
|
+
}
|
|
98
|
+
const result = await learnFromWorkspacePhase(workspacePath, options.from, logger);
|
|
99
|
+
if (result.learnedCount > 0) {
|
|
100
|
+
logger.success(`Learned ${result.learnedCount} new patterns from ${options.from}`);
|
|
94
101
|
} else {
|
|
95
102
|
logger.info('No new patterns learned.');
|
|
96
103
|
}
|
|
104
|
+
logger.info(`Patterns file: ${result.patternsPath}`);
|
|
97
105
|
return;
|
|
98
106
|
}
|
|
99
107
|
|
|
@@ -126,9 +134,36 @@ export async function learnCommand(options: LearnOptions, command: Command): Pro
|
|
|
126
134
|
}
|
|
127
135
|
}
|
|
128
136
|
|
|
137
|
+
/**
 * Load (or initialize) the workspace's pattern store, run pattern learning
 * against the artifacts of the given pipeline phase, and persist the store
 * back to disk.
 *
 * @param workspacePath - Workspace root containing the patterns file.
 * @param phase - Which phase's artifacts to learn from.
 * @param logger - Logger forwarded to the underlying learn step.
 * @returns Count of newly learned patterns, the resulting total, and the
 *          path of the persisted patterns file.
 */
export async function learnFromWorkspacePhase(
  workspacePath: string,
  phase: LearnPhase,
  logger: Logger
): Promise<{ learnedCount: number; totalPatterns: number; patternsPath: string }> {
  const patternsPath = path.join(workspacePath, PATTERNS_FILE);
  // Default empty store; replaced below when a patterns file already exists.
  let patterns: Patterns = {
    version: '1.0.0',
    workspace: workspacePath,
    patterns: [],
  };

  if (await fileExists(patternsPath)) {
    patterns = await readJson<Patterns>(patternsPath);
  }

  // learnFromPhase mutates `patterns` in place and returns how many new
  // patterns it added.
  const learnedCount = await learnFromPhase(workspacePath, phase, patterns, logger);
  // Always persist pattern file after learn attempt to make workspace state explicit.
  await writeJson(patternsPath, patterns);

  return {
    learnedCount,
    totalPatterns: patterns.patterns.length,
    patternsPath,
  };
}
|
|
163
|
+
|
|
129
164
|
async function learnFromPhase(
|
|
130
165
|
workspacePath: string,
|
|
131
|
-
phase:
|
|
166
|
+
phase: LearnPhase,
|
|
132
167
|
patterns: Patterns,
|
|
133
168
|
logger: Logger
|
|
134
169
|
): Promise<number> {
|
package/src/commands/pipeline.ts
CHANGED
|
@@ -12,6 +12,7 @@ import { analyzeCommand } from './analyze';
|
|
|
12
12
|
import { mergeCommand } from './merge';
|
|
13
13
|
import { planCommand } from './plan';
|
|
14
14
|
import { dispatchCommand } from './dispatch';
|
|
15
|
+
import { learnFromWorkspacePhase } from './learn';
|
|
15
16
|
|
|
16
17
|
interface PipelineOptions {
|
|
17
18
|
input?: string;
|
|
@@ -23,6 +24,7 @@ interface PipelineOptions {
|
|
|
23
24
|
analyzeBudgetTokens: string;
|
|
24
25
|
framework: string;
|
|
25
26
|
strictLlm?: boolean;
|
|
27
|
+
learn?: boolean;
|
|
26
28
|
stopAt?: string;
|
|
27
29
|
from?: string;
|
|
28
30
|
dryRun?: boolean;
|
|
@@ -206,6 +208,12 @@ async function executePhase(
|
|
|
206
208
|
dryRun: false,
|
|
207
209
|
yes: options.yes,
|
|
208
210
|
}, {} as Command);
|
|
211
|
+
if (options.learn !== false) {
|
|
212
|
+
const learnResult = await learnFromWorkspacePhase(workspacePath, 'summaries', logger);
|
|
213
|
+
logger.info(
|
|
214
|
+
` Learn complete: +${learnResult.learnedCount}, total ${learnResult.totalPatterns} (${learnResult.patternsPath})`
|
|
215
|
+
);
|
|
216
|
+
}
|
|
209
217
|
break;
|
|
210
218
|
|
|
211
219
|
case 'merge':
|
package/src/commands/scan.ts
CHANGED
|
@@ -79,9 +79,7 @@ export async function scanCommand(options: ScanOptions, command: Command): Promi
|
|
|
79
79
|
process.exit(SCAN_EXIT_CODE.INPUT_ERROR);
|
|
80
80
|
}
|
|
81
81
|
|
|
82
|
-
const stats = await
|
|
83
|
-
require('fs').promises.stat(inputPath)
|
|
84
|
-
).catch(() => null);
|
|
82
|
+
const stats = await require('fs').promises.stat(inputPath).catch(() => null);
|
|
85
83
|
|
|
86
84
|
if (!stats) {
|
|
87
85
|
logger.error(`[E_SCAN_INPUT] Cannot access path: ${options.input}`);
|
|
@@ -217,6 +215,23 @@ export async function scanCommand(options: ScanOptions, command: Command): Promi
|
|
|
217
215
|
logger.info(`图片语义统计: 检测 ${imageAssetsDetected} 张, 已摘要 ${imageAssetsDescribed} 张`);
|
|
218
216
|
}
|
|
219
217
|
|
|
218
|
+
// Preview mode
|
|
219
|
+
if (options.dryRun) {
|
|
220
|
+
logger.info('Dry run mode - manifest preview:');
|
|
221
|
+
for (let i = 0; i < rawChunks.slice(0, 20).length; i++) {
|
|
222
|
+
const rawChunk = rawChunks[i];
|
|
223
|
+
const size = Buffer.byteLength(rawChunk.content, 'utf-8');
|
|
224
|
+
const preview = rawChunk.sourceFiles.length === 1
|
|
225
|
+
? `${path.basename(rawChunk.sourceFiles[0])} (${formatSize(size)})`
|
|
226
|
+
: `${rawChunk.sourceFiles.length} files, ${formatSize(size)}`;
|
|
227
|
+
logger.info(` Chunk ${i}: ${preview}`);
|
|
228
|
+
}
|
|
229
|
+
if (rawChunks.length > 20) {
|
|
230
|
+
logger.info(` ... and ${rawChunks.length - 20} more chunks`);
|
|
231
|
+
}
|
|
232
|
+
return;
|
|
233
|
+
}
|
|
234
|
+
|
|
220
235
|
// Prepare chunks directory
|
|
221
236
|
const outputDir = path.dirname(path.resolve(options.output));
|
|
222
237
|
const chunksDir = path.join(outputDir, 'chunks');
|
|
@@ -239,21 +254,6 @@ export async function scanCommand(options: ScanOptions, command: Command): Promi
|
|
|
239
254
|
});
|
|
240
255
|
}
|
|
241
256
|
|
|
242
|
-
// Preview mode
|
|
243
|
-
if (options.dryRun) {
|
|
244
|
-
logger.info('Dry run mode - manifest preview:');
|
|
245
|
-
for (const chunk of chunks.slice(0, 20)) {
|
|
246
|
-
const preview = chunk.content
|
|
247
|
-
? `${path.basename(chunk.sourceFiles[0])} (${formatSize(chunk.size)})`
|
|
248
|
-
: `${chunk.sourceFiles.length} files, ${formatSize(chunk.size)}`;
|
|
249
|
-
logger.info(` Chunk ${chunk.id}: ${preview}`);
|
|
250
|
-
}
|
|
251
|
-
if (chunks.length > 20) {
|
|
252
|
-
logger.info(` ... and ${chunks.length - 20} more chunks`);
|
|
253
|
-
}
|
|
254
|
-
return;
|
|
255
|
-
}
|
|
256
|
-
|
|
257
257
|
// Create manifest
|
|
258
258
|
const manifest: Manifest = {
|
|
259
259
|
version: '1.0.0',
|
package/src/index.ts
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import { Command } from 'commander';
|
|
2
2
|
import chalk from 'chalk';
|
|
3
|
+
import * as fs from 'fs';
|
|
4
|
+
import * as path from 'path';
|
|
3
5
|
import { scanCommand } from './commands/scan';
|
|
4
6
|
import { analyzeCommand } from './commands/analyze';
|
|
5
7
|
import { mergeCommand } from './commands/merge';
|
|
@@ -10,13 +12,25 @@ import { pipelineCommand } from './commands/pipeline';
|
|
|
10
12
|
import { statusCommand } from './commands/status';
|
|
11
13
|
import { cleanCommand } from './commands/clean';
|
|
12
14
|
import { doctorCommand } from './commands/doctor';
|
|
15
|
+
import { handoffCommand } from './commands/handoff';
|
|
16
|
+
import { executeCommand } from './commands/execute';
|
|
13
17
|
|
|
14
18
|
const program = new Command();
|
|
19
|
+
const pkgVersion = (() => {
|
|
20
|
+
try {
|
|
21
|
+
const packageJsonPath = path.resolve(__dirname, '..', 'package.json');
|
|
22
|
+
const raw = fs.readFileSync(packageJsonPath, 'utf-8');
|
|
23
|
+
const parsed = JSON.parse(raw) as { version?: string };
|
|
24
|
+
return parsed.version || '1.0.0';
|
|
25
|
+
} catch {
|
|
26
|
+
return '1.0.0';
|
|
27
|
+
}
|
|
28
|
+
})();
|
|
15
29
|
|
|
16
30
|
program
|
|
17
31
|
.name('spec-agent')
|
|
18
|
-
.description('
|
|
19
|
-
.version(
|
|
32
|
+
.description('CLI for requirement decomposition and agent-ready planning')
|
|
33
|
+
.version(pkgVersion);
|
|
20
34
|
|
|
21
35
|
program
|
|
22
36
|
.command('scan')
|
|
@@ -105,6 +119,7 @@ program
|
|
|
105
119
|
.option('--analyze-budget-tokens <count>', 'Max total tokens for analyze in pipeline (0 = unlimited)', '0')
|
|
106
120
|
.option('--strict-llm', 'Fail if LLM chunking fails (no fallback)')
|
|
107
121
|
.option('--framework <fw>', 'Target framework', 'vue3')
|
|
122
|
+
.option('--no-learn', 'Disable automatic learning after analyze phase')
|
|
108
123
|
.option('--stop-at <phase>', 'Stop after phase: scan, analyze, merge, plan, dispatch')
|
|
109
124
|
.option('--from <phase>', 'Resume from phase: scan, analyze, merge, plan, dispatch')
|
|
110
125
|
.option('--dry-run', 'Preview full pipeline without executing')
|
|
@@ -134,4 +149,28 @@ program
|
|
|
134
149
|
.option('--format <format>', 'Output format: text, json', 'text')
|
|
135
150
|
.action(doctorCommand);
|
|
136
151
|
|
|
152
|
+
program
|
|
153
|
+
.command('handoff')
|
|
154
|
+
.description('Generate agent handoff bundles for coding tools')
|
|
155
|
+
.option('-w, --workspace <dir>', 'Workspace directory', '.')
|
|
156
|
+
.option('-o, --output <dir>', 'Output directory for handoff bundle', 'handoff')
|
|
157
|
+
.option('-t, --target <name>', 'Target tool: cursor, qcoder, codebuddy, generic', 'generic')
|
|
158
|
+
.option('--include-summaries', 'Include summary directory as required evidence')
|
|
159
|
+
.option('--dry-run', 'Preview handoff generation plan')
|
|
160
|
+
.action(handoffCommand);
|
|
161
|
+
|
|
162
|
+
program
|
|
163
|
+
.command('execute')
|
|
164
|
+
.description('Run v2 beta execution state machine from handoff bundle')
|
|
165
|
+
.option('-w, --workspace <dir>', 'Workspace directory', '.')
|
|
166
|
+
.option('-b, --bundle <path>', 'Path to handoff_bundle.json (optional)')
|
|
167
|
+
.option('-p, --max-parallel <count>', 'Max parallel running tasks', '4')
|
|
168
|
+
.option('-r, --retry <count>', 'Retry times per task after failure', '1')
|
|
169
|
+
.option('--complete <ids>', 'Mark task IDs as completed, comma-separated')
|
|
170
|
+
.option('--fail <ids>', 'Mark task IDs as failed, comma-separated')
|
|
171
|
+
.option('--error <message>', 'Failure reason when using --fail')
|
|
172
|
+
.option('--reset', 'Reset existing run state and reinitialize from bundle')
|
|
173
|
+
.option('--dry-run', 'Preview execution transitions without writing state')
|
|
174
|
+
.action(executeCommand);
|
|
175
|
+
|
|
137
176
|
program.parse();
|
package/src/services/llm.ts
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { Logger } from '../utils/logger';
|
|
2
|
-
import { ChunkSummary, Feature, DataModel, Page, Api } from '../types';
|
|
2
|
+
import { ChunkSummary, Feature, DataModel, Page, Api, LearnedPattern } from '../types';
|
|
3
3
|
|
|
4
4
|
export interface LLMConfig {
|
|
5
5
|
apiKey: string;
|
|
@@ -490,13 +490,21 @@ export async function analyzeChunkWithLLM(
|
|
|
490
490
|
chunkId: number,
|
|
491
491
|
focus: string,
|
|
492
492
|
config: LLMConfig,
|
|
493
|
+
learnedPatterns: LearnedPattern[] = [],
|
|
493
494
|
logger?: Logger
|
|
494
495
|
): Promise<ChunkSummary> {
|
|
495
496
|
validateLLMConfig(config);
|
|
496
497
|
|
|
498
|
+
const learnedContext = learnedPatterns.length > 0
|
|
499
|
+
? `\n\nLearned Patterns (reference only, do not overfit):\n${learnedPatterns
|
|
500
|
+
.slice(0, 20)
|
|
501
|
+
.map((p, i) => `${i + 1}. ${p.name}: ${p.rule} (confidence=${p.confidence.toFixed(2)})`)
|
|
502
|
+
.join('\n')}`
|
|
503
|
+
: '';
|
|
504
|
+
|
|
497
505
|
const prompt = ANALYSIS_PROMPT_TEMPLATE
|
|
498
506
|
.replace('{focus}', focus)
|
|
499
|
-
.replace('{content}', chunkContent);
|
|
507
|
+
.replace('{content}', chunkContent) + learnedContext;
|
|
500
508
|
|
|
501
509
|
const response = await callLLM(prompt, config, logger);
|
|
502
510
|
|