spec-agent 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +256 -0
- package/bin/spec-agent.js +14 -0
- package/dist/commands/analyze.d.ts +16 -0
- package/dist/commands/analyze.d.ts.map +1 -0
- package/dist/commands/analyze.js +283 -0
- package/dist/commands/analyze.js.map +1 -0
- package/dist/commands/clean.d.ts +9 -0
- package/dist/commands/clean.d.ts.map +1 -0
- package/dist/commands/clean.js +109 -0
- package/dist/commands/clean.js.map +1 -0
- package/dist/commands/dispatch.d.ts +12 -0
- package/dist/commands/dispatch.d.ts.map +1 -0
- package/dist/commands/dispatch.js +232 -0
- package/dist/commands/dispatch.js.map +1 -0
- package/dist/commands/doctor.d.ts +9 -0
- package/dist/commands/doctor.d.ts.map +1 -0
- package/dist/commands/doctor.js +153 -0
- package/dist/commands/doctor.js.map +1 -0
- package/dist/commands/learn.d.ts +13 -0
- package/dist/commands/learn.d.ts.map +1 -0
- package/dist/commands/learn.js +234 -0
- package/dist/commands/learn.js.map +1 -0
- package/dist/commands/merge.d.ts +11 -0
- package/dist/commands/merge.d.ts.map +1 -0
- package/dist/commands/merge.js +335 -0
- package/dist/commands/merge.js.map +1 -0
- package/dist/commands/pipeline.d.ts +19 -0
- package/dist/commands/pipeline.d.ts.map +1 -0
- package/dist/commands/pipeline.js +266 -0
- package/dist/commands/pipeline.js.map +1 -0
- package/dist/commands/plan.d.ts +13 -0
- package/dist/commands/plan.d.ts.map +1 -0
- package/dist/commands/plan.js +314 -0
- package/dist/commands/plan.js.map +1 -0
- package/dist/commands/scan.d.ts +28 -0
- package/dist/commands/scan.d.ts.map +1 -0
- package/dist/commands/scan.js +488 -0
- package/dist/commands/scan.js.map +1 -0
- package/dist/commands/status.d.ts +8 -0
- package/dist/commands/status.d.ts.map +1 -0
- package/dist/commands/status.js +146 -0
- package/dist/commands/status.js.map +1 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +126 -0
- package/dist/index.js.map +1 -0
- package/dist/services/document-parser.d.ts +49 -0
- package/dist/services/document-parser.d.ts.map +1 -0
- package/dist/services/document-parser.js +499 -0
- package/dist/services/document-parser.js.map +1 -0
- package/dist/services/llm.d.ts +61 -0
- package/dist/services/llm.d.ts.map +1 -0
- package/dist/services/llm.js +716 -0
- package/dist/services/llm.js.map +1 -0
- package/dist/types.d.ts +159 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +4 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/file.d.ts +10 -0
- package/dist/utils/file.d.ts.map +1 -0
- package/dist/utils/file.js +96 -0
- package/dist/utils/file.js.map +1 -0
- package/dist/utils/logger.d.ts +13 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +55 -0
- package/dist/utils/logger.js.map +1 -0
- package/package.json +48 -0
- package/scripts/publish-npm.js +174 -0
- package/spec-agent-implementation.md +750 -0
- package/src/commands/analyze.ts +322 -0
- package/src/commands/clean.ts +88 -0
- package/src/commands/dispatch.ts +250 -0
- package/src/commands/doctor.ts +136 -0
- package/src/commands/learn.ts +261 -0
- package/src/commands/merge.ts +377 -0
- package/src/commands/pipeline.ts +306 -0
- package/src/commands/plan.ts +331 -0
- package/src/commands/scan.ts +568 -0
- package/src/commands/status.ts +129 -0
- package/src/index.ts +137 -0
- package/src/services/document-parser.ts +548 -0
- package/src/services/llm.ts +857 -0
- package/src/types.ts +161 -0
- package/src/utils/file.ts +60 -0
- package/src/utils/logger.ts +58 -0
- package/tsconfig.json +19 -0
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
import * as path from 'path';
|
|
2
|
+
import { createHash } from 'crypto';
|
|
3
|
+
import { Command } from 'commander';
|
|
4
|
+
import { Logger } from '../utils/logger';
|
|
5
|
+
import {
|
|
6
|
+
ensureDir,
|
|
7
|
+
fileExists,
|
|
8
|
+
readJson,
|
|
9
|
+
writeJson,
|
|
10
|
+
formatSize,
|
|
11
|
+
readFileContent
|
|
12
|
+
} from '../utils/file';
|
|
13
|
+
import { Manifest, ChunkSummary } from '../types';
|
|
14
|
+
import {
|
|
15
|
+
getLLMConfigForPurpose,
|
|
16
|
+
validateLLMConfig,
|
|
17
|
+
analyzeChunkWithLLM
|
|
18
|
+
} from '../services/llm';
|
|
19
|
+
import { readChunkContent } from '../services/document-parser';
|
|
20
|
+
|
|
21
|
+
// CLI options accepted by `spec-agent analyze` (values arrive as strings from commander).
interface AnalyzeOptions {
  manifest: string;        // path to the manifest.json produced by `spec-agent scan`
  output: string;          // directory that receives chunk summaries, reports, and the cache
  agents: string;          // parallelism: 'auto' (capped at 4) or a positive integer string
  chunks?: string;         // optional comma-separated chunk indices; defaults to all chunks
  focus: string;           // analysis focus passed through to the LLM prompt
  retries: string;         // max retries per chunk (string form; defaults to '1')
  budgetTokens?: string;   // optional total token budget; falls back to MAX_ANALYZE_TOKENS env var
  applyLearned?: boolean;  // NOTE(review): declared but never read in this command — confirm intent
  dryRun?: boolean;        // print the analysis plan without invoking the LLM
  yes?: boolean;           // NOTE(review): declared but never read in this command — confirm intent
}
|
|
33
|
+
|
|
34
|
+
// Record of a chunk that could not be analyzed; persisted to analyze_failures.json.
interface ChunkFailure {
  chunkId: number;        // index of the chunk within manifest.chunks
  sourceFiles: string[];  // source files that made up the failed chunk
  attempts: number;       // number of analysis attempts made (1 + retries used)
  error: string;          // message from the final failure
}
|
|
40
|
+
|
|
41
|
+
// Outcome of analyzing a single chunk: a discriminated union of
// success (with its summary) or failure (with a recorded ChunkFailure).
type BatchResult =
  | { ok: true; summary: ChunkSummary }
  | { ok: false; failure: ChunkFailure };
|
|
44
|
+
|
|
45
|
+
// Process exit codes used by the analyze command.
const ANALYZE_EXIT_CODE = {
  INPUT_ERROR: 1,     // missing manifest or invalid chunk indices
  CONFIG_ERROR: 2,    // LLM configuration missing or invalid
  PARTIAL_FAILED: 3,  // some chunks succeeded, some failed
  RUNTIME_ERROR: 10,  // unexpected runtime failure
} as const;
|
|
51
|
+
|
|
52
|
+
export async function analyzeCommand(options: AnalyzeOptions, command: Command): Promise<void> {
|
|
53
|
+
const logger = new Logger();
|
|
54
|
+
|
|
55
|
+
try {
|
|
56
|
+
// Check manifest exists
|
|
57
|
+
const manifestPath = path.resolve(options.manifest);
|
|
58
|
+
if (!(await fileExists(manifestPath))) {
|
|
59
|
+
logger.error(`[E_ANALYZE_INPUT] Manifest not found: ${options.manifest}`);
|
|
60
|
+
logger.info('Run spec-agent scan first to create a manifest.');
|
|
61
|
+
process.exit(ANALYZE_EXIT_CODE.INPUT_ERROR);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
const manifest: Manifest = await readJson(manifestPath);
|
|
65
|
+
|
|
66
|
+
// Determine which chunks to analyze
|
|
67
|
+
let chunkIndices: number[] = [];
|
|
68
|
+
if (options.chunks) {
|
|
69
|
+
chunkIndices = options.chunks.split(',').map(s => parseInt(s.trim(), 10));
|
|
70
|
+
} else {
|
|
71
|
+
chunkIndices = manifest.chunks.map((_, i) => i);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
// Validate chunk indices
|
|
75
|
+
const invalidIndices = chunkIndices.filter(i => i < 0 || i >= manifest.chunks.length);
|
|
76
|
+
if (invalidIndices.length > 0) {
|
|
77
|
+
logger.error(`[E_ANALYZE_INPUT] Invalid chunk indices: ${invalidIndices.join(', ')}`);
|
|
78
|
+
logger.info(`Valid range: 0-${manifest.chunks.length - 1}`);
|
|
79
|
+
process.exit(ANALYZE_EXIT_CODE.INPUT_ERROR);
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
// Get LLM config
|
|
83
|
+
const llmConfig = getLLMConfigForPurpose('analyze');
|
|
84
|
+
|
|
85
|
+
if (options.dryRun) {
|
|
86
|
+
logger.info('Dry run mode - analysis plan:');
|
|
87
|
+
logger.info(`LLM Config: ${llmConfig.model} @ ${llmConfig.baseUrl}`);
|
|
88
|
+
for (const idx of chunkIndices) {
|
|
89
|
+
const chunk = manifest.chunks[idx];
|
|
90
|
+
logger.info(` Chunk ${idx}: ${chunk.sourceFiles.length} files, ${formatSize(chunk.size)}`);
|
|
91
|
+
}
|
|
92
|
+
return;
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
// Validate LLM config (skip if explicitly bypassed)
|
|
96
|
+
try {
|
|
97
|
+
validateLLMConfig(llmConfig);
|
|
98
|
+
} catch (error) {
|
|
99
|
+
logger.error(`LLM Configuration Error: ${error instanceof Error ? error.message : String(error)}`);
|
|
100
|
+
logger.info('\nTo use analyze, set one of these environment variables:');
|
|
101
|
+
logger.info(' export OPENAI_API_KEY=your_key_here');
|
|
102
|
+
logger.info(' export LLM_API_KEY=your_key_here');
|
|
103
|
+
logger.info('\nOptional configuration:');
|
|
104
|
+
logger.info(' export OPENAI_BASE_URL=https://api.openai.com/v1 # or your custom endpoint');
|
|
105
|
+
logger.info(' export LLM_MODEL=gpt-4o-mini # or gpt-4o, claude-3-sonnet, etc.');
|
|
106
|
+
process.exit(ANALYZE_EXIT_CODE.CONFIG_ERROR);
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
logger.info(`Analyzing ${chunkIndices.length} chunks with focus: ${options.focus}`);
|
|
110
|
+
logger.info(`Using LLM: ${llmConfig.model}`);
|
|
111
|
+
|
|
112
|
+
// Ensure output directory
|
|
113
|
+
const outputDir = path.resolve(options.output);
|
|
114
|
+
await ensureDir(outputDir);
|
|
115
|
+
const cacheDir = path.join(outputDir, '.cache', 'analyze');
|
|
116
|
+
await ensureDir(cacheDir);
|
|
117
|
+
|
|
118
|
+
// Analyze chunks
|
|
119
|
+
const results: ChunkSummary[] = [];
|
|
120
|
+
const startTime = Date.now();
|
|
121
|
+
|
|
122
|
+
logger.info('Starting parallel analysis...');
|
|
123
|
+
|
|
124
|
+
// Process chunks in parallel
|
|
125
|
+
const concurrencyRaw = options.agents === 'auto'
|
|
126
|
+
? Math.min(4, chunkIndices.length)
|
|
127
|
+
: parseInt(options.agents, 10);
|
|
128
|
+
const concurrency = Number.isFinite(concurrencyRaw) && concurrencyRaw > 0
|
|
129
|
+
? concurrencyRaw
|
|
130
|
+
: 1;
|
|
131
|
+
|
|
132
|
+
let totalTokens = 0;
|
|
133
|
+
let cachedChunks = 0;
|
|
134
|
+
let llmInvocations = 0;
|
|
135
|
+
const maxRetries = Math.max(0, parseInt(options.retries || '1', 10) || 0);
|
|
136
|
+
const budgetTokens = Math.max(0, parseInt(options.budgetTokens || process.env.MAX_ANALYZE_TOKENS || '0', 10) || 0);
|
|
137
|
+
const failures: ChunkFailure[] = [];
|
|
138
|
+
|
|
139
|
+
for (let i = 0; i < chunkIndices.length; i += concurrency) {
|
|
140
|
+
const batch = chunkIndices.slice(i, i + concurrency);
|
|
141
|
+
|
|
142
|
+
logger.progress(i, chunkIndices.length, `Analyzing chunks ${batch.join(', ')}...`);
|
|
143
|
+
|
|
144
|
+
// Process batch in parallel
|
|
145
|
+
const batchResults: BatchResult[] = await Promise.all(
|
|
146
|
+
batch.map(async (idx) => {
|
|
147
|
+
const chunk = manifest.chunks[idx];
|
|
148
|
+
|
|
149
|
+
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
150
|
+
try {
|
|
151
|
+
// Read chunk content
|
|
152
|
+
logger.debug(`Reading chunk ${idx}: ${chunk.sourceFiles.length} files`);
|
|
153
|
+
let content: string;
|
|
154
|
+
|
|
155
|
+
if (chunk.contentPath && await fileExists(chunk.contentPath)) {
|
|
156
|
+
// Read from chunk file
|
|
157
|
+
content = await readFileContent(chunk.contentPath);
|
|
158
|
+
} else if (chunk.content) {
|
|
159
|
+
// Read from manifest (legacy)
|
|
160
|
+
content = chunk.content;
|
|
161
|
+
} else {
|
|
162
|
+
// Fallback: read from source files
|
|
163
|
+
content = await readChunkContent(chunk.sourceFiles);
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
const cacheKey = createHash('sha256')
|
|
167
|
+
.update(content)
|
|
168
|
+
.update(`|focus:${options.focus}|model:${llmConfig.model}|v:analyze-2`)
|
|
169
|
+
.digest('hex');
|
|
170
|
+
const cachePath = path.join(cacheDir, `${cacheKey}.json`);
|
|
171
|
+
if (await fileExists(cachePath)) {
|
|
172
|
+
const cached = await readJson<ChunkSummary>(cachePath);
|
|
173
|
+
cachedChunks++;
|
|
174
|
+
if (cached.llmUsage) {
|
|
175
|
+
totalTokens += cached.llmUsage.totalTokens;
|
|
176
|
+
}
|
|
177
|
+
cached.sourceFiles = chunk.sourceFiles;
|
|
178
|
+
cached.size = formatSize(chunk.size);
|
|
179
|
+
return { ok: true as const, summary: cached };
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
if (budgetTokens > 0 && totalTokens >= budgetTokens) {
|
|
183
|
+
return {
|
|
184
|
+
ok: false as const,
|
|
185
|
+
failure: {
|
|
186
|
+
chunkId: idx,
|
|
187
|
+
sourceFiles: chunk.sourceFiles,
|
|
188
|
+
attempts: attempt + 1,
|
|
189
|
+
error: `Token budget exceeded (${totalTokens}/${budgetTokens})`,
|
|
190
|
+
},
|
|
191
|
+
};
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
// Analyze with LLM
|
|
195
|
+
logger.debug(`Sending chunk ${idx} to LLM (${content.length} chars), attempt ${attempt + 1}/${maxRetries + 1}`);
|
|
196
|
+
llmInvocations++;
|
|
197
|
+
const summary = await analyzeChunkWithLLM(
|
|
198
|
+
content,
|
|
199
|
+
idx,
|
|
200
|
+
options.focus,
|
|
201
|
+
llmConfig,
|
|
202
|
+
logger
|
|
203
|
+
);
|
|
204
|
+
|
|
205
|
+
// Fill in source info
|
|
206
|
+
summary.sourceFiles = chunk.sourceFiles;
|
|
207
|
+
summary.size = formatSize(chunk.size);
|
|
208
|
+
|
|
209
|
+
if (summary.llmUsage) {
|
|
210
|
+
totalTokens += summary.llmUsage.totalTokens;
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
await writeJson(cachePath, summary);
|
|
214
|
+
|
|
215
|
+
return { ok: true as const, summary };
|
|
216
|
+
} catch (error) {
|
|
217
|
+
const canRetry = attempt < maxRetries;
|
|
218
|
+
const errorText = error instanceof Error ? error.message : String(error);
|
|
219
|
+
if (canRetry) {
|
|
220
|
+
logger.warn(`Chunk ${idx} 分析失败,准备重试 (${attempt + 1}/${maxRetries}): ${errorText}`);
|
|
221
|
+
continue;
|
|
222
|
+
}
|
|
223
|
+
return {
|
|
224
|
+
ok: false as const,
|
|
225
|
+
failure: {
|
|
226
|
+
chunkId: idx,
|
|
227
|
+
sourceFiles: chunk.sourceFiles,
|
|
228
|
+
attempts: attempt + 1,
|
|
229
|
+
error: errorText,
|
|
230
|
+
},
|
|
231
|
+
};
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
return {
|
|
236
|
+
ok: false as const,
|
|
237
|
+
failure: {
|
|
238
|
+
chunkId: idx,
|
|
239
|
+
sourceFiles: chunk.sourceFiles,
|
|
240
|
+
attempts: maxRetries + 1,
|
|
241
|
+
error: 'Unknown analysis error',
|
|
242
|
+
},
|
|
243
|
+
};
|
|
244
|
+
})
|
|
245
|
+
);
|
|
246
|
+
|
|
247
|
+
const successResults = batchResults
|
|
248
|
+
.filter((item): item is { ok: true; summary: ChunkSummary } => item.ok)
|
|
249
|
+
.map(item => item.summary);
|
|
250
|
+
const failedResults = batchResults
|
|
251
|
+
.filter((item): item is { ok: false; failure: ChunkFailure } => !item.ok)
|
|
252
|
+
.map(item => item.failure);
|
|
253
|
+
|
|
254
|
+
results.push(...successResults);
|
|
255
|
+
failures.push(...failedResults);
|
|
256
|
+
|
|
257
|
+
// Save individual chunk summaries
|
|
258
|
+
for (const summary of successResults) {
|
|
259
|
+
const summaryPath = path.join(outputDir, `chunk_${summary.chunkId}_summary.json`);
|
|
260
|
+
await writeJson(summaryPath, summary);
|
|
261
|
+
}
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
const duration = ((Date.now() - startTime) / 1000).toFixed(1);
|
|
265
|
+
|
|
266
|
+
const failuresPath = path.join(outputDir, 'analyze_failures.json');
|
|
267
|
+
if (failures.length > 0) {
|
|
268
|
+
await writeJson(failuresPath, {
|
|
269
|
+
status: 'partial_failed',
|
|
270
|
+
createdAt: new Date().toISOString(),
|
|
271
|
+
chunksRequested: chunkIndices.length,
|
|
272
|
+
chunksSucceeded: results.length,
|
|
273
|
+
chunksFailed: failures.length,
|
|
274
|
+
failures,
|
|
275
|
+
});
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
logger.success(`Analysis complete in ${duration}s`);
|
|
279
|
+
if (failures.length > 0) {
|
|
280
|
+
logger.warn(`有 ${failures.length} 个 chunk 失败,详情见: ${failuresPath}`);
|
|
281
|
+
}
|
|
282
|
+
logger.json({
|
|
283
|
+
status: failures.length > 0 ? 'partial_failed' : 'success',
|
|
284
|
+
chunksAnalyzed: results.length,
|
|
285
|
+
chunksFailed: failures.length,
|
|
286
|
+
totalFeatures: results.reduce((sum, r) => sum + r.features.length, 0),
|
|
287
|
+
totalDataModels: results.reduce((sum, r) => sum + r.dataModels.length, 0),
|
|
288
|
+
totalPages: results.reduce((sum, r) => sum + r.pages.length, 0),
|
|
289
|
+
totalApis: results.reduce((sum, r) => sum + r.apis.length, 0),
|
|
290
|
+
totalTokens,
|
|
291
|
+
cachedChunks,
|
|
292
|
+
llmInvocations,
|
|
293
|
+
tokenBudget: budgetTokens > 0 ? budgetTokens : undefined,
|
|
294
|
+
retriesPerChunk: maxRetries,
|
|
295
|
+
failuresPath: failures.length > 0 ? failuresPath : undefined,
|
|
296
|
+
outputDir,
|
|
297
|
+
});
|
|
298
|
+
|
|
299
|
+
const analyzeReportPath = path.join(outputDir, 'analyze_report.json');
|
|
300
|
+
await writeJson(analyzeReportPath, {
|
|
301
|
+
createdAt: new Date().toISOString(),
|
|
302
|
+
status: failures.length > 0 ? 'partial_failed' : 'success',
|
|
303
|
+
chunksRequested: chunkIndices.length,
|
|
304
|
+
chunksSucceeded: results.length,
|
|
305
|
+
chunksFailed: failures.length,
|
|
306
|
+
retriesPerChunk: maxRetries,
|
|
307
|
+
cachedChunks,
|
|
308
|
+
llmInvocations,
|
|
309
|
+
totalTokens,
|
|
310
|
+
tokenBudget: budgetTokens > 0 ? budgetTokens : undefined,
|
|
311
|
+
cacheDir,
|
|
312
|
+
});
|
|
313
|
+
|
|
314
|
+
if (failures.length > 0) {
|
|
315
|
+
process.exit(ANALYZE_EXIT_CODE.PARTIAL_FAILED);
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
} catch (error) {
|
|
319
|
+
logger.error(`Analysis failed: ${error instanceof Error ? error.message : String(error)}`);
|
|
320
|
+
process.exit(ANALYZE_EXIT_CODE.RUNTIME_ERROR);
|
|
321
|
+
}
|
|
322
|
+
}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import * as path from 'path';
|
|
2
|
+
import { Command } from 'commander';
|
|
3
|
+
import { Logger } from '../utils/logger';
|
|
4
|
+
import { fileExists, findFiles } from '../utils/file';
|
|
5
|
+
import * as fs from 'fs-extra';
|
|
6
|
+
|
|
7
|
+
// CLI options accepted by `spec-agent clean`.
interface CleanOptions {
  workspace: string; // workspace directory containing pipeline artifacts
  dryRun?: boolean;  // list what would be removed without deleting anything
  yes?: boolean;     // NOTE(review): declared but never read in this command — confirm intent
}
|
|
12
|
+
|
|
13
|
+
// Pipeline artifacts that `clean` may remove from a workspace.
// Entries ending in '/' are directories; dot-prefixed entries are
// stage-completion markers written by the individual commands.
const INTERMEDIATE_FILES = [
  'manifest.json',
  'summaries/',
  'spec_summary.json',
  'task_plan.json',
  'dispatch_plan.json',
  '.patterns.json',
  '.scan_done',
  '.analyze_done',
  '.merge_done',
  '.plan_done',
  '.dispatch_done',
];
|
|
26
|
+
|
|
27
|
+
export async function cleanCommand(options: CleanOptions, command: Command): Promise<void> {
|
|
28
|
+
const logger = new Logger();
|
|
29
|
+
const workspacePath = path.resolve(options.workspace);
|
|
30
|
+
|
|
31
|
+
try {
|
|
32
|
+
if (!(await fileExists(workspacePath))) {
|
|
33
|
+
logger.error(`Error: Workspace not found: ${options.workspace}`);
|
|
34
|
+
process.exit(1);
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
// Find all intermediate files
|
|
38
|
+
const filesToClean: Array<{ path: string; type: string }> = [];
|
|
39
|
+
|
|
40
|
+
for (const pattern of INTERMEDIATE_FILES) {
|
|
41
|
+
const fullPath = path.join(workspacePath, pattern);
|
|
42
|
+
|
|
43
|
+
if (await fileExists(fullPath)) {
|
|
44
|
+
const stats = await fs.stat(fullPath);
|
|
45
|
+
filesToClean.push({
|
|
46
|
+
path: fullPath,
|
|
47
|
+
type: stats.isDirectory() ? 'directory' : 'file',
|
|
48
|
+
});
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
if (filesToClean.length === 0) {
|
|
53
|
+
logger.info('No intermediate files to clean.');
|
|
54
|
+
return;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
logger.info(`Found ${filesToClean.length} items to clean:`);
|
|
58
|
+
for (const item of filesToClean) {
|
|
59
|
+
logger.info(` ${item.type === 'directory' ? '📁' : '📄'} ${path.relative(workspacePath, item.path)}`);
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// Preview mode
|
|
63
|
+
if (options.dryRun) {
|
|
64
|
+
logger.info('\nDry run mode - no files were deleted.');
|
|
65
|
+
return;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
// Clean files
|
|
69
|
+
for (const item of filesToClean) {
|
|
70
|
+
try {
|
|
71
|
+
if (item.type === 'directory') {
|
|
72
|
+
await fs.remove(item.path);
|
|
73
|
+
} else {
|
|
74
|
+
await fs.unlink(item.path);
|
|
75
|
+
}
|
|
76
|
+
logger.success(` Cleaned: ${path.relative(workspacePath, item.path)}`);
|
|
77
|
+
} catch (error) {
|
|
78
|
+
logger.error(` Failed to clean: ${path.relative(workspacePath, item.path)}`);
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
logger.success(`\nCleaned ${filesToClean.length} items from workspace`);
|
|
83
|
+
|
|
84
|
+
} catch (error) {
|
|
85
|
+
logger.error(`Clean failed: ${error instanceof Error ? error.message : String(error)}`);
|
|
86
|
+
process.exit(1);
|
|
87
|
+
}
|
|
88
|
+
}
|
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
import * as path from 'path';
|
|
2
|
+
import { Command } from 'commander';
|
|
3
|
+
import { Logger } from '../utils/logger';
|
|
4
|
+
import {
|
|
5
|
+
ensureDir,
|
|
6
|
+
fileExists,
|
|
7
|
+
readJson,
|
|
8
|
+
writeJson
|
|
9
|
+
} from '../utils/file';
|
|
10
|
+
import { TaskPlan, DispatchPlan, AgentAssignment, Task } from '../types';
|
|
11
|
+
|
|
12
|
+
// CLI options accepted by `spec-agent dispatch`.
interface DispatchOptions {
  plan: string;      // path to task_plan.json produced by `spec-agent plan`
  output: string;    // destination path for dispatch_plan.json
  agents?: string;   // optional pool mapping like "frontend:2,backend:1"; auto-sized when omitted
  strategy: string;  // assignment strategy: 'load-first', 'skill-first', or 'balanced'
  dryRun?: boolean;  // print the dispatch preview without writing anything
  yes?: boolean;     // NOTE(review): declared but never read in this command — confirm intent
}
|
|
20
|
+
|
|
21
|
+
/**
 * `spec-agent dispatch` — distribute the tasks of a task plan across agent
 * pools and write the resulting dispatch_plan.json.
 *
 * Flow: load the task plan, size the agent pools (explicit --agents mapping
 * or auto-sized from task count), assign tasks per the chosen strategy,
 * group assignments by agent type, record any unassignable tasks, and write
 * the dispatch plan to options.output. Exits 1 on a missing plan or error.
 */
export async function dispatchCommand(options: DispatchOptions, command: Command): Promise<void> {
  const logger = new Logger();

  try {
    const planPath = path.resolve(options.plan);

    if (!(await fileExists(planPath))) {
      logger.error(`Error: Task plan not found: ${options.plan}`);
      logger.info('Run spec-agent plan first to create task_plan.json.');
      process.exit(1);
    }

    const taskPlan: TaskPlan = await readJson(planPath);
    logger.info(`Loaded task plan with ${taskPlan.totalTasks} tasks`);

    // Parse agent mapping ("type:count" pairs, or auto-sized by task count)
    const agentPools = parseAgentMapping(options.agents, taskPlan.totalTasks);
    logger.info(`Agent pools: ${Object.entries(agentPools).map(([k, v]) => `${k}:${v}`).join(', ')}`);

    // Preview mode
    if (options.dryRun) {
      logger.info('Dry run mode - dispatch plan preview:');
      logger.info(` Total tasks: ${taskPlan.totalTasks}`);
      logger.info(` Strategy: ${options.strategy}`);
      for (const [type, count] of Object.entries(agentPools)) {
        logger.info(` ${type} agents: ${count}`);
      }
      return;
    }

    const startTime = Date.now();

    // Collect all tasks from every parallel group into one flat list.
    const allTasks: Task[] = [];
    for (const group of taskPlan.parallelGroups) {
      allTasks.push(...group.tasks);
    }

    // Assign tasks to agents based on strategy
    const assignments = assignTasksToAgents(allTasks, agentPools, options.strategy);

    // Build the per-type pool structure: one AgentAssignment per agent
    // instance, with its task list and total workload in hours.
    const agentPoolAssignments: { [type: string]: AgentAssignment[] } = {};
    for (const [type, count] of Object.entries(agentPools)) {
      agentPoolAssignments[type] = [];
      for (let i = 1; i <= count; i++) {
        const agentId = `${type.toUpperCase()}-${i}`;
        const assignedTasks = assignments
          .filter(a => a.agentId === agentId)
          .flatMap(a => a.assignedTasks);

        // Sum estimatedHours of the tasks assigned to this agent.
        const totalHours = allTasks
          .filter(t => assignedTasks.includes(t.id))
          .reduce((sum, t) => sum + (t.estimatedHours || 0), 0);

        agentPoolAssignments[type].push({
          agentId,
          type,
          assignedTasks,
          workload: `${totalHours}h`,
        });
      }
    }

    // Find unassigned tasks (e.g. tasks whose required agent type has no pool)
    const assignedTaskIds = new Set(assignments.flatMap(a => a.assignedTasks));
    const unassigned = allTasks
      .filter(t => !assignedTaskIds.has(t.id))
      .map(t => t.id);

    if (unassigned.length > 0) {
      logger.warn(`${unassigned.length} tasks could not be assigned`);
    }

    // Create dispatch plan
    const dispatchPlan: DispatchPlan = {
      version: '1.0.0',
      createdAt: new Date().toISOString(),
      totalTasks: taskPlan.totalTasks,
      agentPools: agentPoolAssignments,
      unassigned,
      dispatchPlanPath: path.resolve(options.output),
    };

    // Write output
    const outputPath = path.resolve(options.output);
    await ensureDir(path.dirname(outputPath));
    await writeJson(outputPath, dispatchPlan);

    const duration = ((Date.now() - startTime) / 1000).toFixed(1);

    logger.success(`Dispatch plan created in ${duration}s`);
    // Machine-readable summary to stdout.
    logger.json({
      status: 'success',
      totalTasks: taskPlan.totalTasks,
      agentTypes: Object.keys(agentPools).length,
      totalAgents: Object.values(agentPools).reduce((a, b) => a + b, 0),
      unassigned: unassigned.length,
      outputPath,
    });

    // Print assignment summary
    for (const [type, agents] of Object.entries(agentPoolAssignments)) {
      for (const agent of agents) {
        logger.info(` ${agent.agentId}: ${agent.assignedTasks.length} tasks, ${agent.workload}`);
      }
    }

  } catch (error) {
    logger.error(`Dispatch failed: ${error instanceof Error ? error.message : String(error)}`);
    process.exit(1);
  }
}
|
|
134
|
+
|
|
135
|
+
function parseAgentMapping(agentsStr: string | undefined, totalTasks: number): Record<string, number> {
|
|
136
|
+
if (!agentsStr) {
|
|
137
|
+
// Auto-determine based on task count
|
|
138
|
+
if (totalTasks <= 5) {
|
|
139
|
+
return { frontend: 1, backend: 1 };
|
|
140
|
+
} else if (totalTasks <= 15) {
|
|
141
|
+
return { frontend: 2, backend: 2, qa: 1 };
|
|
142
|
+
} else {
|
|
143
|
+
return { frontend: 3, backend: 3, qa: 2 };
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
const pools: Record<string, number> = {};
|
|
148
|
+
const pairs = agentsStr.split(',');
|
|
149
|
+
|
|
150
|
+
for (const pair of pairs) {
|
|
151
|
+
const [type, count] = pair.split(':');
|
|
152
|
+
if (type && count) {
|
|
153
|
+
pools[type.trim()] = parseInt(count.trim(), 10);
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
return pools;
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
/**
 * Assign tasks to concrete agent instances according to the given strategy.
 *
 * Agents are instantiated per pool ("FRONTEND-1", "FRONTEND-2", ...), tasks
 * are sorted by priority then size, and each task is placed on an agent of
 * the type returned by mapTaskToAgentType. Tasks with no matching agent
 * type are skipped here and surface in the caller's `unassigned` list.
 *
 * @param tasks - flat list of tasks to place
 * @param agentPools - agent-type → instance-count mapping
 * @param strategy - 'load-first', 'skill-first', or 'balanced' (default)
 * @returns one AgentAssignment per agent that received at least one task
 */
function assignTasksToAgents(
  tasks: Task[],
  agentPools: Record<string, number>,
  strategy: string
): AgentAssignment[] {
  const assignments: AgentAssignment[] = [];

  // Create agent instances, each with a running workload total in hours.
  const agents: Array<{ id: string; type: string; workload: number }> = [];
  for (const [type, count] of Object.entries(agentPools)) {
    for (let i = 1; i <= count; i++) {
      agents.push({ id: `${type.toUpperCase()}-${i}`, type, workload: 0 });
    }
  }

  // Sort tasks by priority (P0 first), then by estimated hours descending so
  // large tasks are placed before small ones within the same priority.
  const sortedTasks = [...tasks].sort((a, b) => {
    const priorityOrder = { P0: 0, P1: 1, P2: 2, P3: 3 };
    if (priorityOrder[a.priority] !== priorityOrder[b.priority]) {
      return priorityOrder[a.priority] - priorityOrder[b.priority];
    }
    return (b.estimatedHours || 0) - (a.estimatedHours || 0);
  });

  // Assign tasks
  for (const task of sortedTasks) {
    const taskType = mapTaskToAgentType(task);

    // Find suitable agents
    const suitableAgents = agents.filter(a => a.type === taskType);
    if (suitableAgents.length === 0) {
      // NOTE(review): no agent of the required type exists — the task is
      // dropped here (no fallback is implemented, despite what the original
      // comment claimed) and ends up in the caller's `unassigned` list.
      continue;
    }

    // Select agent based on strategy
    let selectedAgent;
    switch (strategy) {
      case 'load-first':
        // Assign to agent with least workload
        selectedAgent = suitableAgents.reduce((min, a) =>
          a.workload < min.workload ? a : min
        );
        break;
      case 'skill-first':
        // Assign to first suitable agent (specialization)
        selectedAgent = suitableAgents[0];
        break;
      case 'balanced':
      default:
        // Round-robin with workload consideration.
        // NOTE(review): currently identical to 'load-first' — confirm intent.
        selectedAgent = suitableAgents.reduce((min, a) =>
          a.workload < min.workload ? a : min
        );
        break;
    }

    // Record assignment; the `workload` string here is a placeholder — the
    // caller recomputes per-agent hour totals when building the final plan.
    let assignment = assignments.find(a => a.agentId === selectedAgent.id);
    if (!assignment) {
      assignment = {
        agentId: selectedAgent.id,
        type: selectedAgent.type,
        assignedTasks: [],
        workload: '0h',
      };
      assignments.push(assignment);
    }

    assignment.assignedTasks.push(task.id);
    selectedAgent.workload += task.estimatedHours || 0;
  }

  return assignments;
}
|
|
235
|
+
|
|
236
|
+
function mapTaskToAgentType(task: Task): string {
|
|
237
|
+
switch (task.type) {
|
|
238
|
+
case 'page':
|
|
239
|
+
case 'component':
|
|
240
|
+
return 'frontend';
|
|
241
|
+
case 'api':
|
|
242
|
+
return 'backend';
|
|
243
|
+
case 'test':
|
|
244
|
+
return 'qa';
|
|
245
|
+
case 'setup':
|
|
246
|
+
return 'frontend'; // or devops
|
|
247
|
+
default:
|
|
248
|
+
return 'frontend';
|
|
249
|
+
}
|
|
250
|
+
}
|