ai-spec-dev 0.31.0 → 0.35.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/add-lesson.md +34 -0
- package/.claude/commands/check-layers.md +65 -0
- package/.claude/commands/installed-deps.md +35 -0
- package/.claude/commands/recall-lessons.md +40 -0
- package/.claude/commands/scan-singletons.md +45 -0
- package/.claude/commands/verify-imports.md +48 -0
- package/.claude/settings.local.json +15 -1
- package/README.md +531 -213
- package/RELEASE_LOG.md +460 -0
- package/cli/commands/config.ts +93 -0
- package/cli/commands/create.ts +1233 -0
- package/cli/commands/dashboard.ts +62 -0
- package/cli/commands/export.ts +66 -0
- package/cli/commands/init.ts +190 -0
- package/cli/commands/learn.ts +30 -0
- package/cli/commands/logs.ts +106 -0
- package/cli/commands/mock.ts +175 -0
- package/cli/commands/model.ts +156 -0
- package/cli/commands/restore.ts +22 -0
- package/cli/commands/review.ts +63 -0
- package/cli/commands/scan.ts +99 -0
- package/cli/commands/trend.ts +36 -0
- package/cli/commands/types.ts +69 -0
- package/cli/commands/update.ts +178 -0
- package/cli/commands/vcr.ts +70 -0
- package/cli/commands/workspace.ts +219 -0
- package/cli/index.ts +34 -2240
- package/cli/utils.ts +83 -0
- package/core/combined-generator.ts +13 -3
- package/core/dashboard-generator.ts +340 -0
- package/core/design-dialogue.ts +124 -0
- package/core/dsl-feedback.ts +285 -0
- package/core/error-feedback.ts +46 -2
- package/core/project-index.ts +301 -0
- package/core/reviewer.ts +84 -6
- package/core/run-logger.ts +109 -3
- package/core/run-trend.ts +261 -0
- package/core/self-evaluator.ts +139 -7
- package/core/spec-generator.ts +14 -8
- package/core/task-generator.ts +17 -0
- package/core/types-generator.ts +219 -0
- package/core/vcr.ts +210 -0
- package/dist/cli/index.js +6692 -4512
- package/dist/cli/index.js.map +1 -1
- package/dist/cli/index.mjs +6692 -4512
- package/dist/cli/index.mjs.map +1 -1
- package/dist/index.d.mts +19 -5
- package/dist/index.d.ts +19 -5
- package/dist/index.js +420 -224
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +418 -224
- package/dist/index.mjs.map +1 -1
- package/docs-assets/purpose/architecture-overview.svg +64 -0
- package/docs-assets/purpose/create-pipeline.svg +113 -0
- package/docs-assets/purpose/task-layering.svg +74 -0
- package/package.json +6 -3
- package/prompts/codegen.prompt.ts +97 -9
- package/prompts/design.prompt.ts +59 -0
- package/prompts/spec.prompt.ts +8 -1
- package/prompts/tasks.prompt.ts +27 -2
- package/purpose.md +600 -174
- package/tests/dsl-extractor.test.ts +264 -0
- package/tests/dsl-feedback.test.ts +266 -0
- package/tests/dsl-validator.test.ts +283 -0
- package/tests/error-feedback.test.ts +292 -0
- package/tests/provider-utils.test.ts +173 -0
- package/tests/run-trend.test.ts +186 -0
- package/tests/self-evaluator.test.ts +339 -0
- package/tests/spec-assessor.test.ts +142 -0
- package/tests/task-generator.test.ts +230 -0
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* dsl-feedback.ts — Two pipeline feedback loops for ai-spec create
|
|
3
|
+
*
|
|
4
|
+
* Loop 1 (DSL → Spec): after DSL extraction, detect sparse/incomplete DSL
|
|
5
|
+
* and offer a targeted spec refinement pass before codegen starts.
|
|
6
|
+
*
|
|
7
|
+
* Loop 2 (Review → DSL): after 3-pass review, detect design-level findings
|
|
8
|
+
* (as opposed to implementation issues) and offer to amend the spec + DSL
|
|
9
|
+
* so the next update/regen starts from a corrected contract.
|
|
10
|
+
*
|
|
11
|
+
* Design constraints:
|
|
12
|
+
* - Both loops are SKIPPED in --auto / --fast / --skip-dsl modes.
|
|
13
|
+
* - Zero extra AI calls until the user explicitly opts in.
|
|
14
|
+
* - Non-blocking: user can always skip.
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import chalk from "chalk";
|
|
18
|
+
import { SpecDSL } from "./dsl-types";
|
|
19
|
+
|
|
20
|
+
// ─── Loop 1 Types ─────────────────────────────────────────────────────────────
|
|
21
|
+
|
|
22
|
+
/**
 * A single completeness gap detected in an extracted SpecDSL by
 * `assessDslRichness`. Pure data — carries no behaviour; consumed by
 * `buildDslGapRefinementPrompt` and `printDslGaps`.
 */
export interface DslGap {
  /** Short machine key for RunLog serialisation */
  code: "sparse_model" | "missing_errors" | "generic_endpoint_desc" | "no_models_no_endpoints";
  /** Human-readable message shown to the user */
  message: string;
  /** Concrete suggestion injected into the refinement prompt */
  hint: string;
}
|
|
30
|
+
|
|
31
|
+
// ─── Loop 1: DSL Richness Assessment ─────────────────────────────────────────
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* Inspect a freshly-extracted DSL for common completeness gaps.
|
|
35
|
+
* Returns a list of DslGap objects (empty = DSL looks adequate).
|
|
36
|
+
*
|
|
37
|
+
* All checks are pure heuristics — zero AI calls.
|
|
38
|
+
*/
|
|
39
|
+
export function assessDslRichness(dsl: SpecDSL): DslGap[] {
|
|
40
|
+
const gaps: DslGap[] = [];
|
|
41
|
+
|
|
42
|
+
// ── No endpoints AND no models ────────────────────────────────────────────
|
|
43
|
+
if (dsl.endpoints.length === 0 && dsl.models.length === 0) {
|
|
44
|
+
gaps.push({
|
|
45
|
+
code: "no_models_no_endpoints",
|
|
46
|
+
message: "DSL has no endpoints and no models — spec may be too abstract for structured extraction",
|
|
47
|
+
hint: "Please add explicit API endpoint definitions (method, path, request/response) and any data models that this feature requires.",
|
|
48
|
+
});
|
|
49
|
+
return gaps; // no point checking the rest
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
// ── Endpoints with very generic / short descriptions ─────────────────────
|
|
53
|
+
const GENERIC_DESC_KEYWORDS = ["handles", "processes", "manages", "操作", "处理", "管理"];
|
|
54
|
+
const GENERIC_DESC_MIN_LEN = 15;
|
|
55
|
+
|
|
56
|
+
for (const ep of dsl.endpoints) {
|
|
57
|
+
const desc = (ep.description ?? "").trim();
|
|
58
|
+
const isGeneric =
|
|
59
|
+
desc.length < GENERIC_DESC_MIN_LEN ||
|
|
60
|
+
GENERIC_DESC_KEYWORDS.some((kw) => desc.toLowerCase().startsWith(kw));
|
|
61
|
+
|
|
62
|
+
if (isGeneric) {
|
|
63
|
+
gaps.push({
|
|
64
|
+
code: "generic_endpoint_desc",
|
|
65
|
+
message: `Endpoint ${ep.method} ${ep.path} has a vague description: "${desc}"`,
|
|
66
|
+
hint: `Clarify what ${ep.method} ${ep.path} does: what inputs are required, what the success response contains, and what business rule it enforces.`,
|
|
67
|
+
});
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
// ── Endpoints with no error definitions (but spec text likely mentions them) ──
|
|
72
|
+
const endpointsWithoutErrors = dsl.endpoints.filter(
|
|
73
|
+
(ep) => !ep.errors || ep.errors.length === 0
|
|
74
|
+
);
|
|
75
|
+
if (endpointsWithoutErrors.length > 0 && dsl.endpoints.length >= 2) {
|
|
76
|
+
gaps.push({
|
|
77
|
+
code: "missing_errors",
|
|
78
|
+
message: `${endpointsWithoutErrors.length}/${dsl.endpoints.length} endpoints have no error definitions`,
|
|
79
|
+
hint: `For each endpoint, specify at least the main error cases: e.g. 400 validation errors, 401 auth failures, 404 not found, 409 conflict. Include an error code (e.g. INVALID_INPUT) and description for each.`,
|
|
80
|
+
});
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
// ── Models with fewer than 2 fields ──────────────────────────────────────
|
|
84
|
+
for (const model of dsl.models) {
|
|
85
|
+
if (!model.fields || model.fields.length < 2) {
|
|
86
|
+
gaps.push({
|
|
87
|
+
code: "sparse_model",
|
|
88
|
+
message: `Model "${model.name}" has only ${model.fields?.length ?? 0} field(s) — likely incomplete`,
|
|
89
|
+
hint: `List all fields for "${model.name}" with their types and whether they are required. Include at minimum an id, created_at, and the core domain fields this model needs.`,
|
|
90
|
+
});
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
return gaps;
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
// ─── Loop 1: Targeted Spec Refinement Prompt ─────────────────────────────────
|
|
98
|
+
|
|
99
|
+
/**
|
|
100
|
+
* Build a targeted AI refinement prompt that focuses the LLM on filling
|
|
101
|
+
* only the specific gaps detected by `assessDslRichness`.
|
|
102
|
+
*/
|
|
103
|
+
export function buildDslGapRefinementPrompt(spec: string, gaps: DslGap[]): string {
|
|
104
|
+
const gapList = gaps
|
|
105
|
+
.map((g, i) => `${i + 1}. [${g.code}] ${g.message}\n → ${g.hint}`)
|
|
106
|
+
.join("\n\n");
|
|
107
|
+
|
|
108
|
+
return `The following feature spec has been structurally analysed. The DSL extracted from it was found to be incomplete in these specific areas:
|
|
109
|
+
|
|
110
|
+
${gapList}
|
|
111
|
+
|
|
112
|
+
Your task: revise the spec below to address ONLY the gaps listed above.
|
|
113
|
+
- Do NOT change the overall feature scope or business logic.
|
|
114
|
+
- Do NOT rewrite sections that are already complete.
|
|
115
|
+
- Add missing error cases, clarify vague endpoint descriptions, complete sparse model field lists.
|
|
116
|
+
- Output ONLY the complete revised Markdown spec. No preamble, no explanation.
|
|
117
|
+
|
|
118
|
+
=== Current Spec ===
|
|
119
|
+
${spec}`;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// ─── Loop 2 Types ─────────────────────────────────────────────────────────────
|
|
123
|
+
|
|
124
|
+
/**
 * A design-level (spec/contract) issue surfaced by the Pass-1 architecture
 * review, as opposed to an implementation-level issue. Produced by
 * `extractStructuralFindings`, consumed by `buildStructuralAmendmentPrompt`.
 */
export interface StructuralFinding {
  /** Short label for display + RunLog */
  category: "auth_design" | "model_design" | "api_contract" | "layer_violation" | "other_design";
  /** One-sentence description of the issue, extracted from the review text. */
  description: string;
}
|
|
129
|
+
|
|
130
|
+
// ─── Loop 2: Review Structural Issue Classifier ───────────────────────────────
|
|
131
|
+
|
|
132
|
+
/**
|
|
133
|
+
* Parse a 3-pass review text to extract Pass 1 (architecture) findings
|
|
134
|
+
* that indicate design-level issues in the Spec/DSL — as opposed to
|
|
135
|
+
* implementation-level issues that belong in §9 knowledge.
|
|
136
|
+
*
|
|
137
|
+
* Primary path: parse the structured JSON block emitted by the updated
|
|
138
|
+
* reviewArchitectureSystemPrompt (## 🔍 结构性发现 JSON section).
|
|
139
|
+
* Fallback: legacy regex approach for review texts generated before the
|
|
140
|
+
* structured output format was introduced.
|
|
141
|
+
*
|
|
142
|
+
* Returns an empty array if no structural issues are found or if the
|
|
143
|
+
* review score for Pass 1 is high (≥ 8), indicating overall approval.
|
|
144
|
+
*/
|
|
145
|
+
export function extractStructuralFindings(reviewText: string): StructuralFinding[] {
|
|
146
|
+
// Split by the separator used between passes ("─────...")
|
|
147
|
+
const parts = reviewText.split(/─{20,}/);
|
|
148
|
+
// Pass 1 is always the first section
|
|
149
|
+
const pass1Text = parts[0] ?? "";
|
|
150
|
+
|
|
151
|
+
// If Pass 1 scored well, treat as no structural issues
|
|
152
|
+
const pass1Score = extractPassScore(pass1Text);
|
|
153
|
+
if (pass1Score !== null && pass1Score >= 8) return [];
|
|
154
|
+
|
|
155
|
+
// ── Primary path: parse structured JSON block ─────────────────────────────
|
|
156
|
+
// Look for the JSON block within the "🔍 结构性发现 JSON" section of Pass 1.
|
|
157
|
+
// The block is delimited by ```json ... ``` and always contains a
|
|
158
|
+
// { structuralFindings: [...] } object.
|
|
159
|
+
const jsonBlockMatch = pass1Text.match(/```json\s*(\{[\s\S]*?\})\s*```/);
|
|
160
|
+
if (jsonBlockMatch) {
|
|
161
|
+
try {
|
|
162
|
+
const parsed = JSON.parse(jsonBlockMatch[1]);
|
|
163
|
+
if (Array.isArray(parsed.structuralFindings)) {
|
|
164
|
+
return parsed.structuralFindings.filter(
|
|
165
|
+
(f: unknown): f is StructuralFinding =>
|
|
166
|
+
typeof f === "object" &&
|
|
167
|
+
f !== null &&
|
|
168
|
+
typeof (f as StructuralFinding).category === "string" &&
|
|
169
|
+
typeof (f as StructuralFinding).description === "string"
|
|
170
|
+
);
|
|
171
|
+
}
|
|
172
|
+
} catch {
|
|
173
|
+
// JSON parse failed — fall through to regex fallback
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
// ── Fallback: legacy regex approach ──────────────────────────────────────
|
|
178
|
+
// Used when review text was generated before the structured JSON format
|
|
179
|
+
// was added to reviewArchitectureSystemPrompt.
|
|
180
|
+
const findings: StructuralFinding[] = [];
|
|
181
|
+
|
|
182
|
+
// Auth / 认证 design issues
|
|
183
|
+
if (
|
|
184
|
+
/缺少认证|missing auth|auth.*false|未加认证|鉴权.*缺|endpoint.*public.*should/i.test(pass1Text)
|
|
185
|
+
) {
|
|
186
|
+
const match = pass1Text.match(/[^。\n]*(?:缺少认证|missing auth|auth.*false|未加认证|鉴权.*缺|endpoint.*public.*should)[^。\n]*/i);
|
|
187
|
+
findings.push({
|
|
188
|
+
category: "auth_design",
|
|
189
|
+
description: match ? match[0].trim() : "One or more endpoints may have incorrect authentication requirements",
|
|
190
|
+
});
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
// API contract / 接口设计 issues
|
|
194
|
+
if (
|
|
195
|
+
/接口设计.*问题|接口.*不合理|API design|response.*missing|request.*missing|接口.*缺少/i.test(pass1Text)
|
|
196
|
+
) {
|
|
197
|
+
const match = pass1Text.match(/[^。\n]*(?:接口设计.*问题|接口.*不合理|API design|response.*missing|接口.*缺少)[^。\n]*/i);
|
|
198
|
+
findings.push({
|
|
199
|
+
category: "api_contract",
|
|
200
|
+
description: match ? match[0].trim() : "API contract design may have issues",
|
|
201
|
+
});
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
// Model / 数据模型 design issues
|
|
205
|
+
if (
|
|
206
|
+
/模型.*缺少字段|model.*missing field|数据结构.*问题|schema.*incomplete|字段.*missing/i.test(pass1Text)
|
|
207
|
+
) {
|
|
208
|
+
const match = pass1Text.match(/[^。\n]*(?:模型.*缺少字段|model.*missing field|数据结构.*问题|schema.*incomplete)[^。\n]*/i);
|
|
209
|
+
findings.push({
|
|
210
|
+
category: "model_design",
|
|
211
|
+
description: match ? match[0].trim() : "Data model design may be incomplete",
|
|
212
|
+
});
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
// Layer separation / 层级分离 violations
|
|
216
|
+
if (
|
|
217
|
+
/层级.*违反|layer.*violation|business logic.*controller|controller.*service.*混|分层.*问题/i.test(pass1Text)
|
|
218
|
+
) {
|
|
219
|
+
const match = pass1Text.match(/[^。\n]*(?:层级.*违反|layer.*violation|business logic.*controller|分层.*问题)[^。\n]*/i);
|
|
220
|
+
findings.push({
|
|
221
|
+
category: "layer_violation",
|
|
222
|
+
description: match ? match[0].trim() : "Layer separation may be violated in the generated code",
|
|
223
|
+
});
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
return findings;
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/** Extract the numeric score from a single pass section. */
|
|
230
|
+
function extractPassScore(text: string): number | null {
|
|
231
|
+
const m = text.match(/Score:\s*(\d+(?:\.\d+)?)\s*\/\s*10/i);
|
|
232
|
+
return m ? parseFloat(m[1]) : null;
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
// ─── Loop 2: Spec Amendment Prompt ────────────────────────────────────────────
|
|
236
|
+
|
|
237
|
+
/**
|
|
238
|
+
* Build a prompt asking the AI to produce a minimal spec amendment
|
|
239
|
+
* that addresses the structural findings from the review.
|
|
240
|
+
*
|
|
241
|
+
* The amendment is a targeted addition/correction — NOT a full rewrite.
|
|
242
|
+
*/
|
|
243
|
+
export function buildStructuralAmendmentPrompt(
|
|
244
|
+
spec: string,
|
|
245
|
+
findings: StructuralFinding[]
|
|
246
|
+
): string {
|
|
247
|
+
const findingList = findings
|
|
248
|
+
.map((f, i) => `${i + 1}. [${f.category}] ${f.description}`)
|
|
249
|
+
.join("\n");
|
|
250
|
+
|
|
251
|
+
return `A code review of the feature built from this spec found the following DESIGN-LEVEL issues.
|
|
252
|
+
These are problems in the spec/contract itself, not in the implementation.
|
|
253
|
+
|
|
254
|
+
=== Structural Findings ===
|
|
255
|
+
${findingList}
|
|
256
|
+
|
|
257
|
+
Your task:
|
|
258
|
+
- Revise the spec below to correct the design issues listed above.
|
|
259
|
+
- Do NOT change the feature scope, business logic, or sections unrelated to these findings.
|
|
260
|
+
- Be minimal: only change what is necessary to fix the design issues.
|
|
261
|
+
- Output ONLY the complete revised Markdown spec. No preamble, no explanation.
|
|
262
|
+
|
|
263
|
+
=== Current Spec ===
|
|
264
|
+
${spec}`;
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
// ─── Display Helpers ──────────────────────────────────────────────────────────
|
|
268
|
+
|
|
269
|
+
export function printDslGaps(gaps: DslGap[]): void {
|
|
270
|
+
console.log(chalk.yellow("\n ⚠ DSL Completeness Check — gaps detected:"));
|
|
271
|
+
for (const gap of gaps) {
|
|
272
|
+
console.log(chalk.yellow(` · ${gap.message}`));
|
|
273
|
+
}
|
|
274
|
+
console.log(chalk.gray(" → A targeted spec refinement can fill these gaps before codegen."));
|
|
275
|
+
}
|
|
276
|
+
|
|
277
|
+
export function printStructuralFindings(findings: StructuralFinding[]): void {
|
|
278
|
+
console.log(chalk.yellow("\n ⚠ Review — structural (design-level) issues found:"));
|
|
279
|
+
for (const f of findings) {
|
|
280
|
+
const label = chalk.gray(`[${f.category}]`);
|
|
281
|
+
console.log(` ${label} ${f.description}`);
|
|
282
|
+
}
|
|
283
|
+
console.log(chalk.gray(" → These are contract issues in the Spec/DSL, not just implementation problems."));
|
|
284
|
+
console.log(chalk.gray(" → Fixing the spec now means the next run generates correct code from the start."));
|
|
285
|
+
}
|
package/core/error-feedback.ts
CHANGED
|
@@ -22,6 +22,22 @@ interface FixResult {
|
|
|
22
22
|
explanation: string;
|
|
23
23
|
}
|
|
24
24
|
|
|
25
|
+
// ─── Budgets ────────────────────────────────────────────────────────────────────

/**
 * Maximum characters captured from a single command's output before parsing.
 * Prevents a pathological build output (e.g. 10MB of warnings) from
 * ballooning the AI context on subsequent fix cycles.
 * NOTE(review): the token estimate is approximate (~4 chars/token).
 */
const MAX_COMMAND_OUTPUT_CHARS = 50_000;

/**
 * Maximum characters of an existing file sent to the AI for auto-fix.
 * Content beyond this is truncated with an inline notice so the AI knows it
 * may be seeing an incomplete file.
 */
const MAX_FIX_FILE_CHARS = 60_000;
|
|
40
|
+
|
|
25
41
|
// ─── Error Detection ────────────────────────────────────────────────────────────
|
|
26
42
|
|
|
27
43
|
function runCommand(cmd: string, cwd: string): { success: boolean; output: string } {
|
|
@@ -30,7 +46,13 @@ function runCommand(cmd: string, cwd: string): { success: boolean; output: strin
|
|
|
30
46
|
return { success: true, output };
|
|
31
47
|
} catch (err) {
|
|
32
48
|
const e = err as { stdout?: string; stderr?: string; message?: string };
|
|
33
|
-
|
|
49
|
+
const raw = e.stdout || e.stderr || e.message || "";
|
|
50
|
+
// Apply output budget: cap before parsing to prevent huge outputs from
|
|
51
|
+
// filling up the AI context on subsequent fix cycles.
|
|
52
|
+
const output = raw.length > MAX_COMMAND_OUTPUT_CHARS
|
|
53
|
+
? raw.slice(0, MAX_COMMAND_OUTPUT_CHARS) + `\n... [output truncated at ${MAX_COMMAND_OUTPUT_CHARS} chars]`
|
|
54
|
+
: raw;
|
|
55
|
+
return { success: false, output };
|
|
34
56
|
}
|
|
35
57
|
}
|
|
36
58
|
|
|
@@ -328,6 +350,13 @@ async function attemptFix(
|
|
|
328
350
|
const dslSection = dsl ? `\n${buildDslContextSection(dsl)}\n` : "";
|
|
329
351
|
const errorSummary = fileErrors.map((e) => `[${e.source}] ${e.message}`).join("\n");
|
|
330
352
|
|
|
353
|
+
// Apply file content budget — very large files are truncated with a notice.
|
|
354
|
+
// The AI still has enough context to fix the errors (which reference specific lines).
|
|
355
|
+
const fileContent = existingContent.length > MAX_FIX_FILE_CHARS
|
|
356
|
+
? existingContent.slice(0, MAX_FIX_FILE_CHARS) +
|
|
357
|
+
`\n\n// ... [file truncated at ${MAX_FIX_FILE_CHARS} chars — fix only the error lines above]`
|
|
358
|
+
: existingContent;
|
|
359
|
+
|
|
331
360
|
const prompt = `Fix the following errors in the file.
|
|
332
361
|
|
|
333
362
|
File: ${file}
|
|
@@ -336,7 +365,7 @@ ${dslSection}
|
|
|
336
365
|
${errorSummary}
|
|
337
366
|
|
|
338
367
|
=== Current File Content ===
|
|
339
|
-
${
|
|
368
|
+
${fileContent}
|
|
340
369
|
|
|
341
370
|
Output ONLY the complete fixed file content. No markdown fences, no explanations.`;
|
|
342
371
|
|
|
@@ -394,6 +423,8 @@ export async function runErrorFeedback(
|
|
|
394
423
|
|
|
395
424
|
if (buildCmd) console.log(chalk.gray(` Type-check: ${buildCmd}`));
|
|
396
425
|
|
|
426
|
+
let prevErrorCount = Infinity; // circuit-breaker: tracks error count from previous cycle
|
|
427
|
+
|
|
397
428
|
for (let cycle = 1; cycle <= maxCycles; cycle++) {
|
|
398
429
|
const allErrors: ErrorEntry[] = [];
|
|
399
430
|
|
|
@@ -462,6 +493,19 @@ export async function runErrorFeedback(
|
|
|
462
493
|
return true;
|
|
463
494
|
}
|
|
464
495
|
|
|
496
|
+
// Circuit breaker: if the fix cycle made no progress (error count did not
|
|
497
|
+
// decrease), stop immediately rather than spending another AI cycle.
|
|
498
|
+
if (allErrors.length >= prevErrorCount) {
|
|
499
|
+
console.log(
|
|
500
|
+
chalk.yellow(
|
|
501
|
+
`\n ⚠ Auto-fix made no progress (${allErrors.length} error(s) before and after). Stopping early.`
|
|
502
|
+
)
|
|
503
|
+
);
|
|
504
|
+
console.log(chalk.gray(" Manual intervention needed."));
|
|
505
|
+
return false;
|
|
506
|
+
}
|
|
507
|
+
prevErrorCount = allErrors.length;
|
|
508
|
+
|
|
465
509
|
if (cycle < maxCycles) {
|
|
466
510
|
console.log(chalk.cyan(`\n Attempting auto-fix (${allErrors.length} error(s))...`));
|
|
467
511
|
await attemptFix(provider, allErrors, workingDir, dsl);
|
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* project-index.ts — Persistent project discovery & index.
|
|
3
|
+
*
|
|
4
|
+
* Scans a root directory for sub-projects (any dir with a recognisable
|
|
5
|
+
* project manifest), and maintains an incremental JSON index file at
|
|
6
|
+
* .ai-spec-index.json in the scan root.
|
|
7
|
+
*
|
|
8
|
+
* Incremental rules:
|
|
9
|
+
* - New project found → added with firstSeen = now
|
|
10
|
+
* - Existing project → techStack / type / role / hasConstitution refreshed, lastSeen = now
|
|
11
|
+
* - Previously indexed but directory gone → marked missing:true, NOT deleted
|
|
12
|
+
*
|
|
13
|
+
* The index is intentionally lightweight — no AI calls, pure filesystem scan.
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import * as fs from "fs-extra";
|
|
17
|
+
import * as path from "path";
|
|
18
|
+
import { detectRepoType, RepoType, RepoRole, WORKSPACE_CONFIG_FILE } from "./workspace-loader";
|
|
19
|
+
import { CONSTITUTION_FILE } from "./constitution-generator";
|
|
20
|
+
|
|
21
|
+
export const INDEX_FILE = ".ai-spec-index.json";
|
|
22
|
+
|
|
23
|
+
// ─── Key dependency lists for tech-stack extraction ──────────────────────────
|
|
24
|
+
|
|
25
|
+
/**
 * Dependencies considered "tech-stack defining". `extractTechStack` reports
 * the intersection of this list with a project's package.json
 * dependencies + devDependencies; everything else is ignored.
 */
const KEY_DEPS: string[] = [
  // Frameworks
  "express", "fastify", "koa", "@nestjs/core", "hapi",
  "next", "react", "vue", "nuxt", "svelte",
  "react-native", "expo",
  // DB / ORM
  "prisma", "@prisma/client", "mongoose", "typeorm", "sequelize", "drizzle-orm",
  // Auth
  "jsonwebtoken", "passport", "next-auth", "@clerk/nextjs",
  // Build / Lang
  "typescript", "vite", "webpack", "esbuild", "turbo",
  // Testing
  "jest", "vitest", "mocha", "cypress", "playwright",
  // Infra
  "redis", "bull", "socket.io", "graphql", "@trpc/server",
];
|
|
41
|
+
|
|
42
|
+
// ─── Types ────────────────────────────────────────────────────────────────────
|
|
43
|
+
|
|
44
|
+
/** One discovered sub-project, persisted in the .ai-spec-index.json file. */
export interface ProjectEntry {
  /** Directory name */
  name: string;
  /** Path relative to scanRoot */
  path: string;
  type: RepoType;
  role: RepoRole;
  /** Key dependencies detected (subset of package.json deps or language markers) */
  techStack: string[];
  /** Whether .ai-spec-constitution.md exists */
  hasConstitution: boolean;
  /** Whether .ai-spec-workspace.json exists (this repo is a workspace root) */
  hasWorkspace: boolean;
  /** ISO timestamp of first discovery */
  firstSeen: string;
  /** ISO timestamp of last successful scan */
  lastSeen: string;
  /** true when the directory no longer exists on disk (entries are never deleted) */
  missing?: boolean;
}

/** Top-level shape of the persisted .ai-spec-index.json file. */
export interface ProjectIndex {
  /** Absolute path of the directory that was scanned */
  scanRoot: string;
  /** ISO timestamp of last scan */
  lastScanned: string;
  projects: ProjectEntry[];
}
|
|
72
|
+
|
|
73
|
+
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
|
74
|
+
|
|
75
|
+
/**
 * Directories to always skip during scan — build artefacts, VCS metadata,
 * dependency trees, and ai-spec's own state directories. Hidden directories
 * (dot-prefixed) are additionally skipped by the walker itself.
 */
const SKIP_DIRS = new Set([
  "node_modules", ".git", ".svn", "dist", "build", "out", ".next",
  ".nuxt", "coverage", ".turbo", ".cache", "__pycache__", "vendor",
  ".ai-spec-vcr", ".ai-spec-logs", "specs",
]);

/** Manifest files that identify a directory as a project root */
const MANIFEST_FILES = [
  "package.json",
  "go.mod",
  "Cargo.toml",
  "pom.xml",
  "build.gradle",
  "build.gradle.kts",
  "requirements.txt",
  "pyproject.toml",
  "setup.py",
  "composer.json",
];
|
|
95
|
+
|
|
96
|
+
async function isProjectRoot(absPath: string): Promise<boolean> {
|
|
97
|
+
for (const manifest of MANIFEST_FILES) {
|
|
98
|
+
if (await fs.pathExists(path.join(absPath, manifest))) return true;
|
|
99
|
+
}
|
|
100
|
+
return false;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
async function extractTechStack(absPath: string, type: RepoType): Promise<string[]> {
|
|
104
|
+
const stack: string[] = [];
|
|
105
|
+
|
|
106
|
+
// Language marker for non-Node projects
|
|
107
|
+
if (type === "go") stack.push("go");
|
|
108
|
+
if (type === "rust") stack.push("rust");
|
|
109
|
+
if (type === "java") stack.push("java");
|
|
110
|
+
if (type === "python") stack.push("python");
|
|
111
|
+
if (type === "php") stack.push("php");
|
|
112
|
+
|
|
113
|
+
const pkgPath = path.join(absPath, "package.json");
|
|
114
|
+
if (!(await fs.pathExists(pkgPath))) return stack;
|
|
115
|
+
|
|
116
|
+
let pkg: Record<string, unknown> = {};
|
|
117
|
+
try { pkg = await fs.readJson(pkgPath); } catch { return stack; }
|
|
118
|
+
|
|
119
|
+
const allDeps = {
|
|
120
|
+
...((pkg.dependencies as Record<string, string>) ?? {}),
|
|
121
|
+
...((pkg.devDependencies as Record<string, string>) ?? {}),
|
|
122
|
+
};
|
|
123
|
+
const depKeys = new Set(Object.keys(allDeps));
|
|
124
|
+
|
|
125
|
+
for (const dep of KEY_DEPS) {
|
|
126
|
+
if (depKeys.has(dep)) stack.push(dep);
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
return stack;
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
// ─── Scan ─────────────────────────────────────────────────────────────────────
|
|
133
|
+
|
|
134
|
+
/**
 * Discover all project roots under `rootDir` up to `maxDepth` levels deep.
 * Returns paths relative to rootDir.
 *
 * Rules visible in the walk below:
 * - Unreadable directories are silently skipped (best-effort scan).
 * - Hidden dirs and SKIP_DIRS entries are never entered.
 * - A dir whose `.git` is a *file* is skipped entirely (git worktree).
 * - Recursion stops at a project root, so nested manifests inside a
 *   project (e.g. in examples/) are not reported separately.
 */
async function discoverProjects(
  rootDir: string,
  maxDepth: number
): Promise<string[]> {
  const found: string[] = [];

  // Depth-first sequential walk; depth 0 = direct children of rootDir.
  async function walk(absDir: string, depth: number): Promise<void> {
    if (depth > maxDepth) return;

    let entries: fs.Dirent[];
    try {
      entries = await fs.readdir(absDir, { withFileTypes: true });
    } catch {
      // Permission error or TOCTOU removal — treat as empty.
      return;
    }

    for (const entry of entries) {
      if (!entry.isDirectory()) continue;
      if (SKIP_DIRS.has(entry.name) || entry.name.startsWith(".")) continue;

      const childAbs = path.join(absDir, entry.name);

      // Skip git worktrees — they have a .git *file* (not directory)
      const gitPath = path.join(childAbs, ".git");
      if (await fs.pathExists(gitPath)) {
        const gitStat = await fs.stat(gitPath);
        if (gitStat.isFile()) continue; // git worktree — skip
      }

      if (await isProjectRoot(childAbs)) {
        found.push(path.relative(rootDir, childAbs));
        // Don't recurse into a project root — avoids picking up nested node_modules etc.
      } else {
        await walk(childAbs, depth + 1);
      }
    }
  }

  await walk(rootDir, 0);
  return found;
}
|
|
179
|
+
|
|
180
|
+
// ─── Index load / save ────────────────────────────────────────────────────────
|
|
181
|
+
|
|
182
|
+
export async function loadIndex(scanRoot: string): Promise<ProjectIndex | null> {
|
|
183
|
+
const filePath = path.join(scanRoot, INDEX_FILE);
|
|
184
|
+
try {
|
|
185
|
+
return await fs.readJson(filePath);
|
|
186
|
+
} catch {
|
|
187
|
+
return null;
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
export async function saveIndex(scanRoot: string, index: ProjectIndex): Promise<string> {
|
|
192
|
+
const filePath = path.join(scanRoot, INDEX_FILE);
|
|
193
|
+
await fs.writeJson(filePath, index, { spaces: 2 });
|
|
194
|
+
return filePath;
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
// ─── Incremental merge ────────────────────────────────────────────────────────
|
|
198
|
+
|
|
199
|
+
/** Result of one incremental scan: the merged index plus a change summary. */
export interface ScanResult {
  /** The fully merged, sorted index (new + refreshed + missing entries). */
  index: ProjectIndex;
  /** Projects discovered for the first time in this scan. */
  added: ProjectEntry[];
  /** Previously known projects whose detected metadata changed. */
  updated: ProjectEntry[];
  /** Previously known projects with no detected changes. */
  unchanged: ProjectEntry[];
  /** Projects newly flagged missing in this scan (directory gone). */
  nowMissing: ProjectEntry[];
}
|
|
206
|
+
|
|
207
|
+
/**
|
|
208
|
+
* Run an incremental scan of `scanRoot`, merge with the existing index, and
|
|
209
|
+
* return the updated index along with a change summary.
|
|
210
|
+
*/
|
|
211
|
+
export async function runScan(
|
|
212
|
+
scanRoot: string,
|
|
213
|
+
maxDepth = 2
|
|
214
|
+
): Promise<ScanResult> {
|
|
215
|
+
const now = new Date().toISOString();
|
|
216
|
+
const existing = await loadIndex(scanRoot);
|
|
217
|
+
const existingMap = new Map<string, ProjectEntry>(
|
|
218
|
+
(existing?.projects ?? []).map((p) => [p.path, p])
|
|
219
|
+
);
|
|
220
|
+
|
|
221
|
+
const discoveredPaths = await discoverProjects(scanRoot, maxDepth);
|
|
222
|
+
|
|
223
|
+
const added: ProjectEntry[] = [];
|
|
224
|
+
const updated: ProjectEntry[] = [];
|
|
225
|
+
const unchanged: ProjectEntry[] = [];
|
|
226
|
+
const seenPaths = new Set<string>();
|
|
227
|
+
|
|
228
|
+
for (const relPath of discoveredPaths) {
|
|
229
|
+
const absPath = path.join(scanRoot, relPath);
|
|
230
|
+
seenPaths.add(relPath);
|
|
231
|
+
|
|
232
|
+
const { type, role } = await detectRepoType(absPath);
|
|
233
|
+
const techStack = await extractTechStack(absPath, type);
|
|
234
|
+
const hasConstitution = await fs.pathExists(path.join(absPath, CONSTITUTION_FILE));
|
|
235
|
+
const hasWorkspace = await fs.pathExists(path.join(absPath, WORKSPACE_CONFIG_FILE));
|
|
236
|
+
const name = path.basename(relPath);
|
|
237
|
+
|
|
238
|
+
const prev = existingMap.get(relPath);
|
|
239
|
+
if (!prev) {
|
|
240
|
+
const entry: ProjectEntry = {
|
|
241
|
+
name,
|
|
242
|
+
path: relPath,
|
|
243
|
+
type,
|
|
244
|
+
role,
|
|
245
|
+
techStack,
|
|
246
|
+
hasConstitution,
|
|
247
|
+
hasWorkspace,
|
|
248
|
+
firstSeen: now,
|
|
249
|
+
lastSeen: now,
|
|
250
|
+
};
|
|
251
|
+
added.push(entry);
|
|
252
|
+
existingMap.set(relPath, entry);
|
|
253
|
+
} else {
|
|
254
|
+
// Check if anything changed
|
|
255
|
+
const changed =
|
|
256
|
+
prev.type !== type ||
|
|
257
|
+
prev.role !== role ||
|
|
258
|
+
prev.hasConstitution !== hasConstitution ||
|
|
259
|
+
prev.hasWorkspace !== hasWorkspace ||
|
|
260
|
+
JSON.stringify(prev.techStack.sort()) !== JSON.stringify(techStack.sort());
|
|
261
|
+
|
|
262
|
+
const entry: ProjectEntry = {
|
|
263
|
+
...prev,
|
|
264
|
+
type,
|
|
265
|
+
role,
|
|
266
|
+
techStack,
|
|
267
|
+
hasConstitution,
|
|
268
|
+
hasWorkspace,
|
|
269
|
+
lastSeen: now,
|
|
270
|
+
missing: undefined, // clear missing flag if it came back
|
|
271
|
+
};
|
|
272
|
+
existingMap.set(relPath, entry);
|
|
273
|
+
|
|
274
|
+
if (changed) {
|
|
275
|
+
updated.push(entry);
|
|
276
|
+
} else {
|
|
277
|
+
unchanged.push(entry);
|
|
278
|
+
}
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
// Mark previously known projects as missing if their directory is gone
|
|
283
|
+
const nowMissing: ProjectEntry[] = [];
|
|
284
|
+
for (const [relPath, entry] of existingMap) {
|
|
285
|
+
if (!seenPaths.has(relPath) && !entry.missing) {
|
|
286
|
+
const gone: ProjectEntry = { ...entry, missing: true };
|
|
287
|
+
existingMap.set(relPath, gone);
|
|
288
|
+
nowMissing.push(gone);
|
|
289
|
+
}
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
const projects = [...existingMap.values()].sort((a, b) => a.path.localeCompare(b.path));
|
|
293
|
+
|
|
294
|
+
const index: ProjectIndex = {
|
|
295
|
+
scanRoot,
|
|
296
|
+
lastScanned: now,
|
|
297
|
+
projects,
|
|
298
|
+
};
|
|
299
|
+
|
|
300
|
+
return { index, added, updated, unchanged, nowMissing };
|
|
301
|
+
}
|