ai-spec-dev 0.1.0 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/.claude/settings.local.json +18 -0
  2. package/README.md +1215 -146
  3. package/RELEASE_LOG.md +1489 -0
  4. package/cli/index.ts +1981 -0
  5. package/cli/welcome.ts +151 -0
  6. package/core/code-generator.ts +757 -0
  7. package/core/combined-generator.ts +63 -0
  8. package/core/constitution-consolidator.ts +141 -0
  9. package/core/constitution-generator.ts +89 -0
  10. package/core/context-loader.ts +453 -0
  11. package/core/contract-bridge.ts +217 -0
  12. package/core/dsl-extractor.ts +337 -0
  13. package/core/dsl-types.ts +166 -0
  14. package/core/dsl-validator.ts +450 -0
  15. package/core/error-feedback.ts +354 -0
  16. package/core/frontend-context-loader.ts +602 -0
  17. package/core/global-constitution.ts +88 -0
  18. package/core/key-store.ts +49 -0
  19. package/core/knowledge-memory.ts +171 -0
  20. package/core/mock-server-generator.ts +571 -0
  21. package/core/openapi-exporter.ts +361 -0
  22. package/core/requirement-decomposer.ts +198 -0
  23. package/core/reviewer.ts +259 -0
  24. package/core/spec-assessor.ts +99 -0
  25. package/core/spec-generator.ts +428 -0
  26. package/core/spec-refiner.ts +89 -0
  27. package/core/spec-updater.ts +227 -0
  28. package/core/spec-versioning.ts +213 -0
  29. package/core/task-generator.ts +174 -0
  30. package/core/test-generator.ts +273 -0
  31. package/core/workspace-loader.ts +256 -0
  32. package/dist/cli/index.js +6717 -672
  33. package/dist/cli/index.js.map +1 -1
  34. package/dist/cli/index.mjs +6717 -670
  35. package/dist/cli/index.mjs.map +1 -1
  36. package/dist/index.d.mts +147 -27
  37. package/dist/index.d.ts +147 -27
  38. package/dist/index.js +2337 -286
  39. package/dist/index.js.map +1 -1
  40. package/dist/index.mjs +2329 -285
  41. package/dist/index.mjs.map +1 -1
  42. package/git/worktree.ts +109 -0
  43. package/index.ts +9 -0
  44. package/package.json +4 -28
  45. package/prompts/codegen.prompt.ts +259 -0
  46. package/prompts/consolidate.prompt.ts +73 -0
  47. package/prompts/constitution.prompt.ts +63 -0
  48. package/prompts/decompose.prompt.ts +168 -0
  49. package/prompts/dsl.prompt.ts +203 -0
  50. package/prompts/frontend-spec.prompt.ts +191 -0
  51. package/prompts/global-constitution.prompt.ts +61 -0
  52. package/prompts/spec-assess.prompt.ts +53 -0
  53. package/prompts/spec.prompt.ts +102 -0
  54. package/prompts/tasks.prompt.ts +35 -0
  55. package/prompts/testgen.prompt.ts +84 -0
  56. package/prompts/update.prompt.ts +131 -0
  57. package/purpose.docx +0 -0
  58. package/purpose.md +444 -0
  59. package/tsconfig.json +14 -0
  60. package/tsup.config.ts +10 -0
@@ -0,0 +1,428 @@
1
+ import { GoogleGenerativeAI } from "@google/generative-ai";
2
+ import Anthropic from "@anthropic-ai/sdk";
3
+ import OpenAI from "openai";
4
+ import axios from "axios";
5
+ import { ProxyAgent } from "undici";
6
+ import { specPrompt } from "../prompts/spec.prompt";
7
+ import { ProjectContext } from "./context-loader";
8
+
9
+ // ─── Proxy Helper ─────────────────────────────────────────────────────────────
10
+ // 仅用于 Gemini:其他 SDK(Anthropic / OpenAI)会自动读取 HTTPS_PROXY。
11
+ // Gemini SDK 使用 Node.js 原生 fetch(undici),不会自动读代理环境变量,
12
+ // 需要手动创建 ProxyAgent 并通过 fetchOptions 注入。
13
+ // 这是 in-process 级别的配置,完全不影响 execSync 启动的子进程(如 claude CLI)。
14
+
15
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
16
+ function geminiRequestOptions(): any {
17
+ const proxyUrl =
18
+ process.env.GEMINI_PROXY ||
19
+ process.env.HTTPS_PROXY ||
20
+ process.env.https_proxy ||
21
+ process.env.HTTP_PROXY ||
22
+ process.env.http_proxy;
23
+
24
+ if (!proxyUrl) return undefined;
25
+ // fetchOptions.dispatcher は型定義(v0.21)に未記載だが runtime では動作する
26
+ return { fetchOptions: { dispatcher: new ProxyAgent(proxyUrl) } };
27
+ }
28
+
29
// ─── Provider Interface ────────────────────────────────────────────────────────

/** Minimal contract implemented by every AI backend in this module. */
export interface AIProvider {
  /** Send `prompt` (plus an optional system instruction) and return the raw text reply. */
  generate(prompt: string, systemInstruction?: string): Promise<string>;
  /** Catalog key of the provider (e.g. "gemini", "claude", "mimo"). */
  readonly providerName: string;
  /** Concrete model identifier used for requests. */
  readonly modelName: string;
}
36
+
37
// ─── Provider Catalog ─────────────────────────────────────────────────────────
// Single source of truth for all supported providers and their models.

/** Static metadata describing one supported AI provider. */
export interface ProviderMeta {
  /** Human-readable display name */
  displayName: string;
  /** Short description shown in model picker */
  description: string;
  /** Available models (first is the default) */
  models: string[];
  /** Environment variable name for the API key */
  envKey: string;
  /**
   * Base URL for OpenAI-compatible providers.
   * Undefined means the provider has its own SDK (Gemini / Claude).
   */
  baseURL?: string;
  /**
   * Role to use for system instructions.
   * OpenAI o1/o3 use "developer" instead of "system".
   * Default: "system"
   */
  systemRole?: "system" | "developer";
  /**
   * Extra body params injected into every chat completion request.
   * e.g. Qwen3 needs { enable_thinking: false } to suppress CoT noise.
   */
  extraBody?: Record<string, unknown>;
}
66
+
67
// Key order matters: SUPPORTED_PROVIDERS and pickers derive their order from it,
// and models[0] of each entry is that provider's default model.
export const PROVIDER_CATALOG: Record<string, ProviderMeta> = {
  // ── International ──────────────────────────────────────────────────────────
  mimo: {
    displayName: "MiMo (Xiaomi)",
    description: "小米 MiMo — mimo-v2-pro (Anthropic-compatible API)",
    models: ["mimo-v2-pro"],
    envKey: "MIMO_API_KEY",
    // baseURL not used — MiMo has a dedicated provider class
  },
  gemini: {
    displayName: "Google Gemini",
    description: "Google AI Studio — Gemini 2.5 / 2.0 series",
    models: [
      "gemini-2.5-pro",
      "gemini-2.5-flash",
      "gemini-2.0-flash",
      "gemini-2.0-flash-lite",
      "gemini-1.5-pro",
      "gemini-1.5-flash",
    ],
    envKey: "GEMINI_API_KEY",
  },
  claude: {
    displayName: "Anthropic Claude",
    description: "Anthropic — Claude 4.x / 3.7 series",
    models: [
      "claude-opus-4-6",
      "claude-sonnet-4-6",
      "claude-haiku-4-5",
      "claude-3-7-sonnet-20250219",
    ],
    envKey: "ANTHROPIC_API_KEY",
  },
  openai: {
    displayName: "OpenAI",
    description: "OpenAI — o3 / GPT-4o series",
    models: [
      "o3",
      "o3-mini",
      "o1",
      "o1-mini",
      "gpt-4o",
      "gpt-4o-mini",
    ],
    envKey: "OPENAI_API_KEY",
    baseURL: "https://api.openai.com/v1",
  },
  deepseek: {
    displayName: "DeepSeek",
    description: "DeepSeek — V3 (chat) / R1 (reasoning)",
    models: [
      "deepseek-chat", // DeepSeek-V3
      "deepseek-reasoner", // DeepSeek-R1
    ],
    envKey: "DEEPSEEK_API_KEY",
    baseURL: "https://api.deepseek.com/v1",
  },

  // ── Chinese Models (OpenAI-compatible) ────────────────────────────────────
  qwen: {
    displayName: "通义千问 (Qwen)",
    description: "阿里云百炼 — Qwen3 / Qwen2.5 series",
    models: [
      "qwen3-235b-a22b", // Qwen3 MoE flagship (supports thinking mode)
      "qwen3-72b",
      "qwen3-32b",
      "qwen3-8b",
      "qwen-max",
      "qwen-max-latest",
      "qwen-plus",
      "qwen-long",
    ],
    envKey: "DASHSCOPE_API_KEY",
    baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1",
    // Qwen3 models enable thinking (CoT) by default, which pollutes structured outputs.
    // Disable it so JSON/Markdown responses stay clean.
    extraBody: { enable_thinking: false },
  },
  glm: {
    displayName: "智谱 GLM (Zhipu AI)",
    description: "智谱 AI — GLM-5 / GLM-4 series + Z1 reasoning",
    models: [
      "glm-5", // GLM-5 flagship (verify the latest model ID if unavailable)
      "glm-5-flash",
      "glm-z1", // GLM-Z1 reasoning model
      "glm-z1-flash",
      "glm-4-plus",
      "glm-4-flash",
      "glm-4-long",
    ],
    envKey: "ZHIPU_API_KEY",
    baseURL: "https://open.bigmodel.cn/api/paas/v4/",
  },
  minimax: {
    displayName: "MiniMax",
    description: "MiniMax AI — MiniMax-Text-2.7 / Text-01 series",
    models: [
      "MiniMax-Text-2.7", // MiniMax flagship (verify the latest model ID if unavailable)
      "MiniMax-Text-01",
      "abab6.5s-chat",
    ],
    envKey: "MINIMAX_API_KEY",
    baseURL: "https://api.minimax.chat/v1",
  },
  doubao: {
    displayName: "豆包 Doubao (ByteDance)",
    description: "火山引擎 Ark — Doubao Pro/Lite series",
    models: [
      "doubao-pro-256k",
      "doubao-pro-128k",
      "doubao-pro-32k",
      "doubao-lite-128k",
      "doubao-lite-32k",
    ],
    envKey: "ARK_API_KEY",
    baseURL: "https://ark.cn-beijing.volces.com/api/v3",
  },
};
185
+
186
+ // Derived convenience maps (kept for backward compatibility)
187
+ export const SUPPORTED_PROVIDERS = Object.keys(PROVIDER_CATALOG);
188
+
189
+ export const DEFAULT_MODELS: Record<string, string> = Object.fromEntries(
190
+ Object.entries(PROVIDER_CATALOG).map(([k, v]) => [k, v.models[0]])
191
+ );
192
+
193
+ export const ENV_KEY_MAP: Record<string, string> = Object.fromEntries(
194
+ Object.entries(PROVIDER_CATALOG).map(([k, v]) => [k, v.envKey])
195
+ );
196
+
197
+ // ─── Gemini Provider ───────────────────────────────────────────────────────────
198
+
199
+ export class GeminiProvider implements AIProvider {
200
+ private genAI: GoogleGenerativeAI;
201
+ readonly providerName = "gemini";
202
+ readonly modelName: string;
203
+
204
+ constructor(apiKey: string, modelName = PROVIDER_CATALOG.gemini.models[0]) {
205
+ this.genAI = new GoogleGenerativeAI(apiKey);
206
+ this.modelName = modelName;
207
+ }
208
+
209
+ async generate(prompt: string, systemInstruction?: string): Promise<string> {
210
+ const model = this.genAI.getGenerativeModel(
211
+ { model: this.modelName, ...(systemInstruction ? { systemInstruction } : {}) },
212
+ geminiRequestOptions()
213
+ );
214
+ const result = await model.generateContent(prompt);
215
+ return result.response.text();
216
+ }
217
+ }
218
+
219
+ // ─── Claude Provider ───────────────────────────────────────────────────────────
220
+
221
+ export class ClaudeProvider implements AIProvider {
222
+ private client: Anthropic;
223
+ readonly providerName = "claude";
224
+ readonly modelName: string;
225
+
226
+ constructor(apiKey: string, modelName = PROVIDER_CATALOG.claude.models[0]) {
227
+ this.client = new Anthropic({ apiKey });
228
+ this.modelName = modelName;
229
+ }
230
+
231
+ async generate(prompt: string, systemInstruction?: string): Promise<string> {
232
+ const message = await this.client.messages.create({
233
+ model: this.modelName,
234
+ max_tokens: 8192,
235
+ ...(systemInstruction ? { system: systemInstruction } : {}),
236
+ messages: [{ role: "user", content: prompt }],
237
+ });
238
+ const block = message.content[0];
239
+ if (block.type === "text") return block.text;
240
+ throw new Error("Unexpected response type from Claude API");
241
+ }
242
+ }
243
+
244
+ // ─── OpenAI-Compatible Provider ───────────────────────────────────────────────
245
+ // Handles OpenAI, DeepSeek, Qwen, MiniMax, GLM, Doubao — all expose the same API.
246
+
247
+ export class OpenAICompatibleProvider implements AIProvider {
248
+ protected client: OpenAI;
249
+ readonly providerName: string;
250
+ readonly modelName: string;
251
+ private systemRole: "system" | "developer";
252
+ private extraBody?: Record<string, unknown>;
253
+
254
+ constructor(
255
+ providerName: string,
256
+ apiKey: string,
257
+ modelName: string,
258
+ baseURL?: string,
259
+ systemRole: "system" | "developer" = "system",
260
+ extraBody?: Record<string, unknown>
261
+ ) {
262
+ this.providerName = providerName;
263
+ this.modelName = modelName;
264
+ this.systemRole = systemRole;
265
+ this.extraBody = extraBody;
266
+ this.client = new OpenAI({
267
+ apiKey,
268
+ ...(baseURL ? { baseURL } : {}),
269
+ });
270
+ }
271
+
272
+ async generate(prompt: string, systemInstruction?: string): Promise<string> {
273
+ const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [];
274
+
275
+ if (systemInstruction) {
276
+ // o1/o3 require "developer" role; gpt-4o/gpt-4-turbo use "system"
277
+ // Auto-detect: if model starts with "o1" or "o3", use developer role
278
+ const isOSeries = /^o[13]/.test(this.modelName);
279
+ const role = isOSeries ? "developer" : this.systemRole;
280
+ messages.push({ role, content: systemInstruction } as OpenAI.Chat.ChatCompletionMessageParam);
281
+ }
282
+ messages.push({ role: "user", content: prompt });
283
+
284
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
285
+ const completion = await (this.client.chat.completions.create as any)({
286
+ model: this.modelName,
287
+ messages,
288
+ ...(this.extraBody ? { extra_body: this.extraBody } : {}),
289
+ });
290
+
291
+ return (completion.choices[0].message.content as string) ?? "";
292
+ }
293
+ }
294
+
295
+ // ─── MiMo Provider ─────────────────────────────────────────────────────────────
296
+ // MiMo uses the Anthropic messages format but with a different base URL
297
+ // and a custom "api-key" auth header (not "x-api-key" / "Authorization: Bearer").
298
+ // The Anthropic SDK does not support custom auth headers, so we call the API
299
+ // directly via axios.
300
+
301
+ export class MiMoProvider implements AIProvider {
302
+ readonly providerName = "mimo";
303
+ readonly modelName: string;
304
+ private apiKey: string;
305
+ private readonly baseUrl = "https://api.xiaomimimo.com/anthropic/v1/messages";
306
+
307
+ constructor(apiKey: string, modelName = PROVIDER_CATALOG.mimo.models[0]) {
308
+ this.apiKey = apiKey;
309
+ this.modelName = modelName;
310
+ }
311
+
312
+ async generate(prompt: string, systemInstruction?: string): Promise<string> {
313
+ const body: Record<string, unknown> = {
314
+ model: this.modelName,
315
+ max_tokens: 16384,
316
+ messages: [{ role: "user", content: [{ type: "text", text: prompt }] }],
317
+ top_p: 0.95,
318
+ stream: false,
319
+ temperature: 1.0,
320
+ stop_sequences: null,
321
+ };
322
+
323
+ if (systemInstruction) {
324
+ body.system = systemInstruction;
325
+ }
326
+
327
+ const response = await axios.post(this.baseUrl, body, {
328
+ headers: {
329
+ "api-key": this.apiKey,
330
+ "Content-Type": "application/json",
331
+ },
332
+ });
333
+
334
+ // Response follows Anthropic format: { content: [{ type: "text"|"thinking", ... }] }
335
+ // MiMo may return a "thinking" block before the actual "text" block — skip it.
336
+ const data = response.data as { stop_reason?: string; content?: Array<{ type: string; text?: string; thinking?: string }> };
337
+ const blocks = data?.content ?? [];
338
+
339
+ const textBlock = blocks.find((b) => b.type === "text");
340
+ if (textBlock?.text) return textBlock.text;
341
+
342
+ // If stop_reason is max_tokens, the model was cut off mid-generation (thinking block only)
343
+ if (data?.stop_reason === "max_tokens") {
344
+ throw new Error(`MiMo response truncated (max_tokens reached). The prompt may be too long. Try a shorter spec or switch to a model with larger context.`);
345
+ }
346
+
347
+ throw new Error(`Unexpected MiMo response: ${JSON.stringify(response.data).slice(0, 200)}`);
348
+ }
349
+ }
350
+
351
+ // ─── Factory ──────────────────────────────────────────────────────────────────
352
+
353
+ export function createProvider(
354
+ providerName: string,
355
+ apiKey: string,
356
+ modelName?: string
357
+ ): AIProvider {
358
+ const meta = PROVIDER_CATALOG[providerName];
359
+ if (!meta) {
360
+ throw new Error(
361
+ `Unknown provider: "${providerName}". Valid options: ${SUPPORTED_PROVIDERS.join(", ")}`
362
+ );
363
+ }
364
+
365
+ const model = modelName || meta.models[0];
366
+
367
+ switch (providerName) {
368
+ case "gemini":
369
+ return new GeminiProvider(apiKey, model);
370
+ case "claude":
371
+ return new ClaudeProvider(apiKey, model);
372
+ case "mimo":
373
+ return new MiMoProvider(apiKey, model);
374
+ // All OpenAI-compatible providers: openai, deepseek, qwen, glm, minimax, doubao
375
+ default:
376
+ return new OpenAICompatibleProvider(
377
+ providerName,
378
+ apiKey,
379
+ model,
380
+ meta.baseURL,
381
+ meta.systemRole ?? "system",
382
+ meta.extraBody
383
+ );
384
+ }
385
+ }
386
+
387
+ // ─── Spec Generator ───────────────────────────────────────────────────────────
388
+
389
+ export class SpecGenerator {
390
+ constructor(private provider: AIProvider) {}
391
+
392
+ async generateSpec(idea: string, context?: ProjectContext): Promise<string> {
393
+ const parts: string[] = [idea];
394
+
395
+ if (context) {
396
+ // Constitution is highest priority — put it first so the AI respects it
397
+ if (context.constitution) {
398
+ parts.push(
399
+ `\n\n=== 项目宪法 (Project Constitution — MUST follow these rules) ===\n${context.constitution}`
400
+ );
401
+ }
402
+
403
+ parts.push(`\n\n=== 项目上下文 (Project Context) ===`);
404
+ if (context.techStack.length > 0) {
405
+ parts.push(`技术栈: ${context.techStack.join(", ")}`);
406
+ }
407
+ if (context.dependencies.length > 0) {
408
+ parts.push(`主要依赖: ${context.dependencies.slice(0, 25).join(", ")}`);
409
+ }
410
+ if (context.apiStructure.length > 0) {
411
+ parts.push(
412
+ `\n现有 API 文件:\n${context.apiStructure
413
+ .slice(0, 10)
414
+ .map((f) => ` - ${f}`)
415
+ .join("\n")}`
416
+ );
417
+ }
418
+ if (context.routeSummary) {
419
+ parts.push(`\n路由结构(摘要):\n${context.routeSummary}`);
420
+ }
421
+ if (context.schema) {
422
+ parts.push(`\n数据库 Schema (Prisma):\n${context.schema.slice(0, 3000)}`);
423
+ }
424
+ }
425
+
426
+ return this.provider.generate(parts.join("\n"), specPrompt);
427
+ }
428
+ }
@@ -0,0 +1,89 @@
1
+ import { editor, confirm, select } from "@inquirer/prompts";
2
+ import chalk from "chalk";
3
+ import { AIProvider } from "./spec-generator";
4
+ import { computeDiff, printDiff, printDiffSummary } from "./spec-versioning";
5
+
6
+ export class SpecRefiner {
7
+ constructor(private provider: AIProvider) {}
8
+
9
+ async refineLoop(initialSpec: string): Promise<string> {
10
+ let currentSpec = initialSpec;
11
+ let round = 1;
12
+
13
+ while (true) {
14
+ console.log(chalk.cyan(`\n─── Spec Review (Round ${round}) ─────────────────`));
15
+ console.log(chalk.gray(" Opening spec in editor. Save and close to continue."));
16
+
17
+ // Open spec in editor for user to review/edit
18
+ currentSpec = await editor({
19
+ message: "Review and edit the spec:",
20
+ default: currentSpec,
21
+ postfix: ".md",
22
+ waitForUserInput: false,
23
+ });
24
+
25
+ console.log(chalk.green(" ✔ Spec saved."));
26
+
27
+ // Ask what to do next
28
+ const action = await select({
29
+ message: "What would you like to do?",
30
+ choices: [
31
+ { name: "✅ Finalize — proceed to code generation", value: "finalize" },
32
+ { name: "🤖 AI Polish — let AI improve clarity & completeness", value: "ai" },
33
+ { name: "✏️ Edit again — continue editing", value: "edit" },
34
+ ],
35
+ });
36
+
37
+ if (action === "finalize") {
38
+ break;
39
+ }
40
+
41
+ if (action === "ai") {
42
+ console.log(chalk.blue(` AI (${this.provider.providerName}/${this.provider.modelName}) is polishing the spec...`));
43
+ try {
44
+ const improved = await this.provider.generate(
45
+ `Review the following feature spec and improve it for clarity, completeness, and technical feasibility.
46
+ Keep the same structure and language (Chinese). Fix any gaps in API design, missing error cases, or vague requirements.
47
+ Output ONLY the improved markdown spec, nothing else.
48
+
49
+ ${currentSpec}`,
50
+ "You are a Senior Tech Lead doing a spec review. Output only the improved Markdown."
51
+ );
52
+
53
+ console.log(chalk.yellow("\n AI has suggested improvements. Opening diff in editor..."));
54
+ const acceptImproved = await confirm({
55
+ message: "Accept AI improvements? (opens editor so you can review first)",
56
+ default: true,
57
+ });
58
+
59
+ if (acceptImproved) {
60
+ // Show diff before opening editor
61
+ const diff = computeDiff(currentSpec, improved);
62
+ console.log(chalk.cyan("\n ── AI Changes ──────────────────────────────"));
63
+ printDiffSummary(diff, "AI edits");
64
+ printDiff(diff);
65
+ console.log(chalk.cyan(" ────────────────────────────────────────────\n"));
66
+
67
+ // Let user review AI's version before accepting
68
+ currentSpec = await editor({
69
+ message: "Review AI-improved spec (edit if needed, then save):",
70
+ default: improved,
71
+ postfix: ".md",
72
+ waitForUserInput: false,
73
+ });
74
+ console.log(chalk.green(" ✔ AI-improved spec accepted."));
75
+ } else {
76
+ console.log(chalk.gray(" AI improvements discarded. Keeping your version."));
77
+ }
78
+ } catch (err) {
79
+ console.error(chalk.red(" AI improvement failed:"), err);
80
+ console.log(chalk.gray(" Continuing with current spec."));
81
+ }
82
+ }
83
+
84
+ round++;
85
+ }
86
+
87
+ return currentSpec;
88
+ }
89
+ }