ai-spec-dev 0.41.0 → 0.46.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/core/run-trend.ts CHANGED
@@ -2,8 +2,7 @@ import * as fs from "fs-extra";
  import * as path from "path";
  import chalk from "chalk";
  import { RunLog, reconstructRunLogFromJsonl } from "./run-logger";
-
- const LOG_DIR = ".ai-spec-logs";
+ import { DEFAULT_LOG_DIR } from "./config-defaults";

  // ─── Types ────────────────────────────────────────────────────────────────────

@@ -45,7 +44,7 @@ export interface TrendReport {
   * Silently skips unreadable / corrupt files.
   */
  export async function loadRunLogs(workingDir: string): Promise<RunLog[]> {
- const logDir = path.join(workingDir, LOG_DIR);
+ const logDir = path.join(workingDir, DEFAULT_LOG_DIR);
  if (!(await fs.pathExists(logDir))) return [];

  const files = await fs.readdir(logDir);
@@ -255,7 +254,7 @@ export function printTrendReport(report: TrendReport, workingDir: string): void
  }

  // ── Footer ────────────────────────────────────────────────────────
- const logRelDir = path.relative(workingDir, path.join(workingDir, LOG_DIR));
+ const logRelDir = path.relative(workingDir, path.join(workingDir, DEFAULT_LOG_DIR));
  console.log(chalk.gray(`\n ${entries.length} run(s) shown · logs: ${logRelDir}/`));
  console.log(chalk.cyan("─".repeat(63)));
  }
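The doc comment on loadRunLogs promises that unreadable or corrupt files are skipped silently. A minimal sketch of that pattern, assuming one .jsonl file per run and that reconstructRunLogFromJsonl accepts the raw file text (neither detail is confirmed by this diff):

const logs: RunLog[] = [];
for (const file of files.filter((f) => f.endsWith(".jsonl"))) {
  try {
    const raw = await fs.readFile(path.join(logDir, file), "utf8");
    logs.push(reconstructRunLogFromJsonl(raw));
  } catch {
    // Unreadable or corrupt log file: skip it instead of failing the whole trend report.
  }
}
return logs;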
@@ -1,16 +1,15 @@
  import { GoogleGenerativeAI } from "@google/generative-ai";
  import Anthropic from "@anthropic-ai/sdk";
  import OpenAI from "openai";
- import axios from "axios";
  import { ProxyAgent } from "undici";
  import { specPrompt } from "../prompts/spec.prompt";
  import { ProjectContext } from "./context-loader";
  import { withReliability } from "./provider-utils";

  // ─── Proxy Helper ─────────────────────────────────────────────────────────────
- // Only needed for Gemini: the other SDKs (Anthropic / OpenAI) read HTTPS_PROXY automatically.
  // The Gemini SDK uses Node.js native fetch (undici) and does not read proxy env vars on its own,
  // so a ProxyAgent has to be created manually and injected via fetchOptions.
+ // The Anthropic SDK (node-fetch) does not read proxy env vars automatically either.
  // This is in-process configuration only; it has no effect on child processes started via execSync (such as the claude CLI).

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
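The comments above describe the proxy wiring without showing it. A minimal sketch of injecting a ProxyAgent through fetch options, assuming the SDK accepts undici-style fetch init options (the helper name is hypothetical, not from this package):

import { ProxyAgent } from "undici";

// Hypothetical helper: when HTTPS_PROXY is set, return fetch init options that route
// native-fetch requests through an undici ProxyAgent dispatcher.
function proxyFetchOptions(): { dispatcher?: ProxyAgent } {
  const proxy = process.env.HTTPS_PROXY ?? process.env.https_proxy;
  return proxy ? { dispatcher: new ProxyAgent(proxy) } : {};
}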
@@ -47,6 +46,8 @@ export interface ProviderMeta {
  models: string[];
  /** Environment variable name for the API key */
  envKey: string;
+ /** Fallback env var names checked if envKey is not set */
+ fallbackEnvKeys?: string[];
  /**
   * Base URL for OpenAI-compatible providers.
   * Undefined means the provider has its own SDK (Gemini / Claude).
@@ -72,6 +73,8 @@ export const PROVIDER_CATALOG: Record<string, ProviderMeta> = {
  description: "小米 MiMo — mimo-v2-pro (Anthropic-compatible API)",
  models: ["mimo-v2-pro"],
  envKey: "MIMO_API_KEY",
+ // Fallback env var — MiMo's token plan uses ANTHROPIC_AUTH_TOKEN
+ fallbackEnvKeys: ["ANTHROPIC_AUTH_TOKEN"],
  // baseURL not used — MiMo has a dedicated provider class
  },
  gemini: {
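fallbackEnvKeys only declares the lookup order; the resolver itself lives elsewhere in the package and is not part of this diff. One plausible shape, sketched purely for illustration (resolveApiKey is a hypothetical name):

function resolveApiKey(meta: ProviderMeta): string | undefined {
  // Try the primary env var first, then any documented fallbacks
  // (e.g. ANTHROPIC_AUTH_TOKEN for MiMo's token plan).
  for (const name of [meta.envKey, ...(meta.fallbackEnvKeys ?? [])]) {
    const value = process.env[name];
    if (value) return value;
  }
  return undefined;
}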
@@ -244,8 +247,8 @@ export class ClaudeProvider implements AIProvider {
  ...(systemInstruction ? { system: systemInstruction } : {}),
  messages: [{ role: "user", content: prompt }],
  });
- const block = message.content[0];
- if (block.type === "text") return block.text;
+ const textBlock = message.content.find((b) => b.type === "text");
+ if (textBlock) return textBlock.text;
  throw new Error("Unexpected response type from Claude API");
  },
  { label: `${this.providerName}/${this.modelName}` }
@@ -307,58 +310,40 @@ export class OpenAICompatibleProvider implements AIProvider {
  // ─── MiMo Provider ─────────────────────────────────────────────────────────────
  // MiMo uses the Anthropic messages format but with a different base URL
  // and a custom "api-key" auth header (not "x-api-key" / "Authorization: Bearer").
- // The Anthropic SDK does not support custom auth headers, so we call the API
- // directly via axios.
+ // MiMo's token-plan API is Anthropic-compatible, so we reuse the Anthropic SDK
+ // directly, reading the base URL from env (MIMO_BASE_URL / ANTHROPIC_BASE_URL).

  export class MiMoProvider implements AIProvider {
+ private client: Anthropic;
  readonly providerName = "mimo";
  readonly modelName: string;
- private apiKey: string;
- private readonly baseUrl = "https://api.xiaomimimo.com/anthropic/v1/messages";

  constructor(apiKey: string, modelName = PROVIDER_CATALOG.mimo.models[0]) {
- this.apiKey = apiKey;
+ const baseURL = process.env["MIMO_BASE_URL"]
+ || process.env["ANTHROPIC_BASE_URL"]
+ || "https://token-plan-cn.xiaomimimo.com/anthropic";
+ this.client = new Anthropic({ apiKey, baseURL });
  this.modelName = modelName;
  }

  async generate(prompt: string, systemInstruction?: string): Promise<string> {
  return withReliability(
  async () => {
- const body: Record<string, unknown> = {
+ // Use streaming to avoid timeout errors with large max_tokens
+ const stream = this.client.messages.stream({
  model: this.modelName,
- max_tokens: 16384,
- messages: [{ role: "user", content: [{ type: "text", text: prompt }] }],
- top_p: 0.95,
- stream: false,
- temperature: 1.0,
- stop_sequences: null,
- };
-
- if (systemInstruction) {
- body.system = systemInstruction;
- }
-
- const response = await axios.post(this.baseUrl, body, {
- headers: {
- "api-key": this.apiKey,
- "Content-Type": "application/json",
- },
+ max_tokens: 65536,
+ ...(systemInstruction ? { system: systemInstruction } : {}),
+ messages: [{ role: "user", content: prompt }],
  });
-
- // Response follows Anthropic format: { content: [{ type: "text"|"thinking", ... }] }
- // MiMo may return a "thinking" block before the actual "text" block; skip it.
- const data = response.data as { stop_reason?: string; content?: Array<{ type: string; text?: string; thinking?: string }> };
- const blocks = data?.content ?? [];
-
- const textBlock = blocks.find((b) => b.type === "text");
- if (textBlock?.text) return textBlock.text;
-
- // If stop_reason is max_tokens, the model was cut off mid-generation (thinking block only)
- if (data?.stop_reason === "max_tokens") {
- throw new Error(`MiMo response truncated (max_tokens reached). The prompt may be too long. Try a shorter spec or switch to a model with larger context.`);
- }
-
- throw new Error(`Unexpected MiMo response: ${JSON.stringify(response.data).slice(0, 200)}`);
+ const message = await stream.finalMessage();
+ // MiMo may return "thinking" blocks before or instead of "text" blocks.
+ // Extract the first text block; fall back to thinking content; last resort: concatenate all.
+ const textBlock = message.content.find((b) => b.type === "text");
+ if (textBlock) return textBlock.text;
+ const thinkBlock = message.content.find((b) => b.type === "thinking");
+ if (thinkBlock) return (thinkBlock as unknown as { thinking: string }).thinking;
+ return message.content.map((b: { type: string; text?: string }) => b.text ?? "").join("");
  },
  { label: `${this.providerName}/${this.modelName}` }
  );
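The new MiMoProvider leans on the Anthropic SDK's streaming helper; finalMessage() resolves only once the stream completes, which is what sidesteps request timeouts at a 65536-token limit. A hedged usage sketch (the prompt text and env values are placeholders, not from this package):

// Assumes MIMO_API_KEY or ANTHROPIC_AUTH_TOKEN is set; baseURL falls back to the token-plan endpoint.
const provider = new MiMoProvider(process.env.MIMO_API_KEY ?? process.env.ANTHROPIC_AUTH_TOKEN ?? "");
const spec = await provider.generate("Draft a spec for the CLI's trend report.", "You are a spec writer.");
console.log(spec.slice(0, 200));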
@@ -6,6 +6,7 @@
  */

  import chalk from "chalk";
+ import { DEFAULT_TOKEN_BUDGETS as _CONFIG_BUDGETS } from "./config-defaults";

  // ─── Token Estimation ────────────────────────────────────────────────────────

@@ -110,14 +111,8 @@ export function assembleSections(

  // ─── Default Budgets ─────────────────────────────────────────────────────────

- /** Default context token budgets per provider. */
- export const DEFAULT_TOKEN_BUDGETS: Record<string, number> = {
- gemini: 900_000,
- claude: 180_000,
- openai: 120_000,
- deepseek: 60_000,
- default: 100_000,
- };
+ /** Default context token budgets per provider (sourced from config-defaults). */
+ export const DEFAULT_TOKEN_BUDGETS = _CONFIG_BUDGETS;

  export function getDefaultBudget(providerName: string): number {
  return DEFAULT_TOKEN_BUDGETS[providerName] ?? DEFAULT_TOKEN_BUDGETS.default;
package/core/vcr.ts CHANGED
@@ -27,7 +27,9 @@ import * as fs from "fs-extra";
  import * as path from "path";
  import { AIProvider } from "./spec-generator";

- export const VCR_DIR = ".ai-spec-vcr";
+ import { DEFAULT_VCR_DIR } from "./config-defaults";
+
+ export const VCR_DIR = DEFAULT_VCR_DIR;

  // ─── Types ────────────────────────────────────────────────────────────────────

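Several files in this release now pull their constants from ./config-defaults, which is not itself shown in this diff. From the identifiers imported above and the values removed from the token-budget module, it presumably looks roughly like the following (a reconstruction, not the published source):

// package/core/config-defaults.ts (inferred shape)
export const DEFAULT_LOG_DIR = ".ai-spec-logs";
export const DEFAULT_VCR_DIR = ".ai-spec-vcr";

export const DEFAULT_TOKEN_BUDGETS: Record<string, number> = {
  gemini: 900_000,
  claude: 180_000,
  openai: 120_000,
  deepseek: 60_000,
  default: 100_000,
};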