aiwcli 0.13.7 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +11 -1
  2. package/dist/commands/launch.d.ts +8 -0
  3. package/dist/commands/launch.js +96 -5
  4. package/dist/templates/_shared/.claude/skills/codex/SKILL.md +42 -0
  5. package/dist/templates/_shared/.claude/skills/codex/prompt.md +10 -0
  6. package/dist/templates/_shared/lib-ts/agent-exec/backends/headless.ts +33 -0
  7. package/dist/templates/_shared/lib-ts/agent-exec/backends/index.ts +6 -0
  8. package/dist/templates/_shared/lib-ts/agent-exec/backends/tmux.ts +145 -0
  9. package/dist/templates/_shared/lib-ts/agent-exec/base-agent.ts +229 -0
  10. package/dist/templates/_shared/lib-ts/agent-exec/execution-backend.ts +50 -0
  11. package/dist/templates/_shared/lib-ts/agent-exec/index.ts +4 -0
  12. package/dist/templates/_shared/lib-ts/base/cli-args.ts +283 -0
  13. package/dist/templates/_shared/lib-ts/base/inference.ts +53 -47
  14. package/dist/templates/_shared/lib-ts/base/models.ts +16 -0
  15. package/dist/templates/_shared/lib-ts/base/preflight.ts +98 -0
  16. package/dist/templates/_shared/lib-ts/base/tmux-driver.ts +381 -0
  17. package/dist/templates/_shared/lib-ts/base/utils.ts +8 -0
  18. package/dist/templates/_shared/lib-ts/context/context-formatter.ts +35 -11
  19. package/dist/templates/_shared/lib-ts/types.ts +17 -0
  20. package/dist/templates/_shared/scripts/status_line.ts +57 -28
  21. package/dist/templates/_shared/skills/prompt-codex/CLAUDE.md +46 -0
  22. package/dist/templates/_shared/skills/prompt-codex/scripts/launch-codex.ts +254 -0
  23. package/dist/templates/cc-native/.claude/settings.json +121 -1
  24. package/dist/templates/cc-native/_cc-native/CLAUDE.md +73 -0
  25. package/dist/templates/cc-native/_cc-native/cc-native.config.json +2 -1
  26. package/dist/templates/cc-native/_cc-native/lib-ts/CLAUDE.md +70 -0
  27. package/dist/templates/cc-native/_cc-native/lib-ts/settings.ts +3 -1
  28. package/dist/templates/cc-native/_cc-native/lib-ts/types.ts +5 -10
  29. package/dist/templates/cc-native/_cc-native/plan-review/lib/agent-selection.ts +2 -2
  30. package/dist/templates/cc-native/_cc-native/plan-review/lib/preflight.ts +14 -80
  31. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/agent.ts +19 -7
  32. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/base/base-agent.ts +4 -215
  33. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/index.ts +1 -1
  34. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/providers/claude-agent.ts +9 -39
  35. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/providers/codex-agent.ts +19 -21
  36. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/providers/gemini-agent.ts +2 -1
  37. package/dist/templates/cc-native/_cc-native/plan-review/lib/reviewers/providers/orchestrator-claude-agent.ts +13 -15
  38. package/oclif.manifest.json +21 -3
  39. package/package.json +1 -1
@@ -0,0 +1,50 @@
1
+ /**
2
+ * Execution backend interfaces for CLI agent subprocess invocations.
3
+ * Decouples agent logic (prompt building, output parsing) from execution
4
+ * strategy (headless subprocess vs tmux pane).
5
+ */
6
+
7
+ // ---------------------------------------------------------------------------
8
+ // Execution Request / Result
9
+ // ---------------------------------------------------------------------------
10
+
11
+ /** Request to execute a CLI subprocess. */
12
+ export interface ExecutionRequest {
13
+ cliPath: string;
14
+ args: string[];
15
+ input: string;
16
+ env: Record<string, string | undefined>;
17
+ timeoutMs: number;
18
+ /** If set, read output from this file instead of stdout (Codex pattern). */
19
+ outputFilePath?: string;
20
+ maxBuffer?: number;
21
+ shell?: boolean;
22
+ }
23
+
24
+ /** Result from a CLI subprocess execution. */
25
+ export interface ExecutionResult {
26
+ stdout: string;
27
+ stderr: string;
28
+ exitCode: number;
29
+ killed: boolean;
30
+ signal: string | null;
31
+ }
32
+
33
+ // ---------------------------------------------------------------------------
34
+ // Execution Backend
35
+ // ---------------------------------------------------------------------------
36
+
37
+ /** Strategy interface for running CLI agent subprocesses. */
38
+ export interface ExecutionBackend {
39
+ execute(request: ExecutionRequest): Promise<ExecutionResult>;
40
+ }
41
+
42
+ // ---------------------------------------------------------------------------
43
+ // Debug Logger
44
+ // ---------------------------------------------------------------------------
45
+
46
+ /** Injectable debug logger for agents running in _shared context. */
47
+ export interface AgentDebugLogger {
48
+ log(contextPath: string, sessionName: string, component: string, message: string, data?: unknown): void;
49
+ raw(contextPath: string, sessionName: string, component: string, label: string, raw: string): void;
50
+ }
@@ -0,0 +1,4 @@
1
+ export { BaseCliAgent, type AgentExecutionConfig } from "./base-agent.js";
2
+ export type { ExecutionBackend, ExecutionRequest, ExecutionResult, AgentDebugLogger } from "./execution-backend.js";
3
+ export { HeadlessBackend } from "./backends/headless.js";
4
+ export { TmuxBackend } from "./backends/tmux.js";
@@ -0,0 +1,283 @@
1
+ /**
2
+ * Centralized CLI argument construction for agent subprocesses.
3
+ * Single source of truth for Claude CLI and Codex CLI flag patterns,
4
+ * platform quoting, model tier resolution, and env setup.
5
+ */
6
+
7
+ import type { PreflightCommandConfig } from "./preflight.js";
8
+ import { getInternalSubprocessEnv, shellQuoteWin } from "./subprocess-utils.js";
9
+ import { CLAUDE_MODELS, CODEX_MODELS } from "./models.js";
10
+
11
+ export { CLAUDE_MODELS, CODEX_MODELS };
12
+
13
+ // ---------------------------------------------------------------------------
14
+ // Types
15
+ // ---------------------------------------------------------------------------
16
+
17
+ export type InvocationMode = "structured" | "print" | "preflight";
18
+ export type CliProvider = "claude" | "codex";
19
+ export type ModelTier = "fast" | "standard" | "smart";
20
+
21
+ const VALID_SANDBOXES = ["read-only", "workspace-write", "danger-full-access"] as const;
22
+ export type CodexSandbox = (typeof VALID_SANDBOXES)[number];
23
+
24
+ export function isCodexSandbox(value: string): value is CodexSandbox {
25
+ return (VALID_SANDBOXES as readonly string[]).includes(value);
26
+ }
27
+
28
+ export interface CliArgSpec {
29
+ provider: CliProvider;
30
+ model: string | ModelTier;
31
+ mode: InvocationMode;
32
+ jsonSchema?: Record<string, unknown>;
33
+ maxTurns?: number;
34
+ systemPrompt?: string;
35
+ sandbox?: CodexSandbox;
36
+ outputSchemaPath?: string;
37
+ outputFilePath?: string;
38
+ extraArgs?: string[];
39
+ }
40
+
41
+ /** Codex REPL spec — model optional (Codex uses its default when omitted). */
42
+ export interface CodexReplSpec {
43
+ provider: "codex";
44
+ mode: "repl";
45
+ model?: string | ModelTier;
46
+ sandbox?: CodexSandbox;
47
+ extraArgs?: string[];
48
+ }
49
+
50
+ export interface CliInvocation {
51
+ cliName: string;
52
+ args: string[];
53
+ needsShell: boolean;
54
+ env: Record<string, string | undefined>;
55
+ }
56
+
57
+ // ---------------------------------------------------------------------------
58
+ // Model Tier Resolution
59
+ // ---------------------------------------------------------------------------
60
+
61
+ export const MODEL_TIERS: Record<ModelTier, string> = {
62
+ fast: CLAUDE_MODELS.haiku,
63
+ standard: CLAUDE_MODELS.sonnet,
64
+ smart: CLAUDE_MODELS.opus,
65
+ };
66
+
67
+ export const CODEX_MODEL_TIERS: Record<ModelTier, string> = {
68
+ fast: CODEX_MODELS.spark,
69
+ standard: CODEX_MODELS.codex,
70
+ smart: CODEX_MODELS.codex,
71
+ };
72
+
73
+ export const TIER_TIMEOUTS: Record<ModelTier, number> = {
74
+ fast: 15,
75
+ standard: 30,
76
+ smart: 90,
77
+ };
78
+
79
+ export function isModelTier(value: string): value is ModelTier {
80
+ return value in MODEL_TIERS;
81
+ }
82
+
83
+ export function resolveModel(model: string | ModelTier): string {
84
+ if (isModelTier(model)) return MODEL_TIERS[model];
85
+ return model;
86
+ }
87
+
88
+ export function resolveModelForProvider(
89
+ model: string | ModelTier,
90
+ provider: CliProvider,
91
+ ): string {
92
+ if (!isModelTier(model)) return model;
93
+ return provider === "codex" ? CODEX_MODEL_TIERS[model] : MODEL_TIERS[model];
94
+ }
95
+
96
+ export function getTierTimeout(tier: ModelTier): number {
97
+ return TIER_TIMEOUTS[tier];
98
+ }
99
+
100
+ /** Resolve a Codex model: tier resolution + pass-through. No aliases (those are skill-specific). */
101
+ export function resolveCodexModel(input: string): string {
102
+ if (isModelTier(input)) return CODEX_MODEL_TIERS[input as ModelTier];
103
+ return input;
104
+ }
105
+
106
+ // ---------------------------------------------------------------------------
107
+ // Core Builder
108
+ // ---------------------------------------------------------------------------
109
+
110
+ export function buildCliInvocation(spec: CliArgSpec | CodexReplSpec): CliInvocation {
111
+ const env = getInternalSubprocessEnv();
112
+ delete env.ANTHROPIC_API_KEY;
113
+
114
+ if (spec.mode === "repl") {
115
+ const resolvedModel = spec.model ? resolveModelForProvider(spec.model, spec.provider) : undefined;
116
+ return buildCodexReplInvocation(spec, resolvedModel, env);
117
+ }
118
+
119
+ const resolvedModel = resolveModelForProvider((spec as CliArgSpec).model, spec.provider);
120
+ const isWin = process.platform === "win32";
121
+ const empty = isWin ? '""' : "";
122
+
123
+ if (spec.provider === "claude") {
124
+ return buildClaudeInvocation(spec as CliArgSpec, resolvedModel, isWin, empty, env);
125
+ }
126
+
127
+ return buildCodexInvocation(spec as CliArgSpec, resolvedModel, env);
128
+ }
129
+
130
+ function buildClaudeInvocation(
131
+ spec: CliArgSpec,
132
+ model: string,
133
+ isWin: boolean,
134
+ empty: string,
135
+ env: Record<string, string | undefined>,
136
+ ): CliInvocation {
137
+ const args: string[] = [];
138
+
139
+ args.push("--model", model);
140
+
141
+ if (spec.mode === "print") {
142
+ args.push("--print");
143
+ } else {
144
+ // structured and preflight both use json output
145
+ args.push("--output-format", "json");
146
+
147
+ if (spec.jsonSchema) {
148
+ args.push("--json-schema", shellQuoteWin(JSON.stringify(spec.jsonSchema)));
149
+ }
150
+
151
+ const maxTurns = spec.mode === "preflight" ? 1 : (spec.maxTurns ?? 3);
152
+ args.push("--max-turns", String(maxTurns));
153
+ }
154
+
155
+ args.push("--setting-sources", empty);
156
+ args.push("-p");
157
+ args.push("--no-session-persistence");
158
+
159
+ if (spec.systemPrompt) {
160
+ args.push("--system-prompt", shellQuoteWin(spec.systemPrompt));
161
+ }
162
+
163
+ if (spec.extraArgs) {
164
+ args.push(...spec.extraArgs);
165
+ }
166
+
167
+ return { cliName: "claude", args, needsShell: isWin, env };
168
+ }
169
+
170
+ function buildCodexInvocation(
171
+ spec: CliArgSpec,
172
+ model: string,
173
+ env: Record<string, string | undefined>,
174
+ ): CliInvocation {
175
+ const args: string[] = ["exec"];
176
+
177
+ if (spec.sandbox) {
178
+ args.push("--sandbox", spec.sandbox);
179
+ }
180
+
181
+ args.push("--model", model);
182
+
183
+ if (spec.outputSchemaPath) {
184
+ args.push("--output-schema", spec.outputSchemaPath);
185
+ }
186
+
187
+ if (spec.outputFilePath) {
188
+ args.push("-o", spec.outputFilePath);
189
+ }
190
+
191
+ args.push("-");
192
+
193
+ if (spec.extraArgs) {
194
+ args.push(...spec.extraArgs);
195
+ }
196
+
197
+ return { cliName: "codex", args, needsShell: false, env };
198
+ }
199
+
200
+ function buildCodexReplInvocation(
201
+ spec: CodexReplSpec,
202
+ model: string | undefined,
203
+ env: Record<string, string | undefined>,
204
+ ): CliInvocation {
205
+ const args: string[] = [];
206
+ if (spec.sandbox) args.push("--sandbox", spec.sandbox);
207
+ if (model) args.push("--model", model);
208
+ if (spec.extraArgs) args.push(...spec.extraArgs);
209
+ return { cliName: "codex", args, needsShell: false, env };
210
+ }
211
+
212
+ // ---------------------------------------------------------------------------
213
+ // Convenience Presets
214
+ // ---------------------------------------------------------------------------
215
+
216
+ export function preflightSpec(provider: CliProvider, model: string): CliArgSpec {
217
+ if (provider === "codex") {
218
+ return {
219
+ provider: "codex",
220
+ model,
221
+ mode: "preflight",
222
+ sandbox: "read-only",
223
+ };
224
+ }
225
+ return {
226
+ provider: "claude",
227
+ model,
228
+ mode: "preflight",
229
+ };
230
+ }
231
+
232
+ export function inferenceSpec(model: string | ModelTier): CliArgSpec {
233
+ return {
234
+ provider: "claude",
235
+ model,
236
+ mode: "print",
237
+ };
238
+ }
239
+
240
+ export function reviewSpec(
241
+ provider: CliProvider,
242
+ model: string,
243
+ schema: Record<string, unknown>,
244
+ systemPrompt?: string,
245
+ ): CliArgSpec {
246
+ if (provider === "codex") {
247
+ return {
248
+ provider: "codex",
249
+ model,
250
+ mode: "structured",
251
+ sandbox: "read-only",
252
+ };
253
+ }
254
+ return {
255
+ provider: "claude",
256
+ model,
257
+ mode: "structured",
258
+ jsonSchema: schema,
259
+ systemPrompt,
260
+ };
261
+ }
262
+
263
+ export function codexReplSpec(
264
+ model?: string,
265
+ sandbox?: CodexSandbox,
266
+ ): CodexReplSpec {
267
+ return {
268
+ provider: "codex",
269
+ mode: "repl",
270
+ model,
271
+ sandbox,
272
+ };
273
+ }
274
+
275
+ export function preflightCommandConfig(provider: CliProvider): PreflightCommandConfig {
276
+ const input = "Respond with exactly: ok";
277
+
278
+ return {
279
+ cliName: provider === "claude" ? "claude" : "codex",
280
+ buildArgs: (model: string) => buildCliInvocation(preflightSpec(provider, model)).args,
281
+ input,
282
+ };
283
+ }
@@ -9,20 +9,18 @@ import { execFileSync } from "node:child_process";
9
9
  import { logDebug, logWarn } from "./logger.js";
10
10
  import { STOP_WORDS } from "./stop-words.js";
11
11
  import type { InferenceResult } from "../types.js";
12
- import { execFileAsync, getInternalSubprocessEnv, shellQuoteWin } from "./subprocess-utils.js";
13
-
14
- // Model configurations §6.1
15
- const MODELS: Record<string, string> = {
16
- fast: "claude-haiku-4-5-20251001",
17
- standard: "claude-sonnet-4-6",
18
- smart: "claude-opus-4-6",
19
- };
20
-
21
- const TIMEOUTS: Record<string, number> = {
22
- fast: 15,
23
- standard: 30,
24
- smart: 90,
25
- };
12
+ import { execFileAsync } from "./subprocess-utils.js";
13
+ import {
14
+ buildCliInvocation,
15
+ inferenceSpec,
16
+ isModelTier,
17
+ resolveModel,
18
+ getTierTimeout,
19
+ TIER_TIMEOUTS,
20
+ } from "./cli-args.js";
21
+ import { CODEX_MODELS } from "./models.js";
22
+
23
+ const CONTEXT_ID_PRIMARY_MODEL = CODEX_MODELS.spark;
26
24
 
27
25
  /**
28
26
  * Run inference using the claude CLI.
@@ -33,38 +31,33 @@ export function inference(
33
31
  userPrompt: string,
34
32
  level = "fast",
35
33
  timeout?: number,
34
+ options?: { model?: string },
36
35
  ): InferenceResult {
37
36
  const startTime = Date.now();
38
- const model = MODELS[level] ?? MODELS.fast;
39
- const timeoutSec = timeout ?? TIMEOUTS[level] ?? TIMEOUTS.fast;
37
+ const modelInput = options?.model ?? level;
38
+ const model = resolveModel(modelInput);
39
+ const timeoutSec = timeout ?? (isModelTier(modelInput) ? getTierTimeout(modelInput) : TIER_TIMEOUTS.fast);
40
40
  const fullPrompt = `${systemPrompt}\n\n${userPrompt}`;
41
41
 
42
- // Remove ANTHROPIC_API_KEY to force subscription auth
43
- const env = { ...process.env };
44
- delete env.ANTHROPIC_API_KEY;
42
+ const invocation = buildCliInvocation(inferenceSpec(modelInput));
43
+ const isWin = invocation.needsShell;
45
44
 
46
- try {
47
- const isWin = process.platform === "win32";
48
-
49
- // On Windows with shell:true, Node.js sets windowsVerbatimArguments
50
- // args are joined with spaces, NOT individually quoted. We must manually
51
- // wrap multi-word/special-char args in "..." for cmd.exe parsing.
52
- // Inside double quotes: "" = literal ", and |&<> are safe.
53
- const empty = isWin ? '""' : "";
54
- let promptArg = fullPrompt;
55
- if (isWin) {
56
- promptArg = '"' + fullPrompt.replaceAll(/\r?\n/g, " ").replaceAll('"', '""') + '"';
57
- }
45
+ // Prompt arg needs Windows quoting when using shell mode
46
+ let promptArg = fullPrompt;
47
+ if (isWin) {
48
+ promptArg = '"' + fullPrompt.replaceAll(/\r?\n/g, " ").replaceAll('"', '""') + '"';
49
+ }
58
50
 
51
+ try {
59
52
  const stdout = execFileSync(
60
- "claude",
61
- ["--model", model, "--print", "--setting-sources", empty, "-p", "--no-session-persistence", promptArg],
53
+ invocation.cliName,
54
+ [...invocation.args, promptArg],
62
55
  {
63
56
  timeout: timeoutSec * 1000,
64
- env,
57
+ env: invocation.env,
65
58
  encoding: "utf-8",
66
59
  stdio: ["pipe", "pipe", "pipe"],
67
- shell: isWin, // Windows needs shell for .cmd resolution
60
+ shell: isWin,
68
61
  },
69
62
  );
70
63
 
@@ -189,7 +182,7 @@ Respond with ONLY a JSON object: {"slug": "your 8-12 word phrase here"}`;
189
182
 
190
183
  /**
191
184
  * Generate a 5-12 word context ID slug from a user prompt.
192
- * Uses Haiku (fast tier) for low latency.
185
+ * Uses 5.3 Codex Spark first, then falls back to current fast tier for resilience.
193
186
  * See SPEC.md §6.3
194
187
  */
195
188
  export function generateContextIdSlug(
@@ -198,7 +191,20 @@ export function generateContextIdSlug(
198
191
  ): string | null {
199
192
  const truncated = prompt.slice(0, 500);
200
193
 
201
- const result = inference(CONTEXT_ID_SLUG_PROMPT, truncated, "fast", timeout);
194
+ const sparkResult = inference(
195
+ CONTEXT_ID_SLUG_PROMPT,
196
+ truncated,
197
+ "fast",
198
+ timeout,
199
+ { model: CONTEXT_ID_PRIMARY_MODEL },
200
+ );
201
+ if (!sparkResult.success || !sparkResult.output) {
202
+ logWarn(
203
+ "inference",
204
+ `Context ID slug Spark (${CONTEXT_ID_PRIMARY_MODEL}) failed or returned empty output. Falling back to ${resolveModel("fast")}`,
205
+ );
206
+ }
207
+ const result = sparkResult.success && sparkResult.output ? sparkResult : inference(CONTEXT_ID_SLUG_PROMPT, truncated, "fast", timeout);
202
208
 
203
209
  if (!result.success || !result.output) {
204
210
  logWarn("inference", `Context ID slug inference failed: ${result.error}`);
@@ -250,26 +256,26 @@ export async function inferenceAsync(
250
256
  userPrompt: string,
251
257
  level = "fast",
252
258
  timeout?: number,
259
+ options?: { model?: string },
253
260
  ): Promise<InferenceResult> {
254
261
  const startTime = Date.now();
255
- const model = (level in MODELS ? MODELS[level] : undefined) ?? MODELS.fast;
256
- const timeoutSec = timeout ?? (level in TIMEOUTS ? TIMEOUTS[level] : undefined) ?? TIMEOUTS.fast;
262
+ const modelInput = options?.model ?? level;
263
+ const timeoutSec = timeout ?? (isModelTier(modelInput) ? getTierTimeout(modelInput) : TIER_TIMEOUTS.fast);
257
264
  const timeoutMs = timeoutSec * 1000;
258
265
  const fullPrompt = `${systemPrompt}\n\n${userPrompt}`;
259
266
 
260
- const env = getInternalSubprocessEnv();
261
- delete env.ANTHROPIC_API_KEY;
267
+ const invocation = buildCliInvocation(inferenceSpec(modelInput));
268
+ const isWin = invocation.needsShell;
262
269
 
263
- const isWin = process.platform === "win32";
264
- const empty = isWin ? '""' : "";
270
+ // Prompt arg needs Windows quoting when using shell mode
265
271
  const promptArg = isWin
266
- ? shellQuoteWin(fullPrompt.replaceAll(/\r?\n/g, " "))
272
+ ? ('"' + fullPrompt.replaceAll(/\r?\n/g, " ").replaceAll('"', '""') + '"')
267
273
  : fullPrompt;
268
274
 
269
275
  const result = await execFileAsync(
270
- "claude",
271
- ["--model", model, "--print", "--setting-sources", empty, "-p", "--no-session-persistence", promptArg],
272
- { timeout: timeoutMs, env, shell: isWin },
276
+ invocation.cliName,
277
+ [...invocation.args, promptArg],
278
+ { timeout: timeoutMs, env: invocation.env, shell: isWin },
273
279
  );
274
280
 
275
281
  const latencyMs = Date.now() - startTime;
@@ -0,0 +1,16 @@
1
+ /**
2
+ * Canonical model ID constants — single source of truth.
3
+ * All model IDs used across the system should reference these constants.
4
+ */
5
+
6
+ export const CLAUDE_MODELS = {
7
+ haiku: "claude-haiku-4-5-20251001",
8
+ sonnet: "claude-sonnet-4-6",
9
+ opus: "claude-opus-4-6",
10
+ } as const;
11
+
12
+ export const CODEX_MODELS = {
13
+ spark: "gpt-5.3-codex-spark",
14
+ codex: "gpt-5.3-codex",
15
+ gpt: "gpt-5.2",
16
+ } as const;
@@ -0,0 +1,98 @@
1
+ /**
2
+ * Shared preflight health check for provider+model availability.
3
+ * Extracted from plan-review/lib/preflight.ts for reuse by any hook.
4
+ *
5
+ * Validates that a CLI tool + model combo works by running a minimal "ping" request.
6
+ */
7
+
8
+ import { logDebug, logWarn } from "./logger.js";
9
+ import { findExecutable, execFileAsync, getInternalSubprocessEnv } from "./subprocess-utils.js";
10
+
11
+ // ---------------------------------------------------------------------------
12
+ // Types
13
+ // ---------------------------------------------------------------------------
14
+
15
+ export interface PreflightCommandConfig {
16
+ cliName: string;
17
+ buildArgs: (model: string) => string[];
18
+ input: string;
19
+ }
20
+
21
+ export interface PreflightCheckResult {
22
+ provider: string;
23
+ model: string;
24
+ available: boolean;
25
+ latencyMs: number;
26
+ error?: string;
27
+ }
28
+
29
+ // ---------------------------------------------------------------------------
30
+ // Error Classification
31
+ // ---------------------------------------------------------------------------
32
+
33
+ export function classifyError(
34
+ stderr: string,
35
+ exitCode: number | null,
36
+ killed: boolean,
37
+ signal: string | null,
38
+ ): string {
39
+ if (killed || signal === "SIGTERM") return "Preflight timed out";
40
+ if (/model.*not found|not available/i.test(stderr)) return "Model not available for this account";
41
+ if (/rate limit|429/i.test(stderr)) return "Rate limited";
42
+ if (/auth|api key|401/i.test(stderr)) return "Authentication failed";
43
+ if (/quota|billing/i.test(stderr)) return "Quota/billing issue";
44
+ return `Exit code ${exitCode}`;
45
+ }
46
+
47
+ // ---------------------------------------------------------------------------
48
+ // Single Provider+Model Check
49
+ // ---------------------------------------------------------------------------
50
+
51
+ /**
52
+ * Check if a single provider:model combo is available.
53
+ * Takes a PreflightCommandConfig so callers define their own CLI args.
54
+ */
55
+ export async function checkProviderModel(
56
+ provider: string,
57
+ model: string,
58
+ config: PreflightCommandConfig,
59
+ timeoutMs: number,
60
+ hook = "preflight",
61
+ ): Promise<PreflightCheckResult> {
62
+ const cliPath = findExecutable(config.cliName);
63
+ if (!cliPath) {
64
+ return { provider, model, available: false, latencyMs: 0, error: `CLI '${config.cliName}' not found on PATH` };
65
+ }
66
+
67
+ const start = Date.now();
68
+ try {
69
+ const env = getInternalSubprocessEnv();
70
+ const result = await execFileAsync(cliPath, config.buildArgs(model), {
71
+ input: config.input,
72
+ timeout: timeoutMs,
73
+ env: env as Record<string, string>,
74
+ maxBuffer: 1 * 1024 * 1024,
75
+ shell: process.platform === "win32",
76
+ });
77
+
78
+ const latencyMs = Date.now() - start;
79
+
80
+ if (result.killed || result.signal === "SIGTERM") {
81
+ return { provider, model, available: false, latencyMs, error: "Preflight timed out" };
82
+ }
83
+
84
+ if (result.exitCode !== 0) {
85
+ const error = classifyError(result.stderr, result.exitCode, result.killed, result.signal);
86
+ logWarn(hook, `${provider}:${model} failed: ${error} (stderr: ${result.stderr.slice(-200)})`);
87
+ return { provider, model, available: false, latencyMs, error };
88
+ }
89
+
90
+ logDebug(hook, `${provider}:${model} passed (${latencyMs}ms)`);
91
+ return { provider, model, available: true, latencyMs };
92
+ } catch (err) {
93
+ const latencyMs = Date.now() - start;
94
+ const error = err instanceof Error ? err.message : String(err);
95
+ logWarn(hook, `${provider}:${model} exception: ${error}`);
96
+ return { provider, model, available: false, latencyMs, error };
97
+ }
98
+ }