agent-worker 0.18.0 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,1484 +0,0 @@
1
- import { gateway, generateText } from "ai";
2
- import { execa } from "execa";
3
- import { existsSync } from "node:fs";
4
- import { join } from "node:path";
5
-
6
//#region src/agent/models.ts
/** Memoized provider factories keyed by provider name (null = import failed). */
const providerCache = {};
/**
 * Provider SDK package mapping: provider name → { package, export }.
 * Every supported provider follows the `@ai-sdk/<name>` convention and
 * exports a factory under its own name, so the table is derived from a list.
 */
const PROVIDER_PACKAGES = Object.fromEntries(
  ["anthropic", "openai", "deepseek", "google", "groq", "mistral", "xai"].map(
    (name) => [name, { package: `@ai-sdk/${name}`, export: name }]
  )
);
39
/**
 * Lazily import a provider SDK and memoize the outcome in providerCache.
 * A failed import is memoized as null so it is not retried on every call.
 * Only standard providers (no custom baseURL/apiKey) go through this path.
 */
async function loadProvider(name, packageName, exportName) {
  if (name in providerCache) return providerCache[name] ?? null;
  let resolved;
  try {
    const mod = await import(packageName);
    resolved = mod[exportName];
  } catch {
    resolved = null;
  }
  providerCache[name] = resolved;
  return resolved;
}
54
/**
 * Create a provider instance configured with a custom baseURL and/or apiKey.
 * Looks up the SDK's `create<Provider>` factory (e.g. `createAnthropic`).
 * Not cached — each call produces a fresh instance.
 */
async function createCustomProvider(packageName, exportName, options) {
  const factoryName = `create${exportName.charAt(0).toUpperCase()}${exportName.slice(1)}`;
  const mod = await import(packageName);
  const createFn = mod[factoryName];
  if (!createFn) throw new Error(`Package ${packageName} does not export ${factoryName}`);
  return createFn(options);
}
63
/**
 * Resolve an api_key field: a leading "$" means indirection through the
 * environment ('$ENV_VAR' → process.env.ENV_VAR); anything else is literal.
 * @throws if the referenced environment variable is unset or empty.
 */
function resolveApiKey(apiKey) {
  if (!apiKey.startsWith("$")) return apiKey;
  const envVar = apiKey.slice(1);
  const value = process.env[envVar];
  if (!value) throw new Error(`Environment variable ${envVar} is not set`);
  return value;
}
75
/**
 * Create a model using explicit provider configuration.
 * Use this when provider details (base_url, api_key) are specified separately
 * from the model name.
 *
 * Example:
 * createModelWithProvider("MiniMax-M2.5", { name: "anthropic", base_url: "https://api.minimax.io/anthropic/v1", api_key: "$MINIMAX_API_KEY" })
 */
async function createModelWithProvider(modelName, provider) {
  const isPlainName = typeof provider === "string";
  const providerName = isPlainName ? provider : provider.name;
  const pkg = PROVIDER_PACKAGES[providerName];
  if (!pkg) throw new Error(`Unknown provider: ${providerName}. Supported: ${Object.keys(PROVIDER_PACKAGES).join(", ")}`);
  const base_url = isPlainName ? void 0 : provider.base_url;
  const api_key = isPlainName ? void 0 : provider.api_key;
  // No custom settings: use the (cached) standard provider.
  if (!base_url && !api_key) {
    const providerFn = await loadProvider(providerName, pkg.package, pkg.export);
    if (!providerFn) throw new Error(`Failed to load ${pkg.package} for provider "${providerName}". Try reinstalling agent-worker.`);
    return providerFn(modelName);
  }
  // Custom settings: build a fresh provider instance with them.
  const opts = {};
  if (base_url) opts.baseURL = base_url;
  if (api_key) opts.apiKey = resolveApiKey(api_key);
  const customProvider = await createCustomProvider(pkg.package, pkg.export, opts);
  return customProvider(modelName);
}
103
/**
 * Parse model identifier and return the appropriate provider model
 *
 * Supports three formats:
 *
 * 1. Provider-only format: provider
 *    Uses first model from FRONTIER_MODELS via gateway
 *    Examples: anthropic → anthropic/claude-sonnet-4-5, openai → openai/gpt-5.2
 *
 * 2. Gateway format: provider/model-name
 *    Uses Vercel AI Gateway (requires AI_GATEWAY_API_KEY)
 *    Examples: anthropic/claude-sonnet-4-5, openai/gpt-5.2, deepseek/deepseek-chat
 *
 * 3. Direct provider format: provider:model-name
 *    Requires installing the specific @ai-sdk/provider package
 *    Examples: anthropic:claude-sonnet-4-5, openai:gpt-5.2, deepseek:deepseek-chat
 *
 * Synchronous: direct-provider paths only succeed when the provider SDK was
 * already loaded into providerCache (by a prior createModelAsync call);
 * otherwise this throws. Use createModelAsync for lazy loading.
 */
function createModel(modelId) {
  // Format 2: gateway id "provider/model" — prefer the gateway when its key is set.
  if (modelId.includes("/")) {
    if (process.env.AI_GATEWAY_API_KEY) return gateway(modelId);
    const slashIndex = modelId.indexOf("/");
    const provider = modelId.slice(0, slashIndex);
    const modelName = modelId.slice(slashIndex + 1);
    // Fall back to an already-loaded direct provider (populated by createModelAsync).
    if (provider in providerCache && providerCache[provider]) return providerCache[provider](modelName);
    throw new Error(`Provider '${provider}' not loaded. Call createModelAsync() first or set AI_GATEWAY_API_KEY for gateway access.`);
  }
  // Format 1: bare provider name — resolve to that provider's first frontier model.
  if (!modelId.includes(":")) {
    const provider = modelId;
    if (provider in FRONTIER_MODELS) {
      const defaultModel = FRONTIER_MODELS[provider][0];
      if (process.env.AI_GATEWAY_API_KEY) return gateway(`${provider}/${defaultModel}`);
      if (provider in providerCache && providerCache[provider]) return providerCache[provider](defaultModel);
      throw new Error(`Provider '${provider}' not loaded. Call createModelAsync() first or set AI_GATEWAY_API_KEY for gateway access.`);
    }
    throw new Error(`Unknown provider: ${modelId}. Supported: ${Object.keys(FRONTIER_MODELS).join(", ")}`);
  }
  // Format 3: direct "provider:model" — requires the provider to be preloaded.
  const colonIndex = modelId.indexOf(":");
  const provider = modelId.slice(0, colonIndex);
  const modelName = modelId.slice(colonIndex + 1);
  if (!modelName) throw new Error(`Invalid model identifier: ${modelId}. Model name is required.`);
  if (provider in providerCache && providerCache[provider]) return providerCache[provider](modelName);
  throw new Error(`Provider '${provider}' not loaded. Call createModelAsync() first or set AI_GATEWAY_API_KEY for gateway access.`);
}
146
/**
 * Async variant of createModel — lazily loads direct provider SDKs on demand.
 * Use this when you need direct provider access (provider:model format).
 */
async function createModelAsync(modelId) {
  // Gateway format: "provider/model-name".
  const slashIndex = modelId.indexOf("/");
  if (slashIndex !== -1) {
    if (process.env.AI_GATEWAY_API_KEY) return gateway(modelId);
    return loadProviderModel(modelId.slice(0, slashIndex), modelId.slice(slashIndex + 1));
  }
  const colonIndex = modelId.indexOf(":");
  // Provider-only format: resolve to the provider's first frontier model.
  if (colonIndex === -1) {
    const provider = modelId;
    if (!(provider in FRONTIER_MODELS)) {
      throw new Error(`Unknown provider: ${modelId}. Supported: ${Object.keys(FRONTIER_MODELS).join(", ")}`);
    }
    const defaultModel = FRONTIER_MODELS[provider][0];
    if (process.env.AI_GATEWAY_API_KEY) return gateway(`${provider}/${defaultModel}`);
    return loadProviderModel(provider, defaultModel);
  }
  // Direct provider format: "provider:model-name".
  const provider = modelId.slice(0, colonIndex);
  const modelName = modelId.slice(colonIndex + 1);
  if (!modelName) throw new Error(`Invalid model identifier: ${modelId}. Model name is required.`);
  return loadProviderModel(provider, modelName);
}
171
/**
 * Load a provider SDK and instantiate a model from it.
 * Fallback path used when the AI Gateway is not available.
 */
async function loadProviderModel(provider, modelName) {
  const config = PROVIDER_PACKAGES[provider];
  if (!config) {
    const supported = Object.keys(PROVIDER_PACKAGES).join(", ");
    throw new Error(`Unknown provider: ${provider}. Supported: ${supported}. Or use gateway format: provider/model (e.g., openai/gpt-5.2)`);
  }
  const providerFn = await loadProvider(provider, config.package, config.export);
  if (!providerFn) throw new Error(`Failed to load ${config.package} for provider "${provider}". Try reinstalling agent-worker.`);
  return providerFn(modelName);
}
182
/** Providers that can be accessed directly via their @ai-sdk package. */
const SUPPORTED_PROVIDERS = ["anthropic", "openai", "deepseek", "google", "groq", "mistral", "xai"];
/** Provider used when none is specified. */
const DEFAULT_PROVIDER = "anthropic";
198
/**
 * Default model identifier in "provider/model" format:
 * the first frontier model of the default provider.
 */
function getDefaultModel() {
  const [firstModel] = FRONTIER_MODELS[DEFAULT_PROVIDER];
  return `${DEFAULT_PROVIDER}/${firstModel}`;
}
205
/**
 * Frontier models per provider (as of 2026-02), latest/best only — no legacy
 * versions. The first entry in each list is that provider's default.
 *
 * Note: some entries may be placeholders for testing or future releases;
 * verify availability with the provider before production use.
 */
const FRONTIER_MODELS = {
  anthropic: ["claude-sonnet-4-5", "claude-haiku-4-5", "claude-opus-4-5"],
  openai: ["gpt-5.2", "gpt-5.2-codex"],
  google: ["gemini-3-pro-preview", "gemini-2.5-flash", "gemini-2.5-pro"],
  deepseek: ["deepseek-chat", "deepseek-reasoner"],
  groq: ["meta-llama/llama-4-scout-17b-16e-instruct", "deepseek-r1-distill-llama-70b"],
  mistral: ["mistral-large-latest", "pixtral-large-latest", "magistral-medium-2506"],
  xai: ["grok-4", "grok-4-fast-reasoning"]
};
233
/**
 * Environment variable each provider uses for authentication, in priority
 * order — the first match wins during auto-discovery. Gateway comes first
 * because a single gateway key covers every provider.
 */
const PROVIDER_ENV_KEYS = {
  gateway: "AI_GATEWAY_API_KEY",
  anthropic: "ANTHROPIC_API_KEY",
  openai: "OPENAI_API_KEY",
  deepseek: "DEEPSEEK_API_KEY",
  google: "GOOGLE_GENERATIVE_AI_API_KEY",
  groq: "GROQ_API_KEY",
  mistral: "MISTRAL_API_KEY",
  xai: "XAI_API_KEY"
};
/**
 * Provider discovery priority order.
 * Derived from PROVIDER_ENV_KEYS insertion order (string keys enumerate in
 * insertion order), so the two stay in sync by construction.
 */
const DISCOVERY_ORDER = Object.keys(PROVIDER_ENV_KEYS);
260
/**
 * Reverse map: model name → provider name, built from FRONTIER_MODELS.
 * Models whose canonical name contains a path (e.g. groq's
 * "meta-llama/llama-...") are indexed under both the full and the short name,
 * so "deepseek-chat" → "deepseek", etc.
 */
const MODEL_TO_PROVIDER = {};
for (const [provider, models] of Object.entries(FRONTIER_MODELS)) {
  for (const model of models) {
    MODEL_TO_PROVIDER[model] = provider;
    const shortName = model.split("/").pop();
    if (shortName !== model) MODEL_TO_PROVIDER[shortName] = provider;
  }
}
270
/**
 * Discover the best available provider by scanning environment variables.
 *
 * Note: This function does NOT read AGENT_MODEL — that's handled by
 * resolveModelFallback() which supports comma-separated fallback chains.
 *
 * @param options.preferredModel - If set, prefer the provider that owns this model.
 *   E.g. "deepseek-chat" → prefer "deepseek" if DEEPSEEK_API_KEY is set.
 *   A value already containing "/" is treated as fully qualified and passed
 *   through unchanged (consistent with the owner-provider branch below).
 * @param options.env - Environment to scan (defaults to process.env).
 * @returns The discovered provider and model, or null if none available.
 */
function discoverProvider(options) {
  const env = options?.env ?? process.env;
  const preferredModel = options?.preferredModel;
  const hasPreference = !!preferredModel && preferredModel !== "auto";
  // Prefer the provider that owns the requested model, if its key is set.
  if (hasPreference) {
    const ownerProvider = MODEL_TO_PROVIDER[preferredModel];
    if (ownerProvider) {
      const envKey = PROVIDER_ENV_KEYS[ownerProvider];
      if (envKey && env[envKey]) return {
        provider: ownerProvider,
        model: preferredModel.includes("/") ? preferredModel : `${ownerProvider}/${preferredModel}`
      };
    }
  }
  // Otherwise take the first provider (gateway first) with a key in the env.
  for (const provider of DISCOVERY_ORDER) {
    if (!env[PROVIDER_ENV_KEYS[provider]]) continue;
    if (provider === "gateway") {
      if (hasPreference) return {
        provider: "gateway",
        // FIX: previously a preferred model that already included a provider
        // prefix (e.g. "anthropic/claude-sonnet-4-5") was prefixed again,
        // producing ids like "anthropic/anthropic/claude-sonnet-4-5".
        model: preferredModel.includes("/") ? preferredModel : `${MODEL_TO_PROVIDER[preferredModel] || "anthropic"}/${preferredModel}`
      };
      return {
        provider: "gateway",
        model: getDefaultModel()
      };
    }
    const defaultModel = FRONTIER_MODELS[provider]?.[0];
    return {
      provider,
      model: defaultModel ? (defaultModel.includes("/") ? defaultModel : `${provider}/${defaultModel}`) : provider
    };
  }
  return null;
}
314
/** True when the given value is the "auto" provider sentinel. */
function isAutoProvider(value) {
  const AUTO_SENTINEL = "auto";
  return value === AUTO_SENTINEL;
}
320
/**
 * True when the model's provider has a usable API key in the environment.
 * "auto" is always considered available; a gateway key makes any resolvable
 * model available. The provider is taken from the reverse model map, else
 * from a "provider/model" or "provider:model" prefix.
 */
function isModelAvailable(model, env) {
  if (model === "auto") return true;
  const provider = MODEL_TO_PROVIDER[model]
    ?? (model.includes("/") ? model.split("/")[0]
      : model.includes(":") ? model.split(":")[0]
      : void 0);
  if (!provider) return false;
  if (env[PROVIDER_ENV_KEYS["gateway"]]) return true;
  const envKey = PROVIDER_ENV_KEYS[provider];
  return !!envKey && !!env[envKey];
}
334
/**
 * Resolve a model to a single concrete value, supporting fallback chains.
 *
 * Resolution order:
 * 1. AGENT_DEFAULT_MODELS env var — comma-separated preference list
 *    (e.g. "deepseek-chat, anthropic/claude-sonnet-4-5")
 * 2. Workflow YAML model field (single string, or "auto")
 * 3. Full auto-discovery — scan all provider API keys
 *
 * The preference list does NOT contain "auto" — the env var itself IS
 * the auto configuration. After exhausting the explicit list, the system
 * implicitly falls back to full provider discovery.
 *
 * Example:
 *   AGENT_DEFAULT_MODELS="deepseek-chat, anthropic/claude-sonnet-4-5"
 *   → try deepseek-chat (need DEEPSEEK_API_KEY)
 *   → try claude-sonnet-4-5 (need ANTHROPIC_API_KEY)
 *   → implicit fallback: discover any available provider
 *
 * @returns Resolved { model, provider } — never contains "auto".
 * @throws if nothing is available (no explicit candidate and no provider key).
 */
function resolveModelFallback(config) {
  const env = config.env ?? process.env;
  const isProviderAuto = config.provider === "auto";
  const autoModel = env.AGENT_DEFAULT_MODELS;
  // Fully explicit config and no env preference list: pass straight through.
  if (!isProviderAuto && config.model && config.model !== "auto" && !autoModel) return {
    model: config.model,
    provider: config.provider
  };
  // Step 1: first candidate from the env preference list whose provider key is set.
  const preferences = autoModel ? autoModel.split(",").map((s) => s.trim()).filter(Boolean) : [];
  for (const candidate of preferences) if (isModelAvailable(candidate, env)) return {
    model: candidate,
    // provider "auto" resolves to undefined so downstream code re-derives it.
    provider: isProviderAuto ? void 0 : config.provider
  };
  // Step 2: provider is "auto" but an explicit model was configured — use it
  // if available, qualifying a bare model name with its owning provider.
  if (isProviderAuto && config.model && config.model !== "auto") {
    const model = config.model;
    if (isModelAvailable(model, env)) {
      const ownerProvider = MODEL_TO_PROVIDER[model];
      if (ownerProvider && !model.includes("/") && !model.includes(":")) return {
        model: `${ownerProvider}/${model}`,
        provider: void 0
      };
      return {
        model,
        provider: void 0
      };
    }
  }
  // Step 3: implicit fallback — full discovery across all provider env keys.
  const discovered = discoverProvider({ env });
  if (discovered) return {
    model: discovered.model,
    provider: void 0
  };
  const envVars = Object.values(PROVIDER_ENV_KEYS).join(", ");
  const hint = preferences.length > 0 ? `Tried: ${preferences.join(", ")}. ` : "";
  throw new Error(`No provider available for auto model resolution. ${hint}Set one of: ${envVars}`);
}
392
-
393
- //#endregion
394
- //#region src/backends/model-maps.ts
395
/** Normalize a backend type, mapping the deprecated "sdk" alias to "default". */
function normalizeBackendType(type) {
  return type === "sdk" ? "default" : type;
}
400
/**
 * Default model per backend, used when the caller specifies no model.
 * Values are in each backend's own naming convention (see the translation
 * maps below).
 */
const BACKEND_DEFAULT_MODELS = {
  mock: "mock-model",
  default: "claude-sonnet-4-5",
  claude: "sonnet",
  cursor: "sonnet-4.5",
  codex: "gpt-5.2-codex",
  opencode: "deepseek/deepseek-chat"
};
409
/**
 * Model aliases for the SDK backend (Anthropic naming).
 * Maps short aliases and undated names to fully-dated Anthropic model ids.
 */
const SDK_MODEL_ALIASES = {
  // Short aliases
  sonnet: "claude-sonnet-4-5-20250514",
  opus: "claude-opus-4-20250514",
  haiku: "claude-haiku-3-5-20250514",
  // Undated → dated ids
  "claude-sonnet-4-5": "claude-sonnet-4-5-20250514",
  "claude-opus-4": "claude-opus-4-20250514",
  "claude-haiku-3-5": "claude-haiku-3-5-20250514"
};
420
/**
 * Model translation for the Cursor backend.
 * Cursor uses its own naming convention; keys cover short aliases,
 * Anthropic SDK names, gateway-style "provider/model" ids, and names
 * already in Cursor's format (identity mappings).
 */
const CURSOR_MODEL_MAP = {
  // Short Anthropic aliases
  sonnet: "sonnet-4.5",
  opus: "opus-4.5",
  // Already in Cursor naming — identity mappings
  "sonnet-4.5": "sonnet-4.5",
  "opus-4.5": "opus-4.5",
  "opus-4.6": "opus-4.6",
  // Anthropic SDK names
  "claude-sonnet-4-5": "sonnet-4.5",
  "claude-opus-4-5": "opus-4.5",
  // Gateway-style ids
  "anthropic/claude-sonnet-4-5": "sonnet-4.5",
  "anthropic/claude-opus-4-5": "opus-4.5",
  // Thinking variants (identity)
  "sonnet-4.5-thinking": "sonnet-4.5-thinking",
  "opus-4.5-thinking": "opus-4.5-thinking",
  "opus-4.6-thinking": "opus-4.6-thinking",
  // Non-Anthropic models
  "gpt-5.2": "gpt-5.2",
  "gpt-5.1": "gpt-5.1-high",
  "gpt-4": "gpt-5.2",
  "gemini-pro": "gemini-3-pro",
  "gemini-flash": "gemini-3-flash",
  // Let Cursor pick the model itself
  auto: "auto"
};
444
/**
 * Model translation for the Claude CLI backend.
 * The Claude CLI accepts short model names (sonnet/opus/haiku); all longer
 * forms collapse onto those.
 */
const CLAUDE_MODEL_MAP = {
  // Short names — identity mappings
  sonnet: "sonnet",
  opus: "opus",
  haiku: "haiku",
  // Cursor-style names
  "sonnet-4.5": "sonnet",
  "opus-4.5": "opus",
  // Anthropic SDK names
  "claude-sonnet-4-5": "sonnet",
  "claude-opus-4": "opus",
  "claude-haiku-3-5": "haiku",
  // Gateway-style ids
  "anthropic/claude-sonnet-4-5": "sonnet",
  "anthropic/claude-opus-4": "opus"
};
460
/**
 * Model translation for the Codex CLI backend.
 * Plain "gpt-5.2" is upgraded to the codex-tuned variant.
 */
const CODEX_MODEL_MAP = {
  "gpt-5.2-codex": "gpt-5.2-codex",
  "gpt-5.2": "gpt-5.2-codex",
  o3: "o3",
  "o3-mini": "o3-mini"
};
469
/**
 * Model translation for the OpenCode CLI backend.
 * OpenCode uses the "provider/model" format natively, so short names are
 * expanded to fully-qualified ids.
 */
const OPENCODE_MODEL_MAP = {
  // DeepSeek — short and already-qualified forms
  "deepseek-chat": "deepseek/deepseek-chat",
  "deepseek-reasoner": "deepseek/deepseek-reasoner",
  "deepseek/deepseek-chat": "deepseek/deepseek-chat",
  "deepseek/deepseek-reasoner": "deepseek/deepseek-reasoner",
  // Anthropic — aliases expand to dated ids
  sonnet: "anthropic/claude-sonnet-4-5-20250514",
  opus: "anthropic/claude-opus-4-20250514",
  "claude-sonnet-4-5": "anthropic/claude-sonnet-4-5-20250514",
  "claude-opus-4": "anthropic/claude-opus-4-20250514",
  // OpenAI
  "gpt-5.2": "openai/gpt-5.2",
  o3: "openai/o3"
};
485
/**
 * Translate a generic model name into the given backend's own format.
 * Falls back to the backend default when no model is given. Lookup tries
 * the full name first, then the short name (path prefix stripped).
 */
function getModelForBackend(model, backend) {
  if (!model) return BACKEND_DEFAULT_MODELS[backend];
  const shortName = model.includes("/") ? model.split("/").pop() : model;
  switch (backend) {
    case "default":
      return SDK_MODEL_ALIASES[shortName] || model;
    case "cursor":
      return CURSOR_MODEL_MAP[model] || CURSOR_MODEL_MAP[shortName] || shortName;
    case "claude":
      return CLAUDE_MODEL_MAP[model] || CLAUDE_MODEL_MAP[shortName] || shortName;
    case "codex":
      return CODEX_MODEL_MAP[model] || CODEX_MODEL_MAP[shortName] || shortName;
    case "opencode":
      // OpenCode keeps the full "provider/model" form when unmapped.
      return OPENCODE_MODEL_MAP[model] || OPENCODE_MODEL_MAP[shortName] || model;
    default:
      return shortName;
  }
}
501
/**
 * Parse a model string into provider and version (legacy, for SDK backend).
 * Format: "provider/model-name", or a bare model name which defaults to
 * the anthropic provider. Aliases are expanded via SDK_MODEL_ALIASES.
 */
function parseModel(model) {
  const parts = model.split("/");
  if (parts.length !== 2) {
    return {
      provider: "anthropic",
      model: SDK_MODEL_ALIASES[model] || model
    };
  }
  const [provider, rawName] = parts;
  return {
    provider,
    model: SDK_MODEL_ALIASES[rawName] || rawName
  };
}
516
-
517
- //#endregion
518
- //#region src/backends/types.ts
519
/**
 * Default idle timeout for CLI backends: 10 minutes, in milliseconds.
 * The timer resets on any stdout activity, so this is an inactivity threshold.
 */
const DEFAULT_IDLE_TIMEOUT = 10 * 60 * 1000;
524
-
525
- //#endregion
526
- //#region src/backends/idle-timeout.ts
527
- /**
528
- * Idle timeout for CLI subprocess execution
529
- *
530
- * Unlike a hard timeout (kill after N ms total), an idle timeout only fires
531
- * when the process produces no stdout/stderr output for the configured duration.
532
- * This allows long-running agent tasks to continue as long as they're actively
533
- * producing output (tool calls, analysis, etc.).
534
- */
535
/**
 * Default startup timeout: 30 seconds, in milliseconds.
 * If the process produces zero output within this window, it's killed.
 * This catches unresponsive backends (e.g., nested `claude -p` inside Claude Code).
 */
const DEFAULT_STARTUP_TIMEOUT = 30 * 1000;
/** Floor for the idle timeout, preventing accidental instant kills. */
const MIN_TIMEOUT_MS = 1000;
543
/**
 * Core implementation shared by both sync and abortable variants.
 * Spawns the subprocess, wires the idle/startup timers to its output
 * streams, and returns the result promise, an abort function, and implicitly
 * owns the subprocess handle.
 */
function execWithIdleTimeoutInternal(options) {
  const { command, args, cwd, onStdout } = options;
  // Clamp the idle timeout so a misconfigured tiny value can't insta-kill.
  const timeout = Math.max(options.timeout, MIN_TIMEOUT_MS);
  const rawStartup = options.startupTimeout !== void 0 ? options.startupTimeout : DEFAULT_STARTUP_TIMEOUT;
  // Startup window never exceeds the idle timeout; 0 or negative disables it.
  const startupTimeout = rawStartup > 0 ? Math.min(rawStartup, timeout) : 0;
  let idleTimedOut = false; // set when a timer fired and killed the process
  let hasReceivedOutput = false; // distinguishes startup vs idle timeout in the error
  let timer; // single shared handle for both the startup and idle timers
  let stdout = "";
  let stderr = "";
  let isAborted = false; // set by abort(); takes precedence over timeout errors
  const subprocess = execa(command, args, {
    cwd,
    stdin: "ignore",
    // buffer:false — output is accumulated manually in the data handlers below
    buffer: false
  });
  // (Re)arm the idle timer; called on every chunk of stdout/stderr.
  const resetTimer = () => {
    clearTimeout(timer);
    timer = setTimeout(() => {
      idleTimedOut = true;
      subprocess.kill();
    }, timeout);
  };
  subprocess.stdout?.on("data", (chunk) => {
    const text = chunk.toString();
    stdout += text;
    hasReceivedOutput = true;
    resetTimer();
    // The observer callback must not break stream handling — log and continue.
    if (onStdout) try {
      onStdout(text);
    } catch (err) {
      console.error("onStdout callback error:", err);
    }
  });
  subprocess.stderr?.on("data", (chunk) => {
    stderr += chunk.toString();
    hasReceivedOutput = true;
    resetTimer();
  });
  // Arm the initial timer: the startup window if enabled, else the idle timer.
  if (startupTimeout > 0) timer = setTimeout(() => {
    if (!hasReceivedOutput) {
      idleTimedOut = true;
      subprocess.kill();
    }
  }, startupTimeout);
  else resetTimer();
  // Graceful-then-forceful termination: SIGTERM now, SIGKILL 1s later if needed.
  const abort = () => {
    if (!isAborted) {
      isAborted = true;
      clearTimeout(timer);
      subprocess.kill("SIGTERM");
      setTimeout(() => {
        if (!subprocess.killed) subprocess.kill("SIGKILL");
      }, 1e3);
    }
  };
  return {
    promise: (async () => {
      try {
        await subprocess;
        clearTimeout(timer);
        return {
          stdout: stdout.trimEnd(),
          stderr: stderr.trimEnd()
        };
      } catch (error) {
        clearTimeout(timer);
        // Error precedence: abort > timeout > raw subprocess error.
        if (isAborted) throw new Error("Process aborted by user");
        if (idleTimedOut) throw new IdleTimeoutError(hasReceivedOutput ? timeout : startupTimeout, stdout, stderr);
        throw error;
      }
    })(),
    abort
  };
}
622
/**
 * Execute a command with an idle timeout.
 *
 * The timeout resets every time the process writes to stdout or stderr;
 * if it goes silent for longer than `timeout` ms, it is killed.
 */
async function execWithIdleTimeout(options) {
  return execWithIdleTimeoutInternal(options).promise;
}
632
/**
 * Execute a command with idle timeout and return an abort handle.
 * Returns { promise, abort } so callers can cancel the subprocess
 * externally; otherwise identical to execWithIdleTimeout.
 */
function execWithIdleTimeoutAbortable(options) {
  return execWithIdleTimeoutInternal(options);
}
639
/**
 * Error thrown when a subprocess is killed because it produced no output for
 * longer than the configured idle (or startup) timeout. Carries the partial
 * stdout/stderr captured before the kill so callers can surface it.
 *
 * Declared as a proper `class` (the previous `var X = class` form was a
 * bundler artifact; `var` is non-idiomatic and the bare field declarations
 * were redundant with the constructor assignments).
 */
class IdleTimeoutError extends Error {
  /**
   * @param {number} timeout - Milliseconds of inactivity that triggered the kill.
   * @param {string} stdout - Output captured before the process was killed.
   * @param {string} stderr - Error output captured before the kill.
   */
  constructor(timeout, stdout, stderr) {
    super(`Process idle timed out after ${timeout}ms of inactivity`);
    this.name = "IdleTimeoutError";
    this.timeout = timeout;
    this.stdout = stdout;
    this.stderr = stderr;
  }
}
654
-
655
- //#endregion
656
- //#region src/backends/cli-helpers.ts
657
- /**
658
- * Shared helpers for CLI backends
659
- *
660
- * Eliminates duplicated error handling and availability check patterns
661
- * across claude-code, codex, cursor, and opencode backends.
662
- */
663
/**
 * Normalize errors from CLI backend execution.
 *
 * 1. IdleTimeoutError → human-readable inactivity message
 * 2. Process exit error (has exitCode) → include exit code and stderr
 * 3. Everything else → re-thrown unchanged
 * Always throws; never returns.
 */
function handleCliBackendError(error, backendName, timeout) {
  if (error instanceof IdleTimeoutError) {
    throw new Error(`${backendName} timed out after ${timeout}ms of inactivity`);
  }
  const looksLikeExecError = error !== null && typeof error === "object" && "exitCode" in error;
  if (looksLikeExecError) {
    const { exitCode, stderr, shortMessage } = error;
    throw new Error(`${backendName} failed (exit ${exitCode}): ${stderr || shortMessage}`);
  }
  throw error;
}
679
/**
 * Probe whether a CLI command is available by running `command --version`
 * (or custom args) with a short hard timeout. Resolves true on success,
 * false on any failure — never throws.
 */
async function checkCliAvailable(command, args = ["--version"], timeout = 5e3) {
  return execa(command, args, { stdin: "ignore", timeout }).then(
    () => true,
    () => false
  );
}
693
-
694
- //#endregion
695
- //#region src/backends/stream-json.ts
696
/**
 * Render a standard StreamEvent as a human-readable progress line.
 * Returns null when the event carries nothing worth displaying.
 *
 * Only knows about the normalized StreamEvent shape — backend-specific raw
 * JSON is converted upstream by the EventAdapter.
 */
function formatEvent(event, backendName) {
  // Truncate long payloads for display, appending "..." past the limit.
  const clip = (text, max) => (text.length > max ? `${text.slice(0, max)}...` : text);
  switch (event.kind) {
    case "init": {
      const details = [];
      if (event.model) details.push(`model: ${event.model}`);
      if (event.sessionId) details.push(`session: ${event.sessionId}`);
      const suffix = details.length > 0 ? ` (${details.join(", ")})` : "";
      return `${backendName} initialized${suffix}`;
    }
    case "tool_call_started": {
      const callIdSuffix = event.callId ? ` [${event.callId.slice(0, 8)}]` : "";
      return `STARTING ${event.name}${callIdSuffix}`;
    }
    case "tool_call":
      return `CALL ${event.name}(${clip(event.args, 100)})`;
    case "completed": {
      const details = [];
      if (event.durationMs) details.push(`${(event.durationMs / 1e3).toFixed(1)}s`);
      if (event.costUsd) details.push(`$${event.costUsd.toFixed(4)}`);
      if (event.usage) details.push(`${event.usage.input} in, ${event.usage.output} out`);
      const parts = [backendName, "completed"];
      if (details.length > 0) parts.push(`(${details.join(", ")})`);
      return parts.join(" ");
    }
    case "user_message":
      return `User: ${clip(event.text, 80)}`;
    case "assistant_message":
      return `Assistant: ${clip(event.text, 80)}`;
    case "skip":
      return null;
    case "unknown":
      return `[DEBUG] ${backendName} unknown event type="${event.type}": ${JSON.stringify(event.raw).slice(0, 100)}...`;
  }
}
738
/**
 * Adapter for the Claude/Cursor stream-json format → StreamEvent.
 *
 * Recognized raw events:
 *   { type: "system", subtype: "init", model: "..." }
 *   { type: "user" | "assistant", message: { content: [...] } }
 *   { type: "result", duration_ms: N, total_cost_usd: N }
 * Anything else maps to null (ignored).
 */
const claudeAdapter = (raw) => {
  const event = raw;
  switch (event.type) {
    case "system":
      if (event.subtype !== "init") return null;
      return {
        kind: "init",
        model: event.model,
        sessionId: event.session_id
      };
    case "user": {
      const textContent = event.message.content.find((c) => c.type === "text");
      if (textContent && "text" in textContent) return {
        kind: "user_message",
        text: textContent.text
      };
      return { kind: "skip" };
    }
    case "assistant": {
      // Only the first tool_use block is surfaced; plain text is skipped here
      // (final text is recovered separately by extractClaudeResult).
      const [firstToolUse] = event.message.content.filter((c) => c.type === "tool_use");
      if (firstToolUse) return {
        kind: "tool_call",
        name: firstToolUse.name,
        args: formatToolInput(firstToolUse.input)
      };
      return { kind: "skip" };
    }
    case "result":
      return {
        kind: "completed",
        durationMs: event.duration_ms,
        costUsd: event.total_cost_usd
      };
    default:
      return null;
  }
};
780
/**
 * Pull the final result out of Claude/Cursor stream-json stdout.
 *
 * Priority (scanning from the last line backwards):
 *   1. a `result` event with a non-empty result field
 *   2. the last assistant message's text content
 *   3. raw trimmed stdout as a fallback
 */
function extractClaudeResult(stdout) {
  // Parse each line once; unparseable lines become null and are skipped.
  const events = stdout.trim().split("\n").map((line) => {
    try {
      return JSON.parse(line);
    } catch {
      return null;
    }
  });
  for (let i = events.length - 1; i >= 0; i--) {
    const event = events[i];
    if (event && event.type === "result" && event.result) return { content: event.result };
  }
  for (let i = events.length - 1; i >= 0; i--) {
    const event = events[i];
    if (event && event.type === "assistant" && event.message?.content) {
      const textParts = event.message.content.filter((c) => c.type === "text").map((c) => c.text);
      if (textParts.length > 0) return { content: textParts.join("\n") };
    }
  }
  return { content: stdout.trim() };
}
803
/**
 * Adapter for the Cursor stream-json format → StreamEvent.
 *
 * Recognized raw events:
 *   { type: "system", subtype: "init", model: "..." }
 *   { type: "user" | "assistant", message: { content: [...] } }
 *   { type: "tool_call", subtype: "started" | "completed", call_id, tool_call }
 *   { type: "result", duration_ms: N }
 * Anything else maps to null (ignored).
 */
const cursorAdapter = (raw) => {
  const event = raw;
  switch (event.type) {
    case "system":
      if (event.subtype !== "init") return null;
      return {
        kind: "init",
        model: event.model,
        sessionId: event.session_id
      };
    case "user":
      return {
        kind: "user_message",
        text: event.message.content.map((c) => c.text).join("\n")
      };
    case "assistant":
      return {
        kind: "assistant_message",
        text: event.message.content.map((c) => c.text).join("\n")
      };
    case "tool_call": {
      // Only "started" events are surfaced; completions are skipped.
      if (event.subtype !== "started" || !event.tool_call) return { kind: "skip" };
      const name = event.tool_call.shellToolCall ? "bash" : "tool";
      return {
        kind: "tool_call_started",
        name,
        callId: event.call_id
      };
    }
    case "result":
      return {
        kind: "completed",
        durationMs: event.duration_ms
      };
    default:
      return null;
  }
};
848
/**
 * Adapter for Codex --json format.
 *
 * Events:
 * { type: "thread.started", thread_id: "..." }
 * { type: "item.completed", item: { type: "function_call", name, arguments } }
 * { type: "item.completed", item: { type: "agent_message", text } } → skipped (result only)
 * { type: "turn.completed", usage: { input_tokens, output_tokens } }
 */
const codexAdapter = (raw) => {
	const event = raw;
	switch (event.type) {
		case "thread.started":
			// Abbreviate the thread id for display.
			return { kind: "init", sessionId: `${event.thread_id.slice(0, 8)}...` };
		case "item.completed": {
			const item = event.item;
			// agent_message items are extracted later as the final result, not streamed.
			if (item.type !== "function_call") return { kind: "skip" };
			return { kind: "tool_call", name: item.name, args: item.arguments };
		}
		case "turn.completed": {
			const { input_tokens: input, output_tokens: output } = event.usage;
			return { kind: "completed", usage: { input, output } };
		}
		default:
			return null;
	}
};
880
/**
 * Extract final result from Codex --json output.
 *
 * Priority:
 * 1. Last item.completed with item.type=agent_message
 * 2. Raw stdout fallback
 */
function extractCodexResult(stdout) {
	const trimmed = stdout.trim();
	const lines = trimmed.split("\n");
	// Scan newest-first for the last agent_message.
	for (let i = lines.length - 1; i >= 0; i--) {
		let event;
		try {
			event = JSON.parse(lines[i]);
		} catch {
			continue; // non-JSON noise on stdout is expected
		}
		if (event?.type === "item.completed" && event.item?.type === "agent_message" && event.item?.text) return { content: event.item.text };
	}
	return { content: trimmed };
}
895
/**
 * Create a line-buffered stream parser.
 *
 * Accumulates stdout chunks, parses each line through the given adapter,
 * and emits structured events via callbacks.
 *
 * Tool call dedup: if a tool_call event's name is in mcpToolNames,
 * it's skipped (MCP server already logged it with source="mcp").
 *
 * @param callbacks - Structured output callbacks
 * @param backendName - Display name (e.g., "Cursor", "Claude", "Codex")
 * @param adapter - Format-specific adapter to convert raw JSON → StreamEvent
 * @returns Chunk handler: feed it raw stdout strings as they arrive.
 */
function createStreamParser(callbacks, backendName, adapter) {
	const { debugLog, outputLog, toolCallLog, mcpToolNames } = callbacks;
	// Trailing partial line carried over between chunks (stdout may split mid-line).
	let lineBuf = "";
	return (chunk) => {
		lineBuf += chunk;
		const lines = lineBuf.split("\n");
		// Last element is "" (chunk ended with \n) or an incomplete line — buffer it.
		lineBuf = lines.pop() ?? "";
		for (const line of lines) {
			if (!line.trim()) continue;
			try {
				const raw = JSON.parse(line);
				let event = adapter(raw);
				// Typed but unrecognized events are surfaced as "unknown" for debugging.
				if (!event && raw.type) event = {
					kind: "unknown",
					type: raw.type,
					raw
				};
				if (!event) continue;
				if (event.kind === "tool_call" || event.kind === "tool_call_started") {
					// Dedup: MCP server already logged this tool call with source="mcp".
					if (mcpToolNames?.has(event.name)) continue;
					// Dedicated tool-call logger takes precedence over progress formatting
					// (only for fully-formed tool_call events, not tool_call_started).
					if (toolCallLog && event.kind === "tool_call") {
						toolCallLog(event.name, event.args);
						continue;
					}
				}
				const progress = formatEvent(event, backendName);
				// Conversational/tool events go to outputLog when available; all other
				// event kinds (init, completed, unknown) go to the debug log.
				if (progress) ((event.kind === "tool_call" || event.kind === "tool_call_started" || event.kind === "user_message" || event.kind === "assistant_message") && outputLog ? outputLog : debugLog)(progress);
			} catch {}
		}
	};
}
939
/**
 * Format tool call input for display (truncated JSON string).
 * Returns "" for non-object input or input that cannot be serialized
 * (e.g. circular references or BigInt values).
 */
function formatToolInput(input) {
	if (!input || typeof input !== "object") return "";
	try {
		const serialized = JSON.stringify(input);
		if (serialized.length > 100) return `${serialized.slice(0, 100)}...`;
		return serialized;
	} catch {
		return "";
	}
}
951
-
952
- //#endregion
953
- //#region src/backends/claude-code.ts
954
/**
 * Claude Code CLI backend
 * Uses `claude -p` for non-interactive mode
 *
 * MCP Configuration:
 * Claude supports per-invocation MCP config via --mcp-config flag.
 * The loop writes mcp-config.json to the workspace; this backend
 * auto-discovers it when workspace is set.
 *
 * @see https://docs.anthropic.com/en/docs/claude-code
 */
var ClaudeCodeBackend = class {
	type = "claude";
	options;
	// Abort handle for the currently running `claude` process, if any.
	currentAbort;
	/** @param options - Backend options; timeout defaults to DEFAULT_IDLE_TIMEOUT. */
	constructor(options = {}) {
		this.options = {
			timeout: DEFAULT_IDLE_TIMEOUT,
			...options
		};
	}
	/**
	 * Run `claude -p <message>` and return the parsed result.
	 * Streams stdout through the Claude adapter when streamCallbacks are set
	 * and the output format is stream-json.
	 * @throws Error on idle timeout or non-zero exit of the CLI.
	 */
	async send(message, options) {
		const args = this.buildArgs(message, options);
		const cwd = this.options.workspace || this.options.cwd;
		const outputFormat = this.options.outputFormat ?? "stream-json";
		const timeout = this.options.timeout ?? DEFAULT_IDLE_TIMEOUT;
		try {
			const { promise, abort } = execWithIdleTimeoutAbortable({
				command: "claude",
				args,
				cwd,
				timeout,
				onStdout: outputFormat === "stream-json" && this.options.streamCallbacks ? createStreamParser(this.options.streamCallbacks, "Claude", claudeAdapter) : void 0
			});
			// Stash the abort handle so abort() can cancel a running process.
			this.currentAbort = abort;
			const { stdout } = await promise;
			this.currentAbort = void 0;
			if (outputFormat === "stream-json") return extractClaudeResult(stdout);
			if (outputFormat === "json") try {
				const parsed = JSON.parse(stdout);
				return {
					content: parsed.content || parsed.result || stdout,
					toolCalls: parsed.toolCalls,
					usage: parsed.usage
				};
			} catch {
				// Malformed JSON output — fall back to raw text.
				return { content: stdout.trim() };
			}
			return { content: stdout.trim() };
		} catch (error) {
			// Clear the abort handle before translating the error.
			this.currentAbort = void 0;
			if (error instanceof IdleTimeoutError) {
				// Zero output usually means a nested `claude -p` deadlock — give a targeted hint.
				if (error.stdout === "" && error.stderr === "") throw new Error(`claude produced no output within ${error.timeout}ms. This often happens when running nested 'claude -p' inside an existing Claude Code session. Consider using the SDK backend (model: "anthropic/claude-sonnet-4-5") instead.`);
				throw new Error(`claude timed out after ${timeout}ms of inactivity`);
			}
			if (error && typeof error === "object" && "exitCode" in error) {
				const execError = error;
				throw new Error(`claude failed (exit ${execError.exitCode}): ${execError.stderr || execError.shortMessage}`);
			}
			throw error;
		}
	}
	/** True when the `claude` CLI is on PATH. */
	async isAvailable() {
		return checkCliAvailable("claude");
	}
	/** Human-readable backend description. */
	getInfo() {
		return {
			name: "Claude Code CLI",
			model: this.options.model
		};
	}
	/** Build the `claude` CLI argument list for one invocation. */
	buildArgs(message, options) {
		const args = [
			"-p",
			"--dangerously-skip-permissions",
			message
		];
		if (this.options.model) args.push("--model", this.options.model);
		if (options?.system || this.options.appendSystemPrompt) {
			// Per-call system prompt takes precedence over the configured one.
			const system = options?.system || this.options.appendSystemPrompt;
			args.push("--append-system-prompt", system);
		}
		if (this.options.allowedTools?.length) args.push("--allowed-tools", this.options.allowedTools.join(","));
		const outputFormat = this.options.outputFormat ?? "stream-json";
		args.push("--output-format", outputFormat);
		// stream-json requires --verbose to emit per-event lines.
		if (outputFormat === "stream-json") args.push("--verbose");
		if (this.options.continue) args.push("--continue");
		if (this.options.resume) args.push("--resume", this.options.resume);
		// Explicit mcpConfigPath wins; otherwise auto-discover mcp-config.json in the workspace.
		const mcpConfigPath = this.options.mcpConfigPath ?? (this.options.workspace ? (() => {
			const p = join(this.options.workspace, "mcp-config.json");
			return existsSync(p) ? p : void 0;
		})() : void 0);
		if (mcpConfigPath) args.push("--mcp-config", mcpConfigPath);
		return args;
	}
	/**
	 * Abort any running claude process
	 */
	abort() {
		if (this.currentAbort) {
			this.currentAbort();
			this.currentAbort = void 0;
		}
	}
};
1059
-
1060
- //#endregion
1061
- //#region src/backends/codex.ts
1062
/**
 * OpenAI Codex CLI backend.
 * Runs `codex exec --full-auto --json` non-interactively and parses
 * the JSON event stream emitted on stdout.
 */
var CodexBackend = class {
	type = "codex";
	options;
	/** @param options - Backend options; timeout defaults to DEFAULT_IDLE_TIMEOUT. */
	constructor(options = {}) {
		this.options = {
			timeout: DEFAULT_IDLE_TIMEOUT,
			...options
		};
	}
	/**
	 * Run `codex exec <message>` and return the final agent message.
	 * @throws via handleCliBackendError on timeout or CLI failure.
	 */
	async send(message, _options) {
		const args = this.buildArgs(message);
		const cwd = this.options.workspace || this.options.cwd;
		const timeout = this.options.timeout ?? DEFAULT_IDLE_TIMEOUT;
		try {
			const { stdout } = await execWithIdleTimeout({
				command: "codex",
				args,
				cwd,
				timeout,
				onStdout: this.options.streamCallbacks ? createStreamParser(this.options.streamCallbacks, "Codex", codexAdapter) : void 0
			});
			return extractCodexResult(stdout);
		} catch (error) {
			handleCliBackendError(error, "codex", timeout);
		}
	}
	/** True when the `codex` CLI is on PATH. */
	async isAvailable() {
		return checkCliAvailable("codex");
	}
	/** Human-readable backend description. */
	getInfo() {
		return {
			name: "OpenAI Codex CLI",
			model: this.options.model
		};
	}
	/** Build the `codex` CLI argument list for one invocation. */
	buildArgs(message) {
		const args = [
			"exec",
			"--full-auto",
			"--json",
			"--skip-git-repo-check",
			message
		];
		if (this.options.model) args.push("--model", this.options.model);
		if (this.options.resume) args.push("--resume", this.options.resume);
		return args;
	}
};
1110
-
1111
- //#endregion
1112
- //#region src/backends/cursor.ts
1113
/**
 * Cursor Agent CLI backend.
 * Supports both the IDE-bundled `cursor agent` subcommand and the
 * standalone `agent` binary; the available style is probed once and cached.
 */
var CursorBackend = class {
	type = "cursor";
	options;
	/**
	 * Resolved command style:
	 * - "subcommand": `cursor agent -p ...` (IDE-bundled CLI)
	 * - "direct": `agent -p ...` (standalone install via cursor.com/install)
	 * - null: not yet resolved
	 */
	resolvedStyle = null;
	/** @param options - Backend options; timeout defaults to DEFAULT_IDLE_TIMEOUT. */
	constructor(options = {}) {
		this.options = {
			timeout: DEFAULT_IDLE_TIMEOUT,
			...options
		};
	}
	/**
	 * Run the cursor agent and return the final result.
	 * Output uses the Claude-compatible stream-json format, so the Claude
	 * result extractor is reused here.
	 * @throws via handleCliBackendError on timeout or CLI failure.
	 */
	async send(message, _options) {
		const { command, args } = await this.buildCommand(message);
		const cwd = this.options.workspace || this.options.cwd;
		const timeout = this.options.timeout ?? DEFAULT_IDLE_TIMEOUT;
		try {
			const { stdout } = await execWithIdleTimeout({
				command,
				args,
				cwd,
				timeout,
				onStdout: this.options.streamCallbacks ? createStreamParser(this.options.streamCallbacks, "Cursor", cursorAdapter) : void 0
			});
			return extractClaudeResult(stdout);
		} catch (error) {
			handleCliBackendError(error, "cursor agent", timeout);
		}
	}
	/** True when either cursor command style resolves. */
	async isAvailable() {
		return await this.resolveStyle() !== null;
	}
	/** Human-readable backend description. */
	getInfo() {
		return {
			name: "Cursor Agent CLI",
			model: this.options.model
		};
	}
	/**
	 * Resolve which cursor command style is available.
	 * Tries in order:
	 * 1. `cursor agent --version` — IDE-bundled CLI (subcommand style)
	 * 2. `agent --version` — standalone install via cursor.com/install (direct style)
	 * Result is cached after first resolution.
	 */
	async resolveStyle() {
		if (this.resolvedStyle !== null) return this.resolvedStyle;
		if (await checkCliAvailable("cursor", ["agent", "--version"], 2e3)) {
			this.resolvedStyle = "subcommand";
			return "subcommand";
		}
		if (await checkCliAvailable("agent", ["--version"], 2e3)) {
			this.resolvedStyle = "direct";
			return "direct";
		}
		return null;
	}
	/**
	 * Build the command + argument list matching the resolved style.
	 * @throws Error with install instructions when no cursor CLI is found.
	 */
	async buildCommand(message) {
		const style = await this.resolveStyle();
		const agentArgs = [
			"-p",
			"--force",
			"--approve-mcps",
			"--output-format=stream-json",
			message
		];
		if (this.options.model) agentArgs.push("--model", this.options.model);
		if (!style) throw new Error("cursor agent CLI not found. Install via: curl -fsS https://cursor.com/install | bash");
		if (style === "direct") return {
			command: "agent",
			args: agentArgs
		};
		return {
			command: "cursor",
			args: ["agent", ...agentArgs]
		};
	}
};
1195
-
1196
- //#endregion
1197
- //#region src/backends/opencode.ts
1198
/**
 * OpenCode CLI backend.
 * Runs `opencode run --format json` non-interactively and parses
 * the JSON event stream emitted on stdout.
 */
var OpenCodeBackend = class {
	type = "opencode";
	options;
	/** @param options - Backend options; timeout defaults to DEFAULT_IDLE_TIMEOUT. */
	constructor(options = {}) {
		this.options = {
			timeout: DEFAULT_IDLE_TIMEOUT,
			...options
		};
	}
	/**
	 * Run `opencode run <message>` and return the final text result.
	 * @throws via handleCliBackendError on timeout or CLI failure.
	 */
	async send(message, _options) {
		const args = this.buildArgs(message);
		const cwd = this.options.workspace || this.options.cwd;
		const timeout = this.options.timeout ?? DEFAULT_IDLE_TIMEOUT;
		try {
			const { stdout } = await execWithIdleTimeout({
				command: "opencode",
				args,
				cwd,
				timeout,
				onStdout: this.options.streamCallbacks ? createStreamParser(this.options.streamCallbacks, "OpenCode", opencodeAdapter) : void 0
			});
			return extractOpenCodeResult(stdout);
		} catch (error) {
			handleCliBackendError(error, "opencode", timeout);
		}
	}
	/** True when the `opencode` CLI is on PATH. */
	async isAvailable() {
		return checkCliAvailable("opencode");
	}
	/** Human-readable backend description. */
	getInfo() {
		return {
			name: "OpenCode CLI",
			model: this.options.model
		};
	}
	/** Build the `opencode` CLI argument list for one invocation. */
	buildArgs(message) {
		const args = [
			"run",
			"--format",
			"json",
			message
		];
		if (this.options.model) args.push("--model", this.options.model);
		return args;
	}
};
1244
/**
 * Adapter for OpenCode --format json output.
 *
 * Events:
 * { type: "step_start", sessionID: "..." }
 * { type: "tool_use", part: { tool: "bash", state: { input: {...} } } }
 * { type: "text", part: { text: "..." } } → skipped (result only)
 * { type: "step_finish", part: { cost, tokens } }
 */
const opencodeAdapter = (raw) => {
	const event = raw;
	switch (event.type) {
		case "step_start":
			return { kind: "init", sessionId: event.sessionID };
		case "tool_use": {
			const { tool, state } = event.part;
			// Serialize the tool input and truncate for display.
			const serialized = state.input ? JSON.stringify(state.input) : "";
			const args = serialized.length > 100 ? `${serialized.slice(0, 100)}...` : serialized;
			return { kind: "tool_call", name: tool, args };
		}
		case "text":
			// Text parts are extracted later as the final result, not streamed.
			return { kind: "skip" };
		case "step_finish": {
			const { cost, tokens } = event.part;
			const usage = tokens ? {
				input: tokens.input,
				output: tokens.output
			} : void 0;
			return { kind: "completed", costUsd: cost, usage };
		}
		default:
			return null;
	}
};
1282
/**
 * Extract final result from OpenCode --format json output.
 *
 * Priority:
 * 1. Last text event
 * 2. Raw stdout fallback
 */
function extractOpenCodeResult(stdout) {
	const trimmed = stdout.trim();
	// Walk the lines newest-first; the freshly-split array may be reversed in place.
	for (const line of trimmed.split("\n").reverse()) {
		try {
			const event = JSON.parse(line);
			if (event?.type === "text" && event.part?.text) return { content: event.part.text };
		} catch {}
		// Non-JSON noise on stdout is expected and skipped.
	}
	return { content: trimmed };
}
1297
-
1298
- //#endregion
1299
- //#region src/backends/sdk.ts
1300
/**
 * Vercel AI SDK backend
 * Uses the AI SDK for direct API access
 */
var SdkBackend = class {
	type = "default";
	// Model identifier string (e.g. "anthropic/claude-sonnet-4-5").
	modelId;
	// Lazily-created AI SDK model instance; null until first successful creation.
	model = null;
	maxTokens;
	// Optional custom provider config; forces async model creation in send().
	provider;
	/** @param options - Requires model; maxTokens defaults to 4096. */
	constructor(options) {
		this.modelId = options.model;
		this.maxTokens = options.maxTokens ?? 4096;
		this.provider = options.provider;
		// Eager (sync) model creation is best-effort; failures fall back to
		// the async path in send()/isAvailable().
		if (!this.provider) try {
			this.model = createModel(this.modelId);
		} catch {}
	}
	/**
	 * Generate a single completion for the message.
	 * Creates the model lazily on first use (provider-aware).
	 */
	async send(message, options) {
		if (!this.model) this.model = this.provider ? await createModelWithProvider(this.modelId, this.provider) : await createModelAsync(this.modelId);
		const result = await generateText({
			model: this.model,
			system: options?.system,
			prompt: message,
			maxOutputTokens: this.maxTokens
		});
		return {
			content: result.text,
			usage: {
				input: result.usage.inputTokens ?? 0,
				output: result.usage.outputTokens ?? 0,
				total: result.usage.totalTokens ?? 0
			}
		};
	}
	/** True when a model instance can be created for modelId. */
	async isAvailable() {
		try {
			if (!this.model) this.model = this.provider ? await createModelWithProvider(this.modelId, this.provider) : await createModelAsync(this.modelId);
			return true;
		} catch {
			return false;
		}
	}
	/** Human-readable backend description. */
	getInfo() {
		return {
			name: "Vercel AI SDK",
			model: this.modelId
		};
	}
};
1350
-
1351
- //#endregion
1352
- //#region src/backends/mock.ts
1353
/**
 * Mock AI Backend for testing
 *
 * In single-agent mode, provides a simple echo send().
 * In workflow mode, the loop handles MCP tool orchestration
 * via the mock runner strategy (loop/mock-runner.ts).
 */
var MockAIBackend = class {
	type = "mock";
	/** @param debugLog - Optional sink for diagnostic lines. */
	constructor(debugLog) {
		this.debugLog = debugLog;
	}
	/** Echo the message back (truncated to 200 chars), logging its size. */
	async send(message, _options) {
		const log = this.debugLog || (() => {});
		log(`[mock] Received message (${message.length} chars)`);
		return { content: `[mock] Processed: ${message.slice(0, 200)}` };
	}
};
/**
 * Create a mock AI backend
 */
function createMockBackend(debugLog) {
	return new MockAIBackend(debugLog);
}
1376
-
1377
- //#endregion
1378
- //#region src/backends/index.ts
1379
/**
 * Create a backend instance
 * Model names are automatically translated to backend-specific format
 * Accepts "sdk" as deprecated alias for "default"
 *
 * Examples:
 * - "sonnet" → cursor: "sonnet-4.5", claude: "sonnet", default: "claude-sonnet-4-5-20250514"
 * - "anthropic/claude-sonnet-4-5" → cursor: "sonnet-4.5", claude: "sonnet"
 */
function createBackend(config) {
	const type = normalizeBackendType(config.type);
	// Translate the model alias into this backend's naming scheme.
	const model = getModelForBackend(config.model, type);
	// SDK backend takes a different option shape than the CLI backends.
	if (type === "default") return new SdkBackend({
		model,
		maxTokens: config.maxTokens,
		provider: config.provider
	});
	// All CLI backends share the same constructor shape.
	const registry = new Map([
		["claude", ClaudeCodeBackend],
		["codex", CodexBackend],
		["cursor", CursorBackend],
		["opencode", OpenCodeBackend]
	]);
	const Backend = registry.get(type);
	if (!Backend) throw new Error(`Unknown backend type: ${type}`);
	return new Backend({
		...config.options,
		model
	});
}
1422
/**
 * Check availability with a timeout to avoid hanging when CLIs are missing.
 * Resolves with the promise's value, or `false` if it has not settled
 * within `ms` milliseconds.
 *
 * Fix: the fallback timer is now cleared once the race settles, so a
 * fast-resolving promise no longer leaves a pending setTimeout keeping
 * the Node event loop alive for up to `ms`.
 *
 * @param promise - The availability check to race.
 * @param ms - Milliseconds to wait before giving up and resolving false.
 */
function withTimeout(promise, ms) {
	let timer;
	const fallback = new Promise((resolve) => {
		timer = setTimeout(() => resolve(false), ms);
	});
	// finally() clears the timer whichever side wins; the settled value
	// (or rejection) passes through unchanged.
	return Promise.race([promise, fallback]).finally(() => clearTimeout(timer));
}
1426
/**
 * Check which backends are available
 */
async function checkBackends() {
	// CLI-backed types are probed concurrently; each probe is capped at 3s
	// so a missing CLI cannot hang the whole check.
	const probes = [
		new ClaudeCodeBackend(),
		new CodexBackend(),
		new CursorBackend(),
		new OpenCodeBackend()
	].map((backend) => withTimeout(backend.isAvailable(), 3e3));
	const [claude, codex, cursor, opencode] = await Promise.all(probes);
	// The SDK and mock backends need no external CLI, so they're always available.
	return {
		default: true,
		claude,
		codex,
		cursor,
		opencode,
		mock: true
	};
}
1449
/**
 * List available backends with info
 */
async function listBackends() {
	const availability = await checkBackends();
	// Display names keyed by backend type, in presentation order.
	const displayNames = {
		default: "Vercel AI SDK",
		claude: "Claude Code CLI",
		codex: "OpenAI Codex CLI",
		cursor: "Cursor Agent CLI",
		opencode: "OpenCode CLI"
	};
	return Object.entries(displayNames).map(([type, name]) => ({
		type,
		available: availability[type],
		name
	}));
}
1482
-
1483
- //#endregion
1484
- export { parseModel as A, CLAUDE_MODEL_MAP as C, SDK_MODEL_ALIASES as D, OPENCODE_MODEL_MAP as E, createModelWithProvider as F, getDefaultModel as I, isAutoProvider as L, SUPPORTED_PROVIDERS as M, createModel as N, getModelForBackend as O, createModelAsync as P, resolveModelFallback as R, BACKEND_DEFAULT_MODELS as S, CURSOR_MODEL_MAP as T, extractCodexResult as _, createMockBackend as a, execWithIdleTimeout as b, extractOpenCodeResult as c, CodexBackend as d, ClaudeCodeBackend as f, extractClaudeResult as g, createStreamParser as h, MockAIBackend as i, FRONTIER_MODELS as j, normalizeBackendType as k, opencodeAdapter as l, codexAdapter as m, createBackend as n, SdkBackend as o, claudeAdapter as p, listBackends as r, OpenCodeBackend as s, checkBackends as t, CursorBackend as u, formatEvent as v, CODEX_MODEL_MAP as w, DEFAULT_IDLE_TIMEOUT as x, IdleTimeoutError as y };