agent-worker 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +335 -296
- package/dist/backends-D3MAlJBX.mjs +3 -0
- package/dist/backends-DenGdkrj.mjs +735 -0
- package/dist/cli/index.mjs +2087 -834
- package/dist/context-C7nBmU5D.mjs +4 -0
- package/dist/index.d.mts +272 -173
- package/dist/index.mjs +27 -36
- package/dist/mcp-server-DtIApaBD.mjs +549 -0
- package/dist/{skills-CVdxwuvV.mjs → skills-VyC7eQyK.mjs} +263 -136
- package/dist/workflow-CaRCNEh6.mjs +1784 -0
- package/package.json +10 -4
- package/dist/backends-BZ866Ij9.mjs +0 -564
- package/dist/backends-DXpJ7FJI.mjs +0 -3
|
@@ -0,0 +1,735 @@
|
|
|
1
|
+
import { gateway, generateText } from "ai";
|
|
2
|
+
import { ExecaError, execa } from "execa";
|
|
3
|
+
import { existsSync, mkdirSync, writeFileSync } from "node:fs";
|
|
4
|
+
import { join } from "node:path";
|
|
5
|
+
import { stringify } from "yaml";
|
|
6
|
+
|
|
7
|
+
//#region src/agent/models.ts
|
|
8
|
+
const providerCache = {};
/**
 * Lazy-load a provider package, caching the result (including "not installed").
 *
 * Supports a custom baseURL and apiKey for providers that speak a compatible
 * API through another SDK (e.g., MiniMax using the Claude API via @ai-sdk/anthropic).
 *
 * @param {string} name - Cache key / logical provider name (e.g., "minimax").
 * @param {string} packageName - npm package to import lazily (e.g., "@ai-sdk/anthropic").
 * @param {string} exportName - Name of the provider export inside the package.
 * @param {{ baseURL?: string, apiKeyEnvVar?: string }} [options] - Overrides for compatible APIs.
 * @returns {Promise<Function|null>} Provider function, or null when the package is not installed.
 * @throws {Error} When options.apiKeyEnvVar is set but the environment variable is missing.
 */
async function loadProvider(name, packageName, exportName, options) {
	// Cached result: a provider function, or null meaning "package not installed".
	if (name in providerCache) return providerCache[name] ?? null;
	let module;
	try {
		module = await import(packageName);
	} catch {
		// Only a failed import means "unavailable" — cache the miss so we don't retry.
		providerCache[name] = null;
		return null;
	}
	// Fix: configuration problems below (e.g., a missing API-key env var) now
	// propagate to the caller instead of being swallowed and cached as null,
	// which previously surfaced as a misleading "Install <package>" error.
	if (options?.baseURL || options?.apiKeyEnvVar) {
		// Factory export follows the "create<Export>" naming convention.
		const factoryName = `create${exportName.charAt(0).toUpperCase() + exportName.slice(1)}`;
		const createProvider = module[factoryName];
		if (createProvider) {
			const providerOptions = {};
			if (options.baseURL) providerOptions.baseURL = options.baseURL;
			if (options.apiKeyEnvVar) {
				const apiKey = process.env[options.apiKeyEnvVar];
				if (!apiKey) throw new Error(`Environment variable ${options.apiKeyEnvVar} is not set (required for ${name} provider)`);
				providerOptions.apiKey = apiKey;
			}
			const provider = createProvider(providerOptions);
			providerCache[name] = provider;
			return provider;
		}
		// No create* factory export: fall through to the plain export
		// (custom options are silently ignored, matching previous behavior).
	}
	const exportedProvider = module[exportName];
	providerCache[name] = exportedProvider;
	return exportedProvider;
}
|
|
40
|
+
/**
 * Resolve a model identifier to a provider model instance (synchronous).
 *
 * Accepted formats:
 *
 * 1. Provider-only: "provider" — uses the first FRONTIER_MODELS entry via gateway
 *    Examples: anthropic → anthropic/claude-sonnet-4-5, openai → openai/gpt-5.2
 *
 * 2. Gateway: "provider/model-name" — Vercel AI Gateway (requires AI_GATEWAY_API_KEY)
 *    Examples: anthropic/claude-sonnet-4-5, openai/gpt-5.2, deepseek/deepseek-chat
 *
 * 3. Direct provider: "provider:model-name" — requires the @ai-sdk/provider
 *    package to have been loaded already (see createModelAsync for lazy loading).
 */
function createModel(modelId) {
	// Any slash means gateway format; hand the identifier straight to the gateway.
	if (modelId.includes("/")) return gateway(modelId);
	const separator = modelId.indexOf(":");
	if (separator === -1) {
		// Bare provider name: route to that provider's default (first) frontier model.
		if (modelId in FRONTIER_MODELS) {
			const [firstModel] = FRONTIER_MODELS[modelId];
			return gateway(`${modelId}/${firstModel}`);
		}
		throw new Error(`Unknown provider: ${modelId}. Supported: ${Object.keys(FRONTIER_MODELS).join(", ")}`);
	}
	// Direct provider format: "provider:model-name".
	const provider = modelId.slice(0, separator);
	const modelName = modelId.slice(separator + 1);
	if (!modelName) throw new Error(`Invalid model identifier: ${modelId}. Model name is required.`);
	// Synchronous path can only use providers already present in the cache.
	if (provider in providerCache && providerCache[provider]) return providerCache[provider](modelName);
	throw new Error(`Provider '${provider}' not loaded. Use gateway format (${provider}/${modelName}) or call createModelAsync() for direct provider access.`);
}
|
|
74
|
+
/**
 * Async version of createModel — lazily loads direct provider packages.
 * Use this when you need direct provider access (provider:model format).
 */
async function createModelAsync(modelId) {
	// Gateway and provider-only formats resolve exactly like createModel().
	if (modelId.includes("/")) return gateway(modelId);
	const separator = modelId.indexOf(":");
	if (separator === -1) {
		if (modelId in FRONTIER_MODELS) {
			const [firstModel] = FRONTIER_MODELS[modelId];
			return gateway(`${modelId}/${firstModel}`);
		}
		throw new Error(`Unknown provider: ${modelId}. Supported: ${Object.keys(FRONTIER_MODELS).join(", ")}`);
	}
	const provider = modelId.slice(0, separator);
	const modelName = modelId.slice(separator + 1);
	if (!modelName) throw new Error(`Invalid model identifier: ${modelId}. Model name is required.`);
	// Directly-loadable providers. MiniMax variants reuse the Anthropic SDK
	// pointed at a Claude-compatible endpoint with their own API-key env var.
	const providerConfigs = {
		anthropic: { package: "@ai-sdk/anthropic", export: "anthropic" },
		openai: { package: "@ai-sdk/openai", export: "openai" },
		deepseek: { package: "@ai-sdk/deepseek", export: "deepseek" },
		google: { package: "@ai-sdk/google", export: "google" },
		groq: { package: "@ai-sdk/groq", export: "groq" },
		mistral: { package: "@ai-sdk/mistral", export: "mistral" },
		xai: { package: "@ai-sdk/xai", export: "xai" },
		minimax: {
			package: "@ai-sdk/anthropic",
			export: "anthropic",
			options: {
				baseURL: "https://api.minimax.io/anthropic/v1",
				apiKeyEnvVar: "MINIMAX_API_KEY"
			}
		},
		minimax_cn: {
			package: "@ai-sdk/anthropic",
			export: "anthropic",
			options: {
				baseURL: "https://api.minimaxi.com/anthropic/v1",
				apiKeyEnvVar: "MINIMAX_CN_API_KEY"
			}
		}
	};
	const config = providerConfigs[provider];
	if (!config) throw new Error(`Unknown provider: ${provider}. Supported: ${Object.keys(providerConfigs).join(", ")}. Or use gateway format: provider/model (e.g., openai/gpt-5.2)`);
	const providerFn = await loadProvider(provider, config.package, config.export, config.options);
	if (!providerFn) throw new Error(`Install ${config.package} to use ${provider} models directly`);
	return providerFn(modelName);
}
|
|
144
|
+
/**
 * List of supported providers for direct access (provider:model format).
 * Kept in sync with the providerConfigs table in createModelAsync().
 * Note: minimax / minimax_cn use the Claude-compatible API via @ai-sdk/anthropic
 * with a custom baseURL.
 */
const SUPPORTED_PROVIDERS = [
	"anthropic",
	"openai",
	"deepseek",
	"google",
	"groq",
	"mistral",
	"xai",
	"minimax",
	// Fix: minimax_cn is handled by createModelAsync's providerConfigs but was
	// missing from this list.
	"minimax_cn"
];
|
|
158
|
+
/**
 * Default provider when none is specified.
 */
const DEFAULT_PROVIDER = "anthropic";
/**
 * Get the default model identifier (provider/model format).
 * Uses the first (preferred) model of the default provider.
 */
function getDefaultModel() {
	const [preferred] = FRONTIER_MODELS[DEFAULT_PROVIDER];
	return `${DEFAULT_PROVIDER}/${preferred}`;
}
/**
 * Frontier models for each provider (as of 2026-02); the first entry per
 * provider is its default. Only includes the latest/best models, no legacy
 * versions.
 *
 * Note: Some models may be placeholders for testing or future releases.
 * Always verify model availability with the provider before production use.
 */
const FRONTIER_MODELS = {
	anthropic: ["claude-sonnet-4-5", "claude-haiku-4-5", "claude-opus-4-5"],
	openai: ["gpt-5.2", "gpt-5.2-codex"],
	google: ["gemini-3-pro-preview", "gemini-2.5-flash", "gemini-2.5-pro"],
	deepseek: ["deepseek-chat", "deepseek-reasoner"],
	groq: ["meta-llama/llama-4-scout-17b-16e-instruct", "deepseek-r1-distill-llama-70b"],
	mistral: ["mistral-large-latest", "pixtral-large-latest", "magistral-medium-2506"],
	xai: ["grok-4", "grok-4-fast-reasoning"],
	minimax: ["MiniMax-M2"]
};
|
|
198
|
+
|
|
199
|
+
//#endregion
|
|
200
|
+
//#region src/backends/model-maps.ts
|
|
201
|
+
/** Default model per backend (used when the caller supplies no model). */
const BACKEND_DEFAULT_MODELS = {
	mock: "mock-model",
	// SDK backend takes an Anthropic-style model ID.
	sdk: "claude-sonnet-4-5",
	// CLI backends each use their own naming scheme (see the *_MODEL_MAP tables).
	claude: "sonnet",
	cursor: "sonnet-4.5",
	codex: "gpt-5.2-codex"
};
|
|
209
|
+
/**
 * Model aliases for SDK (Anthropic format).
 * Maps short aliases and bare model names to date-stamped Anthropic model IDs.
 * NOTE(review): the date-stamped snapshot IDs below should be verified against
 * Anthropic's current model list — confirm they are valid snapshots.
 */
const SDK_MODEL_ALIASES = {
	// Short aliases.
	sonnet: "claude-sonnet-4-5-20250514",
	opus: "claude-opus-4-20250514",
	haiku: "claude-haiku-3-5-20250514",
	// Bare model names → date-stamped snapshots.
	"claude-sonnet-4-5": "claude-sonnet-4-5-20250514",
	"claude-opus-4": "claude-opus-4-20250514",
	"claude-haiku-3-5": "claude-haiku-3-5-20250514"
};
|
|
220
|
+
/**
 * Model translation for Cursor backend.
 * Cursor uses its own naming convention; keys cover short aliases,
 * Anthropic-style IDs, gateway-style "provider/model" IDs, and
 * Cursor-native names mapped to themselves.
 */
const CURSOR_MODEL_MAP = {
	// Short aliases.
	sonnet: "sonnet-4.5",
	opus: "opus-4.5",
	// Cursor-native names (identity mappings).
	"sonnet-4.5": "sonnet-4.5",
	"opus-4.5": "opus-4.5",
	"opus-4.6": "opus-4.6",
	// Anthropic-style model IDs.
	"claude-sonnet-4-5": "sonnet-4.5",
	"claude-opus-4-5": "opus-4.5",
	// Gateway-style identifiers.
	"anthropic/claude-sonnet-4-5": "sonnet-4.5",
	"anthropic/claude-opus-4-5": "opus-4.5",
	// Thinking variants (identity mappings).
	"sonnet-4.5-thinking": "sonnet-4.5-thinking",
	"opus-4.5-thinking": "opus-4.5-thinking",
	"opus-4.6-thinking": "opus-4.6-thinking",
	// Non-Anthropic models.
	"gpt-5.2": "gpt-5.2",
	"gpt-5.1": "gpt-5.1-high",
	// NOTE(review): legacy names appear to be upgraded to newer models — confirm intended.
	"gpt-4": "gpt-5.2",
	"gemini-pro": "gemini-3-pro",
	"gemini-flash": "gemini-3-flash",
	// Let Cursor pick the model itself.
	auto: "auto"
};
|
|
244
|
+
/**
 * Model translation for Claude CLI backend.
 * Claude CLI uses short model names (sonnet / opus / haiku).
 */
const CLAUDE_MODEL_MAP = {
	// Short aliases (identity mappings).
	sonnet: "sonnet",
	opus: "opus",
	haiku: "haiku",
	// Versioned short names.
	"sonnet-4.5": "sonnet",
	"opus-4.5": "opus",
	// Anthropic-style model IDs.
	"claude-sonnet-4-5": "sonnet",
	"claude-opus-4": "opus",
	"claude-haiku-3-5": "haiku",
	// Gateway-style identifiers.
	"anthropic/claude-sonnet-4-5": "sonnet",
	"anthropic/claude-opus-4": "opus"
};
|
|
260
|
+
/**
 * Model translation for Codex CLI backend.
 */
const CODEX_MODEL_MAP = {
	// Codex-native model (identity mapping).
	"gpt-5.2-codex": "gpt-5.2-codex",
	// Plain GPT name routed to the Codex-tuned variant.
	"gpt-5.2": "gpt-5.2-codex",
	o3: "o3",
	"o3-mini": "o3-mini"
};
|
|
269
|
+
/**
 * Get the model name for a specific backend.
 * Translates generic model names (including gateway-style "provider/model"
 * identifiers) into the backend's own naming convention; unknown names pass
 * through with any provider prefix stripped.
 */
function getModelForBackend(model, backend) {
	// No model requested: use the backend's default.
	if (!model) return BACKEND_DEFAULT_MODELS[backend];
	// Strip a gateway-style "provider/" prefix before alias lookup.
	const bareName = model.includes("/") ? model.split("/").pop() : model;
	// SDK resolves aliases on the bare name, otherwise keeps the full identifier.
	if (backend === "sdk") return SDK_MODEL_ALIASES[bareName] || model;
	const cliMaps = {
		cursor: CURSOR_MODEL_MAP,
		claude: CLAUDE_MODEL_MAP,
		codex: CODEX_MODEL_MAP
	};
	const map = cliMaps[backend];
	// CLI backends try the full identifier first, then the bare name.
	if (map) return map[model] || map[bareName] || bareName;
	return bareName;
}
|
|
284
|
+
/**
 * Parse model string to provider and version (legacy, for SDK backend).
 * Format: provider/model-name, or just model-name (defaults to anthropic).
 */
function parseModel(model) {
	const [head, tail, ...extra] = model.split("/");
	// Exactly one slash → explicit provider plus model name.
	if (tail !== undefined && extra.length === 0) {
		return {
			provider: head,
			model: SDK_MODEL_ALIASES[tail] || tail
		};
	}
	// Anything else (no slash, or multiple slashes) defaults to anthropic.
	return {
		provider: "anthropic",
		model: SDK_MODEL_ALIASES[model] || model
	};
}
|
|
299
|
+
|
|
300
|
+
//#endregion
|
|
301
|
+
//#region src/backends/claude-code.ts
|
|
302
|
+
/**
 * Claude Code CLI backend.
 * Uses `claude -p` for non-interactive mode.
 *
 * MCP Configuration:
 * Claude supports per-invocation MCP config via the --mcp-config flag.
 * Use setWorkspace() for workspace isolation, or setMcpConfigPath() directly.
 *
 * @see https://docs.anthropic.com/en/docs/claude-code
 */
var ClaudeCodeBackend = class {
	type = "claude";
	options;
	constructor(options = {}) {
		// Default timeout: 300000 ms (5 minutes).
		this.options = {
			timeout: 3e5,
			...options
		};
	}
	/**
	 * Set up a workspace directory with MCP config.
	 * Claude uses the --mcp-config flag, so we just write the config file there.
	 */
	setWorkspace(workspaceDir, mcpConfig) {
		this.options.workspace = workspaceDir;
		if (!existsSync(workspaceDir)) mkdirSync(workspaceDir, { recursive: true });
		const mcpConfigPath = join(workspaceDir, "mcp-config.json");
		writeFileSync(mcpConfigPath, JSON.stringify(mcpConfig, null, 2));
		this.options.mcpConfigPath = mcpConfigPath;
	}
	/** Run the claude CLI with the given prompt and return its reply. */
	async send(message, options) {
		const args = this.buildArgs(message, options);
		const cwd = this.options.workspace || this.options.cwd;
		let stdout;
		try {
			({ stdout } = await execa("claude", args, {
				cwd,
				stdin: "ignore",
				timeout: this.options.timeout
			}));
		} catch (error) {
			if (error instanceof ExecaError) throw new Error(`claude failed (exit ${error.exitCode}): ${error.stderr || error.shortMessage}`);
			throw error;
		}
		// Plain text mode: return trimmed output as-is.
		if (this.options.outputFormat !== "json") return { content: stdout.trim() };
		// JSON mode: best-effort parse, falling back to the raw output.
		try {
			const parsed = JSON.parse(stdout);
			return {
				content: parsed.content || parsed.result || stdout,
				toolCalls: parsed.toolCalls,
				usage: parsed.usage
			};
		} catch {
			return { content: stdout.trim() };
		}
	}
	/** Probe for the claude binary (5s cap). */
	async isAvailable() {
		try {
			await execa("claude", ["--version"], {
				stdin: "ignore",
				timeout: 5e3
			});
			return true;
		} catch {
			return false;
		}
	}
	/** Human-readable backend descriptor. */
	getInfo() {
		return {
			name: "Claude Code CLI",
			model: this.options.model
		};
	}
	/** Assemble the claude CLI argument list for one invocation. */
	buildArgs(message, options) {
		const { model, appendSystemPrompt, allowedTools, outputFormat, resume, mcpConfigPath } = this.options;
		const args = ["-p", "--dangerously-skip-permissions", message];
		if (model) args.push("--model", model);
		// Per-call system prompt wins over the configured one.
		const system = options?.system || appendSystemPrompt;
		if (system) args.push("--append-system-prompt", system);
		if (allowedTools?.length) args.push("--allowed-tools", allowedTools.join(","));
		if (outputFormat) args.push("--output-format", outputFormat);
		if (this.options.continue) args.push("--continue");
		if (resume) args.push("--resume", resume);
		if (mcpConfigPath) args.push("--mcp-config", mcpConfigPath);
		return args;
	}
	/**
	 * Set MCP config path (for workflow integration).
	 */
	setMcpConfigPath(path) {
		this.options.mcpConfigPath = path;
	}
};
|
|
399
|
+
|
|
400
|
+
//#endregion
|
|
401
|
+
//#region src/backends/codex.ts
|
|
402
|
+
/**
 * OpenAI Codex CLI backend.
 * Uses `codex exec` for non-interactive mode.
 *
 * MCP Configuration:
 * Codex uses project-level MCP config. Use setWorkspace() to set up
 * a dedicated workspace directory with .codex/config.yaml for MCP settings.
 *
 * @see https://github.com/openai/codex
 */
var CodexBackend = class {
	type = "codex";
	options;
	constructor(options = {}) {
		// Default timeout: 300000 ms (5 minutes).
		this.options = {
			timeout: 3e5,
			...options
		};
	}
	/**
	 * Set up a workspace directory with MCP config.
	 * Creates .codex/config.yaml in the workspace with the MCP server config.
	 */
	setWorkspace(workspaceDir, mcpConfig) {
		this.options.workspace = workspaceDir;
		const codexDir = join(workspaceDir, ".codex");
		if (!existsSync(codexDir)) mkdirSync(codexDir, { recursive: true });
		const codexConfig = { mcp_servers: mcpConfig.mcpServers };
		writeFileSync(join(codexDir, "config.yaml"), stringify(codexConfig));
	}
	/** Run the codex CLI with the given prompt and return its reply. */
	async send(message, _options) {
		const args = this.buildArgs(message);
		const cwd = this.options.workspace || this.options.cwd;
		let stdout;
		try {
			({ stdout } = await execa("codex", args, {
				cwd,
				stdin: "ignore",
				timeout: this.options.timeout
			}));
		} catch (error) {
			if (error instanceof ExecaError) throw new Error(`codex failed (exit ${error.exitCode}): ${error.stderr || error.shortMessage}`);
			throw error;
		}
		if (!this.options.json) return { content: stdout.trim() };
		// JSON mode emits one event per line; the final event carries the reply.
		try {
			const lines = stdout.trim().split("\n");
			const lastLine = lines.at(-1);
			if (!lastLine) return { content: stdout.trim() };
			const lastEvent = JSON.parse(lastLine);
			return {
				content: lastEvent.message || lastEvent.content || stdout,
				toolCalls: lastEvent.toolCalls,
				usage: lastEvent.usage
			};
		} catch {
			return { content: stdout.trim() };
		}
	}
	/** Probe for the codex binary (5s cap). */
	async isAvailable() {
		try {
			await execa("codex", ["--version"], {
				stdin: "ignore",
				timeout: 5e3
			});
			return true;
		} catch {
			return false;
		}
	}
	/** Human-readable backend descriptor. */
	getInfo() {
		return {
			name: "OpenAI Codex CLI",
			model: this.options.model
		};
	}
	/** Assemble the codex CLI argument list for one invocation. */
	buildArgs(message) {
		const { model, json, skipGitRepoCheck, approvalMode, resume } = this.options;
		const args = ["exec", "--dangerously-bypass-approvals-and-sandbox", message];
		if (model) args.push("--model", model);
		if (json) args.push("--json");
		if (skipGitRepoCheck) args.push("--skip-git-repo-check");
		if (approvalMode) args.push("--approval-mode", approvalMode);
		if (resume) args.push("--resume", resume);
		return args;
	}
};
|
|
491
|
+
|
|
492
|
+
//#endregion
|
|
493
|
+
//#region src/backends/cursor.ts
|
|
494
|
+
/**
 * Cursor CLI backend.
 * Uses `cursor-agent -p` for non-interactive mode.
 *
 * MCP Configuration:
 * Cursor uses project-level MCP config via .cursor/mcp.json in the workspace.
 * Use setWorkspace() to set up a dedicated workspace with MCP config.
 *
 * @see https://docs.cursor.com/context/model-context-protocol
 */
var CursorBackend = class {
	type = "cursor";
	options;
	constructor(options = {}) {
		// Default timeout: 120000 ms (2 minutes).
		this.options = {
			timeout: 12e4,
			...options
		};
	}
	/**
	 * Set up a workspace directory with MCP config.
	 * Creates .cursor/mcp.json in the workspace.
	 */
	setWorkspace(workspaceDir, mcpConfig) {
		this.options.workspace = workspaceDir;
		const cursorDir = join(workspaceDir, ".cursor");
		if (!existsSync(cursorDir)) mkdirSync(cursorDir, { recursive: true });
		writeFileSync(join(cursorDir, "mcp.json"), JSON.stringify(mcpConfig, null, 2));
	}
	/** Run cursor-agent with the given prompt and return its reply. */
	async send(message, _options) {
		const { command, args } = this.buildCommand(message);
		const cwd = this.options.workspace || this.options.cwd;
		try {
			const { stdout } = await execa(command, args, {
				cwd,
				stdin: "ignore",
				timeout: this.options.timeout
			});
			return { content: stdout.trim() };
		} catch (error) {
			if (error instanceof ExecaError) {
				// Surface timeouts distinctly from nonzero exits.
				if (error.timedOut) throw new Error(`cursor-agent timed out after ${this.options.timeout}ms`);
				throw new Error(`cursor-agent failed (exit ${error.exitCode}): ${error.stderr || error.shortMessage}`);
			}
			throw error;
		}
	}
	/** Probe both known binary names (2s cap each). */
	async isAvailable() {
		const candidates = ["cursor-agent", "agent"];
		for (const cmd of candidates) {
			try {
				await execa(cmd, ["--version"], {
					stdin: "ignore",
					timeout: 2e3
				});
				return true;
			} catch {
				// Try the next candidate binary name.
			}
		}
		return false;
	}
	/** Human-readable backend descriptor. */
	getInfo() {
		return {
			name: "Cursor Agent CLI",
			model: this.options.model
		};
	}
	/** Assemble the cursor-agent command and argument list for one invocation. */
	buildCommand(message) {
		const args = ["-p", "--force", "--approve-mcps", message];
		if (this.options.model) args.push("--model", this.options.model);
		return {
			command: "cursor-agent",
			args
		};
	}
};
|
|
571
|
+
|
|
572
|
+
//#endregion
|
|
573
|
+
//#region src/backends/sdk.ts
|
|
574
|
+
/**
 * Vercel AI SDK backend.
 * Uses the AI SDK for direct API access.
 */
var SdkBackend = class {
	type = "sdk";
	modelId;
	model = null;
	maxTokens;
	constructor(options) {
		this.modelId = options.model;
		// Default output cap: 4096 tokens.
		this.maxTokens = options.maxTokens ?? 4096;
		try {
			// Eager, synchronous resolution (gateway IDs / already-loaded providers).
			this.model = createModel(this.modelId);
		} catch {
			// Deferred: send()/isAvailable() resolve lazily via createModelAsync().
		}
	}
	/** Generate a single completion for the prompt. */
	async send(message, options) {
		if (!this.model) this.model = await createModelAsync(this.modelId);
		const result = await generateText({
			model: this.model,
			system: options?.system,
			prompt: message,
			maxOutputTokens: this.maxTokens
		});
		const { inputTokens, outputTokens, totalTokens } = result.usage;
		return {
			content: result.text,
			usage: {
				input: inputTokens ?? 0,
				output: outputTokens ?? 0,
				total: totalTokens ?? 0
			}
		};
	}
	/** True when the model identifier can be resolved to a provider model. */
	async isAvailable() {
		try {
			if (!this.model) this.model = await createModelAsync(this.modelId);
			return true;
		} catch {
			return false;
		}
	}
	/** Human-readable backend descriptor. */
	getInfo() {
		return {
			name: "Vercel AI SDK",
			model: this.modelId
		};
	}
};
|
|
622
|
+
|
|
623
|
+
//#endregion
|
|
624
|
+
//#region src/backends/mock.ts
|
|
625
|
+
/**
 * Mock AI Backend for testing.
 *
 * In single-agent mode, provides a simple echo send().
 * In workflow mode, the controller handles MCP tool orchestration
 * via the mock runner strategy (controller/mock-runner.ts).
 */
var MockAIBackend = class {
	type = "mock";
	constructor(debugLog) {
		this.debugLog = debugLog;
	}
	/** Echo back a truncated copy of the prompt, logging via debugLog when set. */
	async send(message, _options) {
		this.debugLog?.(`[mock] Received message (${message.length} chars)`);
		// Replies are capped at the first 200 characters of the prompt.
		return { content: `[mock] Processed: ${message.slice(0, 200)}` };
	}
};
/**
 * Create a mock AI backend.
 */
function createMockBackend(debugLog) {
	return new MockAIBackend(debugLog);
}
|
|
648
|
+
|
|
649
|
+
//#endregion
|
|
650
|
+
//#region src/backends/index.ts
|
|
651
|
+
/**
 * Create a backend instance.
 * Model names are automatically translated to the backend-specific format.
 *
 * Examples:
 * - "sonnet" → cursor: "sonnet-4.5", claude: "sonnet", sdk: "claude-sonnet-4-5-20250514"
 * - "anthropic/claude-sonnet-4-5" → cursor: "sonnet-4.5", claude: "sonnet"
 */
function createBackend(config) {
	const model = getModelForBackend(config.model, config.type);
	// SDK backend has its own option shape (model + maxTokens only).
	if (config.type === "sdk") return new SdkBackend({
		model,
		maxTokens: config.maxTokens
	});
	// CLI backends all take the caller's options plus the translated model.
	const cliBackends = {
		claude: ClaudeCodeBackend,
		codex: CodexBackend,
		cursor: CursorBackend
	};
	const Backend = cliBackends[config.type];
	if (Backend) return new Backend({
		...config.options,
		model
	});
	throw new Error(`Unknown backend type: ${config.type}`);
}
|
|
681
|
+
/**
 * Race a promise against a timeout, resolving false when the deadline passes.
 * Used to avoid hanging availability checks when CLIs are missing.
 *
 * Fix: the pending setTimeout timer is now cleared once the race settles, so a
 * fast promise no longer leaves a live timer that keeps the Node event loop
 * alive for up to `ms` milliseconds.
 *
 * @param {Promise} promise - The promise to race (its outcome passes through).
 * @param {number} ms - Timeout in milliseconds.
 * @returns {Promise} The promise's outcome, or false if the timeout fires first.
 */
function withTimeout(promise, ms) {
	let timer;
	const timeout = new Promise((resolve) => {
		timer = setTimeout(() => resolve(false), ms);
	});
	// .finally preserves the raced value/rejection while releasing the timer.
	return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
}
|
|
685
|
+
/**
 * Check which backends are available.
 * CLI probes run in parallel, each capped at 3 seconds; the sdk and mock
 * backends are always reported available.
 */
async function checkBackends() {
	const probes = {
		claude: new ClaudeCodeBackend(),
		codex: new CodexBackend(),
		cursor: new CursorBackend()
	};
	const [claudeAvailable, codexAvailable, cursorAvailable] = await Promise.all(
		Object.values(probes).map((backend) => withTimeout(backend.isAvailable(), 3e3))
	);
	return {
		sdk: true,
		claude: claudeAvailable,
		codex: codexAvailable,
		cursor: cursorAvailable,
		mock: true
	};
}
|
|
705
|
+
/**
 * List available backends with info.
 */
async function listBackends() {
	const availability = await checkBackends();
	// Display catalog: backend type paired with its human-readable name.
	const catalog = [
		["sdk", "Vercel AI SDK"],
		["claude", "Claude Code CLI"],
		["codex", "OpenAI Codex CLI"],
		["cursor", "Cursor Agent CLI"]
	];
	return catalog.map(([type, name]) => ({
		type,
		available: availability[type],
		name
	}));
}
|
|
733
|
+
|
|
734
|
+
//#endregion
|
|
735
|
+
// Public chunk exports; single-letter aliases are presumably assigned by the bundler — do not rename.
export { FRONTIER_MODELS as _, createMockBackend as a, createModelAsync as b, CodexBackend as c, CLAUDE_MODEL_MAP as d, CODEX_MODEL_MAP as f, parseModel as g, getModelForBackend as h, MockAIBackend as i, ClaudeCodeBackend as l, SDK_MODEL_ALIASES as m, createBackend as n, SdkBackend as o, CURSOR_MODEL_MAP as p, listBackends as r, CursorBackend as s, checkBackends as t, BACKEND_DEFAULT_MODELS as u, SUPPORTED_PROVIDERS as v, getDefaultModel as x, createModel as y };
|