agent-worker 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/models-FOOpWB91.mjs ADDED
@@ -0,0 +1,182 @@
+ import { gateway } from "ai";
+
+ //#region src/models.ts
+ const providerCache = {};
+ /**
+  * Lazy load a provider, caching the result
+  * Supports custom baseURL and apiKey for providers using compatible APIs (e.g., MiniMax using Claude API)
+  */
+ async function loadProvider(name, packageName, exportName, options) {
+   if (name in providerCache) return providerCache[name];
+   try {
+     const module = await import(packageName);
+     if (options?.baseURL || options?.apiKeyEnvVar) {
+       const createProvider = module[`create${exportName.charAt(0).toUpperCase() + exportName.slice(1)}`];
+       if (createProvider) {
+         const providerOptions = {};
+         if (options.baseURL) providerOptions.baseURL = options.baseURL;
+         if (options.apiKeyEnvVar) providerOptions.apiKey = process.env[options.apiKeyEnvVar];
+         providerCache[name] = createProvider(providerOptions);
+         return providerCache[name];
+       }
+     }
+     providerCache[name] = module[exportName];
+     return providerCache[name];
+   } catch {
+     providerCache[name] = null;
+     return null;
+   }
+ }
+ /**
+  * Parse model identifier and return the appropriate provider model
+  *
+  * Supports three formats:
+  *
+  * 1. Provider-only format: provider
+  *    Uses first model from FRONTIER_MODELS via gateway
+  *    Examples: anthropic → anthropic/claude-sonnet-4-5, openai → openai/gpt-5.2
+  *
+  * 2. Gateway format: provider/model-name
+  *    Uses Vercel AI Gateway (requires AI_GATEWAY_API_KEY)
+  *    Examples: anthropic/claude-sonnet-4-5, openai/gpt-5.2, deepseek/deepseek-chat
+  *
+  * 3. Direct provider format: provider:model-name
+  *    Requires installing the specific @ai-sdk/provider package
+  *    Examples: anthropic:claude-sonnet-4-5, openai:gpt-5.2, deepseek:deepseek-chat
+  */
+ function createModel(modelId) {
+   if (modelId.includes("/")) return gateway(modelId);
+   if (!modelId.includes(":")) {
+     const provider = modelId;
+     if (provider in FRONTIER_MODELS) {
+       const defaultModel = FRONTIER_MODELS[provider][0];
+       return gateway(`${provider}/${defaultModel}`);
+     }
+     throw new Error(`Unknown provider: ${modelId}. Supported: ${Object.keys(FRONTIER_MODELS).join(", ")}`);
+   }
+   const colonIndex = modelId.indexOf(":");
+   const provider = modelId.slice(0, colonIndex);
+   const modelName = modelId.slice(colonIndex + 1);
+   if (!modelName) throw new Error(`Invalid model identifier: ${modelId}. Model name is required.`);
+   if (provider in providerCache && providerCache[provider]) return providerCache[provider](modelName);
+   throw new Error(`Provider '${provider}' not loaded. Use gateway format (${provider}/${modelName}) or call createModelAsync() for direct provider access.`);
+ }
+ /**
+  * Async version of createModel - supports lazy loading of direct providers
+  * Use this when you need direct provider access (provider:model format)
+  */
+ async function createModelAsync(modelId) {
+   if (modelId.includes("/")) return gateway(modelId);
+   if (!modelId.includes(":")) {
+     const provider = modelId;
+     if (provider in FRONTIER_MODELS) {
+       const defaultModel = FRONTIER_MODELS[provider][0];
+       return gateway(`${provider}/${defaultModel}`);
+     }
+     throw new Error(`Unknown provider: ${modelId}. Supported: ${Object.keys(FRONTIER_MODELS).join(", ")}`);
+   }
+   const colonIndex = modelId.indexOf(":");
+   const provider = modelId.slice(0, colonIndex);
+   const modelName = modelId.slice(colonIndex + 1);
+   if (!modelName) throw new Error(`Invalid model identifier: ${modelId}. Model name is required.`);
+   const providerConfigs = {
+     anthropic: {
+       package: "@ai-sdk/anthropic",
+       export: "anthropic"
+     },
+     openai: {
+       package: "@ai-sdk/openai",
+       export: "openai"
+     },
+     deepseek: {
+       package: "@ai-sdk/deepseek",
+       export: "deepseek"
+     },
+     google: {
+       package: "@ai-sdk/google",
+       export: "google"
+     },
+     groq: {
+       package: "@ai-sdk/groq",
+       export: "groq"
+     },
+     mistral: {
+       package: "@ai-sdk/mistral",
+       export: "mistral"
+     },
+     xai: {
+       package: "@ai-sdk/xai",
+       export: "xai"
+     },
+     minimax: {
+       package: "@ai-sdk/anthropic",
+       export: "anthropic",
+       options: {
+         baseURL: "https://api.minimax.chat/v1",
+         apiKeyEnvVar: "MINIMAX_API_KEY"
+       }
+     }
+   };
+   const config = providerConfigs[provider];
+   if (!config) throw new Error(`Unknown provider: ${provider}. Supported: ${Object.keys(providerConfigs).join(", ")}. Or use gateway format: provider/model (e.g., openai/gpt-5.2)`);
+   const providerFn = await loadProvider(provider, config.package, config.export, config.options);
+   if (!providerFn) throw new Error(`Install ${config.package} to use ${provider} models directly`);
+   return providerFn(modelName);
+ }
+ /**
+  * List of supported providers for direct access
+  * Note: minimax uses Claude-compatible API via @ai-sdk/anthropic with custom baseURL
+  */
+ const SUPPORTED_PROVIDERS = [
+   "anthropic",
+   "openai",
+   "deepseek",
+   "google",
+   "groq",
+   "mistral",
+   "xai",
+   "minimax"
+ ];
+ /**
+  * Default provider when none specified
+  */
+ const DEFAULT_PROVIDER = "anthropic";
+ /**
+  * Get the default model identifier (provider/model format)
+  * Uses the first model from the default provider
+  */
+ function getDefaultModel() {
+   return `${DEFAULT_PROVIDER}/${FRONTIER_MODELS[DEFAULT_PROVIDER][0]}`;
+ }
+ /**
+  * Frontier models for each provider (as of 2026-02)
+  * Only includes the latest/best models, no legacy versions
+  *
+  * Note: Some models may be placeholders for testing or future releases.
+  * Always verify model availability with the provider before production use.
+  */
+ const FRONTIER_MODELS = {
+   anthropic: [
+     "claude-sonnet-4-5",
+     "claude-haiku-4-5",
+     "claude-opus-4-5"
+   ],
+   openai: ["gpt-5.2", "gpt-5.2-codex"],
+   google: [
+     "gemini-3-pro-preview",
+     "gemini-2.5-flash",
+     "gemini-2.5-pro"
+   ],
+   deepseek: ["deepseek-chat", "deepseek-reasoner"],
+   groq: ["meta-llama/llama-4-scout-17b-16e-instruct", "deepseek-r1-distill-llama-70b"],
+   mistral: [
+     "mistral-large-latest",
+     "pixtral-large-latest",
+     "magistral-medium-2506"
+   ],
+   xai: ["grok-4", "grok-4-fast-reasoning"],
+   minimax: ["MiniMax-M2"]
+ };
+
+ //#endregion
+ export { getDefaultModel as a, createModelAsync as i, SUPPORTED_PROVIDERS as n, createModel as r, FRONTIER_MODELS as t };
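
The three identifier formats documented above can be exercised directly. A minimal sketch, assuming the package entry point re-exports createModel, createModelAsync, and getDefaultModel under their original names (this chunk exports them under mangled aliases, and the entry point itself is not shown in this diff):

import { createModel, createModelAsync, getDefaultModel } from "agent-worker";

// 1. Provider-only: resolves to the first FRONTIER_MODELS entry via the gateway
const defaulted = createModel("anthropic"); // same as gateway("anthropic/claude-sonnet-4-5")

// 2. Gateway format: routed through Vercel AI Gateway, needs AI_GATEWAY_API_KEY
const viaGateway = createModel(getDefaultModel()); // "anthropic/claude-sonnet-4-5"

// 3. Direct format: lazily imports @ai-sdk/deepseek, which must be installed
const direct = await createModelAsync("deepseek:deepseek-chat");
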
@@ -0,0 +1,419 @@
+ import { i as createModelAsync } from "./models-FOOpWB91.mjs";
+ import { ToolLoopAgent, jsonSchema, stepCountIs, tool } from "ai";
+
+ //#region src/tools.ts
+ /**
+  * Convert ToolDefinition array to AI SDK tools object
+  * Uses tool() with jsonSchema() for runtime-defined mock tools
+  */
+ function createTools(definitions) {
+   const tools = {};
+   for (const def of definitions) {
+     const schema = jsonSchema(def.parameters);
+     tools[def.name] = tool({
+       description: def.description,
+       inputSchema: schema,
+       execute: async (input) => {
+         if (def.execute) return def.execute(input);
+         return { error: "No mock implementation provided" };
+       }
+     });
+   }
+   return tools;
+ }
+
+ //#endregion
+ //#region src/session.ts
+ /**
+  * AgentSession - Stateful session for controlled agent testing
+  *
+  * Uses ToolLoopAgent internally for multi-step reasoning loops.
+  * Maintains conversation state across multiple send() calls,
+  * enabling improvisational testing where you observe responses
+  * and decide next actions.
+  */
+ var AgentSession = class {
+   id;
+   model;
+   system;
+   createdAt;
+   tools;
+   maxTokens;
+   maxSteps;
+   messages = [];
+   totalUsage = {
+     input: 0,
+     output: 0,
+     total: 0
+   };
+   pendingApprovals = [];
+   cachedAgent = null;
+   toolsChanged = false;
+   /**
+    * Convert AgentMessage[] to ModelMessage[] for AI SDK
+    */
+   toModelMessages() {
+     return this.messages.filter((m) => m.status !== "responding").map((m) => ({
+       role: m.role,
+       content: m.content
+     }));
+   }
+   constructor(config, restore) {
+     if (restore) {
+       this.id = restore.id;
+       this.createdAt = restore.createdAt;
+       this.messages = [...restore.messages];
+       this.totalUsage = { ...restore.totalUsage };
+       this.pendingApprovals = [...restore.pendingApprovals ?? []];
+     } else {
+       this.id = crypto.randomUUID();
+       this.createdAt = (/* @__PURE__ */ new Date()).toISOString();
+     }
+     this.model = config.model;
+     this.system = config.system;
+     this.tools = config.tools ?? [];
+     this.maxTokens = config.maxTokens ?? 4096;
+     this.maxSteps = config.maxSteps ?? 10;
+   }
+   /**
+    * Check if a tool needs approval for given arguments
+    */
+   toolNeedsApproval(tool, args) {
+     if (!tool.needsApproval) return false;
+     if (typeof tool.needsApproval === "function") return tool.needsApproval(args);
+     return tool.needsApproval;
+   }
+   /**
+    * Build tools with approval handling
+    */
+   buildTools(autoApprove) {
+     if (this.tools.length === 0) return void 0;
+     return createTools(this.tools.map((tool) => ({
+       ...tool,
+       execute: async (args) => {
+         if (!autoApprove && this.toolNeedsApproval(tool, args)) {
+           const approval = {
+             id: crypto.randomUUID(),
+             toolName: tool.name,
+             toolCallId: crypto.randomUUID(),
+             arguments: args,
+             requestedAt: (/* @__PURE__ */ new Date()).toISOString(),
+             status: "pending"
+           };
+           this.pendingApprovals.push(approval);
+           return {
+             __approvalRequired: true,
+             approvalId: approval.id
+           };
+         }
+         if (tool.execute) return tool.execute(args);
+         if (tool.mockResponse !== void 0) return tool.mockResponse;
+         return { error: "No mock implementation provided" };
+       }
+     })));
+   }
+   /**
+    * Get or create cached agent, rebuild if tools changed
+    */
+   async getAgent(autoApprove) {
+     if (!this.cachedAgent || this.toolsChanged || !autoApprove) {
+       this.cachedAgent = new ToolLoopAgent({
+         model: await createModelAsync(this.model),
+         instructions: this.system,
+         tools: this.buildTools(autoApprove),
+         maxOutputTokens: this.maxTokens,
+         stopWhen: stepCountIs(this.maxSteps)
+       });
+       if (autoApprove) this.toolsChanged = false;
+     }
+     return this.cachedAgent;
+   }
+   /**
+    * Send a message and get the agent's response
+    * Conversation state is maintained across calls
+    *
+    * @param content - The message to send
+    * @param options - Send options (autoApprove, onStepFinish, etc.)
+    */
+   async send(content, options = {}) {
+     const { autoApprove = true, onStepFinish } = options;
+     const startTime = performance.now();
+     const timestamp = (/* @__PURE__ */ new Date()).toISOString();
+     this.messages.push({
+       role: "user",
+       content,
+       status: "complete",
+       timestamp
+     });
+     const agent = await this.getAgent(autoApprove);
+     const allToolCalls = [];
+     let stepNumber = 0;
+     const result = await agent.generate({
+       messages: this.toModelMessages(),
+       onStepFinish: async ({ usage, toolCalls, toolResults }) => {
+         stepNumber++;
+         const stepToolCalls = [];
+         if (toolCalls) for (const tc of toolCalls) {
+           const toolResult = toolResults?.find((tr) => tr.toolCallId === tc.toolCallId);
+           const toolCall = {
+             name: tc.toolName,
+             arguments: tc.input,
+             result: toolResult?.output ?? null,
+             timing: 0
+           };
+           stepToolCalls.push(toolCall);
+           allToolCalls.push(toolCall);
+         }
+         if (onStepFinish) {
+           const stepUsage = {
+             input: usage?.inputTokens ?? 0,
+             output: usage?.outputTokens ?? 0,
+             total: (usage?.inputTokens ?? 0) + (usage?.outputTokens ?? 0)
+           };
+           await onStepFinish({
+             stepNumber,
+             toolCalls: stepToolCalls,
+             usage: stepUsage
+           });
+         }
+       }
+     });
+     const latency = Math.round(performance.now() - startTime);
+     this.messages.push({
+       role: "assistant",
+       content: result.text,
+       status: "complete",
+       timestamp: (/* @__PURE__ */ new Date()).toISOString()
+     });
+     const usage = {
+       input: result.usage?.inputTokens ?? 0,
+       output: result.usage?.outputTokens ?? 0,
+       total: (result.usage?.inputTokens ?? 0) + (result.usage?.outputTokens ?? 0)
+     };
+     this.totalUsage.input += usage.input;
+     this.totalUsage.output += usage.output;
+     this.totalUsage.total += usage.total;
+     const currentPending = this.pendingApprovals.filter((p) => p.status === "pending");
+     return {
+       content: result.text,
+       toolCalls: allToolCalls,
+       pendingApprovals: currentPending,
+       usage,
+       latency
+     };
+   }
+   /**
+    * Send a message and stream the response
+    * Returns an async iterable of text chunks
+    *
+    * @param content - The message to send
+    * @param options - Send options (autoApprove, onStepFinish, etc.)
+    */
+   async *sendStream(content, options = {}) {
+     const { autoApprove = true, onStepFinish } = options;
+     const startTime = performance.now();
+     const timestamp = (/* @__PURE__ */ new Date()).toISOString();
+     this.messages.push({
+       role: "user",
+       content,
+       status: "complete",
+       timestamp
+     });
+     const assistantMsg = {
+       role: "assistant",
+       content: "",
+       status: "responding",
+       timestamp: (/* @__PURE__ */ new Date()).toISOString()
+     };
+     this.messages.push(assistantMsg);
+     const agent = await this.getAgent(autoApprove);
+     const allToolCalls = [];
+     let stepNumber = 0;
+     const result = await agent.stream({
+       messages: this.toModelMessages(),
+       onStepFinish: async ({ usage, toolCalls, toolResults }) => {
+         stepNumber++;
+         const stepToolCalls = [];
+         if (toolCalls) for (const tc of toolCalls) {
+           const toolResult = toolResults?.find((tr) => tr.toolCallId === tc.toolCallId);
+           const toolCall = {
+             name: tc.toolName,
+             arguments: tc.input,
+             result: toolResult?.output ?? null,
+             timing: 0
+           };
+           stepToolCalls.push(toolCall);
+           allToolCalls.push(toolCall);
+         }
+         if (onStepFinish) {
+           const stepUsage = {
+             input: usage?.inputTokens ?? 0,
+             output: usage?.outputTokens ?? 0,
+             total: (usage?.inputTokens ?? 0) + (usage?.outputTokens ?? 0)
+           };
+           await onStepFinish({
+             stepNumber,
+             toolCalls: stepToolCalls,
+             usage: stepUsage
+           });
+         }
+       }
+     });
+     for await (const chunk of result.textStream) {
+       assistantMsg.content += chunk;
+       yield chunk;
+     }
+     const latency = Math.round(performance.now() - startTime);
+     const text = await result.text;
+     assistantMsg.content = text;
+     assistantMsg.status = "complete";
+     const finalUsage = await result.usage;
+     const usage = {
+       input: finalUsage?.inputTokens ?? 0,
+       output: finalUsage?.outputTokens ?? 0,
+       total: (finalUsage?.inputTokens ?? 0) + (finalUsage?.outputTokens ?? 0)
+     };
+     this.totalUsage.input += usage.input;
+     this.totalUsage.output += usage.output;
+     this.totalUsage.total += usage.total;
+     return {
+       content: text,
+       toolCalls: allToolCalls,
+       pendingApprovals: this.pendingApprovals.filter((p) => p.status === "pending"),
+       usage,
+       latency
+     };
+   }
+   /**
+    * Add a tool definition with mock implementation
+    */
+   addTool(tool) {
+     this.tools.push(tool);
+     this.toolsChanged = true;
+     this.cachedAgent = null;
+   }
+   /**
+    * Set mock response for an existing tool
+    */
+   mockTool(name, mockFn) {
+     const tool = this.tools.find((t) => t.name === name);
+     if (tool) {
+       tool.execute = mockFn;
+       this.toolsChanged = true;
+       this.cachedAgent = null;
+     } else throw new Error(`Tool not found: ${name}`);
+   }
+   /**
+    * Get current tool definitions (without execute functions)
+    */
+   getTools() {
+     return this.tools.map((t) => ({
+       name: t.name,
+       description: t.description,
+       parameters: t.parameters,
+       needsApproval: t.needsApproval,
+       mockResponse: t.mockResponse
+     }));
+   }
+   /**
+    * Set a static mock response for an existing tool (JSON-serializable)
+    */
+   setMockResponse(name, response) {
+     const tool = this.tools.find((t) => t.name === name);
+     if (tool) {
+       tool.mockResponse = response;
+       this.toolsChanged = true;
+       this.cachedAgent = null;
+     } else throw new Error(`Tool not found: ${name}`);
+   }
+   /**
+    * Get conversation history with status information
+    * Messages with status 'responding' are still being generated
+    */
+   history() {
+     return [...this.messages];
+   }
+   /**
+    * Get session statistics
+    */
+   stats() {
+     return {
+       messageCount: this.messages.length,
+       usage: { ...this.totalUsage }
+     };
+   }
+   /**
+    * Export full transcript for analysis
+    */
+   export() {
+     return {
+       sessionId: this.id,
+       model: this.model,
+       system: this.system,
+       messages: [...this.messages],
+       totalUsage: { ...this.totalUsage },
+       createdAt: this.createdAt
+     };
+   }
+   /**
+    * Get session state for persistence
+    */
+   getState() {
+     return {
+       id: this.id,
+       createdAt: this.createdAt,
+       messages: [...this.messages],
+       totalUsage: { ...this.totalUsage },
+       pendingApprovals: [...this.pendingApprovals]
+     };
+   }
+   /**
+    * Get all pending approvals
+    */
+   getPendingApprovals() {
+     return this.pendingApprovals.filter((p) => p.status === "pending");
+   }
+   /**
+    * Approve a pending tool call and execute it
+    * @returns The tool execution result
+    */
+   async approve(approvalId) {
+     const approval = this.pendingApprovals.find((p) => p.id === approvalId);
+     if (!approval) throw new Error(`Approval not found: ${approvalId}`);
+     if (approval.status !== "pending") throw new Error(`Approval already ${approval.status}: ${approvalId}`);
+     const tool = this.tools.find((t) => t.name === approval.toolName);
+     if (!tool) throw new Error(`Tool not found: ${approval.toolName}`);
+     let result;
+     if (tool.execute) result = await tool.execute(approval.arguments);
+     else result = { error: "No mock implementation provided" };
+     approval.status = "approved";
+     return result;
+   }
+   /**
+    * Deny a pending tool call
+    * @param approvalId - The approval ID to deny
+    * @param reason - Optional reason for denial
+    */
+   deny(approvalId, reason) {
+     const approval = this.pendingApprovals.find((p) => p.id === approvalId);
+     if (!approval) throw new Error(`Approval not found: ${approvalId}`);
+     if (approval.status !== "pending") throw new Error(`Approval already ${approval.status}: ${approvalId}`);
+     approval.status = "denied";
+     approval.denyReason = reason;
+   }
+   /**
+    * Clear conversation history (keep system prompt and tools)
+    */
+   clear() {
+     this.messages = [];
+     this.totalUsage = {
+       input: 0,
+       output: 0,
+       total: 0
+     };
+     this.pendingApprovals = [];
+   }
+ };
+
+ //#endregion
+ export { createTools as n, AgentSession as t };
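
To show how the pieces above fit together, a hedged usage sketch of AgentSession, again assuming the entry point re-exports it under this name; the tool-definition fields (name, description, parameters, execute, needsApproval) mirror those read by buildTools and createTools above:

import { AgentSession } from "agent-worker";

const session = new AgentSession({
  model: "anthropic/claude-sonnet-4-5", // gateway format, resolved by the models chunk
  system: "You are a billing assistant.",
  tools: [{
    name: "refund",
    description: "Issue a refund to a customer",
    parameters: { type: "object", properties: { amount: { type: "number" } }, required: ["amount"] },
    needsApproval: (args) => args.amount > 100, // boolean or predicate, per toolNeedsApproval
    execute: async (args) => ({ refunded: args.amount }) // mock implementation
  }]
});

// With autoApprove: false, gated calls are recorded as pending approvals
// instead of executing, and the model sees an __approvalRequired marker.
const reply = await session.send("Refund $250 for order 1234", { autoApprove: false });
for (const approval of reply.pendingApprovals) {
  await session.approve(approval.id); // or session.deny(approval.id, "too large")
}
console.log(reply.content, session.stats());
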
package/package.json ADDED
@@ -0,0 +1,85 @@
+ {
+   "name": "agent-worker",
+   "version": "0.1.0",
+   "description": "SDK and CLI for creating and testing agent workers with Vercel AI SDK",
+   "type": "module",
+   "main": "./dist/index.mjs",
+   "module": "./dist/index.mjs",
+   "types": "./dist/index.d.mts",
+   "bin": {
+     "agent-worker": "./dist/cli/index.mjs"
+   },
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.mts",
+       "import": "./dist/index.mjs"
+     }
+   },
+   "files": [
+     "dist"
+   ],
+   "scripts": {
+     "dev": "tsdown --watch",
+     "build": "tsdown",
+     "test": "bun test",
+     "lint": "oxlint src",
+     "lint:fix": "oxlint src --fix",
+     "format": "oxfmt src",
+     "format:check": "oxfmt src --check",
+     "typecheck": "tsgo",
+     "prepublishOnly": "bun run build"
+   },
+   "dependencies": {
+     "ai": "^6.0.69",
+     "bash-tool": "^1.3.12",
+     "commander": "^14.0.3",
+     "just-bash": "^2.8.0"
+   },
+   "devDependencies": {
+     "@ai-sdk/anthropic": "^3.0.0",
+     "@ai-sdk/deepseek": "^1.0.0",
+     "@ai-sdk/google": "^1.0.0",
+     "@ai-sdk/groq": "^1.0.0",
+     "@ai-sdk/mistral": "^1.0.0",
+     "@ai-sdk/openai": "^3.0.0",
+     "@ai-sdk/xai": "^1.0.0",
+     "@types/bun": "latest",
+     "@typescript/native-preview": "^7.0.0-dev.20260203.1",
+     "oxfmt": "^0.28.0",
+     "oxlint": "^1.43.0",
+     "tsdown": "^0.20.1"
+   },
+   "peerDependencies": {
+     "typescript": "^5",
+     "@ai-sdk/anthropic": "^3.0.0",
+     "@ai-sdk/openai": "^3.0.0",
+     "@ai-sdk/deepseek": "^1.0.0",
+     "@ai-sdk/google": "^1.0.0",
+     "@ai-sdk/groq": "^1.0.0",
+     "@ai-sdk/mistral": "^1.0.0",
+     "@ai-sdk/xai": "^1.0.0"
+   },
+   "peerDependenciesMeta": {
+     "@ai-sdk/anthropic": {
+       "optional": true
+     },
+     "@ai-sdk/openai": {
+       "optional": true
+     },
+     "@ai-sdk/deepseek": {
+       "optional": true
+     },
+     "@ai-sdk/google": {
+       "optional": true
+     },
+     "@ai-sdk/groq": {
+       "optional": true
+     },
+     "@ai-sdk/mistral": {
+       "optional": true
+     },
+     "@ai-sdk/xai": {
+       "optional": true
+     }
+   }
+ }
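
Note on the manifest: every @ai-sdk/* provider is an optional peer dependency, so the gateway path works with a bare install of agent-worker, while the direct provider:model path dynamically imports the matching package and throws "Install @ai-sdk/... to use ... models directly" when it is absent. A sketch of the two setups (the import again assumes the entry-point re-export; GROQ_API_KEY is the @ai-sdk/groq default env var, an assumption not shown in this diff):

import { createModelAsync } from "agent-worker";

// After: npm install agent-worker            → gateway only, needs AI_GATEWAY_API_KEY
await createModelAsync("groq/deepseek-r1-distill-llama-70b");

// After: npm install agent-worker @ai-sdk/groq → direct access, needs GROQ_API_KEY
await createModelAsync("groq:deepseek-r1-distill-llama-70b");
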