@vibe-agent-toolkit/runtime-vercel-ai-sdk 0.1.2-rc.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,468 @@
1
+ # @vibe-agent-toolkit/runtime-vercel-ai-sdk
2
+
3
+ Vercel AI SDK runtime adapter for VAT (Vibe Agent Toolkit) agents.
4
+
5
+ Converts VAT archetype agents to Vercel AI SDK primitives, enabling portability across LLM providers (OpenAI, Anthropic, etc.) while maintaining type safety and agent semantics.
6
+
7
+ ## Installation
8
+
9
+ ```bash
10
+ npm install @vibe-agent-toolkit/runtime-vercel-ai-sdk ai
11
+ # or
12
+ bun add @vibe-agent-toolkit/runtime-vercel-ai-sdk ai
13
+ ```
14
+
15
+ You'll also need an LLM provider package:
16
+ ```bash
17
+ npm install @ai-sdk/openai # For OpenAI
18
+ npm install @ai-sdk/anthropic # For Anthropic Claude
19
+ ```
20
+
21
+ ## Supported Archetypes
22
+
23
+ ### Pure Function Tools → `tool()`
24
+
25
+ Converts synchronous, deterministic VAT agents to Vercel AI SDK tools that can be called by LLMs.
26
+
27
+ **Use cases:** Validation, transformation, computation, structured data operations.
28
+
29
+ **Archetypes:** Pure Function Tool (Archetype 1)
30
+
31
+ #### Example: Haiku Validator
32
+
33
+ ```typescript
34
+ import { openai } from '@ai-sdk/openai';
35
+ import { generateText } from 'ai';
36
+ import { haikuValidatorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
37
+ import { HaikuSchema, HaikuValidationResultSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
38
+ import { convertPureFunctionToTool } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
39
+
40
+ // Convert VAT agent to Vercel AI tool
41
+ const haikuTool = convertPureFunctionToTool(
42
+ haikuValidatorAgent,
43
+ HaikuSchema,
44
+ HaikuValidationResultSchema
45
+ );
46
+
47
+ // Use with generateText()
48
+ const result = await generateText({
49
+ model: openai('gpt-4'),
50
+ tools: {
51
+ validateHaiku: haikuTool.tool
52
+ },
53
+ prompt: `
54
+ Write a haiku about an orange cat and validate it using the validateHaiku tool.
55
+ `
56
+ });
57
+
58
+ console.log(result.text);
59
+ console.log(result.toolCalls); // Shows validation results
60
+ ```
61
+
62
+ #### Batch Conversion
63
+
64
+ ```typescript
65
+ import { openai } from '@ai-sdk/openai';
+ import { generateText } from 'ai';
+ import { convertPureFunctionsToTools } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
66
+ import { haikuValidatorAgent, nameValidatorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
67
+ import { HaikuSchema, HaikuValidationResultSchema, NameValidationInputSchema, NameValidationResultSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
68
+
69
+ const tools = convertPureFunctionsToTools({
70
+ validateHaiku: {
71
+ agent: haikuValidatorAgent,
72
+ inputSchema: HaikuSchema,
73
+ outputSchema: HaikuValidationResultSchema,
74
+ },
75
+ validateName: {
76
+ agent: nameValidatorAgent,
77
+ inputSchema: NameValidationInputSchema,
78
+ outputSchema: NameValidationResultSchema,
79
+ },
80
+ });
81
+
82
+ const result = await generateText({
83
+ model: openai('gpt-4'),
84
+ tools,
85
+ prompt: 'Generate and validate cat names and haikus...'
86
+ });
87
+ ```
88
+
89
+ ### Conversational Assistants → `streamText()` with History
90
+
91
+ Converts multi-turn conversational agents to executable functions that maintain conversation history.
92
+
93
+ **Use cases:** Interactive dialogs, multi-turn decision-making, stateful conversations, progressive information gathering.
94
+
95
+ **Archetypes:** Conversational Assistant (Archetype 3)
96
+
97
+ #### Example: Breed Advisor
98
+
99
+ ```typescript
100
+ import { openai } from '@ai-sdk/openai';
101
+ import { breedAdvisorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
102
+ import { BreedAdvisorInputSchema, BreedAdvisorOutputSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
103
+ import { convertConversationalAssistantToFunction, type ConversationSession } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
104
+
105
+ // Convert VAT agent to executable function
106
+ const breedAdvisor = convertConversationalAssistantToFunction(
107
+ breedAdvisorAgent,
108
+ BreedAdvisorInputSchema,
109
+ BreedAdvisorOutputSchema,
110
+ {
111
+ model: openai('gpt-4'),
112
+ temperature: 0.8,
113
+ }
114
+ );
115
+
116
+ // Initialize conversation session
117
+ const session: ConversationSession = { history: [] };
118
+
119
+ // Turn 1: Initial inquiry
120
+ const turn1 = await breedAdvisor(
121
+ { message: "I'm looking for a cat", sessionState: {} },
122
+ session
123
+ );
124
+ console.log(turn1.reply); // "Great! What's your living situation?"
125
+
126
+ // Turn 2: Continue conversation (history is maintained)
127
+ const turn2 = await breedAdvisor(
128
+ { message: "Small apartment, love jazz music", sessionState: turn1.sessionState },
129
+ session
130
+ );
131
+ console.log(turn2.recommendations); // Breed recommendations based on profile
132
+ ```
133
+
134
+ #### Batch Conversion with Independent Sessions
135
+
136
+ ```typescript
137
+ import { openai } from '@ai-sdk/openai';
+ import { convertConversationalAssistantsToFunctions, type ConversationSession } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
138
+ import { breedAdvisorAgent, petCareAdvisorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
+ import { BreedAdvisorInputSchema, BreedAdvisorOutputSchema, PetCareInputSchema, PetCareOutputSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
139
+
140
+ const assistants = convertConversationalAssistantsToFunctions(
141
+ {
142
+ breedAdvisor: {
143
+ agent: breedAdvisorAgent,
144
+ inputSchema: BreedAdvisorInputSchema,
145
+ outputSchema: BreedAdvisorOutputSchema,
146
+ },
147
+ petCareAdvisor: {
148
+ agent: petCareAdvisorAgent,
149
+ inputSchema: PetCareInputSchema,
150
+ outputSchema: PetCareOutputSchema,
151
+ },
152
+ },
153
+ {
154
+ model: openai('gpt-4'),
155
+ temperature: 0.8,
156
+ }
157
+ );
158
+
159
+ // Each assistant maintains its own independent session
160
+ const breedSession: ConversationSession = { history: [] };
161
+ const careSession: ConversationSession = { history: [] };
162
+
163
+ const breedResponse = await assistants.breedAdvisor({ message: "I want a cat" }, breedSession);
164
+ const careResponse = await assistants.petCareAdvisor({ message: "Feeding schedule?" }, careSession);
165
+ ```
166
+
167
+ ### LLM Analyzers → `generateText()`
168
+
169
+ Converts single-shot LLM analysis agents to executable functions powered by Vercel AI SDK.
170
+
171
+ **Use cases:** Classification, extraction, generation, summarization, sentiment analysis.
172
+
173
+ **Archetypes:** One-Shot LLM Analyzer (Archetype 2)
174
+
175
+ #### Example: Cat Name Generator
176
+
177
+ ```typescript
178
+ import { openai } from '@ai-sdk/openai';
179
+ import { nameGeneratorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
180
+ import { NameGeneratorInputSchema, NameSuggestionSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
181
+ import { convertLLMAnalyzerToFunction } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
182
+
183
+ // Convert VAT agent to executable function
184
+ const generateName = convertLLMAnalyzerToFunction(
185
+ nameGeneratorAgent,
186
+ NameGeneratorInputSchema,
187
+ NameSuggestionSchema,
188
+ {
189
+ model: openai('gpt-4'),
190
+ temperature: 0.9, // High creativity for name generation
191
+ }
192
+ );
193
+
194
+ // Use the function directly
195
+ const result = await generateName({
196
+ characteristics: {
197
+ physical: {
198
+ furColor: 'Orange',
199
+ size: 'medium',
200
+ },
201
+ behavioral: {
202
+ personality: ['Mischievous', 'Energetic'],
203
+ quirks: ['Knocks things off tables'],
204
+ },
205
+ description: 'A mischievous orange cat who loves causing trouble',
206
+ },
207
+ });
208
+
209
+ console.log(result.name); // "Sir Knocksalot"
210
+ console.log(result.reasoning); // "Given the cat's tendency to knock..."
211
+ console.log(result.alternatives); // ["Lord Tumbleton", "Duke Paws"]
212
+ ```
213
+
214
+ #### Batch Conversion with Shared Config
215
+
216
+ ```typescript
217
+ import { openai } from '@ai-sdk/openai';
+ import { convertLLMAnalyzersToFunctions } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
218
+ import { nameGeneratorAgent, haikuGeneratorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
+ import { NameGeneratorInputSchema, NameSuggestionSchema, HaikuGeneratorInputSchema, HaikuSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
219
+
220
+ const analyzers = convertLLMAnalyzersToFunctions(
221
+ {
222
+ generateName: {
223
+ agent: nameGeneratorAgent,
224
+ inputSchema: NameGeneratorInputSchema,
225
+ outputSchema: NameSuggestionSchema,
226
+ },
227
+ generateHaiku: {
228
+ agent: haikuGeneratorAgent,
229
+ inputSchema: HaikuGeneratorInputSchema,
230
+ outputSchema: HaikuSchema,
231
+ },
232
+ },
233
+ {
234
+ model: openai('gpt-4'),
235
+ temperature: 0.8, // Shared config for all analyzers
236
+ }
237
+ );
238
+
239
+ // Use the functions
240
+ const name = await analyzers.generateName({ characteristics });
241
+ const haiku = await analyzers.generateHaiku({ characteristics });
242
+ ```
243
+
244
+ ## Provider Support
245
+
246
+ Works with any Vercel AI SDK provider:
247
+
248
+ ```typescript
249
+ import { openai } from '@ai-sdk/openai';
250
+ import { anthropic } from '@ai-sdk/anthropic';
251
+ import { google } from '@ai-sdk/google';
252
+
253
+ // OpenAI
254
+ const openaiConfig = { model: openai('gpt-4'), temperature: 0.7 };
255
+
256
+ // Anthropic Claude
257
+ const anthropicConfig = { model: anthropic('claude-3-5-sonnet-20241022'), temperature: 0.8 };
258
+
259
+ // Google Gemini
260
+ const googleConfig = { model: google('gemini-2.0-flash-001'), temperature: 0.9 };
261
+ ```
262
+
263
+ ## Mock Mode vs. Real LLM
264
+
265
+ VAT agents support mock mode for testing. When using this adapter, agents always run in real LLM mode:
266
+
267
+ ```typescript
268
+ // In VAT agent definition (supports both modes)
269
+ export const nameGeneratorAgent = defineLLMAnalyzer(
270
+ { name: 'name-generator', ... },
271
+ async (input, ctx) => {
272
+ if (ctx.mockable) {
273
+ // Fast mock for testing
274
+ return mockGenerateName(input);
275
+ }
276
+ // Real LLM call
277
+ const response = await ctx.callLLM(prompt);
278
+ return JSON.parse(response);
279
+ }
280
+ );
281
+
282
+ // With Vercel AI SDK adapter (always real LLM)
283
+ const generateName = convertLLMAnalyzerToFunction(
284
+ nameGeneratorAgent,
285
+ NameGeneratorInputSchema,
286
+ NameSuggestionSchema,
287
+ { model: openai('gpt-4') }
288
+ );
289
+ // ctx.mockable = false, uses ctx.callLLM() powered by Vercel AI SDK
290
+ ```
291
+
292
+ ## API Reference
293
+
294
+ ### `convertConversationalAssistantToFunction<TInput, TOutput>`
295
+
296
+ Converts a Conversational Assistant agent to an executable async function with conversation history.
297
+
298
+ **Parameters:**
299
+ - `agent: Agent<TInput, TOutput>` - The VAT conversational assistant agent
300
+ - `inputSchema: z.ZodType<TInput>` - Input Zod schema
301
+ - `outputSchema: z.ZodType<TOutput>` - Output Zod schema
302
+ - `llmConfig: VercelAILLMConfig` - LLM configuration (model, temperature, etc.)
303
+
304
+ **Returns:** `(input: TInput, session: ConversationSession) => Promise<TOutput>` - Executable async function that requires a session parameter
305
+
306
+ **Session Management:**
307
+ ```typescript
308
+ interface ConversationSession {
309
+ history: Message[]; // Maintained across turns
310
+ state?: Record<string, unknown>; // Agent-specific state
311
+ }
312
+ ```
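+
+ Because `ConversationSession` is plain data, it can be saved between requests and passed back in on the next turn. A minimal sketch, assuming `Message` entries are plain JSON-serializable `{ role, content }` records and reusing `session` and `turn1` from the Breed Advisor example above (the exact `sessionState` shape is defined by the agent's own schemas):
+
+ ```typescript
+ import type { ConversationSession } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
+
+ // Persist both the conversation session and the agent's returned sessionState
+ // (e.g., in a KV store keyed by user ID)
+ const saved = JSON.stringify({ session, sessionState: turn1.sessionState });
+
+ // Later: restore and continue the conversation with history intact
+ const { session: restoredSession, sessionState } = JSON.parse(saved) as {
+   session: ConversationSession;
+   sessionState: Record<string, unknown>;
+ };
+ const nextTurn = await breedAdvisor(
+   { message: 'Tell me more about low-energy breeds', sessionState },
+   restoredSession
+ );
+ ```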
313
+
314
+ ### `convertConversationalAssistantsToFunctions`
315
+
316
+ Batch converts multiple Conversational Assistant agents with shared LLM config.
317
+
318
+ **Parameters:**
319
+ - `configs: Record<string, ConversationalAssistantConversionConfig>` - Map of assistant names to conversion configs
320
+ - `llmConfig: VercelAILLMConfig` - Shared LLM configuration
321
+
322
+ **Returns:** `Record<string, (input: unknown, session: ConversationSession) => Promise<unknown>>` - Map of assistant names to executable functions
323
+
324
+ ### `convertPureFunctionToTool<TInput, TOutput>`
325
+
326
+ Converts a PureFunctionAgent to a Vercel AI SDK tool.
327
+
328
+ **Parameters:**
329
+ - `agent: PureFunctionAgent<TInput, TOutput>` - The VAT agent
330
+ - `inputSchema: z.ZodType<TInput>` - Input Zod schema
331
+ - `outputSchema: z.ZodType<TOutput>` - Output Zod schema
332
+
333
+ **Returns:** `ConversionResult<TInput, TOutput>`
334
+ - `tool: VercelAITool` - The tool ready for use with generateText()
335
+ - `inputSchema: z.ZodType<TInput>` - Original input schema
336
+ - `outputSchema: z.ZodType<TOutput>` - Original output schema
337
+ - `metadata` - Agent name, description, version, archetype
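+
+ For example, with the `haikuTool` result from the earlier example (a sketch; the actual values come from the agent's manifest):
+
+ ```typescript
+ const { tool, metadata } = haikuTool;
+
+ console.log(metadata.name);      // Agent name from its manifest
+ console.log(metadata.version);   // Agent version
+ console.log(metadata.archetype); // Archetype identifier (pure function tool)
+
+ // `tool` is what you register with the Vercel AI SDK:
+ // generateText({ model, tools: { validateHaiku: tool }, prompt })
+ ```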
338
+
339
+ ### `convertPureFunctionsToTools`
340
+
341
+ Batch converts multiple PureFunctionAgents to tools.
342
+
343
+ **Parameters:**
344
+ - `configs: Record<string, ToolConversionConfig>` - Map of tool names to conversion configs
345
+
346
+ **Returns:** `Record<string, VercelAITool>` - Map of tool names to Vercel AI tools
347
+
348
+ ### `convertLLMAnalyzerToFunction<TInput, TOutput>`
349
+
350
+ Converts an LLM Analyzer agent to an executable async function.
351
+
352
+ **Parameters:**
353
+ - `agent: Agent<TInput, TOutput>` - The VAT LLM analyzer agent
354
+ - `inputSchema: z.ZodType<TInput>` - Input Zod schema
355
+ - `outputSchema: z.ZodType<TOutput>` - Output Zod schema
356
+ - `llmConfig: VercelAILLMConfig` - LLM configuration (model, temperature, etc.)
357
+
358
+ **Returns:** `(input: TInput) => Promise<TOutput>` - Executable async function
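+
+ Input and output are validated with the provided Zod schemas, so data that does not match surfaces as a thrown `ZodError`. A minimal handling sketch, using the `generateName` function and `characteristics` input from the earlier example:
+
+ ```typescript
+ import { ZodError } from 'zod';
+
+ try {
+   const suggestion = await generateName({ characteristics });
+   console.log(suggestion.name);
+ } catch (error) {
+   if (error instanceof ZodError) {
+     // The LLM response did not match NameSuggestionSchema
+     console.error('Schema validation failed:', error.issues);
+   } else {
+     throw error; // Network/provider errors, etc.
+   }
+ }
+ ```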
359
+
360
+ ### `convertLLMAnalyzersToFunctions`
361
+
362
+ Batch converts multiple LLM Analyzer agents with shared LLM config.
363
+
364
+ **Parameters:**
365
+ - `configs: Record<string, LLMAnalyzerConversionConfig>` - Map of function names to conversion configs
366
+ - `llmConfig: VercelAILLMConfig` - Shared LLM configuration
367
+
368
+ **Returns:** `Record<string, (input: unknown) => Promise<unknown>>` - Map of function names to executable functions
369
+
370
+ ## Type Definitions
371
+
372
+ ### `VercelAILLMConfig`
373
+
374
+ ```typescript
375
+ interface VercelAILLMConfig {
376
+ model: LanguageModel; // From Vercel AI SDK
377
+ temperature?: number; // 0-1, default 0.7
378
+ maxTokens?: number; // Maximum tokens to generate
379
+ additionalSettings?: Record<string, unknown>; // Provider-specific settings
380
+ }
381
+ ```
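+
+ Example configuration (a sketch — `additionalSettings` is spread into the underlying `generateText()`/`streamText()` call, so only keys the Vercel AI SDK accepts for your provider have any effect):
+
+ ```typescript
+ import { openai } from '@ai-sdk/openai';
+ import type { VercelAILLMConfig } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
+
+ const llmConfig: VercelAILLMConfig = {
+   model: openai('gpt-4'),
+   temperature: 0.7,
+   maxTokens: 1024,
+   additionalSettings: { topP: 0.95 }, // passthrough setting; assumed supported by the provider
+ };
+ ```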
382
+
383
+ ### `ToolConversionConfig<TInput, TOutput>`
384
+
385
+ ```typescript
386
+ interface ToolConversionConfig<TInput, TOutput> {
387
+ agent: PureFunctionAgent<TInput, TOutput>;
388
+ inputSchema: z.ZodType<TInput>;
389
+ outputSchema: z.ZodType<TOutput>;
390
+ }
391
+ ```
392
+
393
+ ### `ConversationalAssistantConversionConfig<TInput, TOutput>`
394
+
395
+ ```typescript
396
+ interface ConversationalAssistantConversionConfig<TInput, TOutput> {
397
+ agent: Agent<TInput, TOutput>;
398
+ inputSchema: z.ZodType<TInput>;
399
+ outputSchema: z.ZodType<TOutput>;
400
+ }
401
+ ```
402
+
403
+ ### `LLMAnalyzerConversionConfig<TInput, TOutput>`
404
+
405
+ ```typescript
406
+ interface LLMAnalyzerConversionConfig<TInput, TOutput> {
407
+ agent: Agent<TInput, TOutput>;
408
+ inputSchema: z.ZodType<TInput>;
409
+ outputSchema: z.ZodType<TOutput>;
410
+ }
411
+ ```
412
+
413
+ ## Examples
414
+
415
+ See [@vibe-agent-toolkit/vat-example-cat-agents](../vat-example-cat-agents) for complete agent examples that work with this adapter.
416
+
417
+ ## Testing
418
+
419
+ ### Unit Tests
420
+
421
+ Standard unit tests verify adapter structure and type safety without making real LLM calls:
422
+
423
+ ```bash
424
+ bun run test # Run all unit tests (free, fast)
425
+ bun run test:watch # Watch mode for development
426
+ ```
427
+
428
+ ### LLM Regression Tests
429
+
430
+ LLM regression tests make **real API calls** to OpenAI and Anthropic to verify end-to-end integration. These tests are:
431
+ - **Expensive**: Cost money (API calls to GPT-4o-mini and Claude 4.5 Sonnet)
432
+ - **Slow**: Take 15-60 seconds depending on API latency
433
+ - **Skipped by default**: Only run when explicitly requested
434
+
435
+ **Run regression tests:**
436
+
437
+ ```bash
438
+ # From this package directory
439
+ bun run test:llm-regression
440
+
441
+ # Or manually with environment variable
442
+ RUN_LLM_TESTS=true bun test test/llm-regression.test.ts
443
+ ```
444
+
445
+ **What they test:**
446
+ - ✅ Pure function tools work with real LLMs
447
+ - ✅ LLM analyzer functions work with OpenAI
448
+ - ✅ LLM analyzer functions work with Anthropic Claude
449
+ - ✅ Same adapter code works across providers (provider-agnostic architecture)
450
+
451
+ **Requirements:**
452
+ - `OPENAI_API_KEY` environment variable for OpenAI tests
453
+ - `ANTHROPIC_API_KEY` environment variable for Anthropic tests
454
+ - Tests gracefully skip if API keys are not set
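+
+ One common guard pattern for opt-in tests like these (an illustrative sketch, shown with vitest-style APIs — not necessarily how this package's test file is written):
+
+ ```typescript
+ import { describe, it } from 'vitest';
+
+ // Opt in only when the flag and an API key are both present
+ const runLLMTests = process.env.RUN_LLM_TESTS === 'true' && !!process.env.OPENAI_API_KEY;
+ const describeLLM = runLLMTests ? describe : describe.skip;
+
+ describeLLM('OpenAI regression', () => {
+   it('generates a cat name end-to-end', async () => {
+     // ...real generateText() call against the live API
+   });
+ });
+ ```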
455
+
456
+ **When to run:**
457
+ - Before releases to verify provider integrations still work
458
+ - After upgrading `ai` or provider packages (e.g., `@ai-sdk/openai`)
459
+ - When adding support for new LLM providers
460
+ - Periodically (weekly/monthly) to catch API breaking changes
461
+
462
+ **Cost estimate:**
463
+ - Full test suite: ~4 LLM calls (2 OpenAI, 2 Anthropic)
464
+ - Approximate cost: $0.01-0.05 per run (varies by model pricing)
465
+
466
+ ## License
467
+
468
+ MIT
package/dist/adapters/conversational-assistant.d.ts ADDED
@@ -0,0 +1,109 @@
1
+ import type { Agent, Message } from '@vibe-agent-toolkit/agent-runtime';
2
+ import type { z } from 'zod';
3
+ import type { VercelAILLMConfig } from '../types.js';
4
+ /**
5
+ * Session state for conversational assistants
6
+ * Maintains conversation history and agent-specific state
7
+ */
8
+ export interface ConversationSession {
9
+ /** Conversation history */
10
+ history: Message[];
11
+ /** Agent-specific session state */
12
+ state?: Record<string, unknown>;
13
+ }
14
+ /**
15
+ * Converts a VAT Conversational Assistant agent to a function compatible with Vercel AI SDK.
16
+ *
17
+ * Conversational assistants maintain context across multiple turns using conversation history.
18
+ * They're perfect for interactive dialogs, multi-turn decision-making, and stateful interactions.
19
+ *
20
+ * Example:
21
+ * ```typescript
22
+ * import { openai } from '@ai-sdk/openai';
23
+ * import { breedAdvisorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
24
+ * import { BreedAdvisorInputSchema, BreedAdvisorOutputSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
25
+ * import { convertConversationalAssistantToFunction } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
26
+ *
27
+ * const breedAdvisor = convertConversationalAssistantToFunction(
28
+ * breedAdvisorAgent,
29
+ * BreedAdvisorInputSchema,
30
+ * BreedAdvisorOutputSchema,
31
+ * { model: openai('gpt-4'), temperature: 0.8 }
32
+ * );
33
+ *
34
+ * // Start conversation
35
+ * const session: ConversationSession = { history: [] };
36
+ * const turn1 = await breedAdvisor(
37
+ * { message: "I'm looking for a cat", sessionState: {} },
38
+ * session
39
+ * );
40
+ * console.log(turn1.reply); // "Great! Tell me about your living space..."
41
+ *
42
+ * // Continue conversation
43
+ * const turn2 = await breedAdvisor(
44
+ * { message: "I live in an apartment", sessionState: turn1.sessionState },
45
+ * session
46
+ * );
47
+ * console.log(turn2.recommendations); // Breed recommendations
48
+ * ```
49
+ *
50
+ * @param agent - The VAT conversational assistant agent to convert
51
+ * @param inputSchema - The Zod input schema
52
+ * @param outputSchema - The Zod output schema
53
+ * @param llmConfig - Configuration for the LLM (model, temperature, etc.)
54
+ * @returns An async function that executes the agent with conversation context
55
+ */
56
+ export declare function convertConversationalAssistantToFunction<TInput, TOutput>(agent: Agent<TInput, TOutput>, inputSchema: z.ZodType<TInput>, outputSchema: z.ZodType<TOutput>, llmConfig: VercelAILLMConfig): (input: TInput, session: ConversationSession) => Promise<TOutput>;
57
+ /**
58
+ * Batch converts multiple conversational assistant agents to executable functions.
59
+ *
60
+ * Useful when you need multiple conversational agents with shared LLM configuration.
61
+ * Each agent maintains its own independent conversation session.
62
+ *
63
+ * Example:
64
+ * ```typescript
65
+ * import { openai } from '@ai-sdk/openai';
66
+ * import { breedAdvisorAgent, petCareAdvisorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
67
+ * import { convertConversationalAssistantsToFunctions } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
68
+ *
69
+ * const assistants = convertConversationalAssistantsToFunctions(
70
+ * {
71
+ * breedAdvisor: {
72
+ * agent: breedAdvisorAgent,
73
+ * inputSchema: BreedAdvisorInputSchema,
74
+ * outputSchema: BreedAdvisorOutputSchema,
75
+ * },
76
+ * petCareAdvisor: {
77
+ * agent: petCareAdvisorAgent,
78
+ * inputSchema: PetCareInputSchema,
79
+ * outputSchema: PetCareOutputSchema,
80
+ * },
81
+ * },
82
+ * { model: openai('gpt-4'), temperature: 0.8 }
83
+ * );
84
+ *
85
+ * // Each assistant has its own session
86
+ * const breedSession: ConversationSession = { history: [] };
87
+ * const careSession: ConversationSession = { history: [] };
88
+ *
89
+ * const breedResponse = await assistants.breedAdvisor(
90
+ * { message: "I want a cat" },
91
+ * breedSession
92
+ * );
93
+ * const careResponse = await assistants.petCareAdvisor(
94
+ * { message: "How often should I feed my cat?" },
95
+ * careSession
96
+ * );
97
+ * ```
98
+ *
99
+ * @param configs - Map of assistant names to conversion configurations
100
+ * @param llmConfig - Shared LLM configuration for all assistants
101
+ * @returns Map of assistant names to executable async functions
102
+ */
103
+ export interface ConversationalAssistantConversionConfig<TInput, TOutput> {
104
+ agent: Agent<TInput, TOutput>;
105
+ inputSchema: z.ZodType<TInput>;
106
+ outputSchema: z.ZodType<TOutput>;
107
+ }
108
+ export declare function convertConversationalAssistantsToFunctions<T extends Record<string, ConversationalAssistantConversionConfig<unknown, unknown>>>(configs: T, llmConfig: VercelAILLMConfig): Record<keyof T, (input: unknown, session: ConversationSession) => Promise<unknown>>;
109
+ //# sourceMappingURL=conversational-assistant.d.ts.map
package/dist/adapters/conversational-assistant.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"conversational-assistant.d.ts","sourceRoot":"","sources":["../../src/adapters/conversational-assistant.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,KAAK,EAAE,OAAO,EAAE,MAAM,mCAAmC,CAAC;AAExE,OAAO,KAAK,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAE7B,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,aAAa,CAAC;AAErD;;;GAGG;AACH,MAAM,WAAW,mBAAmB;IAClC,2BAA2B;IAC3B,OAAO,EAAE,OAAO,EAAE,CAAC;IACnB,mCAAmC;IACnC,KAAK,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjC;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAyCG;AACH,wBAAgB,wCAAwC,CAAC,MAAM,EAAE,OAAO,EACtE,KAAK,EAAE,KAAK,CAAC,MAAM,EAAE,OAAO,CAAC,EAC7B,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,EAC9B,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,EAChC,SAAS,EAAE,iBAAiB,GAC3B,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,mBAAmB,KAAK,OAAO,CAAC,OAAO,CAAC,CAkDnE;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6CG;AACH,MAAM,WAAW,uCAAuC,CAAC,MAAM,EAAE,OAAO;IACtE,KAAK,EAAE,KAAK,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAC9B,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;IAC/B,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;CAClC;AAED,wBAAgB,0CAA0C,CACxD,CAAC,SAAS,MAAM,CAAC,MAAM,EAAE,uCAAuC,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC,EAEnF,OAAO,EAAE,CAAC,EACV,SAAS,EAAE,iBAAiB,GAC3B,MAAM,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,EAAE,OAAO,EAAE,OAAO,EAAE,mBAAmB,KAAK,OAAO,CAAC,OAAO,CAAC,CAAC,CAmBrF"}
package/dist/adapters/conversational-assistant.js ADDED
@@ -0,0 +1,93 @@
1
+ import { streamText } from 'ai';
2
+ /**
3
+ * Converts a VAT Conversational Assistant agent to a function compatible with Vercel AI SDK.
4
+ *
5
+ * Conversational assistants maintain context across multiple turns using conversation history.
6
+ * They're perfect for interactive dialogs, multi-turn decision-making, and stateful interactions.
7
+ *
8
+ * Example:
9
+ * ```typescript
10
+ * import { openai } from '@ai-sdk/openai';
11
+ * import { breedAdvisorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
12
+ * import { BreedAdvisorInputSchema, BreedAdvisorOutputSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
13
+ * import { convertConversationalAssistantToFunction } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
14
+ *
15
+ * const breedAdvisor = convertConversationalAssistantToFunction(
16
+ * breedAdvisorAgent,
17
+ * BreedAdvisorInputSchema,
18
+ * BreedAdvisorOutputSchema,
19
+ * { model: openai('gpt-4'), temperature: 0.8 }
20
+ * );
21
+ *
22
+ * // Start conversation
23
+ * const session: ConversationSession = { history: [] };
24
+ * const turn1 = await breedAdvisor(
25
+ * { message: "I'm looking for a cat", sessionState: {} },
26
+ * session
27
+ * );
28
+ * console.log(turn1.reply); // "Great! Tell me about your living space..."
29
+ *
30
+ * // Continue conversation
31
+ * const turn2 = await breedAdvisor(
32
+ * { message: "I live in an apartment", sessionState: turn1.sessionState },
33
+ * session
34
+ * );
35
+ * console.log(turn2.recommendations); // Breed recommendations
36
+ * ```
37
+ *
38
+ * @param agent - The VAT conversational assistant agent to convert
39
+ * @param inputSchema - The Zod input schema
40
+ * @param outputSchema - The Zod output schema
41
+ * @param llmConfig - Configuration for the LLM (model, temperature, etc.)
42
+ * @returns An async function that executes the agent with conversation context
43
+ */
44
+ export function convertConversationalAssistantToFunction(agent, inputSchema, outputSchema, llmConfig) {
45
+ return async (input, session) => {
46
+ // Validate input
47
+ const validatedInput = inputSchema.parse(input);
48
+ // Initialize session if needed
49
+ session.history ??= [];
50
+ // Convert VAT Message format to Vercel AI SDK format
51
+ const convertToVercelFormat = (messages) => {
52
+ return messages.map((msg) => ({
53
+ role: msg.role,
54
+ content: msg.content,
55
+ }));
56
+ };
57
+ // Create conversation context
58
+ const callLLM = async (messages) => {
59
+ const vercelMessages = convertToVercelFormat(messages);
60
+ // eslint-disable-next-line @typescript-eslint/await-thenable
61
+ const result = await streamText({
62
+ model: llmConfig.model,
63
+ ...(llmConfig.temperature ? { temperature: llmConfig.temperature } : {}),
64
+ ...(llmConfig.maxTokens ? { maxTokens: llmConfig.maxTokens } : {}),
65
+ ...llmConfig.additionalSettings,
66
+ messages: vercelMessages,
67
+ });
68
+ // Collect streamed text from the response
69
+ return await result.text;
70
+ };
71
+ const addToHistory = (role, content) => {
72
+ session.history.push({ role, content });
73
+ };
74
+ const context = {
75
+ mockable: false,
76
+ history: session.history,
77
+ addToHistory,
78
+ callLLM,
79
+ };
80
+ // Call the agent's execute function with the conversation context
81
+ const output = await agent.execute(validatedInput, context);
82
+ // Validate output
83
+ return outputSchema.parse(output);
84
+ };
85
+ }
86
+ export function convertConversationalAssistantsToFunctions(configs, llmConfig) {
87
+ const functions = {};
88
+ for (const [name, config] of Object.entries(configs)) {
89
+ functions[name] = convertConversationalAssistantToFunction(config.agent, config.inputSchema, config.outputSchema, llmConfig);
90
+ }
91
+ return functions;
92
+ }
93
+ //# sourceMappingURL=conversational-assistant.js.map
package/dist/adapters/conversational-assistant.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"conversational-assistant.js","sourceRoot":"","sources":["../../src/adapters/conversational-assistant.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,UAAU,EAAE,MAAM,IAAI,CAAC;AAgBhC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAyCG;AACH,MAAM,UAAU,wCAAwC,CACtD,KAA6B,EAC7B,WAA8B,EAC9B,YAAgC,EAChC,SAA4B;IAE5B,OAAO,KAAK,EAAE,KAAa,EAAE,OAA4B,EAAE,EAAE;QAC3D,iBAAiB;QACjB,MAAM,cAAc,GAAG,WAAW,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;QAEhD,+BAA+B;QAC/B,OAAO,CAAC,OAAO,KAAK,EAAE,CAAC;QAEvB,qDAAqD;QACrD,MAAM,qBAAqB,GAAG,CAAC,QAAmB,EAAE,EAAE;YACpD,OAAO,QAAQ,CAAC,GAAG,CAAC,CAAC,GAAG,EAAE,EAAE,CAAC,CAAC;gBAC5B,IAAI,EAAE,GAAG,CAAC,IAAI;gBACd,OAAO,EAAE,GAAG,CAAC,OAAO;aACrB,CAAC,CAAC,CAAC;QACN,CAAC,CAAC;QAEF,8BAA8B;QAC9B,MAAM,OAAO,GAAG,KAAK,EAAE,QAAmB,EAAE,EAAE;YAC5C,MAAM,cAAc,GAAG,qBAAqB,CAAC,QAAQ,CAAC,CAAC;YAEvD,6DAA6D;YAC7D,MAAM,MAAM,GAAG,MAAM,UAAU,CAAC;gBAC9B,KAAK,EAAE,SAAS,CAAC,KAAK;gBACtB,GAAG,CAAC,SAAS,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,WAAW,EAAE,SAAS,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;gBACxE,GAAG,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;gBAClE,GAAG,SAAS,CAAC,kBAAkB;gBAC/B,QAAQ,EAAE,cAAc;aACzB,CAAC,CAAC;YAEH,0CAA0C;YAC1C,OAAO,MAAM,MAAM,CAAC,IAAI,CAAC;QAC3B,CAAC,CAAC;QAEF,MAAM,YAAY,GAAG,CAAC,IAAqC,EAAE,OAAe,EAAE,EAAE;YAC9E,OAAO,CAAC,OAAO,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC,CAAC;QAC1C,CAAC,CAAC;QAEF,MAAM,OAAO,GAAG;YACd,QAAQ,EAAE,KAAK;YACf,OAAO,EAAE,OAAO,CAAC,OAAO;YACxB,YAAY;YACZ,OAAO;SACR,CAAC;QAEF,kEAAkE;QAClE,MAAM,MAAM,GAAG,MAAM,KAAK,CAAC,OAAO,CAAC,cAAc,EAAE,OAAO,CAAC,CAAC;QAE5D,kBAAkB;QAClB,OAAO,YAAY,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;IACpC,CAAC,CAAC;AACJ,CAAC;AAsDD,MAAM,UAAU,0CAA0C,CAGxD,OAAU,EACV,SAA4B;IAE5B,MAAM,SAAS,GAGX,EAAE,CAAC;IAEP,KAAK,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC;QACrD,SAAS,CAAC,IAAI,CAAC,GAAG,wCAAwC,CACxD,MAAM,CAAC,KAAK,EACZ,MAAM,CAAC,WAAW,EAClB,MAAM,CAAC,YAAY,EACnB,SAAS,CACV,CAAC;IACJ,CAAC;IAED,OAAO,SAGN,CAAC;AACJ,CAAC"}
package/dist/adapters/llm-analyzer.d.ts ADDED
@@ -0,0 +1,80 @@
1
+ import type { Agent } from '@vibe-agent-toolkit/agent-runtime';
2
+ import type { z } from 'zod';
3
+ import type { VercelAILLMConfig } from '../types.js';
4
+ /**
5
+ * Converts a VAT LLM Analyzer agent to a function compatible with Vercel AI SDK.
6
+ *
7
+ * LLM analyzers make a single LLM call to analyze input and produce structured output.
8
+ * They're perfect for classification, extraction, generation, and analysis tasks.
9
+ *
10
+ * Example:
11
+ * ```typescript
12
+ * import { openai } from '@ai-sdk/openai';
13
+ * import { nameGeneratorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
14
+ * import { NameGeneratorInputSchema, NameSuggestionSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
15
+ * import { convertLLMAnalyzerToFunction } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
16
+ *
17
+ * const generateName = convertLLMAnalyzerToFunction(
18
+ * nameGeneratorAgent,
19
+ * NameGeneratorInputSchema,
20
+ * NameSuggestionSchema,
21
+ * { model: openai('gpt-4'), temperature: 0.9 }
22
+ * );
23
+ *
24
+ * // Use the function
25
+ * const result = await generateName({
26
+ * characteristics: { physical: { furColor: 'Orange' }, behavioral: { personality: ['Mischievous'] } }
27
+ * });
28
+ * console.log(result.name); // "Sir Knocksalot"
29
+ * ```
30
+ *
31
+ * @param agent - The VAT LLM analyzer agent to convert
32
+ * @param inputSchema - The Zod input schema
33
+ * @param outputSchema - The Zod output schema
34
+ * @param llmConfig - Configuration for the LLM (model, temperature, etc.)
35
+ * @returns An async function that executes the agent with the configured LLM
36
+ */
37
+ export declare function convertLLMAnalyzerToFunction<TInput, TOutput>(agent: Agent<TInput, TOutput>, inputSchema: z.ZodType<TInput>, outputSchema: z.ZodType<TOutput>, llmConfig: VercelAILLMConfig): (input: TInput) => Promise<TOutput>;
38
+ /**
39
+ * Batch converts multiple LLM analyzer agents to executable functions.
40
+ *
41
+ * Useful when you need multiple AI-powered analysis functions with a shared LLM configuration.
42
+ *
43
+ * Example:
44
+ * ```typescript
45
+ * import { openai } from '@ai-sdk/openai';
46
+ * import { nameGeneratorAgent, haikuGeneratorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
47
+ * import { convertLLMAnalyzersToFunctions } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
48
+ *
49
+ * const analyzers = convertLLMAnalyzersToFunctions(
50
+ * {
51
+ * generateName: {
52
+ * agent: nameGeneratorAgent,
53
+ * inputSchema: NameGeneratorInputSchema,
54
+ * outputSchema: NameSuggestionSchema,
55
+ * },
56
+ * generateHaiku: {
57
+ * agent: haikuGeneratorAgent,
58
+ * inputSchema: HaikuGeneratorInputSchema,
59
+ * outputSchema: HaikuSchema,
60
+ * },
61
+ * },
62
+ * { model: openai('gpt-4'), temperature: 0.8 }
63
+ * );
64
+ *
65
+ * // Use the functions
66
+ * const name = await analyzers.generateName({ characteristics });
67
+ * const haiku = await analyzers.generateHaiku({ characteristics });
68
+ * ```
69
+ *
70
+ * @param configs - Map of function names to conversion configurations
71
+ * @param llmConfig - Shared LLM configuration for all agents
72
+ * @returns Map of function names to executable async functions
73
+ */
74
+ export interface LLMAnalyzerConversionConfig<TInput, TOutput> {
75
+ agent: Agent<TInput, TOutput>;
76
+ inputSchema: z.ZodType<TInput>;
77
+ outputSchema: z.ZodType<TOutput>;
78
+ }
79
+ export declare function convertLLMAnalyzersToFunctions<T extends Record<string, LLMAnalyzerConversionConfig<unknown, unknown>>>(configs: T, llmConfig: VercelAILLMConfig): Record<keyof T, (input: unknown) => Promise<unknown>>;
80
+ //# sourceMappingURL=llm-analyzer.d.ts.map
package/dist/adapters/llm-analyzer.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-analyzer.d.ts","sourceRoot":"","sources":["../../src/adapters/llm-analyzer.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,mCAAmC,CAAC;AAE/D,OAAO,KAAK,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAE7B,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,aAAa,CAAC;AAErD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAgCG;AACH,wBAAgB,4BAA4B,CAAC,MAAM,EAAE,OAAO,EAC1D,KAAK,EAAE,KAAK,CAAC,MAAM,EAAE,OAAO,CAAC,EAC7B,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,EAC9B,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,EAChC,SAAS,EAAE,iBAAiB,GAC3B,CAAC,KAAK,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,CAAC,CA0CrC;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAmCG;AACH,MAAM,WAAW,2BAA2B,CAAC,MAAM,EAAE,OAAO;IAC1D,KAAK,EAAE,KAAK,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAC9B,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;IAC/B,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;CAClC;AAED,wBAAgB,8BAA8B,CAC5C,CAAC,SAAS,MAAM,CAAC,MAAM,EAAE,2BAA2B,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC,EAEvE,OAAO,EAAE,CAAC,EACV,SAAS,EAAE,iBAAiB,GAC3B,MAAM,CAAC,MAAM,CAAC,EAAE,CAAC,KAAK,EAAE,OAAO,KAAK,OAAO,CAAC,OAAO,CAAC,CAAC,CAavD"}
package/dist/adapters/llm-analyzer.js ADDED
@@ -0,0 +1,81 @@
1
+ import { generateText } from 'ai';
2
+ /**
3
+ * Converts a VAT LLM Analyzer agent to a function compatible with Vercel AI SDK.
4
+ *
5
+ * LLM analyzers make a single LLM call to analyze input and produce structured output.
6
+ * They're perfect for classification, extraction, generation, and analysis tasks.
7
+ *
8
+ * Example:
9
+ * ```typescript
10
+ * import { openai } from '@ai-sdk/openai';
11
+ * import { nameGeneratorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
12
+ * import { NameGeneratorInputSchema, NameSuggestionSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
13
+ * import { convertLLMAnalyzerToFunction } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
14
+ *
15
+ * const generateName = convertLLMAnalyzerToFunction(
16
+ * nameGeneratorAgent,
17
+ * NameGeneratorInputSchema,
18
+ * NameSuggestionSchema,
19
+ * { model: openai('gpt-4'), temperature: 0.9 }
20
+ * );
21
+ *
22
+ * // Use the function
23
+ * const result = await generateName({
24
+ * characteristics: { physical: { furColor: 'Orange' }, behavioral: { personality: ['Mischievous'] } }
25
+ * });
26
+ * console.log(result.name); // "Sir Knocksalot"
27
+ * ```
28
+ *
29
+ * @param agent - The VAT LLM analyzer agent to convert
30
+ * @param inputSchema - The Zod input schema
31
+ * @param outputSchema - The Zod output schema
32
+ * @param llmConfig - Configuration for the LLM (model, temperature, etc.)
33
+ * @returns An async function that executes the agent with the configured LLM
34
+ */
35
+ export function convertLLMAnalyzerToFunction(agent, inputSchema, outputSchema, llmConfig) {
36
+ return async (input) => {
37
+ // Validate input
38
+ const validatedInput = inputSchema.parse(input);
39
+ // Create LLM context with Vercel AI SDK's generateText
40
+ const callLLM = async (prompt) => {
41
+ const result = await generateText({
42
+ model: llmConfig.model,
43
+ ...(llmConfig.temperature ? { temperature: llmConfig.temperature } : {}),
44
+ ...(llmConfig.maxTokens ? { maxTokens: llmConfig.maxTokens } : {}),
45
+ ...llmConfig.additionalSettings,
46
+ prompt,
47
+ });
48
+ return result.text;
49
+ };
50
+ // Execute the agent with mock mode disabled (real LLM call)
51
+ // Extract model name from LanguageModel union type
52
+ let modelName;
53
+ if (typeof llmConfig.model === 'string') {
54
+ modelName = llmConfig.model;
55
+ }
56
+ else if ('provider' in llmConfig.model && llmConfig.model.provider) {
57
+ modelName = `${llmConfig.model.provider}/${llmConfig.model.modelId}`;
58
+ }
59
+ else {
60
+ modelName = llmConfig.model.modelId ?? 'unknown';
61
+ }
62
+ const context = {
63
+ mockable: false,
64
+ model: modelName,
65
+ temperature: llmConfig.temperature ?? 0.7,
66
+ callLLM,
67
+ };
68
+ // Call the agent's execute function with the LLM context
69
+ const output = await agent.execute(validatedInput, context);
70
+ // Validate output
71
+ return outputSchema.parse(output);
72
+ };
73
+ }
74
+ export function convertLLMAnalyzersToFunctions(configs, llmConfig) {
75
+ const functions = {};
76
+ for (const [name, config] of Object.entries(configs)) {
77
+ functions[name] = convertLLMAnalyzerToFunction(config.agent, config.inputSchema, config.outputSchema, llmConfig);
78
+ }
79
+ return functions;
80
+ }
81
+ //# sourceMappingURL=llm-analyzer.js.map
package/dist/adapters/llm-analyzer.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm-analyzer.js","sourceRoot":"","sources":["../../src/adapters/llm-analyzer.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,MAAM,IAAI,CAAC;AAKlC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAgCG;AACH,MAAM,UAAU,4BAA4B,CAC1C,KAA6B,EAC7B,WAA8B,EAC9B,YAAgC,EAChC,SAA4B;IAE5B,OAAO,KAAK,EAAE,KAAa,EAAE,EAAE;QAC7B,iBAAiB;QACjB,MAAM,cAAc,GAAG,WAAW,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC;QAEhD,uDAAuD;QACvD,MAAM,OAAO,GAAG,KAAK,EAAE,MAAc,EAAE,EAAE;YACvC,MAAM,MAAM,GAAG,MAAM,YAAY,CAAC;gBAChC,KAAK,EAAE,SAAS,CAAC,KAAK;gBACtB,GAAG,CAAC,SAAS,CAAC,WAAW,CAAC,CAAC,CAAC,EAAE,WAAW,EAAE,SAAS,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;gBACxE,GAAG,CAAC,SAAS,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,SAAS,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;gBAClE,GAAG,SAAS,CAAC,kBAAkB;gBAC/B,MAAM;aACP,CAAC,CAAC;YAEH,OAAO,MAAM,CAAC,IAAI,CAAC;QACrB,CAAC,CAAC;QAEF,4DAA4D;QAC5D,mDAAmD;QACnD,IAAI,SAAiB,CAAC;QACtB,IAAI,OAAO,SAAS,CAAC,KAAK,KAAK,QAAQ,EAAE,CAAC;YACxC,SAAS,GAAG,SAAS,CAAC,KAAK,CAAC;QAC9B,CAAC;aAAM,IAAI,UAAU,IAAI,SAAS,CAAC,KAAK,IAAI,SAAS,CAAC,KAAK,CAAC,QAAQ,EAAE,CAAC;YACrE,SAAS,GAAG,GAAG,SAAS,CAAC,KAAK,CAAC,QAAQ,IAAI,SAAS,CAAC,KAAK,CAAC,OAAO,EAAE,CAAC;QACvE,CAAC;aAAM,CAAC;YACN,SAAS,GAAG,SAAS,CAAC,KAAK,CAAC,OAAO,IAAI,SAAS,CAAC;QACnD,CAAC;QAED,MAAM,OAAO,GAAG;YACd,QAAQ,EAAE,KAAK;YACf,KAAK,EAAE,SAAS;YAChB,WAAW,EAAE,SAAS,CAAC,WAAW,IAAI,GAAG;YACzC,OAAO;SACR,CAAC;QAEF,yDAAyD;QACzD,MAAM,MAAM,GAAG,MAAM,KAAK,CAAC,OAAO,CAAC,cAAc,EAAE,OAAO,CAAC,CAAC;QAE5D,kBAAkB;QAClB,OAAO,YAAY,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;IACpC,CAAC,CAAC;AACJ,CAAC;AA4CD,MAAM,UAAU,8BAA8B,CAG5C,OAAU,EACV,SAA4B;IAE5B,MAAM,SAAS,GAAyD,EAAE,CAAC;IAE3E,KAAK,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC;QACrD,SAAS,CAAC,IAAI,CAAC,GAAG,4BAA4B,CAC5C,MAAM,CAAC,KAAK,EACZ,MAAM,CAAC,WAAW,EAClB,MAAM,CAAC,YAAY,EACnB,SAAS,CACV,CAAC;IACJ,CAAC;IAED,OAAO,SAAkE,CAAC;AAC5E,CAAC"}
package/dist/adapters/pure-function.d.ts ADDED
@@ -0,0 +1,79 @@
1
+ import type { PureFunctionAgent } from '@vibe-agent-toolkit/agent-runtime';
2
+ import type { z } from 'zod';
3
+ import type { ConversionResult, VercelAITool } from '../types.js';
4
+ /**
5
+ * Converts a VAT PureFunctionAgent to a Vercel AI SDK tool.
6
+ *
7
+ * Pure function tools are synchronous and deterministic - perfect for
8
+ * structured data validation, transformation, and computation.
9
+ *
10
+ * Example:
11
+ * ```typescript
12
+ * import { haikuValidatorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
13
+ * import { HaikuSchema, HaikuValidationResultSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
14
+ * import { convertPureFunctionToTool } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
15
+ *
16
+ * const haikuTool = convertPureFunctionToTool(
17
+ * haikuValidatorAgent,
18
+ * HaikuSchema,
19
+ * HaikuValidationResultSchema
20
+ * );
21
+ *
22
+ * // Use with generateText()
23
+ * const result = await generateText({
24
+ * model: openai('gpt-4'),
25
+ * tools: { validateHaiku: haikuTool.tool },
26
+ * prompt: 'Validate this haiku: ...'
27
+ * });
28
+ * ```
29
+ *
30
+ * @param agent - The VAT pure function agent to convert
31
+ * @param inputSchema - The Zod input schema
32
+ * @param outputSchema - The Zod output schema
33
+ * @returns Vercel AI SDK tool definition with metadata
34
+ */
35
+ export declare function convertPureFunctionToTool<TInput, TOutput>(agent: PureFunctionAgent<TInput, TOutput>, inputSchema: z.ZodType<TInput>, outputSchema: z.ZodType<TOutput>): ConversionResult<TInput, TOutput>;
36
+ /**
37
+ * Configuration for batch tool conversion
38
+ */
39
+ export interface ToolConversionConfig<TInput, TOutput> {
40
+ agent: PureFunctionAgent<TInput, TOutput>;
41
+ inputSchema: z.ZodType<TInput>;
42
+ outputSchema: z.ZodType<TOutput>;
43
+ }
44
+ /**
45
+ * Batch converts multiple pure function agents to Vercel AI SDK tools.
46
+ *
47
+ * Useful when you want to provide multiple tools to an LLM in one call.
48
+ *
49
+ * Example:
50
+ * ```typescript
51
+ * import { haikuValidatorAgent, nameValidatorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
52
+ * import { HaikuSchema, HaikuValidationResultSchema, NameValidationInputSchema, NameValidationResultSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
53
+ * import { convertPureFunctionsToTools } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
54
+ *
55
+ * const tools = convertPureFunctionsToTools({
56
+ * validateHaiku: {
57
+ * agent: haikuValidatorAgent,
58
+ * inputSchema: HaikuSchema,
59
+ * outputSchema: HaikuValidationResultSchema,
60
+ * },
61
+ * validateName: {
62
+ * agent: nameValidatorAgent,
63
+ * inputSchema: NameValidationInputSchema,
64
+ * outputSchema: NameValidationResultSchema,
65
+ * },
66
+ * });
67
+ *
68
+ * const result = await generateText({
69
+ * model: openai('gpt-4'),
70
+ * tools,
71
+ * prompt: 'Validate these cat names and haikus...'
72
+ * });
73
+ * ```
74
+ *
75
+ * @param configs - Map of tool names to conversion configurations
76
+ * @returns Map of tool names to Vercel AI SDK tools
77
+ */
78
+ export declare function convertPureFunctionsToTools(configs: Record<string, ToolConversionConfig<unknown, unknown>>): Record<string, VercelAITool>;
79
+ //# sourceMappingURL=pure-function.d.ts.map
package/dist/adapters/pure-function.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"pure-function.d.ts","sourceRoot":"","sources":["../../src/adapters/pure-function.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,mCAAmC,CAAC;AAE3E,OAAO,KAAK,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAE7B,OAAO,KAAK,EAAE,gBAAgB,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAElE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,OAAO,EACvD,KAAK,EAAE,iBAAiB,CAAC,MAAM,EAAE,OAAO,CAAC,EACzC,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,EAC9B,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,GAC/B,gBAAgB,CAAC,MAAM,EAAE,OAAO,CAAC,CA2BnC;AAED;;GAEG;AACH,MAAM,WAAW,oBAAoB,CAAC,MAAM,EAAE,OAAO;IACnD,KAAK,EAAE,iBAAiB,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAC1C,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;IAC/B,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;CAClC;AAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiCG;AACH,wBAAgB,2BAA2B,CACzC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,oBAAoB,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC,GAC9D,MAAM,CAAC,MAAM,EAAE,YAAY,CAAC,CAa9B"}
package/dist/adapters/pure-function.js ADDED
@@ -0,0 +1,101 @@
1
+ import { tool } from 'ai';
2
+ /**
3
+ * Converts a VAT PureFunctionAgent to a Vercel AI SDK tool.
4
+ *
5
+ * Pure function tools are synchronous and deterministic - perfect for
6
+ * structured data validation, transformation, and computation.
7
+ *
8
+ * Example:
9
+ * ```typescript
10
+ * import { haikuValidatorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
11
+ * import { HaikuSchema, HaikuValidationResultSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
12
+ * import { convertPureFunctionToTool } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
13
+ *
14
+ * const haikuTool = convertPureFunctionToTool(
15
+ * haikuValidatorAgent,
16
+ * HaikuSchema,
17
+ * HaikuValidationResultSchema
18
+ * );
19
+ *
20
+ * // Use with generateText()
21
+ * const result = await generateText({
22
+ * model: openai('gpt-4'),
23
+ * tools: { validateHaiku: haikuTool.tool },
24
+ * prompt: 'Validate this haiku: ...'
25
+ * });
26
+ * ```
27
+ *
28
+ * @param agent - The VAT pure function agent to convert
29
+ * @param inputSchema - The Zod input schema
30
+ * @param outputSchema - The Zod output schema
31
+ * @returns Vercel AI SDK tool definition with metadata
32
+ */
33
+ export function convertPureFunctionToTool(agent, inputSchema, outputSchema) {
34
+ const { manifest } = agent;
35
+ // AI SDK v6: Use inputSchema instead of parameters, add options parameter to execute
36
+ // Type assertion needed because generic z.ZodType<TInput> doesn't satisfy tool()'s
37
+ // compile-time type constraints (FlexibleSchema<INPUT>), but works correctly at runtime
38
+ const vercelTool = tool({
39
+ description: manifest.description,
40
+ inputSchema: inputSchema,
41
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
42
+ execute: async (args, _options) => {
43
+ // The schema validates the input at runtime
44
+ return agent.execute(args);
45
+ },
46
+ });
47
+ return {
48
+ tool: vercelTool,
49
+ inputSchema,
50
+ outputSchema,
51
+ metadata: {
52
+ name: manifest.name,
53
+ description: manifest.description,
54
+ version: manifest.version,
55
+ archetype: manifest.archetype,
56
+ },
57
+ };
58
+ }
59
+ /**
60
+ * Batch converts multiple pure function agents to Vercel AI SDK tools.
61
+ *
62
+ * Useful when you want to provide multiple tools to an LLM in one call.
63
+ *
64
+ * Example:
65
+ * ```typescript
66
+ * import { haikuValidatorAgent, nameValidatorAgent } from '@vibe-agent-toolkit/vat-example-cat-agents';
67
+ * import { HaikuSchema, HaikuValidationResultSchema, NameValidationInputSchema, NameValidationResultSchema } from '@vibe-agent-toolkit/vat-example-cat-agents';
68
+ * import { convertPureFunctionsToTools } from '@vibe-agent-toolkit/runtime-vercel-ai-sdk';
69
+ *
70
+ * const tools = convertPureFunctionsToTools({
71
+ * validateHaiku: {
72
+ * agent: haikuValidatorAgent,
73
+ * inputSchema: HaikuSchema,
74
+ * outputSchema: HaikuValidationResultSchema,
75
+ * },
76
+ * validateName: {
77
+ * agent: nameValidatorAgent,
78
+ * inputSchema: NameValidationInputSchema,
79
+ * outputSchema: NameValidationResultSchema,
80
+ * },
81
+ * });
82
+ *
83
+ * const result = await generateText({
84
+ * model: openai('gpt-4'),
85
+ * tools,
86
+ * prompt: 'Validate these cat names and haikus...'
87
+ * });
88
+ * ```
89
+ *
90
+ * @param configs - Map of tool names to conversion configurations
91
+ * @returns Map of tool names to Vercel AI SDK tools
92
+ */
93
+ export function convertPureFunctionsToTools(configs) {
94
+ const tools = {};
95
+ for (const [name, config] of Object.entries(configs)) {
96
+ const converted = convertPureFunctionToTool(config.agent, config.inputSchema, config.outputSchema);
97
+ tools[name] = converted.tool;
98
+ }
99
+ return tools;
100
+ }
101
+ //# sourceMappingURL=pure-function.js.map
package/dist/adapters/pure-function.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"pure-function.js","sourceRoot":"","sources":["../../src/adapters/pure-function.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,IAAI,EAAE,MAAM,IAAI,CAAC;AAK1B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,MAAM,UAAU,yBAAyB,CACvC,KAAyC,EACzC,WAA8B,EAC9B,YAAgC;IAEhC,MAAM,EAAE,QAAQ,EAAE,GAAG,KAAK,CAAC;IAE3B,qFAAqF;IACrF,mFAAmF;IACnF,wFAAwF;IACxF,MAAM,UAAU,GAAG,IAAI,CAAC;QACtB,WAAW,EAAE,QAAQ,CAAC,WAAW;QACjC,WAAW,EAAE,WAAW;QACxB,8DAA8D;QAC9D,OAAO,EAAE,KAAK,EAAE,IAAS,EAAE,QAAa,EAAE,EAAE;YAC1C,4CAA4C;YAC5C,OAAO,KAAK,CAAC,OAAO,CAAC,IAAc,CAAC,CAAC;QACvC,CAAC;KACuC,CAAC,CAAC;IAE5C,OAAO;QACL,IAAI,EAAE,UAAqC;QAC3C,WAAW;QACX,YAAY;QACZ,QAAQ,EAAE;YACR,IAAI,EAAE,QAAQ,CAAC,IAAI;YACnB,WAAW,EAAE,QAAQ,CAAC,WAAW;YACjC,OAAO,EAAE,QAAQ,CAAC,OAAO;YACzB,SAAS,EAAE,QAAQ,CAAC,SAAS;SAC9B;KACF,CAAC;AACJ,CAAC;AAWD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAiCG;AACH,MAAM,UAAU,2BAA2B,CACzC,OAA+D;IAE/D,MAAM,KAAK,GAAiC,EAAE,CAAC;IAE/C,KAAK,MAAM,CAAC,IAAI,EAAE,MAAM,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC;QACrD,MAAM,SAAS,GAAG,yBAAyB,CACzC,MAAM,CAAC,KAAK,EACZ,MAAM,CAAC,WAAW,EAClB,MAAM,CAAC,YAAY,CACpB,CAAC;QACF,KAAK,CAAC,IAAI,CAAC,GAAG,SAAS,CAAC,IAAI,CAAC;IAC/B,CAAC;IAED,OAAO,KAAK,CAAC;AACf,CAAC"}
package/dist/index.d.ts ADDED
@@ -0,0 +1,16 @@
1
+ /**
2
+ * @vibe-agent-toolkit/runtime-vercel-ai-sdk
3
+ *
4
+ * Vercel AI SDK runtime adapter for VAT (Vibe Agent Toolkit) agents.
5
+ *
6
+ * Converts VAT archetype agents to Vercel AI SDK primitives:
7
+ * - PureFunctionAgent → tool() for structured data operations
8
+ * - LLMAnalyzerAgent → generateText() for AI-powered analysis
9
+ *
10
+ * Supports OpenAI, Anthropic, and other providers via Vercel AI SDK.
11
+ */
12
+ export { convertPureFunctionToTool, convertPureFunctionsToTools, type ToolConversionConfig, } from './adapters/pure-function.js';
13
+ export { convertLLMAnalyzerToFunction, convertLLMAnalyzersToFunctions, type LLMAnalyzerConversionConfig, } from './adapters/llm-analyzer.js';
14
+ export { convertConversationalAssistantToFunction, convertConversationalAssistantsToFunctions, type ConversationalAssistantConversionConfig, type ConversationSession, } from './adapters/conversational-assistant.js';
15
+ export type { VercelAITool, VercelAILLMConfig, ConversionResult, } from './types.js';
16
+ //# sourceMappingURL=index.d.ts.map
package/dist/index.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAGH,OAAO,EACL,yBAAyB,EACzB,2BAA2B,EAC3B,KAAK,oBAAoB,GAC1B,MAAM,6BAA6B,CAAC;AAErC,OAAO,EACL,4BAA4B,EAC5B,8BAA8B,EAC9B,KAAK,2BAA2B,GACjC,MAAM,4BAA4B,CAAC;AAEpC,OAAO,EACL,wCAAwC,EACxC,0CAA0C,EAC1C,KAAK,uCAAuC,EAC5C,KAAK,mBAAmB,GACzB,MAAM,wCAAwC,CAAC;AAGhD,YAAY,EACV,YAAY,EACZ,iBAAiB,EACjB,gBAAgB,GACjB,MAAM,YAAY,CAAC"}
package/dist/index.js ADDED
@@ -0,0 +1,16 @@
1
+ /**
2
+ * @vibe-agent-toolkit/runtime-vercel-ai-sdk
3
+ *
4
+ * Vercel AI SDK runtime adapter for VAT (Vibe Agent Toolkit) agents.
5
+ *
6
+ * Converts VAT archetype agents to Vercel AI SDK primitives:
7
+ * - PureFunctionAgent → tool() for structured data operations
8
+ * - LLMAnalyzerAgent → generateText() for AI-powered analysis
9
+ *
10
+ * Supports OpenAI, Anthropic, and other providers via Vercel AI SDK.
11
+ */
12
+ // Adapters
13
+ export { convertPureFunctionToTool, convertPureFunctionsToTools, } from './adapters/pure-function.js';
14
+ export { convertLLMAnalyzerToFunction, convertLLMAnalyzersToFunctions, } from './adapters/llm-analyzer.js';
15
+ export { convertConversationalAssistantToFunction, convertConversationalAssistantsToFunctions, } from './adapters/conversational-assistant.js';
16
+ //# sourceMappingURL=index.js.map
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;GAUG;AAEH,WAAW;AACX,OAAO,EACL,yBAAyB,EACzB,2BAA2B,GAE5B,MAAM,6BAA6B,CAAC;AAErC,OAAO,EACL,4BAA4B,EAC5B,8BAA8B,GAE/B,MAAM,4BAA4B,CAAC;AAEpC,OAAO,EACL,wCAAwC,EACxC,0CAA0C,GAG3C,MAAM,wCAAwC,CAAC"}
package/dist/types.d.ts ADDED
@@ -0,0 +1,57 @@
1
+ import type { LanguageModel, tool as toolFunction } from 'ai';
2
+ import type { z } from 'zod';
3
+ /**
4
+ * Vercel AI SDK tool definition
5
+ * Compatible with generateText() and streamText()
6
+ */
7
+ export type VercelAITool = ReturnType<typeof toolFunction>;
8
+ /**
9
+ * Configuration for LLM calls via Vercel AI SDK
10
+ */
11
+ export interface VercelAILLMConfig {
12
+ /**
13
+ * The language model to use
14
+ * Can be from any provider (OpenAI, Anthropic, etc.)
15
+ */
16
+ model: LanguageModel;
17
+ /**
18
+ * Temperature for generation (0-1)
19
+ * @default 0.7
20
+ */
21
+ temperature?: number;
22
+ /**
23
+ * Maximum tokens to generate
24
+ */
25
+ maxTokens?: number;
26
+ /**
27
+ * Additional model-specific settings
28
+ */
29
+ additionalSettings?: Record<string, unknown>;
30
+ }
31
+ /**
32
+ * Result from converting a VAT agent to Vercel AI SDK format
33
+ */
34
+ export interface ConversionResult<TInput, TOutput> {
35
+ /**
36
+ * The Vercel AI SDK tool definition
37
+ */
38
+ tool: VercelAITool;
39
+ /**
40
+ * Original input schema for reference
41
+ */
42
+ inputSchema: z.ZodType<TInput>;
43
+ /**
44
+ * Original output schema for reference
45
+ */
46
+ outputSchema: z.ZodType<TOutput>;
47
+ /**
48
+ * Agent metadata
49
+ */
50
+ metadata: {
51
+ name: string;
52
+ description: string;
53
+ version: string;
54
+ archetype: string;
55
+ };
56
+ }
57
+ //# sourceMappingURL=types.d.ts.map
package/dist/types.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,IAAI,IAAI,YAAY,EAAE,MAAM,IAAI,CAAC;AAC9D,OAAO,KAAK,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAE7B;;;GAGG;AACH,MAAM,MAAM,YAAY,GAAG,UAAU,CAAC,OAAO,YAAY,CAAC,CAAC;AAE3D;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAChC;;;OAGG;IACH,KAAK,EAAE,aAAa,CAAC;IAErB;;;OAGG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IAEnB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC9C;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB,CAAC,MAAM,EAAE,OAAO;IAC/C;;OAEG;IACH,IAAI,EAAE,YAAY,CAAC;IAEnB;;OAEG;IACH,WAAW,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;IAE/B;;OAEG;IACH,YAAY,EAAE,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;IAEjC;;OAEG;IACH,QAAQ,EAAE;QACR,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,OAAO,EAAE,MAAM,CAAC;QAChB,SAAS,EAAE,MAAM,CAAC;KACnB,CAAC;CACH"}
package/dist/types.js ADDED
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=types.js.map
package/dist/types.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":""}
package/package.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "name": "@vibe-agent-toolkit/runtime-vercel-ai-sdk",
3
+ "version": "0.1.2-rc.4",
4
+ "description": "Vercel AI SDK runtime adapter for VAT agents",
5
+ "type": "module",
6
+ "exports": {
7
+ ".": {
8
+ "types": "./dist/index.d.ts",
9
+ "default": "./dist/index.js"
10
+ }
11
+ },
12
+ "files": [
13
+ "dist"
14
+ ],
15
+ "scripts": {
16
+ "build": "tsc",
17
+ "demo": "tsx examples/demo.ts",
18
+ "llm-demo": "tsx examples/llm-agent-demo.ts",
19
+ "test": "vitest run",
20
+ "test:watch": "vitest",
21
+ "test:llm-regression": "RUN_LLM_TESTS=true bun test test/llm-regression.test.ts",
22
+ "typecheck": "tsc --noEmit"
23
+ },
24
+ "dependencies": {
25
+ "@ai-sdk/provider": "^3.0.4",
26
+ "@ai-sdk/provider-utils": "^4.0.8",
27
+ "@vibe-agent-toolkit/agent-runtime": "0.1.2-rc.4",
28
+ "ai": "^6.0.39",
29
+ "zod": "^3.24.1"
30
+ },
31
+ "devDependencies": {
32
+ "@ai-sdk/anthropic": "^3.0.15",
33
+ "@ai-sdk/openai": "^3.0.12",
34
+ "@types/node": "^22.10.5",
35
+ "@vibe-agent-toolkit/dev-tools": "0.1.2-rc.4",
36
+ "@vibe-agent-toolkit/vat-example-cat-agents": "0.1.2-rc.4",
37
+ "tsx": "^4.19.2",
38
+ "typescript": "^5.7.3",
39
+ "vitest": "^2.1.8"
40
+ },
41
+ "keywords": [
42
+ "ai-agent",
43
+ "vercel-ai-sdk",
44
+ "vibe-agent-toolkit",
45
+ "runtime-adapter"
46
+ ],
47
+ "author": "Jeff Dutton",
48
+ "license": "MIT",
49
+ "repository": {
50
+ "type": "git",
51
+ "url": "https://github.com/jdutton/vibe-agent-toolkit.git",
52
+ "directory": "packages/runtime-vercel-ai-sdk"
53
+ }
54
+ }