@copilotkitnext/agent 0.0.25 → 0.0.27-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,503 @@
+ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
+ import { z } from "zod";
+ import { BasicAgent, defineTool } from "../index";
+ import { EventType, type RunAgentInput } from "@ag-ui/client";
+ import { streamText } from "ai";
+ import {
+   mockStreamTextResponse,
+   toolCallStreamingStart,
+   toolCall,
+   toolResult,
+   finish,
+   collectEvents,
+ } from "./test-helpers";
+
+ // Mock the ai module
+ vi.mock("ai", () => ({
+   streamText: vi.fn(),
+   tool: vi.fn((config) => config),
+   stepCountIs: vi.fn((count: number) => ({ type: "stepCount", count })),
+ }));
+
+ // Mock the SDK clients
+ vi.mock("@ai-sdk/openai", () => ({
+   createOpenAI: vi.fn(() => (modelId: string) => ({
+     modelId,
+     provider: "openai",
+   })),
+ }));
+
+ describe("Config Tools Server-Side Execution", () => {
+   const originalEnv = process.env;
+
+   beforeEach(() => {
+     vi.clearAllMocks();
+     process.env = { ...originalEnv };
+     process.env.OPENAI_API_KEY = "test-key";
+   });
+
+   afterEach(() => {
+     process.env = originalEnv;
+   });
+
+   describe("Tool Definition with Execute", () => {
+     it("should pass execute function to streamText tools", async () => {
+       const executeFn = vi.fn().mockResolvedValue({ result: "executed" });
+
+       const weatherTool = defineTool({
+         name: "getWeather",
+         description: "Get weather for a city",
+         parameters: z.object({
+           city: z.string().describe("The city name"),
+         }),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [weatherTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       // Verify streamText was called with tools that have execute functions
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+       expect(callArgs.tools).toHaveProperty("getWeather");
+       expect(callArgs.tools.getWeather).toHaveProperty("execute");
+       expect(typeof callArgs.tools.getWeather.execute).toBe("function");
+     });
+
+     it("should include all tool properties in the Vercel AI SDK tool", async () => {
+       const executeFn = vi.fn().mockResolvedValue({ temperature: 72 });
+
+       const weatherTool = defineTool({
+         name: "getWeather",
+         description: "Get weather for a city",
+         parameters: z.object({
+           city: z.string(),
+           units: z.enum(["celsius", "fahrenheit"]).optional(),
+         }),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [weatherTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+       const tool = callArgs.tools.getWeather;
+
+       expect(tool.description).toBe("Get weather for a city");
+       expect(tool.inputSchema).toBeDefined();
+       expect(tool.execute).toBe(executeFn);
+     });
+
+     it("should handle multiple config tools with execute functions", async () => {
+       const weatherExecute = vi.fn().mockResolvedValue({ temp: 72 });
+       const searchExecute = vi.fn().mockResolvedValue({ results: [] });
+
+       const weatherTool = defineTool({
+         name: "getWeather",
+         description: "Get weather",
+         parameters: z.object({ city: z.string() }),
+         execute: weatherExecute,
+       });
+
+       const searchTool = defineTool({
+         name: "search",
+         description: "Search the web",
+         parameters: z.object({ query: z.string() }),
+         execute: searchExecute,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [weatherTool, searchTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+
+       expect(callArgs.tools.getWeather.execute).toBe(weatherExecute);
+       expect(callArgs.tools.search.execute).toBe(searchExecute);
+     });
+   });
+
+   describe("Config Tools vs Input Tools", () => {
+     it("config tools should have execute, input tools should not", async () => {
+       const configExecute = vi.fn().mockResolvedValue({ result: "server" });
+
+       const configTool = defineTool({
+         name: "serverTool",
+         description: "Runs on server",
+         parameters: z.object({ data: z.string() }),
+         execute: configExecute,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [configTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [
+           {
+             name: "clientTool",
+             description: "Runs on client",
+             parameters: { type: "object", properties: { input: { type: "string" } } },
+           },
+         ],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+
+       // Config tool has execute
+       expect(callArgs.tools.serverTool.execute).toBe(configExecute);
+
+       // Input tool does NOT have execute (client-side execution)
+       expect(callArgs.tools.clientTool.execute).toBeUndefined();
+     });
+   });
+
+   describe("Execute Function Invocation", () => {
+     it("execute function can be called with correct arguments", async () => {
+       const executeFn = vi.fn().mockResolvedValue({ weather: "sunny", temp: 72 });
+
+       const weatherTool = defineTool({
+         name: "getWeather",
+         description: "Get weather",
+         parameters: z.object({
+           city: z.string(),
+           units: z.enum(["celsius", "fahrenheit"]),
+         }),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [weatherTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       // Get the execute function that was passed to streamText
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+       const passedExecute = callArgs.tools.getWeather.execute;
+
+       // Manually invoke it to verify it works correctly
+       const result = await passedExecute({ city: "New York", units: "fahrenheit" });
+
+       expect(executeFn).toHaveBeenCalledWith({ city: "New York", units: "fahrenheit" });
+       expect(result).toEqual({ weather: "sunny", temp: 72 });
+     });
+
+     it("execute function errors are propagated", async () => {
+       const executeFn = vi.fn().mockRejectedValue(new Error("API unavailable"));
+
+       const failingTool = defineTool({
+         name: "failingTool",
+         description: "A tool that fails",
+         parameters: z.object({}),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [failingTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+       const passedExecute = callArgs.tools.failingTool.execute;
+
+       await expect(passedExecute({})).rejects.toThrow("API unavailable");
+     });
+   });
+
+   describe("Built-in State Tools Still Work", () => {
+     it("AGUISendStateSnapshot should have execute alongside config tools", async () => {
+       const configExecute = vi.fn().mockResolvedValue({});
+
+       const configTool = defineTool({
+         name: "myTool",
+         description: "My tool",
+         parameters: z.object({}),
+         execute: configExecute,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [configTool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: { value: 1 },
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+
+       // Both config tool and state tools should have execute
+       expect(callArgs.tools.myTool.execute).toBe(configExecute);
+       expect(callArgs.tools.AGUISendStateSnapshot.execute).toBeDefined();
+       expect(callArgs.tools.AGUISendStateDelta.execute).toBeDefined();
+     });
+   });
+
+   describe("Message ID Generation", () => {
+     it("should use messageId from text-start event", async () => {
+       const executeFn = vi.fn().mockResolvedValue({ result: "ok" });
+
+       const tool = defineTool({
+         name: "myTool",
+         description: "My tool",
+         parameters: z.object({}),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         tools: [tool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([
+           { type: "text-start", id: "msg-1" },
+           { type: "text-delta", text: "Before " },
+           { type: "text-delta", text: "tool" },
+           toolCallStreamingStart("call1", "myTool"),
+           toolCall("call1", "myTool"),
+           toolResult("call1", "myTool", { result: "ok" }),
+           { type: "text-start", id: "msg-2" },
+           { type: "text-delta", text: "After " },
+           { type: "text-delta", text: "tool" },
+           finish(),
+         ]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       const events = await collectEvents(agent["run"](input));
+
+       const textEvents = events.filter((e: any) => e.type === EventType.TEXT_MESSAGE_CHUNK);
+
+       // First two text chunks should have messageId from first text-start
+       expect(textEvents[0].messageId).toBe("msg-1");
+       expect(textEvents[1].messageId).toBe("msg-1");
+
+       // After tool result, text chunks should have messageId from second text-start
+       expect(textEvents[2].messageId).toBe("msg-2");
+       expect(textEvents[3].messageId).toBe("msg-2");
+     });
+   });
+
+   describe("Multi-Step Execution (maxSteps)", () => {
+     it("should pass stopWhen with stepCountIs when maxSteps is configured", async () => {
+       const executeFn = vi.fn().mockResolvedValue({ result: "ok" });
+
+       const tool = defineTool({
+         name: "myTool",
+         description: "My tool",
+         parameters: z.object({}),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         maxSteps: 5,
+         tools: [tool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+
+       // stopWhen should be set with stepCountIs(5)
+       expect(callArgs.stopWhen).toEqual({ type: "stepCount", count: 5 });
+     });
+
+     it("should not set stopWhen when maxSteps is not configured", async () => {
+       const executeFn = vi.fn().mockResolvedValue({ result: "ok" });
+
+       const tool = defineTool({
+         name: "myTool",
+         description: "My tool",
+         parameters: z.object({}),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         // maxSteps not set
+         tools: [tool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+
+       // stopWhen should be undefined (defaults to stepCountIs(1) in SDK)
+       expect(callArgs.stopWhen).toBeUndefined();
+     });
+
+     it("should allow high maxSteps for complex tool chains", async () => {
+       const executeFn = vi.fn().mockResolvedValue({});
+
+       const tool = defineTool({
+         name: "chainTool",
+         description: "Tool for chaining",
+         parameters: z.object({}),
+         execute: executeFn,
+       });
+
+       const agent = new BasicAgent({
+         model: "openai/gpt-4o",
+         maxSteps: 10,
+         tools: [tool],
+       });
+
+       vi.mocked(streamText).mockReturnValue(
+         mockStreamTextResponse([finish()]) as any,
+       );
+
+       const input: RunAgentInput = {
+         threadId: "thread1",
+         runId: "run1",
+         messages: [],
+         tools: [],
+         context: [],
+         state: {},
+       };
+
+       await collectEvents(agent["run"](input));
+
+       const callArgs = vi.mocked(streamText).mock.calls[0][0];
+
+       expect(callArgs.stopWhen).toEqual({ type: "stepCount", count: 10 });
+     });
+   });
+ });
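
Taken together, these tests pin down the consumer-facing pattern sketched below: a config tool whose execute function runs server-side, and an agent whose run may span several model steps. The sketch is inferred from the tests above rather than copied from the package, and fetchWeather is a hypothetical helper.

import { z } from "zod";
import { BasicAgent, defineTool } from "@copilotkitnext/agent";

// Hypothetical weather lookup standing in for a real API call.
async function fetchWeather(city: string) {
  return { city, temperature: 72, conditions: "sunny" };
}

// Config tool: the execute function runs on the server when the model calls it.
const getWeather = defineTool({
  name: "getWeather",
  description: "Get weather for a city",
  parameters: z.object({ city: z.string().describe("The city name") }),
  // args is typed as { city: string } via z.infer of the parameters schema
  execute: async (args) => fetchWeather(args.city),
});

// maxSteps: 5 maps to stopWhen: stepCountIs(5), allowing up to five model
// steps interleaved with server-side tool calls in a single run.
const agent = new BasicAgent({
  model: "openai/gpt-4o",
  maxSteps: 5,
  tools: [getWeather],
});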
package/src/index.ts CHANGED
@@ -28,6 +28,7 @@ import {
  tool as createVercelAISDKTool,
  ToolChoice,
  ToolSet,
+ stepCountIs,
  } from "ai";
  import { experimental_createMCPClient as createMCPClient } from "@ai-sdk/mcp";
  import { Observable } from "rxjs";
@@ -41,7 +42,6 @@ import {
  StreamableHTTPClientTransportOptions,
  } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
  import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
- import { u } from "vitest/dist/chunks/reporters.d.BFLkQcL6.js";

  /**
  * Properties that can be overridden by forwardedProps
@@ -211,6 +211,7 @@ export interface ToolDefinition<TParameters extends z.ZodTypeAny = z.ZodTypeAny>
  name: string;
  description: string;
  parameters: TParameters;
+ execute: (args: z.infer<TParameters>) => Promise<unknown>;
  }

  /**
@@ -218,17 +219,20 @@ export interface ToolDefinition<TParameters extends z.ZodTypeAny = z.ZodTypeAny>
  * @param name - The name of the tool
  * @param description - Description of what the tool does
  * @param parameters - Zod schema for the tool's input parameters
+ * @param execute - Function to execute the tool server-side
  * @returns Tool definition
  */
  export function defineTool<TParameters extends z.ZodTypeAny>(config: {
  name: string;
  description: string;
  parameters: TParameters;
+ execute: (args: z.infer<TParameters>) => Promise<unknown>;
  }): ToolDefinition<TParameters> {
  return {
  name: config.name,
  description: config.description,
  parameters: config.parameters,
+ execute: config.execute,
  };
  }

@@ -411,6 +415,7 @@ export function convertToolDefinitionsToVercelAITools(tools: ToolDefinition[]):
  result[tool.name] = createVercelAISDKTool({
  description: tool.description,
  inputSchema: tool.parameters,
+ execute: tool.execute,
  });
  }

@@ -577,6 +582,7 @@ export class BasicAgent extends AbstractAgent {
  messages,
  tools: allTools,
  toolChoice: this.config.toolChoice,
+ stopWhen: this.config.maxSteps ? stepCountIs(this.config.maxSteps) : undefined,
  maxOutputTokens: this.config.maxOutputTokens,
  temperature: this.config.temperature,
  topP: this.config.topP,
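
This stopWhen wiring is what enables multi-step runs: when maxSteps is set, streamText stops after at most that many generation steps; when it is unset, stopWhen stays undefined and the AI SDK's single-step default applies. Roughly, the call reduces to the sketch below (illustrative, assuming AI SDK 5's stopWhen/stepCountIs semantics; vercelTools stands in for the ToolSet built from the config and client tools):

import { streamText, stepCountIs } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const result = streamText({
  model: openai("gpt-4o"),
  messages: [{ role: "user", content: "What is the weather in New York?" }],
  tools: vercelTools,       // config tools carry execute, client tools do not
  stopWhen: stepCountIs(5), // equivalent of maxSteps: 5 on the agent config
});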
@@ -776,6 +782,12 @@ export class BasicAgent extends AbstractAgent {
  break;
  }

+ case "text-start": {
+ // New text message starting - use the SDK-provided id
+ messageId = "id" in part ? (part.id as typeof messageId) : randomUUID();
+ break;
+ }
+
  case "text-delta": {
  // Accumulate text content - in AI SDK 5.0, the property is 'text'
  const textDelta = "text" in part ? part.text : "";