@copilotkit/runtime 1.55.1 → 1.55.2-next.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/agent/converters/aisdk.cjs +215 -0
- package/dist/agent/converters/aisdk.cjs.map +1 -0
- package/dist/agent/converters/aisdk.d.cts +18 -0
- package/dist/agent/converters/aisdk.d.cts.map +1 -0
- package/dist/agent/converters/aisdk.d.mts +18 -0
- package/dist/agent/converters/aisdk.d.mts.map +1 -0
- package/dist/agent/converters/aisdk.mjs +214 -0
- package/dist/agent/converters/aisdk.mjs.map +1 -0
- package/dist/agent/converters/index.d.mts +3 -0
- package/dist/agent/converters/tanstack.cjs +180 -0
- package/dist/agent/converters/tanstack.cjs.map +1 -0
- package/dist/agent/converters/tanstack.d.cts +68 -0
- package/dist/agent/converters/tanstack.d.cts.map +1 -0
- package/dist/agent/converters/tanstack.d.mts +68 -0
- package/dist/agent/converters/tanstack.d.mts.map +1 -0
- package/dist/agent/converters/tanstack.mjs +178 -0
- package/dist/agent/converters/tanstack.mjs.map +1 -0
- package/dist/agent/index.cjs +111 -17
- package/dist/agent/index.cjs.map +1 -1
- package/dist/agent/index.d.cts +61 -4
- package/dist/agent/index.d.cts.map +1 -1
- package/dist/agent/index.d.mts +62 -4
- package/dist/agent/index.d.mts.map +1 -1
- package/dist/agent/index.mjs +111 -17
- package/dist/agent/index.mjs.map +1 -1
- package/dist/lib/integrations/nextjs/pages-router.cjs.map +1 -1
- package/dist/lib/integrations/nextjs/pages-router.d.cts.map +1 -1
- package/dist/lib/integrations/nextjs/pages-router.d.mts.map +1 -1
- package/dist/lib/integrations/nextjs/pages-router.mjs.map +1 -1
- package/dist/lib/runtime/copilot-runtime.cjs +4 -2
- package/dist/lib/runtime/copilot-runtime.cjs.map +1 -1
- package/dist/lib/runtime/copilot-runtime.d.cts.map +1 -1
- package/dist/lib/runtime/copilot-runtime.d.mts.map +1 -1
- package/dist/lib/runtime/copilot-runtime.mjs +4 -2
- package/dist/lib/runtime/copilot-runtime.mjs.map +1 -1
- package/dist/lib/runtime/mcp-tools-utils.cjs +1 -1
- package/dist/lib/runtime/mcp-tools-utils.cjs.map +1 -1
- package/dist/lib/runtime/mcp-tools-utils.mjs +1 -1
- package/dist/lib/runtime/mcp-tools-utils.mjs.map +1 -1
- package/dist/package.cjs +3 -2
- package/dist/package.mjs +3 -2
- package/dist/service-adapters/anthropic/utils.cjs +1 -1
- package/dist/service-adapters/anthropic/utils.cjs.map +1 -1
- package/dist/service-adapters/anthropic/utils.mjs +1 -1
- package/dist/service-adapters/anthropic/utils.mjs.map +1 -1
- package/dist/service-adapters/openai/utils.cjs +1 -1
- package/dist/service-adapters/openai/utils.cjs.map +1 -1
- package/dist/service-adapters/openai/utils.mjs +1 -1
- package/dist/service-adapters/openai/utils.mjs.map +1 -1
- package/dist/v2/index.cjs +5 -0
- package/dist/v2/index.d.cts +4 -2
- package/dist/v2/index.d.mts +4 -2
- package/dist/v2/index.mjs +3 -1
- package/package.json +4 -3
- package/src/agent/__tests__/agent-test-helpers.ts +446 -0
- package/src/agent/__tests__/agent.test.ts +593 -0
- package/src/agent/__tests__/converter-aisdk.test.ts +692 -0
- package/src/agent/__tests__/converter-custom.test.ts +319 -0
- package/src/agent/__tests__/converter-tanstack-input.test.ts +211 -0
- package/src/agent/__tests__/converter-tanstack.test.ts +314 -0
- package/src/agent/__tests__/mcp-servers-integration.test.ts +373 -0
- package/src/agent/__tests__/multimodal-tanstack.test.ts +284 -0
- package/src/agent/__tests__/test-helpers.ts +12 -8
- package/src/agent/converters/aisdk.ts +326 -0
- package/src/agent/converters/index.ts +7 -0
- package/src/agent/converters/tanstack.ts +286 -0
- package/src/agent/index.ts +245 -26
- package/src/lib/integrations/nextjs/pages-router.ts +1 -0
- package/src/lib/runtime/copilot-runtime.ts +21 -12
- package/src/lib/runtime/mcp-tools-utils.ts +1 -1
- package/src/service-adapters/anthropic/utils.ts +1 -1
- package/src/service-adapters/openai/utils.ts +1 -1
- package/src/v2/runtime/__tests__/mcp-apps-middleware-integration.test.ts +275 -0
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
// Tests for the TanStack AI stream converter, exercised end-to-end through a
// test Agent: TanStack-style stream chunks go in, AG-UI protocol events come
// out. Chunk builders (tanstack*) and event assertions (expectLifecycleWrapped,
// expectEventSequence, eventField) live in ./agent-test-helpers.
import { describe, it, expect } from "vitest";
import { EventType } from "@ag-ui/client";
import {
  createAgent,
  createDefaultInput,
  collectEvents,
  expectLifecycleWrapped,
  expectEventSequence,
  eventField,
  tanstackTextChunk,
  tanstackToolCallStart,
  tanstackToolCallArgs,
  tanstackToolCallEnd,
} from "./agent-test-helpers";

describe("TanStack AI converter (via Agent)", () => {
  // -------------------------------------------------------------------------
  // Text Events
  // -------------------------------------------------------------------------
  describe("Text Events", () => {
    it("TEXT_MESSAGE_CONTENT chunk produces TEXT_MESSAGE_CHUNK with role assistant and correct delta", async () => {
      const agent = createAgent("tanstack", [tanstackTextChunk("Hello world")]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);

      const textEvents = events.filter(
        (e) => e.type === EventType.TEXT_MESSAGE_CHUNK,
      );
      expect(textEvents).toHaveLength(1);

      expect(eventField<string>(textEvents[0], "role")).toBe("assistant");
      expect(eventField<string>(textEvents[0], "delta")).toBe("Hello world");
      // The converter must synthesize a non-empty string messageId.
      expect(eventField<string>(textEvents[0], "messageId")).toBeDefined();
      expect(typeof eventField<string>(textEvents[0], "messageId")).toBe(
        "string",
      );
      expect(
        eventField<string>(textEvents[0], "messageId").length,
      ).toBeGreaterThan(0);
    });

    it("multiple text chunks share the same messageId", async () => {
      const agent = createAgent("tanstack", [
        tanstackTextChunk("Hello "),
        tanstackTextChunk("world"),
        tanstackTextChunk("!"),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);

      const textEvents = events.filter(
        (e) => e.type === EventType.TEXT_MESSAGE_CHUNK,
      );
      expect(textEvents).toHaveLength(3);

      // Consecutive text chunks belong to one logical message, so every
      // emitted chunk must carry the same generated messageId.
      const messageIds = new Set(
        textEvents.map((e) => eventField<string>(e, "messageId")),
      );
      expect(messageIds.size).toBe(1);
    });

    it("empty stream produces only RUN_STARTED + RUN_FINISHED", async () => {
      const agent = createAgent("tanstack", []);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectEventSequence(events, [
        EventType.RUN_STARTED,
        EventType.RUN_FINISHED,
      ]);
    });
  });

  // -------------------------------------------------------------------------
  // Tool Call Events
  // -------------------------------------------------------------------------
  describe("Tool Call Events", () => {
    it("full tool call lifecycle produces START, ARGS, END events in order", async () => {
      const agent = createAgent("tanstack", [
        tanstackToolCallStart("tc-1", "myTool"),
        tanstackToolCallArgs("tc-1", '{"key":'),
        tanstackToolCallArgs("tc-1", '"value"}'),
        tanstackToolCallEnd("tc-1"),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);
      expectEventSequence(events, [
        EventType.RUN_STARTED,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.RUN_FINISHED,
      ]);

      // Indices below match the sequence asserted above
      // (events[0] is RUN_STARTED).
      expect(eventField<string>(events[1], "toolCallId")).toBe("tc-1");
      expect(eventField<string>(events[1], "toolCallName")).toBe("myTool");

      expect(eventField<string>(events[2], "toolCallId")).toBe("tc-1");
      expect(eventField<string>(events[2], "delta")).toBe('{"key":');

      expect(eventField<string>(events[3], "toolCallId")).toBe("tc-1");
      expect(eventField<string>(events[3], "delta")).toBe('"value"}');

      expect(eventField<string>(events[4], "toolCallId")).toBe("tc-1");
    });

    it("TOOL_CALL_START sets parentMessageId", async () => {
      const agent = createAgent("tanstack", [
        tanstackTextChunk("before"),
        tanstackToolCallStart("tc-1", "myTool"),
        tanstackToolCallEnd("tc-1"),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      const textEvent = events.find(
        (e) => e.type === EventType.TEXT_MESSAGE_CHUNK,
      )!;
      const toolStartEvent = events.find(
        (e) => e.type === EventType.TOOL_CALL_START,
      )!;

      // A tool call started mid-message must be attributed to the preceding
      // text message via parentMessageId.
      expect(
        eventField<string>(toolStartEvent, "parentMessageId"),
      ).toBeDefined();
      expect(eventField<string>(toolStartEvent, "parentMessageId")).toBe(
        eventField<string>(textEvent, "messageId"),
      );
    });

    it("multiple tool calls in sequence each get correct events", async () => {
      const agent = createAgent("tanstack", [
        tanstackToolCallStart("tc-1", "toolA"),
        tanstackToolCallArgs("tc-1", '{"a":1}'),
        tanstackToolCallEnd("tc-1"),
        tanstackToolCallStart("tc-2", "toolB"),
        tanstackToolCallArgs("tc-2", '{"b":2}'),
        tanstackToolCallEnd("tc-2"),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);
      expectEventSequence(events, [
        EventType.RUN_STARTED,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.RUN_FINISHED,
      ]);

      // Verify first tool call
      expect(eventField<string>(events[1], "toolCallId")).toBe("tc-1");
      expect(eventField<string>(events[1], "toolCallName")).toBe("toolA");

      expect(eventField<string>(events[2], "toolCallId")).toBe("tc-1");

      expect(eventField<string>(events[3], "toolCallId")).toBe("tc-1");

      // Verify second tool call
      expect(eventField<string>(events[4], "toolCallId")).toBe("tc-2");
      expect(eventField<string>(events[4], "toolCallName")).toBe("toolB");

      expect(eventField<string>(events[5], "toolCallId")).toBe("tc-2");

      expect(eventField<string>(events[6], "toolCallId")).toBe("tc-2");
    });

    it("tool call with no ARGS chunks produces only START + END", async () => {
      const agent = createAgent("tanstack", [
        tanstackToolCallStart("tc-1", "noArgsTool"),
        tanstackToolCallEnd("tc-1"),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);
      expectEventSequence(events, [
        EventType.RUN_STARTED,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_END,
        EventType.RUN_FINISHED,
      ]);
    });
  });

  // -------------------------------------------------------------------------
  // Tool Call Result Events
  // -------------------------------------------------------------------------
  describe("Tool Call Result Events", () => {
    it("TOOL_CALL_RESULT chunk produces TOOL_CALL_RESULT event with correct content", async () => {
      const agent = createAgent("tanstack", [
        tanstackToolCallStart("tc-1", "myTool"),
        tanstackToolCallArgs("tc-1", '{"key":"value"}'),
        tanstackToolCallEnd("tc-1"),
        // Raw TanStack result chunk with pre-serialized string content.
        {
          type: "TOOL_CALL_RESULT",
          toolCallId: "tc-1",
          content: JSON.stringify({ result: "ok" }),
        },
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);

      const resultEvents = events.filter(
        (e) => e.type === EventType.TOOL_CALL_RESULT,
      );
      expect(resultEvents).toHaveLength(1);
      expect(eventField<string>(resultEvents[0], "toolCallId")).toBe("tc-1");
      expect(eventField<string>(resultEvents[0], "role")).toBe("tool");
      expect(
        JSON.parse(eventField<string>(resultEvents[0], "content")),
      ).toEqual({ result: "ok" });
    });

    it("TOOL_CALL_RESULT with object content serializes to JSON", async () => {
      const agent = createAgent("tanstack", [
        tanstackToolCallStart("tc-2", "myTool"),
        tanstackToolCallEnd("tc-2"),
        // Result given as a plain object (`result`), not a string —
        // the converter is expected to JSON-serialize it.
        {
          type: "TOOL_CALL_RESULT",
          toolCallId: "tc-2",
          result: { data: 42 },
        },
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      const resultEvents = events.filter(
        (e) => e.type === EventType.TOOL_CALL_RESULT,
      );
      expect(resultEvents).toHaveLength(1);
      expect(
        JSON.parse(eventField<string>(resultEvents[0], "content")),
      ).toEqual({ data: 42 });
    });
  });

  // -------------------------------------------------------------------------
  // Mixed Content
  // -------------------------------------------------------------------------
  describe("Mixed Content", () => {
    it("text interleaved with tool calls produces correct event types and order", async () => {
      const agent = createAgent("tanstack", [
        tanstackTextChunk("Let me help. "),
        tanstackToolCallStart("tc-1", "search"),
        tanstackToolCallArgs("tc-1", '{"q":"test"}'),
        tanstackToolCallEnd("tc-1"),
        tanstackTextChunk("Here are the results."),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);
      expectEventSequence(events, [
        EventType.RUN_STARTED,
        EventType.TEXT_MESSAGE_CHUNK,
        EventType.TOOL_CALL_START,
        EventType.TOOL_CALL_ARGS,
        EventType.TOOL_CALL_END,
        EventType.TEXT_MESSAGE_CHUNK,
        EventType.RUN_FINISHED,
      ]);

      // Verify content of text events
      const textEvents = events.filter(
        (e) => e.type === EventType.TEXT_MESSAGE_CHUNK,
      );
      expect(eventField<string>(textEvents[0], "delta")).toBe("Let me help. ");
      expect(eventField<string>(textEvents[1], "delta")).toBe(
        "Here are the results.",
      );
    });
  });

  // -------------------------------------------------------------------------
  // Edge Cases
  // -------------------------------------------------------------------------
  describe("Edge Cases", () => {
    it("unknown chunk types are silently ignored", async () => {
      const agent = createAgent("tanstack", [
        tanstackTextChunk("hello"),
        { type: "SOME_UNKNOWN_TYPE", data: "foo" },
        { type: "ANOTHER_MYSTERY", value: 42 },
        tanstackTextChunk(" world"),
      ]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);
      // Only the two known text chunks survive; unknown types are dropped
      // without producing events or errors.
      expectEventSequence(events, [
        EventType.RUN_STARTED,
        EventType.TEXT_MESSAGE_CHUNK,
        EventType.TEXT_MESSAGE_CHUNK,
        EventType.RUN_FINISHED,
      ]);
    });

    it("large deltas (100k chars) are passed through", async () => {
      const largeDelta = "x".repeat(100_000);
      const agent = createAgent("tanstack", [tanstackTextChunk(largeDelta)]);
      const events = await collectEvents(agent.run(createDefaultInput()));

      expectLifecycleWrapped(events);

      const textEvent = events.find(
        (e) => e.type === EventType.TEXT_MESSAGE_CHUNK,
      )!;
      expect(eventField<string>(textEvent, "delta")).toBe(largeDelta);
      expect(eventField<string>(textEvent, "delta").length).toBe(100_000);
    });
  });
});
|
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
// Integration tests for the agent's `mcpServers` option against a real (mock)
// MCP server: an MCPMock is mounted on a local LLMock HTTP server and the
// agent connects to it over actual HTTP. Only the LLM layer (`ai`'s
// streamText) and the OpenAI provider factory are mocked; MCP transports
// perform real network round-trips.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { BasicAgent } from "../index";
import { EventType } from "@ag-ui/client";
import { streamText } from "ai";
import { LLMock, MCPMock } from "@copilotkit/aimock";
import {
  mockStreamTextResponse,
  textDelta,
  finish,
  collectEvents,
  toolCall,
  toolResult,
} from "./test-helpers";

// Mock the ai module — we don't want real LLM calls
vi.mock("ai", () => ({
  streamText: vi.fn(),
  tool: vi.fn((config) => config),
  stepCountIs: vi.fn((count: number) => ({ type: "stepCount", count })),
}));

vi.mock("@ai-sdk/openai", () => ({
  createOpenAI: vi.fn(() => (modelId: string) => ({
    modelId,
    provider: "openai",
  })),
}));

// Do NOT mock @ai-sdk/mcp or @modelcontextprotocol/sdk transports —
// we want real HTTP connections to the MCPMock server.

describe("mcpServers — real MCP server integration", () => {
  const originalEnv = process.env;
  let llm: LLMock;
  // NOTE(review): mcpMock is assigned in each test for symmetry with `llm`
  // but is never read back — confirm whether it is still needed.
  let mcpMock: MCPMock;

  beforeEach(() => {
    vi.clearAllMocks();
    // Copy env so per-test mutations don't leak; restored in afterEach.
    process.env = { ...originalEnv };
    process.env.OPENAI_API_KEY = "test-key";
  });

  afterEach(async () => {
    process.env = originalEnv;
    if (llm) {
      // Best-effort shutdown: ignore errors from an already-stopped server.
      await llm.stop().catch(() => {});
    }
  });

  const baseInput = {
    threadId: "thread1",
    runId: "run1",
    messages: [],
    tools: [],
    context: [],
    state: {},
  };

  /**
   * Start an LLMock with an MCPMock mounted at /mcp.
   * Returns the full MCP endpoint URL.
   */
  async function startMcpServer(
    tools: Array<{ name: string; description?: string }>,
  ): Promise<{ mcpUrl: string; llm: LLMock; mcpMock: MCPMock }> {
    const mock = new MCPMock();
    for (const t of tools) {
      mock.addTool({
        name: t.name,
        description: t.description ?? `${t.name} tool`,
        inputSchema: {
          type: "object",
          properties: { query: { type: "string" } },
        },
      });
      mock.onToolCall(t.name, () => `result from ${t.name}`);
    }

    // port: 0 lets the OS pick a free port, avoiding collisions between tests.
    const server = new LLMock({ port: 0 });
    server.mount("/mcp", mock);
    await server.start();

    return {
      mcpUrl: `${server.url}/mcp`,
      llm: server,
      mcpMock: mock,
    };
  }

  it("HTTP transport fetches tools from MCPMock", async () => {
    const result = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result.llm;
    mcpMock = result.mcpMock;

    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "http", url: result.mcpUrl }],
    });

    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([textDelta("Hello"), finish()]) as any,
    );

    await collectEvents(agent["run"](baseInput));

    // The MCP tool fetched over HTTP must appear in the tools passed to
    // streamText.
    const callArgs = vi.mocked(streamText).mock.calls[0][0];
    expect(callArgs.tools).toHaveProperty("get_weather");
  });

  it("SSE transport against MCPMock emits RUN_ERROR or completes without crash", async () => {
    // MCPMock only supports Streamable HTTP, not SSE.
    // The agent should emit RUN_ERROR when SSE connection fails.
    const result = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result.llm;
    mcpMock = result.mcpMock;

    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "sse", url: result.mcpUrl }],
    });

    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([finish()]) as any,
    );

    // Collect events manually — the Observable may error after emitting RUN_ERROR
    const events: any[] = [];
    try {
      await new Promise((resolve, reject) => {
        agent["run"](baseInput).subscribe({
          next: (event: any) => events.push(event),
          error: (err: any) => reject(err),
          complete: () => resolve(events),
        });
      });
      // If it completes without error, that's also acceptable (graceful fallthrough)
    } catch {
      // Expected — SSE transport failure should emit RUN_ERROR then error
    }

    const hasRunError = events.some((e) => e.type === EventType.RUN_ERROR);
    // Either we got a RUN_ERROR or streamText was never called (connection failed before tools fetch)
    expect(hasRunError || !vi.mocked(streamText).mock.calls.length).toBe(true);
  });

  it("tool call round-trip emits TOOL_CALL_START, TOOL_CALL_RESULT, and TEXT_MESSAGE_CHUNK", async () => {
    const result = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result.llm;
    mcpMock = result.mcpMock;

    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "http", url: result.mcpUrl }],
    });

    // Scripted LLM stream: one tool call, its result, then a text reply.
    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([
        toolCall("tc1", "get_weather", { query: "NYC" }),
        toolResult("tc1", "get_weather", "Sunny 72F"),
        textDelta("The weather is sunny."),
        finish(),
      ]) as any,
    );

    const events = await collectEvents(agent["run"](baseInput));

    const types = events.map((e: any) => e.type);
    expect(types).toContain(EventType.TOOL_CALL_START);
    expect(types).toContain(EventType.TOOL_CALL_RESULT);
    expect(types).toContain(EventType.TEXT_MESSAGE_CHUNK);

    // Verify the tool call result content
    const resultEvent = events.find(
      (e: any) => e.type === EventType.TOOL_CALL_RESULT,
    ) as any;
    expect(resultEvent.toolCallId).toBe("tc1");
    expect(resultEvent.content).toContain("Sunny 72F");
  });

  it("MCP clients are cleaned up after completion — second run creates fresh connections", async () => {
    const result = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result.llm;
    mcpMock = result.mcpMock;

    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "http", url: result.mcpUrl }],
    });

    // First run
    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([textDelta("Run 1"), finish()]) as any,
    );
    const events1 = await collectEvents(agent["run"](baseInput));
    expect(events1.some((e: any) => e.type === EventType.RUN_FINISHED)).toBe(
      true,
    );

    // Second run — should succeed with fresh MCP client connections
    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([textDelta("Run 2"), finish()]) as any,
    );
    const events2 = await collectEvents(agent["run"](baseInput));
    expect(events2.some((e: any) => e.type === EventType.RUN_FINISHED)).toBe(
      true,
    );

    // streamText was called twice (once per run), each time with MCP tools
    expect(vi.mocked(streamText).mock.calls).toHaveLength(2);
    expect(vi.mocked(streamText).mock.calls[0][0].tools).toHaveProperty(
      "get_weather",
    );
    expect(vi.mocked(streamText).mock.calls[1][0].tools).toHaveProperty(
      "get_weather",
    );
  });

  it("unreachable MCP server emits RUN_ERROR", async () => {
    // No server is started on this port — connection should be refused.
    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "http", url: "http://localhost:59999" }],
    });

    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([finish()]) as any,
    );

    const events: any[] = [];
    try {
      await new Promise((resolve, reject) => {
        agent["run"](baseInput).subscribe({
          next: (event: any) => events.push(event),
          error: (err: any) => reject(err),
          complete: () => resolve(events),
        });
      });
    } catch {
      // Expected — connection refused should cause an error
    }

    expect(events.some((e) => e.type === EventType.RUN_ERROR)).toBe(true);
    // streamText should not have been called since MCP init failed
    expect(streamText).not.toHaveBeenCalled();
  });

  it("MCP clients are cleaned up after streamText error — subsequent run still works", async () => {
    const result = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result.llm;
    mcpMock = result.mcpMock;

    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "http", url: result.mcpUrl }],
    });

    // First run — streamText throws an error
    vi.mocked(streamText).mockImplementation(() => {
      throw new Error("LLM connection failed");
    });

    const events1: any[] = [];
    try {
      await new Promise((resolve, reject) => {
        agent["run"](baseInput).subscribe({
          next: (event: any) => events1.push(event),
          error: (err: any) => reject(err),
          complete: () => resolve(events1),
        });
      });
    } catch {
      // Expected — streamText threw
    }

    // Should have emitted RUN_ERROR
    expect(events1.some((e) => e.type === EventType.RUN_ERROR)).toBe(true);

    // Second run — streamText works normally, proving MCP cleanup happened
    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([textDelta("Recovery"), finish()]) as any,
    );
    const events2 = await collectEvents(agent["run"](baseInput));
    expect(events2.some((e: any) => e.type === EventType.RUN_FINISHED)).toBe(
      true,
    );

    // The second run should have MCP tools available
    const secondCallArgs = vi.mocked(streamText).mock.calls[1][0];
    expect(secondCallArgs.tools).toHaveProperty("get_weather");
  });

  it("MCP tool descriptions are passed to streamText tools config", async () => {
    const result = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result.llm;
    mcpMock = result.mcpMock;

    const agent = new BasicAgent({
      model: "openai/gpt-4o",
      mcpServers: [{ type: "http", url: result.mcpUrl }],
    });

    vi.mocked(streamText).mockReturnValue(
      mockStreamTextResponse([textDelta("Hello"), finish()]) as any,
    );

    await collectEvents(agent["run"](baseInput));

    const callArgs = vi.mocked(streamText).mock.calls[0][0];
    expect(callArgs.tools).toHaveProperty("get_weather");
    // The MCP tool should include the description from the MCPMock server
    expect(callArgs.tools.get_weather.description).toBe("Get the weather");
  });

  it("multiple MCP servers merge tools from both", async () => {
    // First server with get_weather
    const result1 = await startMcpServer([
      { name: "get_weather", description: "Get the weather" },
    ]);
    llm = result1.llm;

    // Second server with search_docs
    const mock2 = new MCPMock();
    mock2.addTool({
      name: "search_docs",
      description: "Search documentation",
      inputSchema: {
        type: "object",
        properties: { query: { type: "string" } },
      },
    });
    mock2.onToolCall("search_docs", () => "doc results");

    const llm2 = new LLMock({ port: 0 });
    llm2.mount("/mcp", mock2);
    await llm2.start();

    // try/finally so the second server is stopped even if assertions fail;
    // the first server is handled by afterEach via `llm`.
    try {
      const agent = new BasicAgent({
        model: "openai/gpt-4o",
        mcpServers: [
          { type: "http", url: result1.mcpUrl },
          { type: "http", url: `${llm2.url}/mcp` },
        ],
      });

      vi.mocked(streamText).mockReturnValue(
        mockStreamTextResponse([
          textDelta("Both tools available"),
          finish(),
        ]) as any,
      );

      await collectEvents(agent["run"](baseInput));

      const callArgs = vi.mocked(streamText).mock.calls[0][0];
      expect(callArgs.tools).toHaveProperty("get_weather");
      expect(callArgs.tools).toHaveProperty("search_docs");
    } finally {
      await llm2.stop().catch(() => {});
    }
  });
});
|