kernl 0.1.3 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +5 -4
- package/CHANGELOG.md +18 -0
- package/dist/agent.d.ts +20 -3
- package/dist/agent.d.ts.map +1 -1
- package/dist/agent.js +60 -41
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -1
- package/dist/kernl.d.ts +27 -1
- package/dist/kernl.d.ts.map +1 -1
- package/dist/kernl.js +36 -2
- package/dist/mcp/__tests__/integration.test.js +16 -0
- package/dist/thread/__tests__/fixtures/mock-model.d.ts +7 -0
- package/dist/thread/__tests__/fixtures/mock-model.d.ts.map +1 -0
- package/dist/thread/__tests__/fixtures/mock-model.js +59 -0
- package/dist/thread/__tests__/integration.test.d.ts +2 -0
- package/dist/thread/__tests__/integration.test.d.ts.map +1 -0
- package/dist/thread/__tests__/integration.test.js +247 -0
- package/dist/thread/__tests__/stream.test.d.ts +2 -0
- package/dist/thread/__tests__/stream.test.d.ts.map +1 -0
- package/dist/thread/__tests__/stream.test.js +244 -0
- package/dist/thread/__tests__/thread.test.js +612 -763
- package/dist/thread/thread.d.ts +30 -25
- package/dist/thread/thread.d.ts.map +1 -1
- package/dist/thread/thread.js +114 -314
- package/dist/thread/utils.d.ts +16 -1
- package/dist/thread/utils.d.ts.map +1 -1
- package/dist/thread/utils.js +30 -0
- package/dist/tool/index.d.ts +1 -1
- package/dist/tool/index.d.ts.map +1 -1
- package/dist/tool/index.js +1 -1
- package/dist/tool/tool.d.ts.map +1 -1
- package/dist/tool/tool.js +6 -2
- package/dist/tool/toolkit.d.ts +13 -3
- package/dist/tool/toolkit.d.ts.map +1 -1
- package/dist/tool/toolkit.js +11 -3
- package/dist/tool/types.d.ts +8 -0
- package/dist/tool/types.d.ts.map +1 -1
- package/dist/types/agent.d.ts +5 -5
- package/dist/types/agent.d.ts.map +1 -1
- package/dist/types/thread.d.ts +10 -16
- package/dist/types/thread.d.ts.map +1 -1
- package/package.json +6 -4
- package/src/agent.ts +97 -86
- package/src/index.ts +1 -1
- package/src/kernl.ts +51 -2
- package/src/mcp/__tests__/integration.test.ts +17 -0
- package/src/thread/__tests__/fixtures/mock-model.ts +71 -0
- package/src/thread/__tests__/integration.test.ts +349 -0
- package/src/thread/__tests__/thread.test.ts +625 -775
- package/src/thread/thread.ts +134 -381
- package/src/thread/utils.ts +36 -1
- package/src/tool/index.ts +1 -1
- package/src/tool/tool.ts +6 -2
- package/src/tool/toolkit.ts +19 -3
- package/src/tool/types.ts +10 -0
- package/src/types/agent.ts +9 -6
- package/src/types/thread.ts +25 -17
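The largest change in this release is the rewrite of the thread test suite shown in the diff below: the inline, hand-rolled model stubs are replaced by a shared createMockModel fixture (new files package/src/thread/__tests__/fixtures/mock-model.ts and its dist build), string thread inputs are replaced by an explicit userMessage(...) helper, and assertions move from result.state.modelResponses to the thread's _tick counter. The fixture itself is not shown in this diff; based only on how it is called below and on the inline mocks it replaces, a minimal sketch might look like the following (everything beyond the generate() method and modelId field is an assumption, not the package's actual API):

// Hypothetical sketch of the mock-model fixture; the real implementation
// lives in package/src/thread/__tests__/fixtures/mock-model.ts.
type MockHandler = (req: unknown) => Promise<unknown>;

export function createMockModel(handler: MockHandler) {
  return {
    // the removed inline mocks carried a modelId, so the fixture likely does too
    modelId: "test-model",
    // Thread calls generate() once per tick; delegate to the per-test handler
    async generate(req: unknown) {
      return handler(req);
    },
  };
}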
@@ -6,36 +6,40 @@ import { Agent } from "../../agent";
 import { Kernl } from "../../kernl";
 import { tool, FunctionToolkit } from "../../tool";
 import { ModelBehaviorError } from "../../lib/error";
+import { createMockModel } from "./fixtures/mock-model";
+// Helper to create user message input
+function userMessage(text) {
+  return [
+    {
+      kind: "message",
+      id: "msg-test",
+      role: "user",
+      content: [{ kind: "text", text }],
+    },
+  ];
+}
 describe("Thread", () => {
   describe("Basic Execution", () => {
     it("should execute single turn and terminate with exact history", async () => {
-      const model = {
-
-
-
-
-
-
-        {
-          kind: "message",
-          id: "msg_1",
-          role: "assistant",
-          content: [{ kind: "text", text: "Hello world" }],
-        },
-      ],
-      finishReason: "stop",
-      usage: {
-        inputTokens: 2,
-        outputTokens: 2,
-        totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [{ kind: "text", text: "Hello world" }],
             },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -43,7 +47,7 @@ describe("Thread", () => {
         model,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "Hello world");
+      const thread = new Thread(kernl, agent, userMessage("Hello world"));
       const result = await thread.execute();
       // Access private history via type assertion for testing
       const history = thread.history;
@@ -61,37 +65,28 @@ describe("Thread", () => {
           content: [{ kind: "text", text: "Hello world" }],
         },
       ]);
-      expect(
-      expect(result.state.modelResponses).toHaveLength(1);
+      expect(thread._tick).toBe(1);
     });
     it("should convert string input to UserMessage", async () => {
-      const model = {
-
-
-
-
-
-
-        {
-          kind: "message",
-          id: "msg_1",
-          role: "assistant",
-          content: [{ kind: "text", text: "Response" }],
-        },
-      ],
-      finishReason: "stop",
-      usage: {
-        inputTokens: 2,
-        outputTokens: 2,
-        totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [{ kind: "text", text: "Response" }],
             },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -99,7 +94,7 @@ describe("Thread", () => {
         model,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "Test input");
+      const thread = new Thread(kernl, agent, userMessage("Test input"));
       await thread.execute();
       const history = thread.history;
       const firstMessage = history[0];
@@ -111,33 +106,25 @@ describe("Thread", () => {
       });
     });
     it("should use array input as-is", async () => {
-      const model = {
-
-
-
-
-
-
-        {
-          kind: "message",
-          id: "msg_1",
-          role: "assistant",
-          content: [{ kind: "text", text: "Response" }],
-        },
-      ],
-      finishReason: "stop",
-      usage: {
-        inputTokens: 2,
-        outputTokens: 2,
-        totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [{ kind: "text", text: "Response" }],
             },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -163,62 +150,54 @@ describe("Thread", () => {
   describe("Multi-Turn Execution", () => {
     it("should execute multi-turn with tool call and exact history", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-        async generate(req) {
-          callCount++;
-          // First call: return tool call
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "echo",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: JSON.stringify({ text: "test" }),
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
-          // Second call: return final message
+      const model = createMockModel(async (req) => {
+        callCount++;
+        // First call: return tool call
+        if (callCount === 1) {
           return {
             content: [
               {
                 kind: "message",
-                id: "
+                id: "msg_1",
                 role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "echo",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: JSON.stringify({ text: "test" }),
               },
             ],
             finishReason: "stop",
             usage: {
-              inputTokens:
+              inputTokens: 2,
               outputTokens: 2,
-              totalTokens:
+              totalTokens: 4,
             },
             warnings: [],
           };
-        }
-
-
-
-
+        }
+        // Second call: return final message
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done!" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 4,
+            outputTokens: 2,
+            totalTokens: 6,
+          },
+          warnings: [],
+        };
+      });
       const echoTool = tool({
         id: "echo",
         description: "Echoes input",
@@ -235,7 +214,7 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "Use the echo tool");
+      const thread = new Thread(kernl, agent, userMessage("Use the echo tool"));
       const result = await thread.execute();
       const history = thread.history;
       expect(history).toEqual([
@@ -256,16 +235,16 @@ describe("Thread", () => {
         // Tool call (tick 1)
         {
           kind: "tool-call",
-
+          toolId: "echo",
           callId: "call_1",
-
+          state: IN_PROGRESS,
           arguments: JSON.stringify({ text: "test" }),
         },
         // Tool result (executed after tick 1)
         {
           kind: "tool-result",
           callId: "call_1",
-
+          toolId: "echo",
           state: COMPLETED,
           result: "Echo: test",
           error: null,
@@ -278,91 +257,82 @@ describe("Thread", () => {
           content: [{ kind: "text", text: "Done!" }],
         },
       ]);
-      expect(
-      expect(result.state.modelResponses).toHaveLength(2);
+      expect(thread._tick).toBe(2);
     });
     it("should accumulate history across multiple turns", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-
-
-
-
-
-
-                kind: "message",
-                id: "msg_1",
-                role: "assistant",
-                content: [],
-              },
-              {
-                kind: "tool-call",
-                toolId: "simple",
-                state: IN_PROGRESS,
-                callId: "call_1",
-                arguments: "first",
-              },
-            ],
-            finishReason: "stop",
-            usage: {
-              inputTokens: 2,
-              outputTokens: 2,
-              totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        callCount++;
+        if (callCount === 1) {
+          return {
+            content: [
+              {
+                kind: "message",
+                id: "msg_1",
+                role: "assistant",
+                content: [],
              },
-
-
-
-
-
-
-              {
-                kind: "message",
-                id: "msg_2",
-                role: "assistant",
-                content: [],
-              },
-              {
-                kind: "tool-call",
-                toolId: "simple",
-                state: IN_PROGRESS,
-                callId: "call_2",
-                arguments: "second",
-              },
-            ],
-            finishReason: "stop",
-            usage: {
-              inputTokens: 3,
-              outputTokens: 2,
-              totalTokens: 5,
+              {
+                kind: "tool-call",
+                toolId: "simple",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: "first",
              },
-
-
-
+            ],
+            finishReason: "stop",
+            usage: {
+              inputTokens: 2,
+              outputTokens: 2,
+              totalTokens: 4,
+            },
+            warnings: [],
+          };
+        }
+        if (callCount === 2) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_2",
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "simple",
+                state: IN_PROGRESS,
+                callId: "call_2",
+                arguments: "second",
              },
            ],
            finishReason: "stop",
            usage: {
-              inputTokens:
+              inputTokens: 3,
              outputTokens: 2,
-              totalTokens:
+              totalTokens: 5,
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_3",
+              role: "assistant",
+              content: [{ kind: "text", text: "All done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 4,
+            outputTokens: 2,
+            totalTokens: 6,
+          },
+          warnings: [],
+        };
+      });
       const simpleTool = tool({
         id: "simple",
         description: "Simple tool",
@@ -379,58 +349,35 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "Start");
+      const thread = new Thread(kernl, agent, userMessage("Start"));
       const result = await thread.execute();
       const history = thread.history;
       // Should have: 1 user msg + 3 assistant msgs + 2 tool calls + 2 tool results = 8 events
       expect(history).toHaveLength(8);
-      expect(
+      expect(thread._tick).toBe(3);
     });
   });
   describe("Tool Execution", () => {
     it("should handle tool not found with exact error in history", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-        async generate(req) {
-          callCount++;
-          // First call: return tool call
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "nonexistent",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: "{}",
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
-          // Second call: return terminal message
+      const model = createMockModel(async (req) => {
+        callCount++;
+        // First call: return tool call
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "nonexistent",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: "{}",
              },
            ],
            finishReason: "stop",
@@ -441,11 +388,26 @@ describe("Thread", () => {
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        // Second call: return terminal message
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -454,7 +416,7 @@ describe("Thread", () => {
         toolkits: [], // No tools available
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       await thread.execute();
       const history = thread.history;
       // Check that the tool result is an error
@@ -470,47 +432,24 @@ describe("Thread", () => {
     });
     it("should handle tool execution error", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-        async generate(req) {
-          callCount++;
-          // First call: return tool call
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "failing",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: "{}",
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
-          // Second call: return terminal message
+      const model = createMockModel(async (req) => {
+        callCount++;
+        // First call: return tool call
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "failing",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: "{}",
              },
            ],
            finishReason: "stop",
@@ -521,11 +460,26 @@ describe("Thread", () => {
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        // Second call: return terminal message
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const failingTool = tool({
         id: "failing",
         description: "Tool that throws",
@@ -544,7 +498,7 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       await thread.execute();
       const history = thread.history;
       const toolResult = history.find((e) => e.kind === "tool-result");
@@ -559,47 +513,24 @@ describe("Thread", () => {
     });
     it("should execute tool successfully with result in history", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-        async generate(req) {
-          callCount++;
-          // First call: return tool call
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "add",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: JSON.stringify({ a: 5, b: 3 }),
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
-          // Second call: return terminal message
+      const model = createMockModel(async (req) => {
+        callCount++;
+        // First call: return tool call
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "add",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: JSON.stringify({ a: 5, b: 3 }),
              },
            ],
            finishReason: "stop",
@@ -610,11 +541,26 @@ describe("Thread", () => {
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        // Second call: return terminal message
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const addTool = tool({
         id: "add",
         description: "Adds two numbers",
@@ -629,8 +575,9 @@ describe("Thread", () => {
         toolkits: [new FunctionToolkit({ id: "test-tools", tools: [addTool] })],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "Add 5 and 3");
+      const thread = new Thread(kernl, agent, userMessage("Add 5 and 3"));
       await thread.execute();
+      // @ts-expect-error
       const history = thread.history;
       const toolResult = history.find((e) => e.kind === "tool-result");
       expect(toolResult).toEqual({
@@ -646,54 +593,31 @@ describe("Thread", () => {
   describe("Parallel Tool Execution", () => {
     it("should execute multiple tools in parallel with exact history", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-        async generate(req) {
-          callCount++;
-          // First call: return multiple tool calls
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "tool1",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: JSON.stringify({ value: "a" }),
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "tool2",
-                  state: IN_PROGRESS,
-                  callId: "call_2",
-                  arguments: JSON.stringify({ value: "b" }),
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
-          // Second call: return terminal message
+      const model = createMockModel(async (req) => {
+        callCount++;
+        // First call: return multiple tool calls
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "tool1",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: JSON.stringify({ value: "a" }),
+              },
+              {
+                kind: "tool-call",
+                toolId: "tool2",
+                state: IN_PROGRESS,
+                callId: "call_2",
+                arguments: JSON.stringify({ value: "b" }),
              },
            ],
            finishReason: "stop",
@@ -704,11 +628,26 @@ describe("Thread", () => {
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        // Second call: return terminal message
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const tool1 = tool({
         id: "tool1",
         description: "Tool 1",
@@ -731,7 +670,7 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       await thread.execute();
       const history = thread.history;
       // Should have both tool results in history
@@ -760,45 +699,23 @@ describe("Thread", () => {
   describe("State Management", () => {
     it("should track tick counter correctly", async () => {
       let callCount = 0;
-      const model = {
-
-
-        modelId: "test-model",
-        async generate(req) {
-          callCount++;
-          if (callCount < 3) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: `msg_${callCount}`,
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "simple",
-                  state: IN_PROGRESS,
-                  callId: `call_${callCount}`,
-                  arguments: "{}",
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
+      const model = createMockModel(async (req) => {
+        callCount++;
+        if (callCount < 3) {
          return {
            content: [
              {
                kind: "message",
-                id:
+                id: `msg_${callCount}`,
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "simple",
+                state: IN_PROGRESS,
+                callId: `call_${callCount}`,
+                arguments: "{}",
              },
            ],
            finishReason: "stop",
@@ -809,11 +726,25 @@ describe("Thread", () => {
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_final",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const simpleTool = tool({
         id: "simple",
         description: "Simple tool",
@@ -830,66 +761,58 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
-      expect(
+      expect(thread._tick).toBe(3);
     });
     it("should accumulate model responses", async () => {
       let callCount = 0;
-      const model = {
-
-
-        modelId: "test-model",
-        async generate(req) {
-          callCount++;
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "simple",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: "{}",
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 10,
-                outputTokens: 5,
-                totalTokens: 15,
-              },
-              warnings: [],
-            };
-          }
+      const model = createMockModel(async (req) => {
+        callCount++;
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [
+                content: [],
+              },
+              {
+                kind: "tool-call",
+                toolId: "simple",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: "{}",
              },
            ],
            finishReason: "stop",
            usage: {
-              inputTokens:
-              outputTokens:
-              totalTokens:
+              inputTokens: 10,
+              outputTokens: 5,
+              totalTokens: 15,
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 20,
+            outputTokens: 10,
+            totalTokens: 30,
+          },
+          warnings: [],
+        };
+      });
       const simpleTool = tool({
         id: "simple",
         description: "Simple tool",
@@ -906,42 +829,34 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
-
-      expect(
-      expect(result.
+      // Verify the thread executed both turns
+      expect(thread._tick).toBe(2);
+      expect(result.response).toBe("Done");
     });
   });
   describe("Terminal State Detection", () => {
     it("should terminate when assistant message has no tool calls", async () => {
-      const model = {
-
-
-
-
-
-
-        {
-          kind: "message",
-          id: "msg_1",
-          role: "assistant",
-          content: [{ kind: "text", text: "Final response" }],
-        },
-      ],
-      finishReason: "stop",
-      usage: {
-        inputTokens: 2,
-        outputTokens: 2,
-        totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [{ kind: "text", text: "Final response" }],
             },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -949,68 +864,58 @@ describe("Thread", () => {
         model,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
-      expect(
+      expect(thread._tick).toBe(1);
     });
     it("should continue when assistant message has tool calls", async () => {
       let callCount = 0;
-      const model = {
-
-
-        modelId: "test-model",
-        async generate(req) {
-          callCount++;
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [
-                    { kind: "text", text: "Let me use a tool" },
-                  ],
-                },
-                {
-                  kind: "tool-call",
-                  toolId: "simple",
-                  state: IN_PROGRESS,
-                  callId: "call_1",
-                  arguments: "{}",
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
+      const model = createMockModel(async (req) => {
+        callCount++;
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [{ kind: "text", text: "
+                content: [{ kind: "text", text: "Let me use a tool" }],
+              },
+              {
+                kind: "tool-call",
+                toolId: "simple",
+                state: IN_PROGRESS,
+                callId: "call_1",
+                arguments: "{}",
              },
            ],
            finishReason: "stop",
            usage: {
-              inputTokens:
+              inputTokens: 2,
              outputTokens: 2,
-              totalTokens:
+              totalTokens: 4,
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Done now" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 3,
+            outputTokens: 2,
+            totalTokens: 5,
+          },
+          warnings: [],
+        };
+      });
       const simpleTool = tool({
         id: "simple",
         description: "Simple tool",
@@ -1027,41 +932,33 @@ describe("Thread", () => {
         ],
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
       // Should have made 2 calls - first with tool, second without
-      expect(
+      expect(thread._tick).toBe(2);
     });
   });
   describe("Final Output Parsing", () => {
     it("should return text output when responseType is 'text'", async () => {
-      const model = {
-
-
-
-
-
-
-        {
-          kind: "message",
-          id: "msg_1",
-          role: "assistant",
-          content: [{ kind: "text", text: "Hello, world!" }],
-        },
-      ],
-      finishReason: "stop",
-      usage: {
-        inputTokens: 2,
-        outputTokens: 2,
-        totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [{ kind: "text", text: "Hello, world!" }],
            },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1070,10 +967,10 @@ describe("Thread", () => {
         responseType: "text",
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
       expect(result.response).toBe("Hello, world!");
-      expect(
+      expect(thread._tick).toBe(1);
     });
     it("should parse and validate structured output with valid JSON", async () => {
       const responseSchema = z.object({
@@ -1081,38 +978,30 @@ describe("Thread", () => {
         age: z.number(),
         email: z.string().email(),
       });
-      const model = {
-
-
-
-
-
-
-
-
-
-
-
-                kind: "text",
-                text: '{"name": "Alice", "age": 30, "email": "alice@example.com"}',
-              },
-            ],
-          },
-        ],
-        finishReason: "stop",
-        usage: {
-          inputTokens: 2,
-          outputTokens: 2,
-          totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [
+                {
+                  kind: "text",
+                  text: '{"name": "Alice", "age": 30, "email": "alice@example.com"}',
+                },
+              ],
            },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1121,7 +1010,7 @@ describe("Thread", () => {
         responseType: responseSchema,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
       expect(result.response).toEqual({
         name: "Alice",
@@ -1133,38 +1022,30 @@ describe("Thread", () => {
       const responseSchema = z.object({
         name: z.string(),
       });
-      const model = {
-
-
-
-
-
-
-
-
-
-
-
-                kind: "text",
-                text: '{"name": "Alice"', // Invalid JSON - missing closing brace
-              },
-            ],
-          },
-        ],
-        finishReason: "stop",
-        usage: {
-          inputTokens: 2,
-          outputTokens: 2,
-          totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [
+                {
+                  kind: "text",
+                  text: '{"name": "Alice"', // Invalid JSON - missing closing brace
+                },
+              ],
            },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1173,7 +1054,7 @@ describe("Thread", () => {
         responseType: responseSchema,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       await expect(thread.execute()).rejects.toThrow(ModelBehaviorError);
     });
     it("should throw ModelBehaviorError when JSON doesn't match schema", async () => {
@@ -1181,38 +1062,30 @@ describe("Thread", () => {
         name: z.string(),
         age: z.number(),
       });
-      const model = {
-
-
-
-
-
-
-
-
-
-
-
-                kind: "text",
-                text: '{"name": "Alice", "age": "thirty"}', // age is string instead of number
-              },
-            ],
-          },
-        ],
-        finishReason: "stop",
-        usage: {
-          inputTokens: 2,
-          outputTokens: 2,
-          totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [
+                {
+                  kind: "text",
+                  text: '{"name": "Alice", "age": "thirty"}', // age is string instead of number
+                },
+              ],
            },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1221,7 +1094,7 @@ describe("Thread", () => {
         responseType: responseSchema,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       await expect(thread.execute()).rejects.toThrow(ModelBehaviorError);
     });
     it("should throw ModelBehaviorError when required fields are missing", async () => {
@@ -1230,38 +1103,30 @@ describe("Thread", () => {
         age: z.number(),
         email: z.string(),
       });
-      const model = {
-
-
-
-
-
-
-
-
-
-
-
-                kind: "text",
-                text: '{"name": "Alice", "age": 30}', // missing email
-              },
-            ],
-          },
-        ],
-        finishReason: "stop",
-        usage: {
-          inputTokens: 2,
-          outputTokens: 2,
-          totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [
+                {
+                  kind: "text",
+                  text: '{"name": "Alice", "age": 30}', // missing email
+                },
+              ],
            },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1270,7 +1135,7 @@ describe("Thread", () => {
         responseType: responseSchema,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       await expect(thread.execute()).rejects.toThrow(ModelBehaviorError);
     });
     it("should handle nested structured output", async () => {
@@ -1286,44 +1151,36 @@ describe("Thread", () => {
           timestamp: z.string(),
         }),
       });
-      const model = {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                  metadata: { timestamp: "2024-01-01" },
-                }),
-              },
-            ],
-          },
-        ],
-        finishReason: "stop",
-        usage: {
-          inputTokens: 2,
-          outputTokens: 2,
-          totalTokens: 4,
+      const model = createMockModel(async (req) => {
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_1",
+              role: "assistant",
+              content: [
+                {
+                  kind: "text",
+                  text: JSON.stringify({
+                    user: {
+                      name: "Bob",
+                      profile: { bio: "Engineer", age: 25 },
+                    },
+                    metadata: { timestamp: "2024-01-01" },
+                  }),
+                },
+              ],
            },
-
-
-
-
-
-
-
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1332,7 +1189,7 @@ describe("Thread", () => {
         responseType: responseSchema,
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
       expect(result.response).toEqual({
         user: {
@@ -1344,40 +1201,17 @@ describe("Thread", () => {
     });
     it("should continue loop when no text in assistant message", async () => {
       let callCount = 0;
-      const model = {
-
-
-
-        async generate(req) {
-          callCount++;
-          // First call: return empty message (no text)
-          if (callCount === 1) {
-            return {
-              content: [
-                {
-                  kind: "message",
-                  id: "msg_1",
-                  role: "assistant",
-                  content: [], // No content
-                },
-              ],
-              finishReason: "stop",
-              usage: {
-                inputTokens: 2,
-                outputTokens: 2,
-                totalTokens: 4,
-              },
-              warnings: [],
-            };
-          }
-          // Second call: return message with text
+      const model = createMockModel(async (req) => {
+        callCount++;
+        // First call: return empty message (no text)
+        if (callCount === 1) {
          return {
            content: [
              {
                kind: "message",
-                id: "
+                id: "msg_1",
                role: "assistant",
-                content: [
+                content: [], // No content
              },
            ],
            finishReason: "stop",
@@ -1388,11 +1222,26 @@ describe("Thread", () => {
            },
            warnings: [],
          };
-        }
-
-
-
-
+        }
+        // Second call: return message with text
+        return {
+          content: [
+            {
+              kind: "message",
+              id: "msg_2",
+              role: "assistant",
+              content: [{ kind: "text", text: "Now I have text" }],
+            },
+          ],
+          finishReason: "stop",
+          usage: {
+            inputTokens: 2,
+            outputTokens: 2,
+            totalTokens: 4,
+          },
+          warnings: [],
+        };
+      });
       const agent = new Agent({
         id: "test",
         name: "Test",
@@ -1401,12 +1250,12 @@ describe("Thread", () => {
         responseType: "text",
       });
       const kernl = new Kernl();
-      const thread = new Thread(kernl, agent, "test");
+      const thread = new Thread(kernl, agent, userMessage("test"));
       const result = await thread.execute();
       // Should have made 2 calls
       expect(callCount).toBe(2);
       expect(result.response).toBe("Now I have text");
-      expect(
+      expect(thread._tick).toBe(2);
     });
   });
 });