@avadisabelle/ava-pi-agent-core 0.61.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +446 -0
- package/dist/agent-loop.d.ts +24 -0
- package/dist/agent-loop.d.ts.map +1 -0
- package/dist/agent-loop.js +390 -0
- package/dist/agent-loop.js.map +1 -0
- package/dist/agent.d.ts +172 -0
- package/dist/agent.d.ts.map +1 -0
- package/dist/agent.js +418 -0
- package/dist/agent.js.map +1 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +9 -0
- package/dist/index.js.map +1 -0
- package/dist/proxy.d.ts +85 -0
- package/dist/proxy.d.ts.map +1 -0
- package/dist/proxy.js +268 -0
- package/dist/proxy.js.map +1 -0
- package/dist/types.d.ts +286 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/package.json +44 -0
package/README.md
ADDED
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
# @avadisabelle/ava-pi-agent-core
|
|
2
|
+
|
|
3
|
+
Stateful agent with tool execution and event streaming. Built on `@avadisabelle/ava-pi-ai`.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @avadisabelle/ava-pi-agent-core
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
import { Agent } from "@avadisabelle/ava-pi-agent-core";
|
|
15
|
+
import { getModel } from "@avadisabelle/ava-pi-ai";
|
|
16
|
+
|
|
17
|
+
const agent = new Agent({
|
|
18
|
+
initialState: {
|
|
19
|
+
systemPrompt: "You are a helpful assistant.",
|
|
20
|
+
model: getModel("anthropic", "claude-sonnet-4-20250514"),
|
|
21
|
+
},
|
|
22
|
+
});
|
|
23
|
+
|
|
24
|
+
agent.subscribe((event) => {
|
|
25
|
+
if (event.type === "message_update" && event.assistantMessageEvent.type === "text_delta") {
|
|
26
|
+
// Stream just the new text chunk
|
|
27
|
+
process.stdout.write(event.assistantMessageEvent.delta);
|
|
28
|
+
}
|
|
29
|
+
});
|
|
30
|
+
|
|
31
|
+
await agent.prompt("Hello!");
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Core Concepts
|
|
35
|
+
|
|
36
|
+
### AgentMessage vs LLM Message
|
|
37
|
+
|
|
38
|
+
The agent works with `AgentMessage`, a flexible type that can include:
|
|
39
|
+
- Standard LLM messages (`user`, `assistant`, `toolResult`)
|
|
40
|
+
- Custom app-specific message types via declaration merging
|
|
41
|
+
|
|
42
|
+
LLMs only understand `user`, `assistant`, and `toolResult`. The `convertToLlm` function bridges this gap by filtering and transforming messages before each LLM call.
|
|
43
|
+
|
|
44
|
+
### Message Flow
|
|
45
|
+
|
|
46
|
+
```
|
|
47
|
+
AgentMessage[] → transformContext() → AgentMessage[] → convertToLlm() → Message[] → LLM
|
|
48
|
+
(optional) (required)
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
1. **transformContext**: Prune old messages, inject external context
|
|
52
|
+
2. **convertToLlm**: Filter out UI-only messages, convert custom types to LLM format
|
|
53
|
+
|
|
54
|
+
## Event Flow
|
|
55
|
+
|
|
56
|
+
The agent emits events for UI updates. Understanding the event sequence helps build responsive interfaces.
|
|
57
|
+
|
|
58
|
+
### prompt() Event Sequence
|
|
59
|
+
|
|
60
|
+
When you call `prompt("Hello")`:
|
|
61
|
+
|
|
62
|
+
```
|
|
63
|
+
prompt("Hello")
|
|
64
|
+
├─ agent_start
|
|
65
|
+
├─ turn_start
|
|
66
|
+
├─ message_start { message: userMessage } // Your prompt
|
|
67
|
+
├─ message_end { message: userMessage }
|
|
68
|
+
├─ message_start { message: assistantMessage } // LLM starts responding
|
|
69
|
+
├─ message_update { message: partial... } // Streaming chunks
|
|
70
|
+
├─ message_update { message: partial... }
|
|
71
|
+
├─ message_end { message: assistantMessage } // Complete response
|
|
72
|
+
├─ turn_end { message, toolResults: [] }
|
|
73
|
+
└─ agent_end { messages: [...] }
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
### With Tool Calls
|
|
77
|
+
|
|
78
|
+
If the assistant calls tools, the loop continues:
|
|
79
|
+
|
|
80
|
+
```
|
|
81
|
+
prompt("Read config.json")
|
|
82
|
+
├─ agent_start
|
|
83
|
+
├─ turn_start
|
|
84
|
+
├─ message_start/end { userMessage }
|
|
85
|
+
├─ message_start { assistantMessage with toolCall }
|
|
86
|
+
├─ message_update...
|
|
87
|
+
├─ message_end { assistantMessage }
|
|
88
|
+
├─ tool_execution_start { toolCallId, toolName, args }
|
|
89
|
+
├─ tool_execution_update { partialResult } // If tool streams
|
|
90
|
+
├─ tool_execution_end { toolCallId, result }
|
|
91
|
+
├─ message_start/end { toolResultMessage }
|
|
92
|
+
├─ turn_end { message, toolResults: [toolResult] }
|
|
93
|
+
│
|
|
94
|
+
├─ turn_start // Next turn
|
|
95
|
+
├─ message_start { assistantMessage } // LLM responds to tool result
|
|
96
|
+
├─ message_update...
|
|
97
|
+
├─ message_end
|
|
98
|
+
├─ turn_end
|
|
99
|
+
└─ agent_end
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
Tool execution mode is configurable:
|
|
103
|
+
|
|
104
|
+
- `parallel` (default): preflight tool calls sequentially, execute allowed tools concurrently, emit final `tool_execution_end` and `toolResult` messages in assistant source order
|
|
105
|
+
- `sequential`: execute tool calls one by one, matching the historical behavior
|
|
106
|
+
|
|
107
|
+
The `beforeToolCall` hook runs after `tool_execution_start` and validated argument parsing. It can block execution. The `afterToolCall` hook runs after tool execution finishes and before `tool_execution_end` and final tool result message events are emitted.
|
|
108
|
+
|
|
109
|
+
When you use the `Agent` class, assistant `message_end` processing is treated as a barrier before tool preflight begins. That means `beforeToolCall` sees agent state that already includes the assistant message that requested the tool call.
|
|
110
|
+
|
|
111
|
+
### continue() Event Sequence
|
|
112
|
+
|
|
113
|
+
`continue()` resumes from existing context without adding a new message. Use it for retries after errors.
|
|
114
|
+
|
|
115
|
+
```typescript
|
|
116
|
+
// After an error, retry from current state
|
|
117
|
+
await agent.continue();
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
The last message in context must be `user` or `toolResult` (not `assistant`).
|
|
121
|
+
|
|
122
|
+
### Event Types
|
|
123
|
+
|
|
124
|
+
| Event | Description |
|
|
125
|
+
|-------|-------------|
|
|
126
|
+
| `agent_start` | Agent begins processing |
|
|
127
|
+
| `agent_end` | Agent completes with all new messages |
|
|
128
|
+
| `turn_start` | New turn begins (one LLM call + tool executions) |
|
|
129
|
+
| `turn_end` | Turn completes with assistant message and tool results |
|
|
130
|
+
| `message_start` | Any message begins (user, assistant, toolResult) |
|
|
131
|
+
| `message_update` | **Assistant only.** Includes `assistantMessageEvent` with delta |
|
|
132
|
+
| `message_end` | Message completes |
|
|
133
|
+
| `tool_execution_start` | Tool begins |
|
|
134
|
+
| `tool_execution_update` | Tool streams progress |
|
|
135
|
+
| `tool_execution_end` | Tool completes |
|
|
136
|
+
|
|
137
|
+
## Agent Options
|
|
138
|
+
|
|
139
|
+
```typescript
|
|
140
|
+
const agent = new Agent({
|
|
141
|
+
// Initial state
|
|
142
|
+
initialState: {
|
|
143
|
+
systemPrompt: string,
|
|
144
|
+
model: Model<any>,
|
|
145
|
+
thinkingLevel: "off" | "minimal" | "low" | "medium" | "high" | "xhigh",
|
|
146
|
+
tools: AgentTool<any>[],
|
|
147
|
+
messages: AgentMessage[],
|
|
148
|
+
},
|
|
149
|
+
|
|
150
|
+
// Convert AgentMessage[] to LLM Message[] (required for custom message types)
|
|
151
|
+
convertToLlm: (messages) => messages.filter(...),
|
|
152
|
+
|
|
153
|
+
// Transform context before convertToLlm (for pruning, compaction)
|
|
154
|
+
transformContext: async (messages, signal) => pruneOldMessages(messages),
|
|
155
|
+
|
|
156
|
+
// Steering mode: "one-at-a-time" (default) or "all"
|
|
157
|
+
steeringMode: "one-at-a-time",
|
|
158
|
+
|
|
159
|
+
// Follow-up mode: "one-at-a-time" (default) or "all"
|
|
160
|
+
followUpMode: "one-at-a-time",
|
|
161
|
+
|
|
162
|
+
// Custom stream function (for proxy backends)
|
|
163
|
+
streamFn: streamProxy,
|
|
164
|
+
|
|
165
|
+
// Session ID for provider caching
|
|
166
|
+
sessionId: "session-123",
|
|
167
|
+
|
|
168
|
+
// Dynamic API key resolution (for expiring OAuth tokens)
|
|
169
|
+
getApiKey: async (provider) => refreshToken(),
|
|
170
|
+
|
|
171
|
+
// Tool execution mode: "parallel" (default) or "sequential"
|
|
172
|
+
toolExecution: "parallel",
|
|
173
|
+
|
|
174
|
+
// Preflight each tool call after args are validated. Can block execution.
|
|
175
|
+
beforeToolCall: async ({ toolCall, args, context }) => {
|
|
176
|
+
if (toolCall.name === "bash") {
|
|
177
|
+
return { block: true, reason: "bash is disabled" };
|
|
178
|
+
}
|
|
179
|
+
},
|
|
180
|
+
|
|
181
|
+
// Postprocess each tool result before final tool events are emitted.
|
|
182
|
+
afterToolCall: async ({ toolCall, result, isError, context }) => {
|
|
183
|
+
if (!isError) {
|
|
184
|
+
return { details: { ...result.details, audited: true } };
|
|
185
|
+
}
|
|
186
|
+
},
|
|
187
|
+
|
|
188
|
+
// Custom thinking budgets for token-based providers
|
|
189
|
+
thinkingBudgets: {
|
|
190
|
+
minimal: 128,
|
|
191
|
+
low: 512,
|
|
192
|
+
medium: 1024,
|
|
193
|
+
high: 2048,
|
|
194
|
+
},
|
|
195
|
+
});
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
## Agent State
|
|
199
|
+
|
|
200
|
+
```typescript
|
|
201
|
+
interface AgentState {
|
|
202
|
+
systemPrompt: string;
|
|
203
|
+
model: Model<any>;
|
|
204
|
+
thinkingLevel: ThinkingLevel;
|
|
205
|
+
tools: AgentTool<any>[];
|
|
206
|
+
messages: AgentMessage[];
|
|
207
|
+
isStreaming: boolean;
|
|
208
|
+
streamMessage: AgentMessage | null; // Current partial during streaming
|
|
209
|
+
pendingToolCalls: Set<string>;
|
|
210
|
+
error?: string;
|
|
211
|
+
}
|
|
212
|
+
```
|
|
213
|
+
|
|
214
|
+
Access via `agent.state`. During streaming, `streamMessage` contains the partial assistant message.
|
|
215
|
+
|
|
216
|
+
## Methods
|
|
217
|
+
|
|
218
|
+
### Prompting
|
|
219
|
+
|
|
220
|
+
```typescript
|
|
221
|
+
// Text prompt
|
|
222
|
+
await agent.prompt("Hello");
|
|
223
|
+
|
|
224
|
+
// With images
|
|
225
|
+
await agent.prompt("What's in this image?", [
|
|
226
|
+
{ type: "image", data: base64Data, mimeType: "image/jpeg" }
|
|
227
|
+
]);
|
|
228
|
+
|
|
229
|
+
// AgentMessage directly
|
|
230
|
+
await agent.prompt({ role: "user", content: "Hello", timestamp: Date.now() });
|
|
231
|
+
|
|
232
|
+
// Continue from current context (last message must be user or toolResult)
|
|
233
|
+
await agent.continue();
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
### State Management
|
|
237
|
+
|
|
238
|
+
```typescript
|
|
239
|
+
agent.setSystemPrompt("New prompt");
|
|
240
|
+
agent.setModel(getModel("openai", "gpt-4o"));
|
|
241
|
+
agent.setThinkingLevel("medium");
|
|
242
|
+
agent.setTools([myTool]);
|
|
243
|
+
agent.setToolExecution("sequential");
|
|
244
|
+
agent.setBeforeToolCall(async ({ toolCall }) => undefined);
|
|
245
|
+
agent.setAfterToolCall(async ({ toolCall, result }) => undefined);
|
|
246
|
+
agent.replaceMessages(newMessages);
|
|
247
|
+
agent.appendMessage(message);
|
|
248
|
+
agent.clearMessages();
|
|
249
|
+
agent.reset(); // Clear everything
|
|
250
|
+
```
|
|
251
|
+
|
|
252
|
+
### Session and Thinking Budgets
|
|
253
|
+
|
|
254
|
+
```typescript
|
|
255
|
+
agent.sessionId = "session-123";
|
|
256
|
+
|
|
257
|
+
agent.thinkingBudgets = {
|
|
258
|
+
minimal: 128,
|
|
259
|
+
low: 512,
|
|
260
|
+
medium: 1024,
|
|
261
|
+
high: 2048,
|
|
262
|
+
};
|
|
263
|
+
```
|
|
264
|
+
|
|
265
|
+
### Control
|
|
266
|
+
|
|
267
|
+
```typescript
|
|
268
|
+
agent.abort(); // Cancel current operation
|
|
269
|
+
await agent.waitForIdle(); // Wait for completion
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
### Events
|
|
273
|
+
|
|
274
|
+
```typescript
|
|
275
|
+
const unsubscribe = agent.subscribe((event) => {
|
|
276
|
+
console.log(event.type);
|
|
277
|
+
});
|
|
278
|
+
unsubscribe();
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
## Steering and Follow-up
|
|
282
|
+
|
|
283
|
+
Steering messages let you interrupt the agent while tools are running. Follow-up messages let you queue work after the agent would otherwise stop.
|
|
284
|
+
|
|
285
|
+
```typescript
|
|
286
|
+
agent.setSteeringMode("one-at-a-time");
|
|
287
|
+
agent.setFollowUpMode("one-at-a-time");
|
|
288
|
+
|
|
289
|
+
// While agent is running tools
|
|
290
|
+
agent.steer({
|
|
291
|
+
role: "user",
|
|
292
|
+
content: "Stop! Do this instead.",
|
|
293
|
+
timestamp: Date.now(),
|
|
294
|
+
});
|
|
295
|
+
|
|
296
|
+
// After the agent finishes its current work
|
|
297
|
+
agent.followUp({
|
|
298
|
+
role: "user",
|
|
299
|
+
content: "Also summarize the result.",
|
|
300
|
+
timestamp: Date.now(),
|
|
301
|
+
});
|
|
302
|
+
|
|
303
|
+
const steeringMode = agent.getSteeringMode();
|
|
304
|
+
const followUpMode = agent.getFollowUpMode();
|
|
305
|
+
|
|
306
|
+
agent.clearSteeringQueue();
|
|
307
|
+
agent.clearFollowUpQueue();
|
|
308
|
+
agent.clearAllQueues();
|
|
309
|
+
```
|
|
310
|
+
|
|
311
|
+
Use `clearSteeringQueue()`, `clearFollowUpQueue()`, or `clearAllQueues()` to drop queued messages.
|
|
312
|
+
|
|
313
|
+
When steering messages are detected after a turn completes:
|
|
314
|
+
1. All tool calls from the current assistant message have already finished
|
|
315
|
+
2. Steering messages are injected
|
|
316
|
+
3. The LLM responds on the next turn
|
|
317
|
+
|
|
318
|
+
Follow-up messages are checked only when there are no more tool calls and no steering messages. If any are queued, they are injected and another turn runs.
|
|
319
|
+
|
|
320
|
+
## Custom Message Types
|
|
321
|
+
|
|
322
|
+
Extend `AgentMessage` via declaration merging:
|
|
323
|
+
|
|
324
|
+
```typescript
|
|
325
|
+
declare module "@avadisabelle/ava-pi-agent-core" {
|
|
326
|
+
interface CustomAgentMessages {
|
|
327
|
+
notification: { role: "notification"; text: string; timestamp: number };
|
|
328
|
+
}
|
|
329
|
+
}
|
|
330
|
+
|
|
331
|
+
// Now valid
|
|
332
|
+
const msg: AgentMessage = { role: "notification", text: "Info", timestamp: Date.now() };
|
|
333
|
+
```
|
|
334
|
+
|
|
335
|
+
Handle custom types in `convertToLlm`:
|
|
336
|
+
|
|
337
|
+
```typescript
|
|
338
|
+
const agent = new Agent({
|
|
339
|
+
convertToLlm: (messages) => messages.flatMap(m => {
|
|
340
|
+
if (m.role === "notification") return []; // Filter out
|
|
341
|
+
return [m];
|
|
342
|
+
}),
|
|
343
|
+
});
|
|
344
|
+
```
|
|
345
|
+
|
|
346
|
+
## Tools
|
|
347
|
+
|
|
348
|
+
Define tools using `AgentTool`:
|
|
349
|
+
|
|
350
|
+
```typescript
|
|
351
|
+
import { Type } from "@sinclair/typebox";
|
|
352
|
+
|
|
353
|
+
const readFileTool: AgentTool = {
|
|
354
|
+
name: "read_file",
|
|
355
|
+
label: "Read File", // For UI display
|
|
356
|
+
description: "Read a file's contents",
|
|
357
|
+
parameters: Type.Object({
|
|
358
|
+
path: Type.String({ description: "File path" }),
|
|
359
|
+
}),
|
|
360
|
+
execute: async (toolCallId, params, signal, onUpdate) => {
|
|
361
|
+
const content = await fs.readFile(params.path, "utf-8");
|
|
362
|
+
|
|
363
|
+
// Optional: stream progress
|
|
364
|
+
onUpdate?.({ content: [{ type: "text", text: "Reading..." }], details: {} });
|
|
365
|
+
|
|
366
|
+
return {
|
|
367
|
+
content: [{ type: "text", text: content }],
|
|
368
|
+
details: { path: params.path, size: content.length },
|
|
369
|
+
};
|
|
370
|
+
},
|
|
371
|
+
};
|
|
372
|
+
|
|
373
|
+
agent.setTools([readFileTool]);
|
|
374
|
+
```
|
|
375
|
+
|
|
376
|
+
### Error Handling
|
|
377
|
+
|
|
378
|
+
**Throw an error** when a tool fails. Do not return error messages as content.
|
|
379
|
+
|
|
380
|
+
```typescript
|
|
381
|
+
execute: async (toolCallId, params, signal, onUpdate) => {
|
|
382
|
+
if (!fs.existsSync(params.path)) {
|
|
383
|
+
throw new Error(`File not found: ${params.path}`);
|
|
384
|
+
}
|
|
385
|
+
// Return content only on success
|
|
386
|
+
return { content: [{ type: "text", text: "..." }] };
|
|
387
|
+
}
|
|
388
|
+
```
|
|
389
|
+
|
|
390
|
+
Thrown errors are caught by the agent and reported to the LLM as tool errors with `isError: true`.
|
|
391
|
+
|
|
392
|
+
## Proxy Usage
|
|
393
|
+
|
|
394
|
+
For browser apps that proxy through a backend:
|
|
395
|
+
|
|
396
|
+
```typescript
|
|
397
|
+
import { Agent, streamProxy } from "@avadisabelle/ava-pi-agent-core";
|
|
398
|
+
|
|
399
|
+
const agent = new Agent({
|
|
400
|
+
streamFn: (model, context, options) =>
|
|
401
|
+
streamProxy(model, context, {
|
|
402
|
+
...options,
|
|
403
|
+
authToken: "...",
|
|
404
|
+
proxyUrl: "https://your-server.com",
|
|
405
|
+
}),
|
|
406
|
+
});
|
|
407
|
+
```
|
|
408
|
+
|
|
409
|
+
## Low-Level API
|
|
410
|
+
|
|
411
|
+
For direct control without the `Agent` class:
|
|
412
|
+
|
|
413
|
+
```typescript
|
|
414
|
+
import { agentLoop, agentLoopContinue } from "@avadisabelle/ava-pi-agent-core";
|
|
415
|
+
|
|
416
|
+
const context: AgentContext = {
|
|
417
|
+
systemPrompt: "You are helpful.",
|
|
418
|
+
messages: [],
|
|
419
|
+
tools: [],
|
|
420
|
+
};
|
|
421
|
+
|
|
422
|
+
const config: AgentLoopConfig = {
|
|
423
|
+
model: getModel("openai", "gpt-4o"),
|
|
424
|
+
convertToLlm: (msgs) => msgs.filter(m => ["user", "assistant", "toolResult"].includes(m.role)),
|
|
425
|
+
toolExecution: "parallel",
|
|
426
|
+
beforeToolCall: async ({ toolCall, args, context }) => undefined,
|
|
427
|
+
afterToolCall: async ({ toolCall, result, isError, context }) => undefined,
|
|
428
|
+
};
|
|
429
|
+
|
|
430
|
+
const userMessage = { role: "user", content: "Hello", timestamp: Date.now() };
|
|
431
|
+
|
|
432
|
+
for await (const event of agentLoop([userMessage], context, config)) {
|
|
433
|
+
console.log(event.type);
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
// Continue from existing context
|
|
437
|
+
for await (const event of agentLoopContinue(context, config)) {
|
|
438
|
+
console.log(event.type);
|
|
439
|
+
}
|
|
440
|
+
```
|
|
441
|
+
|
|
442
|
+
These low-level streams are observational. They preserve event order, but they do not wait for your async event handling to settle before later producer phases continue. If you need message processing to act as a barrier before tool preflight, use the `Agent` class instead of raw `agentLoop()` or `agentLoopContinue()`.
|
|
443
|
+
|
|
444
|
+
## License
|
|
445
|
+
|
|
446
|
+
MIT
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Agent loop that works with AgentMessage throughout.
|
|
3
|
+
* Transforms to Message[] only at the LLM call boundary.
|
|
4
|
+
*/
|
|
5
|
+
import { EventStream } from "@avadisabelle/ava-pi-ai";
|
|
6
|
+
import type { AgentContext, AgentEvent, AgentLoopConfig, AgentMessage, StreamFn } from "./types.js";
|
|
7
|
+
export type AgentEventSink = (event: AgentEvent) => Promise<void> | void;
|
|
8
|
+
/**
|
|
9
|
+
* Start an agent loop with a new prompt message.
|
|
10
|
+
* The prompt is added to the context and events are emitted for it.
|
|
11
|
+
*/
|
|
12
|
+
export declare function agentLoop(prompts: AgentMessage[], context: AgentContext, config: AgentLoopConfig, signal?: AbortSignal, streamFn?: StreamFn): EventStream<AgentEvent, AgentMessage[]>;
|
|
13
|
+
/**
|
|
14
|
+
* Continue an agent loop from the current context without adding a new message.
|
|
15
|
+
* Used for retries - context already has user message or tool results.
|
|
16
|
+
*
|
|
17
|
+
* **Important:** The last message in context must convert to a `user` or `toolResult` message
|
|
18
|
+
* via `convertToLlm`. If it doesn't, the LLM provider will reject the request.
|
|
19
|
+
* This cannot be validated here since `convertToLlm` is only called once per turn.
|
|
20
|
+
*/
|
|
21
|
+
export declare function agentLoopContinue(context: AgentContext, config: AgentLoopConfig, signal?: AbortSignal, streamFn?: StreamFn): EventStream<AgentEvent, AgentMessage[]>;
|
|
22
|
+
export declare function runAgentLoop(prompts: AgentMessage[], context: AgentContext, config: AgentLoopConfig, emit: AgentEventSink, signal?: AbortSignal, streamFn?: StreamFn): Promise<AgentMessage[]>;
|
|
23
|
+
export declare function runAgentLoopContinue(context: AgentContext, config: AgentLoopConfig, emit: AgentEventSink, signal?: AbortSignal, streamFn?: StreamFn): Promise<AgentMessage[]>;
|
|
24
|
+
//# sourceMappingURL=agent-loop.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"agent-loop.d.ts","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAGN,WAAW,EAIX,MAAM,yBAAyB,CAAC;AACjC,OAAO,KAAK,EACX,YAAY,EACZ,UAAU,EACV,eAAe,EACf,YAAY,EAIZ,QAAQ,EACR,MAAM,YAAY,CAAC;AAEpB,MAAM,MAAM,cAAc,GAAG,CAAC,KAAK,EAAE,UAAU,KAAK,OAAO,CAAC,IAAI,CAAC,GAAG,IAAI,CAAC;AAEzE;;;GAGG;AACH,wBAAgB,SAAS,CACxB,OAAO,EAAE,YAAY,EAAE,EACvB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAiBzC;AAED;;;;;;;GAOG;AACH,wBAAgB,iBAAiB,CAChC,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAwBzC;AAED,wBAAsB,YAAY,CACjC,OAAO,EAAE,YAAY,EAAE,EACvB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,IAAI,EAAE,cAAc,EACpB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,OAAO,CAAC,YAAY,EAAE,CAAC,CAgBzB;AAED,wBAAsB,oBAAoB,CACzC,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,IAAI,EAAE,cAAc,EACpB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,OAAO,CAAC,YAAY,EAAE,CAAC,CAiBzB","sourcesContent":["/**\n * Agent loop that works with AgentMessage throughout.\n * Transforms to Message[] only at the LLM call boundary.\n */\n\nimport {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@avadisabelle/ava-pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolCall,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\nexport type AgentEventSink = (event: AgentEvent) => Promise<void> | void;\n\n/**\n * Start an agent loop with a new prompt message.\n * The prompt is added to the context and events are emitted for it.\n */\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst 
stream = createAgentStream();\n\n\tvoid runAgentLoop(\n\t\tprompts,\n\t\tcontext,\n\t\tconfig,\n\t\tasync (event) => {\n\t\t\tstream.push(event);\n\t\t},\n\t\tsignal,\n\t\tstreamFn,\n\t).then((messages) => {\n\t\tstream.end(messages);\n\t});\n\n\treturn stream;\n}\n\n/**\n * Continue an agent loop from the current context without adding a new message.\n * Used for retries - context already has user message or tool results.\n *\n * **Important:** The last message in context must convert to a `user` or `toolResult` message\n * via `convertToLlm`. If it doesn't, the LLM provider will reject the request.\n * This cannot be validated here since `convertToLlm` is only called once per turn.\n */\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst stream = createAgentStream();\n\n\tvoid runAgentLoopContinue(\n\t\tcontext,\n\t\tconfig,\n\t\tasync (event) => {\n\t\t\tstream.push(event);\n\t\t},\n\t\tsignal,\n\t\tstreamFn,\n\t).then((messages) => {\n\t\tstream.end(messages);\n\t});\n\n\treturn stream;\n}\n\nexport async function runAgentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\temit: AgentEventSink,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): Promise<AgentMessage[]> {\n\tconst newMessages: AgentMessage[] = [...prompts];\n\tconst currentContext: AgentContext = {\n\t\t...context,\n\t\tmessages: [...context.messages, ...prompts],\n\t};\n\n\tawait emit({ type: \"agent_start\" });\n\tawait emit({ type: \"turn_start\" });\n\tfor (const prompt of prompts) {\n\t\tawait emit({ type: \"message_start\", message: prompt 
});\n\t\tawait emit({ type: \"message_end\", message: prompt });\n\t}\n\n\tawait runLoop(currentContext, newMessages, config, signal, emit, streamFn);\n\treturn newMessages;\n}\n\nexport async function runAgentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\temit: AgentEventSink,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): Promise<AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst newMessages: AgentMessage[] = [];\n\tconst currentContext: AgentContext = { ...context };\n\n\tawait emit({ type: \"agent_start\" });\n\tawait emit({ type: \"turn_start\" });\n\n\tawait runLoop(currentContext, newMessages, config, signal, emit, streamFn);\n\treturn newMessages;\n}\n\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? 
event.messages : []),\n\t);\n}\n\n/**\n * Main loop logic shared by agentLoop and agentLoopContinue.\n */\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet firstTurn = true;\n\t// Check for steering messages at start (user may have typed while waiting)\n\tlet pendingMessages: AgentMessage[] = (await config.getSteeringMessages?.()) || [];\n\n\t// Outer loop: continues when queued follow-up messages arrive after agent would stop\n\twhile (true) {\n\t\tlet hasMoreToolCalls = true;\n\n\t\t// Inner loop: process tool calls and steering messages\n\t\twhile (hasMoreToolCalls || pendingMessages.length > 0) {\n\t\t\tif (!firstTurn) {\n\t\t\t\tawait emit({ type: \"turn_start\" });\n\t\t\t} else {\n\t\t\t\tfirstTurn = false;\n\t\t\t}\n\n\t\t\t// Process pending messages (inject before next assistant response)\n\t\t\tif (pendingMessages.length > 0) {\n\t\t\t\tfor (const message of pendingMessages) {\n\t\t\t\t\tawait emit({ type: \"message_start\", message });\n\t\t\t\t\tawait emit({ type: \"message_end\", message });\n\t\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\t\tnewMessages.push(message);\n\t\t\t\t}\n\t\t\t\tpendingMessages = [];\n\t\t\t}\n\n\t\t\t// Stream assistant response\n\t\t\tconst message = await streamAssistantResponse(currentContext, config, signal, emit, streamFn);\n\t\t\tnewMessages.push(message);\n\n\t\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\t\tawait emit({ type: \"turn_end\", message, toolResults: [] });\n\t\t\t\tawait emit({ type: \"agent_end\", messages: newMessages });\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Check for tool calls\n\t\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\t\thasMoreToolCalls = toolCalls.length > 0;\n\n\t\t\tconst toolResults: ToolResultMessage[] = [];\n\t\t\tif 
(hasMoreToolCalls) {\n\t\t\t\ttoolResults.push(...(await executeToolCalls(currentContext, message, config, signal, emit)));\n\n\t\t\t\tfor (const result of toolResults) {\n\t\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\t\tnewMessages.push(result);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tawait emit({ type: \"turn_end\", message, toolResults });\n\n\t\t\tpendingMessages = (await config.getSteeringMessages?.()) || [];\n\t\t}\n\n\t\t// Agent would stop here. Check for follow-up messages.\n\t\tconst followUpMessages = (await config.getFollowUpMessages?.()) || [];\n\t\tif (followUpMessages.length > 0) {\n\t\t\t// Set as pending so inner loop processes them\n\t\t\tpendingMessages = followUpMessages;\n\t\t\tcontinue;\n\t\t}\n\n\t\t// No more messages, exit\n\t\tbreak;\n\t}\n\n\tawait emit({ type: \"agent_end\", messages: newMessages });\n}\n\n/**\n * Stream an assistant response from the LLM.\n * This is where AgentMessage[] gets transformed to Message[] for the LLM.\n */\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\t// Apply context transform if configured (AgentMessage[] → AgentMessage[])\n\tlet messages = context.messages;\n\tif (config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\n\t// Convert to LLM-compatible messages (AgentMessage[] → Message[])\n\tconst llmMessages = await config.convertToLlm(messages);\n\n\t// Build LLM context\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\n\tconst streamFunction = streamFn || streamSimple;\n\n\t// Resolve API key (important for expiring tokens)\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? 
await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tawait emit({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tawait emit({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tawait emit({ type: \"message_start\", message: { ...finalMessage } });\n\t\t\t\t}\n\t\t\t\tawait emit({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\n\tconst finalMessage = await response.result();\n\tif (addedPartial) {\n\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t} else {\n\t\tcontext.messages.push(finalMessage);\n\t\tawait emit({ type: \"message_start\", message: { 
...finalMessage } });\n\t}\n\tawait emit({ type: \"message_end\", message: finalMessage });\n\treturn finalMessage;\n}\n\n/**\n * Execute tool calls from an assistant message.\n */\nasync function executeToolCalls(\n\tcurrentContext: AgentContext,\n\tassistantMessage: AssistantMessage,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n): Promise<ToolResultMessage[]> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tif (config.toolExecution === \"sequential\") {\n\t\treturn executeToolCallsSequential(currentContext, assistantMessage, toolCalls, config, signal, emit);\n\t}\n\treturn executeToolCallsParallel(currentContext, assistantMessage, toolCalls, config, signal, emit);\n}\n\nasync function executeToolCallsSequential(\n\tcurrentContext: AgentContext,\n\tassistantMessage: AssistantMessage,\n\ttoolCalls: AgentToolCall[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n): Promise<ToolResultMessage[]> {\n\tconst results: ToolResultMessage[] = [];\n\n\tfor (const toolCall of toolCalls) {\n\t\tawait emit({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tconst preparation = await prepareToolCall(currentContext, assistantMessage, toolCall, config, signal);\n\t\tif (preparation.kind === \"immediate\") {\n\t\t\tresults.push(await emitToolCallOutcome(toolCall, preparation.result, preparation.isError, emit));\n\t\t} else {\n\t\t\tconst executed = await executePreparedToolCall(preparation, signal, emit);\n\t\t\tresults.push(\n\t\t\t\tawait finalizeExecutedToolCall(\n\t\t\t\t\tcurrentContext,\n\t\t\t\t\tassistantMessage,\n\t\t\t\t\tpreparation,\n\t\t\t\t\texecuted,\n\t\t\t\t\tconfig,\n\t\t\t\t\tsignal,\n\t\t\t\t\temit,\n\t\t\t\t),\n\t\t\t);\n\t\t}\n\t}\n\n\treturn results;\n}\n\nasync function executeToolCallsParallel(\n\tcurrentContext: 
AgentContext,\n\tassistantMessage: AssistantMessage,\n\ttoolCalls: AgentToolCall[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n): Promise<ToolResultMessage[]> {\n\tconst results: ToolResultMessage[] = [];\n\tconst runnableCalls: PreparedToolCall[] = [];\n\n\tfor (const toolCall of toolCalls) {\n\t\tawait emit({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tconst preparation = await prepareToolCall(currentContext, assistantMessage, toolCall, config, signal);\n\t\tif (preparation.kind === \"immediate\") {\n\t\t\tresults.push(await emitToolCallOutcome(toolCall, preparation.result, preparation.isError, emit));\n\t\t} else {\n\t\t\trunnableCalls.push(preparation);\n\t\t}\n\t}\n\n\tconst runningCalls = runnableCalls.map((prepared) => ({\n\t\tprepared,\n\t\texecution: executePreparedToolCall(prepared, signal, emit),\n\t}));\n\n\tfor (const running of runningCalls) {\n\t\tconst executed = await running.execution;\n\t\tresults.push(\n\t\t\tawait finalizeExecutedToolCall(\n\t\t\t\tcurrentContext,\n\t\t\t\tassistantMessage,\n\t\t\t\trunning.prepared,\n\t\t\t\texecuted,\n\t\t\t\tconfig,\n\t\t\t\tsignal,\n\t\t\t\temit,\n\t\t\t),\n\t\t);\n\t}\n\n\treturn results;\n}\n\ntype PreparedToolCall = {\n\tkind: \"prepared\";\n\ttoolCall: AgentToolCall;\n\ttool: AgentTool<any>;\n\targs: unknown;\n};\n\ntype ImmediateToolCallOutcome = {\n\tkind: \"immediate\";\n\tresult: AgentToolResult<any>;\n\tisError: boolean;\n};\n\ntype ExecutedToolCallOutcome = {\n\tresult: AgentToolResult<any>;\n\tisError: boolean;\n};\n\nasync function prepareToolCall(\n\tcurrentContext: AgentContext,\n\tassistantMessage: AssistantMessage,\n\ttoolCall: AgentToolCall,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n): Promise<PreparedToolCall | ImmediateToolCallOutcome> {\n\tconst tool = currentContext.tools?.find((t) => t.name === 
toolCall.name);\n\tif (!tool) {\n\t\treturn {\n\t\t\tkind: \"immediate\",\n\t\t\tresult: createErrorToolResult(`Tool ${toolCall.name} not found`),\n\t\t\tisError: true,\n\t\t};\n\t}\n\n\ttry {\n\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\t\tif (config.beforeToolCall) {\n\t\t\tconst beforeResult = await config.beforeToolCall(\n\t\t\t\t{\n\t\t\t\t\tassistantMessage,\n\t\t\t\t\ttoolCall,\n\t\t\t\t\targs: validatedArgs,\n\t\t\t\t\tcontext: currentContext,\n\t\t\t\t},\n\t\t\t\tsignal,\n\t\t\t);\n\t\t\tif (beforeResult?.block) {\n\t\t\t\treturn {\n\t\t\t\t\tkind: \"immediate\",\n\t\t\t\t\tresult: createErrorToolResult(beforeResult.reason || \"Tool execution was blocked\"),\n\t\t\t\t\tisError: true,\n\t\t\t\t};\n\t\t\t}\n\t\t}\n\t\treturn {\n\t\t\tkind: \"prepared\",\n\t\t\ttoolCall,\n\t\t\ttool,\n\t\t\targs: validatedArgs,\n\t\t};\n\t} catch (error) {\n\t\treturn {\n\t\t\tkind: \"immediate\",\n\t\t\tresult: createErrorToolResult(error instanceof Error ? error.message : String(error)),\n\t\t\tisError: true,\n\t\t};\n\t}\n}\n\nasync function executePreparedToolCall(\n\tprepared: PreparedToolCall,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n): Promise<ExecutedToolCallOutcome> {\n\tconst updateEvents: Promise<void>[] = [];\n\n\ttry {\n\t\tconst result = await prepared.tool.execute(\n\t\t\tprepared.toolCall.id,\n\t\t\tprepared.args as never,\n\t\t\tsignal,\n\t\t\t(partialResult) => {\n\t\t\t\tupdateEvents.push(\n\t\t\t\t\tPromise.resolve(\n\t\t\t\t\t\temit({\n\t\t\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\t\t\ttoolCallId: prepared.toolCall.id,\n\t\t\t\t\t\t\ttoolName: prepared.toolCall.name,\n\t\t\t\t\t\t\targs: prepared.toolCall.arguments,\n\t\t\t\t\t\t\tpartialResult,\n\t\t\t\t\t\t}),\n\t\t\t\t\t),\n\t\t\t\t);\n\t\t\t},\n\t\t);\n\t\tawait Promise.all(updateEvents);\n\t\treturn { result, isError: false };\n\t} catch (error) {\n\t\tawait Promise.all(updateEvents);\n\t\treturn {\n\t\t\tresult: createErrorToolResult(error 
instanceof Error ? error.message : String(error)),\n\t\t\tisError: true,\n\t\t};\n\t}\n}\n\nasync function finalizeExecutedToolCall(\n\tcurrentContext: AgentContext,\n\tassistantMessage: AssistantMessage,\n\tprepared: PreparedToolCall,\n\texecuted: ExecutedToolCallOutcome,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\temit: AgentEventSink,\n): Promise<ToolResultMessage> {\n\tlet result = executed.result;\n\tlet isError = executed.isError;\n\n\tif (config.afterToolCall) {\n\t\tconst afterResult = await config.afterToolCall(\n\t\t\t{\n\t\t\t\tassistantMessage,\n\t\t\t\ttoolCall: prepared.toolCall,\n\t\t\t\targs: prepared.args,\n\t\t\t\tresult,\n\t\t\t\tisError,\n\t\t\t\tcontext: currentContext,\n\t\t\t},\n\t\t\tsignal,\n\t\t);\n\t\tif (afterResult) {\n\t\t\tresult = {\n\t\t\t\tcontent: afterResult.content ?? result.content,\n\t\t\t\tdetails: afterResult.details ?? result.details,\n\t\t\t};\n\t\t\tisError = afterResult.isError ?? isError;\n\t\t}\n\t}\n\n\treturn await emitToolCallOutcome(prepared.toolCall, result, isError, emit);\n}\n\nfunction createErrorToolResult(message: string): AgentToolResult<any> {\n\treturn {\n\t\tcontent: [{ type: \"text\", text: message }],\n\t\tdetails: {},\n\t};\n}\n\nasync function emitToolCallOutcome(\n\ttoolCall: AgentToolCall,\n\tresult: AgentToolResult<any>,\n\tisError: boolean,\n\temit: AgentEventSink,\n): Promise<ToolResultMessage> {\n\tawait emit({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError,\n\t});\n\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: result.details,\n\t\tisError,\n\t\ttimestamp: Date.now(),\n\t};\n\n\tawait emit({ type: \"message_start\", message: toolResultMessage });\n\tawait emit({ type: \"message_end\", message: toolResultMessage });\n\treturn toolResultMessage;\n}\n"]}
|