@boxiaolanya2008/pi-agent-core 0.60.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,412 @@
1
+ # @mariozechner/pi-agent-core
2
+
3
+ Stateful agent with tool execution and event streaming. Built on `@mariozechner/pi-ai`.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @mariozechner/pi-agent-core
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ```typescript
14
+ import { Agent } from "@mariozechner/pi-agent-core";
15
+ import { getModel } from "@mariozechner/pi-ai";
16
+
17
+ const agent = new Agent({
18
+ initialState: {
19
+ systemPrompt: "You are a helpful assistant.",
20
+ model: getModel("anthropic", "claude-sonnet-4-20250514"),
21
+ },
22
+ });
23
+
24
+ agent.subscribe((event) => {
25
+ if (event.type === "message_update" && event.assistantMessageEvent.type === "text_delta") {
26
+ // Stream just the new text chunk
27
+ process.stdout.write(event.assistantMessageEvent.delta);
28
+ }
29
+ });
30
+
31
+ await agent.prompt("Hello!");
32
+ ```
33
+
34
+ ## Core Concepts
35
+
36
+ ### AgentMessage vs LLM Message
37
+
38
+ The agent works with `AgentMessage`, a flexible type that can include:
39
+ - Standard LLM messages (`user`, `assistant`, `toolResult`)
40
+ - Custom app-specific message types via declaration merging
41
+
42
+ LLMs only understand `user`, `assistant`, and `toolResult`. The `convertToLlm` function bridges this gap by filtering and transforming messages before each LLM call.
43
+
44
+ ### Message Flow
45
+
46
+ ```
47
+ AgentMessage[] → transformContext() → AgentMessage[] → convertToLlm() → Message[] → LLM
48
+ (optional) (required)
49
+ ```
50
+
51
+ 1. **transformContext**: Prune old messages, inject external context
52
+ 2. **convertToLlm**: Filter out UI-only messages, convert custom types to LLM format
53
+
54
+ ## Event Flow
55
+
56
+ The agent emits events for UI updates. Understanding the event sequence helps build responsive interfaces.
57
+
58
+ ### prompt() Event Sequence
59
+
60
+ When you call `prompt("Hello")`:
61
+
62
+ ```
63
+ prompt("Hello")
64
+ ├─ agent_start
65
+ ├─ turn_start
66
+ ├─ message_start { message: userMessage } // Your prompt
67
+ ├─ message_end { message: userMessage }
68
+ ├─ message_start { message: assistantMessage } // LLM starts responding
69
+ ├─ message_update { message: partial... } // Streaming chunks
70
+ ├─ message_update { message: partial... }
71
+ ├─ message_end { message: assistantMessage } // Complete response
72
+ ├─ turn_end { message, toolResults: [] }
73
+ └─ agent_end { messages: [...] }
74
+ ```
75
+
76
+ ### With Tool Calls
77
+
78
+ If the assistant calls tools, the loop continues:
79
+
80
+ ```
81
+ prompt("Read config.json")
82
+ ├─ agent_start
83
+ ├─ turn_start
84
+ ├─ message_start/end { userMessage }
85
+ ├─ message_start { assistantMessage with toolCall }
86
+ ├─ message_update...
87
+ ├─ message_end { assistantMessage }
88
+ ├─ tool_execution_start { toolCallId, toolName, args }
89
+ ├─ tool_execution_update { partialResult } // If tool streams
90
+ ├─ tool_execution_end { toolCallId, result }
91
+ ├─ message_start/end { toolResultMessage }
92
+ ├─ turn_end { message, toolResults: [toolResult] }
93
+
94
+ ├─ turn_start // Next turn
95
+ ├─ message_start { assistantMessage } // LLM responds to tool result
96
+ ├─ message_update...
97
+ ├─ message_end
98
+ ├─ turn_end
99
+ └─ agent_end
100
+ ```
101
+
102
+ ### continue() Event Sequence
103
+
104
+ `continue()` resumes from existing context without adding a new message. Use it for retries after errors.
105
+
106
+ ```typescript
107
+ // After an error, retry from current state
108
+ await agent.continue();
109
+ ```
110
+
111
+ The last message in context must be `user` or `toolResult` (not `assistant`).
112
+
113
+ ### Event Types
114
+
115
+ | Event | Description |
116
+ |-------|-------------|
117
+ | `agent_start` | Agent begins processing |
118
+ | `agent_end` | Agent completes with all new messages |
119
+ | `turn_start` | New turn begins (one LLM call + tool executions) |
120
+ | `turn_end` | Turn completes with assistant message and tool results |
121
+ | `message_start` | Any message begins (user, assistant, toolResult) |
122
+ | `message_update` | **Assistant only.** Includes `assistantMessageEvent` with delta |
123
+ | `message_end` | Message completes |
124
+ | `tool_execution_start` | Tool begins |
125
+ | `tool_execution_update` | Tool streams progress |
126
+ | `tool_execution_end` | Tool completes |
127
+
128
+ ## Agent Options
129
+
130
+ ```typescript
131
+ const agent = new Agent({
132
+ // Initial state
133
+ initialState: {
134
+ systemPrompt: string,
135
+ model: Model<any>,
136
+ thinkingLevel: "off" | "minimal" | "low" | "medium" | "high" | "xhigh",
137
+ tools: AgentTool<any>[],
138
+ messages: AgentMessage[],
139
+ },
140
+
141
+ // Convert AgentMessage[] to LLM Message[] (required for custom message types)
142
+ convertToLlm: (messages) => messages.filter(...),
143
+
144
+ // Transform context before convertToLlm (for pruning, compaction)
145
+ transformContext: async (messages, signal) => pruneOldMessages(messages),
146
+
147
+ // Steering mode: "one-at-a-time" (default) or "all"
148
+ steeringMode: "one-at-a-time",
149
+
150
+ // Follow-up mode: "one-at-a-time" (default) or "all"
151
+ followUpMode: "one-at-a-time",
152
+
153
+ // Custom stream function (for proxy backends)
154
+ streamFn: streamProxy,
155
+
156
+ // Session ID for provider caching
157
+ sessionId: "session-123",
158
+
159
+ // Dynamic API key resolution (for expiring OAuth tokens)
160
+ getApiKey: async (provider) => refreshToken(),
161
+
162
+ // Custom thinking budgets for token-based providers
163
+ thinkingBudgets: {
164
+ minimal: 128,
165
+ low: 512,
166
+ medium: 1024,
167
+ high: 2048,
168
+ },
169
+ });
170
+ ```
171
+
172
+ ## Agent State
173
+
174
+ ```typescript
175
+ interface AgentState {
176
+ systemPrompt: string;
177
+ model: Model<any>;
178
+ thinkingLevel: ThinkingLevel;
179
+ tools: AgentTool<any>[];
180
+ messages: AgentMessage[];
181
+ isStreaming: boolean;
182
+ streamMessage: AgentMessage | null; // Current partial during streaming
183
+ pendingToolCalls: Set<string>;
184
+ error?: string;
185
+ }
186
+ ```
187
+
188
+ Access via `agent.state`. During streaming, `streamMessage` contains the partial assistant message.
189
+
190
+ ## Methods
191
+
192
+ ### Prompting
193
+
194
+ ```typescript
195
+ // Text prompt
196
+ await agent.prompt("Hello");
197
+
198
+ // With images
199
+ await agent.prompt("What's in this image?", [
200
+ { type: "image", data: base64Data, mimeType: "image/jpeg" }
201
+ ]);
202
+
203
+ // AgentMessage directly
204
+ await agent.prompt({ role: "user", content: "Hello", timestamp: Date.now() });
205
+
206
+ // Continue from current context (last message must be user or toolResult)
207
+ await agent.continue();
208
+ ```
209
+
210
+ ### State Management
211
+
212
+ ```typescript
213
+ agent.setSystemPrompt("New prompt");
214
+ agent.setModel(getModel("openai", "gpt-4o"));
215
+ agent.setThinkingLevel("medium");
216
+ agent.setTools([myTool]);
217
+ agent.replaceMessages(newMessages);
218
+ agent.appendMessage(message);
219
+ agent.clearMessages();
220
+ agent.reset(); // Clear everything
221
+ ```
222
+
223
+ ### Session and Thinking Budgets
224
+
225
+ ```typescript
226
+ agent.sessionId = "session-123";
227
+
228
+ agent.thinkingBudgets = {
229
+ minimal: 128,
230
+ low: 512,
231
+ medium: 1024,
232
+ high: 2048,
233
+ };
234
+ ```
235
+
236
+ ### Control
237
+
238
+ ```typescript
239
+ agent.abort(); // Cancel current operation
240
+ await agent.waitForIdle(); // Wait for completion
241
+ ```
242
+
243
+ ### Events
244
+
245
+ ```typescript
246
+ const unsubscribe = agent.subscribe((event) => {
247
+ console.log(event.type);
248
+ });
249
+ unsubscribe();
250
+ ```
251
+
252
+ ## Steering and Follow-up
253
+
254
+ Steering messages let you interrupt the agent while tools are running. Follow-up messages let you queue additional work that runs after the agent would otherwise stop.
255
+
256
+ ```typescript
257
+ agent.setSteeringMode("one-at-a-time");
258
+ agent.setFollowUpMode("one-at-a-time");
259
+
260
+ // While agent is running tools
261
+ agent.steer({
262
+ role: "user",
263
+ content: "Stop! Do this instead.",
264
+ timestamp: Date.now(),
265
+ });
266
+
267
+ // After the agent finishes its current work
268
+ agent.followUp({
269
+ role: "user",
270
+ content: "Also summarize the result.",
271
+ timestamp: Date.now(),
272
+ });
273
+
274
+ const steeringMode = agent.getSteeringMode();
275
+ const followUpMode = agent.getFollowUpMode();
276
+
277
+ agent.clearSteeringQueue();
278
+ agent.clearFollowUpQueue();
279
+ agent.clearAllQueues();
280
+ ```
281
+
282
+ Use `clearSteeringQueue()`, `clearFollowUpQueue()`, or `clearAllQueues()` to drop queued messages.
283
+
284
+ When steering messages are detected after a tool completes:
285
+ 1. Remaining tools are skipped with error results
286
+ 2. Steering messages are injected
287
+ 3. LLM responds to the interruption
288
+
289
+ Follow-up messages are checked only when there are no more tool calls and no steering messages. If any are queued, they are injected and another turn runs.
290
+
291
+ ## Custom Message Types
292
+
293
+ Extend `AgentMessage` via declaration merging:
294
+
295
+ ```typescript
296
+ declare module "@mariozechner/pi-agent-core" {
297
+ interface CustomAgentMessages {
298
+ notification: { role: "notification"; text: string; timestamp: number };
299
+ }
300
+ }
301
+
302
+ // Now valid
303
+ const msg: AgentMessage = { role: "notification", text: "Info", timestamp: Date.now() };
304
+ ```
305
+
306
+ Handle custom types in `convertToLlm`:
307
+
308
+ ```typescript
309
+ const agent = new Agent({
310
+ convertToLlm: (messages) => messages.flatMap(m => {
311
+ if (m.role === "notification") return []; // Filter out
312
+ return [m];
313
+ }),
314
+ });
315
+ ```
316
+
317
+ ## Tools
318
+
319
+ Define tools using `AgentTool`:
320
+
321
+ ```typescript
322
+ import { Type } from "@sinclair/typebox";
323
+
324
+ const readFileTool: AgentTool = {
325
+ name: "read_file",
326
+ label: "Read File", // For UI display
327
+ description: "Read a file's contents",
328
+ parameters: Type.Object({
329
+ path: Type.String({ description: "File path" }),
330
+ }),
331
+ execute: async (toolCallId, params, signal, onUpdate) => {
332
+ const content = await fs.readFile(params.path, "utf-8");
333
+
334
+ // Optional: stream progress
335
+ onUpdate?.({ content: [{ type: "text", text: "Reading..." }], details: {} });
336
+
337
+ return {
338
+ content: [{ type: "text", text: content }],
339
+ details: { path: params.path, size: content.length },
340
+ };
341
+ },
342
+ };
343
+
344
+ agent.setTools([readFileTool]);
345
+ ```
346
+
347
+ ### Error Handling
348
+
349
+ **Throw an error** when a tool fails. Do not return error messages as content.
350
+
351
+ ```typescript
352
+ execute: async (toolCallId, params, signal, onUpdate) => {
353
+ if (!fs.existsSync(params.path)) {
354
+ throw new Error(`File not found: ${params.path}`);
355
+ }
356
+ // Return content only on success
357
+ return { content: [{ type: "text", text: "..." }] };
358
+ }
359
+ ```
360
+
361
+ Thrown errors are caught by the agent and reported to the LLM as tool errors with `isError: true`.
362
+
363
+ ## Proxy Usage
364
+
365
+ For browser apps that proxy through a backend:
366
+
367
+ ```typescript
368
+ import { Agent, streamProxy } from "@mariozechner/pi-agent-core";
369
+
370
+ const agent = new Agent({
371
+ streamFn: (model, context, options) =>
372
+ streamProxy(model, context, {
373
+ ...options,
374
+ authToken: "...",
375
+ proxyUrl: "https://your-server.com",
376
+ }),
377
+ });
378
+ ```
379
+
380
+ ## Low-Level API
381
+
382
+ For direct control without the Agent class:
383
+
384
+ ```typescript
385
+ import { agentLoop, agentLoopContinue } from "@mariozechner/pi-agent-core";
386
+
387
+ const context: AgentContext = {
388
+ systemPrompt: "You are helpful.",
389
+ messages: [],
390
+ tools: [],
391
+ };
392
+
393
+ const config: AgentLoopConfig = {
394
+ model: getModel("openai", "gpt-4o"),
395
+ convertToLlm: (msgs) => msgs.filter(m => ["user", "assistant", "toolResult"].includes(m.role)),
396
+ };
397
+
398
+ const userMessage = { role: "user", content: "Hello", timestamp: Date.now() };
399
+
400
+ for await (const event of agentLoop([userMessage], context, config)) {
401
+ console.log(event.type);
402
+ }
403
+
404
+ // Continue from existing context
405
+ for await (const event of agentLoopContinue(context, config)) {
406
+ console.log(event.type);
407
+ }
408
+ ```
409
+
410
+ ## License
411
+
412
+ MIT
@@ -0,0 +1,5 @@
1
+ import { EventStream } from "@boxiaolanya2008/pi-ai";
2
+ import type { AgentContext, AgentEvent, AgentLoopConfig, AgentMessage, StreamFn } from "./types.js";
3
+ export declare function agentLoop(prompts: AgentMessage[], context: AgentContext, config: AgentLoopConfig, signal?: AbortSignal, streamFn?: StreamFn): EventStream<AgentEvent, AgentMessage[]>;
4
+ export declare function agentLoopContinue(context: AgentContext, config: AgentLoopConfig, signal?: AbortSignal, streamFn?: StreamFn): EventStream<AgentEvent, AgentMessage[]>;
5
+ //# sourceMappingURL=agent-loop.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"agent-loop.d.ts","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA,OAAO,EAGN,WAAW,EAIX,MAAM,wBAAwB,CAAC;AAChC,OAAO,KAAK,EACX,YAAY,EACZ,UAAU,EACV,eAAe,EACf,YAAY,EAGZ,QAAQ,EACR,MAAM,YAAY,CAAC;AAIpB,wBAAgB,SAAS,CACxB,OAAO,EAAE,YAAY,EAAE,EACvB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAiBzC;AACD,wBAAgB,iBAAiB,CAChC,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAgBzC","sourcesContent":["import {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@boxiaolanya2008/pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\nconst PARALLEL_TOOL_EXECUTION = true;\nconst MAX_TOOL_CONCURRENCY = 8;\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst stream = createAgentStream();\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [...prompts];\n\t\tconst currentContext: AgentContext = {\n\t\t\t...context,\n\t\t\tmessages: [...context.messages, ...prompts],\n\t\t};\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tfor (const prompt of prompts) {\n\t\t\tstream.push({ type: \"message_start\", message: prompt });\n\t\t\tstream.push({ type: \"message_end\", message: prompt });\n\t\t}\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\treturn stream;\n}\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, 
AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\tconst stream = createAgentStream();\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [];\n\t\tconst currentContext: AgentContext = { ...context };\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\treturn stream;\n}\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? event.messages : []),\n\t);\n}\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet firstTurn = true;\n\tlet pendingMessages: AgentMessage[] = (await config.getSteeringMessages?.()) || [];\n\twhile (true) {\n\t\tlet hasMoreToolCalls = true;\n\t\tlet steeringAfterTools: AgentMessage[] | null = null;\n\t\twhile (hasMoreToolCalls || pendingMessages.length > 0) {\n\t\t\tif (!firstTurn) {\n\t\t\t\tstream.push({ type: \"turn_start\" });\n\t\t\t} else {\n\t\t\t\tfirstTurn = false;\n\t\t\t}\n\t\t\tif (pendingMessages.length > 0) {\n\t\t\t\tfor (const message of pendingMessages) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message });\n\t\t\t\t\tstream.push({ type: \"message_end\", message });\n\t\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\t\tnewMessages.push(message);\n\t\t\t\t}\n\t\t\t\tpendingMessages = [];\n\t\t\t}\n\t\t\tconst message = await 
streamAssistantResponse(currentContext, config, signal, stream, streamFn);\n\t\t\tnewMessages.push(message);\n\t\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\t\tstream.push({ type: \"turn_end\", message, toolResults: [] });\n\t\t\t\tstream.push({ type: \"agent_end\", messages: newMessages });\n\t\t\t\tstream.end(newMessages);\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\t\thasMoreToolCalls = toolCalls.length > 0;\n\t\t\tconst toolResults: ToolResultMessage[] = [];\n\t\t\tif (hasMoreToolCalls) {\n\t\t\t\tconst toolExecution = await executeToolCalls(\n\t\t\t\t\tcurrentContext.tools,\n\t\t\t\t\tmessage,\n\t\t\t\t\tsignal,\n\t\t\t\t\tstream,\n\t\t\t\t\tconfig.getSteeringMessages,\n\t\t\t\t);\n\t\t\t\ttoolResults.push(...toolExecution.toolResults);\n\t\t\t\tsteeringAfterTools = toolExecution.steeringMessages ?? null;\n\t\t\t\tfor (const result of toolResults) {\n\t\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\t\tnewMessages.push(result);\n\t\t\t\t}\n\t\t\t}\n\t\t\tstream.push({ type: \"turn_end\", message, toolResults });\n\t\t\tif (steeringAfterTools && steeringAfterTools.length > 0) {\n\t\t\t\tpendingMessages = steeringAfterTools;\n\t\t\t\tsteeringAfterTools = null;\n\t\t\t} else {\n\t\t\t\tpendingMessages = (await config.getSteeringMessages?.()) || [];\n\t\t\t}\n\t\t}\n\t\tconst followUpMessages = (await config.getFollowUpMessages?.()) || [];\n\t\tif (followUpMessages.length > 0) {\n\t\t\tpendingMessages = followUpMessages;\n\t\t\tcontinue;\n\t\t}\n\t\tbreak;\n\t}\n\tstream.push({ type: \"agent_end\", messages: newMessages });\n\tstream.end(newMessages);\n}\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\tlet messages = context.messages;\n\tif 
(config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\tconst llmMessages = await config.convertToLlm(messages);\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\tconst streamFunction = streamFn || streamSimple;\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tstream.push({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message: { 
...finalMessage } });\n\t\t\t\t}\n\t\t\t\tstream.push({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\treturn await response.result();\n}\nasync function executeToolCalls(\n\ttools: AgentTool<any>[] | undefined,\n\tassistantMessage: AssistantMessage,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetSteeringMessages?: AgentLoopConfig[\"getSteeringMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; steeringMessages?: AgentMessage[] }> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tconst results: ToolResultMessage[] = [];\n\tlet steeringMessages: AgentMessage[] | undefined;\n\tif (PARALLEL_TOOL_EXECUTION && toolCalls.length > 1) {\n\t\treturn executeToolCallsParallel(tools, toolCalls, signal, stream, getSteeringMessages);\n\t}\n\tfor (let index = 0; index < toolCalls.length; index++) {\n\t\tconst toolCall = toolCalls[index];\n\t\tconst result = await executeSingleToolCall(tools, toolCall, signal, stream);\n\t\tresults.push(result);\n\t\tif (getSteeringMessages) {\n\t\t\tconst steering = await getSteeringMessages();\n\t\t\tif (steering.length > 0) {\n\t\t\t\tsteeringMessages = steering;\n\t\t\t\tconst remainingCalls = toolCalls.slice(index + 1);\n\t\t\t\tfor (const skipped of remainingCalls) {\n\t\t\t\t\tresults.push(skipToolCall(skipped, stream));\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\treturn { toolResults: results, steeringMessages };\n}\nasync function executeSingleToolCall(\n\ttools: AgentTool<any>[] | undefined,\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): Promise<ToolResultMessage> {\n\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: 
toolCall.arguments,\n\t});\n\tlet result: AgentToolResult<any>;\n\tlet isError = false;\n\ttry {\n\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\tstream.push({\n\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\ttoolName: toolCall.name,\n\t\t\t\targs: toolCall.arguments,\n\t\t\t\tpartialResult,\n\t\t\t});\n\t\t});\n\t} catch (e) {\n\t\tresult = {\n\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? e.message : String(e) }],\n\t\t\tdetails: {},\n\t\t};\n\t\tisError = true;\n\t}\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError,\n\t});\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: result.details,\n\t\tisError,\n\t\ttimestamp: Date.now(),\n\t};\n\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\treturn toolResultMessage;\n}\nasync function executeToolCallsParallel(\n\ttools: AgentTool<any>[] | undefined,\n\ttoolCalls: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>[],\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetSteeringMessages?: AgentLoopConfig[\"getSteeringMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; steeringMessages?: AgentMessage[] }> {\n\tconst results: ToolResultMessage[] = [];\n\tlet steeringMessages: AgentMessage[] | undefined;\n\tfor (const toolCall of toolCalls) {\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\t}\n\tconst 
executing = toolCalls.map(async (toolCall) => {\n\t\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\t\tlet result: AgentToolResult<any>;\n\t\tlet isError = false;\n\t\ttry {\n\t\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\t\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\t\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\t\tstream.push({\n\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\ttoolName: toolCall.name,\n\t\t\t\t\targs: toolCall.arguments,\n\t\t\t\t\tpartialResult,\n\t\t\t\t});\n\t\t\t});\n\t\t} catch (e) {\n\t\t\tresult = {\n\t\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? e.message : String(e) }],\n\t\t\t\tdetails: {},\n\t\t\t};\n\t\t\tisError = true;\n\t\t}\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_end\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tresult,\n\t\t\tisError,\n\t\t});\n\t\tconst toolResultMessage: ToolResultMessage = {\n\t\t\trole: \"toolResult\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tcontent: result.content,\n\t\t\tdetails: result.details,\n\t\t\tisError,\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\t\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\t\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\t\treturn toolResultMessage;\n\t});\n\tconst concurrencyLimit = Math.min(MAX_TOOL_CONCURRENCY, toolCalls.length);\n\tconst chunks: Promise<ToolResultMessage>[][] = [];\n\tfor (let i = 0; i < executing.length; i += concurrencyLimit) {\n\t\tchunks.push(executing.slice(i, i + concurrencyLimit));\n\t}\n\tfor (const chunk of chunks) {\n\t\tconst chunkResults = await Promise.all(chunk);\n\t\tresults.push(...chunkResults);\n\t\tif (getSteeringMessages) {\n\t\t\tconst steering = await getSteeringMessages();\n\t\t\tif (steering.length > 0) {\n\t\t\t\tsteeringMessages = 
steering;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\treturn { toolResults: results, steeringMessages };\n}\nfunction skipToolCall(\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): ToolResultMessage {\n\tconst result: AgentToolResult<any> = {\n\t\tcontent: [{ type: \"text\", text: \"Skipped due to queued user message.\" }],\n\t\tdetails: {},\n\t};\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: toolCall.arguments,\n\t});\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError: true,\n\t});\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: {},\n\t\tisError: true,\n\t\ttimestamp: Date.now(),\n\t};\n\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\treturn toolResultMessage;\n}\n"]}