@mariozechner/pi-agent-core 0.30.1 → 0.31.0

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (41)
  1. package/README.md +297 -126
  2. package/dist/agent-loop.d.ts +21 -0
  3. package/dist/agent-loop.d.ts.map +1 -0
  4. package/dist/agent-loop.js +294 -0
  5. package/dist/agent-loop.js.map +1 -0
  6. package/dist/agent.d.ts +43 -29
  7. package/dist/agent.d.ts.map +1 -1
  8. package/dist/agent.js +83 -148
  9. package/dist/agent.js.map +1 -1
  10. package/dist/index.d.ts +4 -3
  11. package/dist/index.d.ts.map +1 -1
  12. package/dist/index.js +7 -3
  13. package/dist/index.js.map +1 -1
  14. package/dist/proxy.d.ts +85 -0
  15. package/dist/proxy.d.ts.map +1 -0
  16. package/dist/proxy.js +269 -0
  17. package/dist/proxy.js.map +1 -0
  18. package/dist/types.d.ts +88 -29
  19. package/dist/types.d.ts.map +1 -1
  20. package/dist/types.js.map +1 -1
  21. package/package.json +3 -3
  22. package/dist/transports/AppTransport.d.ts +0 -28
  23. package/dist/transports/AppTransport.d.ts.map +0 -1
  24. package/dist/transports/AppTransport.js +0 -330
  25. package/dist/transports/AppTransport.js.map +0 -1
  26. package/dist/transports/ProviderTransport.d.ts +0 -29
  27. package/dist/transports/ProviderTransport.d.ts.map +0 -1
  28. package/dist/transports/ProviderTransport.js +0 -54
  29. package/dist/transports/ProviderTransport.js.map +0 -1
  30. package/dist/transports/index.d.ts +0 -5
  31. package/dist/transports/index.d.ts.map +0 -1
  32. package/dist/transports/index.js +0 -3
  33. package/dist/transports/index.js.map +0 -1
  34. package/dist/transports/proxy-types.d.ts +0 -53
  35. package/dist/transports/proxy-types.d.ts.map +0 -1
  36. package/dist/transports/proxy-types.js +0 -2
  37. package/dist/transports/proxy-types.js.map +0 -1
  38. package/dist/transports/types.d.ts +0 -25
  39. package/dist/transports/types.d.ts.map +0 -1
  40. package/dist/transports/types.js +0 -2
  41. package/dist/transports/types.js.map +0 -1
package/README.md CHANGED
@@ -1,194 +1,365 @@
- # @mariozechner/pi-agent-core
+ # @mariozechner/pi-agent
 
- Stateful agent abstraction with transport layer for LLM interactions. Provides a reactive `Agent` class that manages conversation state, emits granular events, and supports pluggable transports for different deployment scenarios.
+ Stateful agent with tool execution and event streaming. Built on `@mariozechner/pi-ai`.
 
  ## Installation
 
  ```bash
- npm install @mariozechner/pi-agent-core
+ npm install @mariozechner/pi-agent
  ```
 
  ## Quick Start
 
  ```typescript
- import { Agent, ProviderTransport } from '@mariozechner/pi-agent-core';
- import { getModel } from '@mariozechner/pi-ai';
+ import { Agent } from "@mariozechner/pi-agent";
+ import { getModel } from "@mariozechner/pi-ai";
 
- // Create agent with direct provider transport
  const agent = new Agent({
-   transport: new ProviderTransport(),
    initialState: {
-     systemPrompt: 'You are a helpful assistant.',
-     model: getModel('anthropic', 'claude-sonnet-4-20250514'),
-     thinkingLevel: 'medium',
-     tools: []
-   }
+     systemPrompt: "You are a helpful assistant.",
+     model: getModel("anthropic", "claude-sonnet-4-20250514"),
+   },
  });
 
- // Subscribe to events for reactive UI updates
  agent.subscribe((event) => {
-   switch (event.type) {
-     case 'message_update':
-       // Stream text to UI
-       const content = event.message.content;
-       for (const block of content) {
-         if (block.type === 'text') console.log(block.text);
-       }
-       break;
-     case 'tool_execution_start':
-       console.log(`Calling ${event.toolName}...`);
-       break;
-     case 'tool_execution_update':
-       // Stream tool output (e.g., bash stdout)
-       console.log('Progress:', event.partialResult.content);
-       break;
-     case 'tool_execution_end':
-       console.log(`Result:`, event.result.content);
-       break;
+   if (event.type === "message_update" && event.assistantMessageEvent.type === "text_delta") {
+     // Stream just the new text chunk
+     process.stdout.write(event.assistantMessageEvent.delta);
    }
  });
 
- // Send a prompt
- await agent.prompt('Hello, world!');
-
- // Access conversation state
- console.log(agent.state.messages);
+ await agent.prompt("Hello!");
  ```
 
  ## Core Concepts
 
- ### Agent State
+ ### AgentMessage vs LLM Message
+
+ The agent works with `AgentMessage`, a flexible type that can include:
+ - Standard LLM messages (`user`, `assistant`, `toolResult`)
+ - Custom app-specific message types via declaration merging
+
+ LLMs only understand `user`, `assistant`, and `toolResult`. The `convertToLlm` function bridges this gap by filtering and transforming messages before each LLM call.
+
+ ### Message Flow
+
+ ```
+ AgentMessage[] → transformContext() → AgentMessage[] → convertToLlm() → Message[] → LLM
+                  (optional)                            (required)
+ ```
+
+ 1. **transformContext**: Prune old messages, inject external context
+ 2. **convertToLlm**: Filter out UI-only messages, convert custom types to LLM format
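A minimal sketch of the two hooks wired together (the 50-message cutoff is an arbitrary example, not a library default):

```typescript
import { Agent } from "@mariozechner/pi-agent";
import { getModel } from "@mariozechner/pi-ai";

const agent = new Agent({
  initialState: {
    systemPrompt: "You are a helpful assistant.",
    model: getModel("anthropic", "claude-sonnet-4-20250514"),
  },
  // 1. Runs first: AgentMessage[] → AgentMessage[] (prune to bound context size)
  transformContext: async (messages, signal) => messages.slice(-50),
  // 2. Runs second: keep only the roles the LLM understands
  convertToLlm: (messages) =>
    messages.filter((m) => m.role === "user" || m.role === "assistant" || m.role === "toolResult"),
});
```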
+
+ ## Event Flow
+
+ The agent emits events for UI updates. Understanding the event sequence helps build responsive interfaces.
 
- The `Agent` maintains reactive state:
+ ### prompt() Event Sequence
+
+ When you call `prompt("Hello")`:
+
+ ```
+ prompt("Hello")
+ ├─ agent_start
+ ├─ turn_start
+ ├─ message_start { message: userMessage }        // Your prompt
+ ├─ message_end { message: userMessage }
+ ├─ message_start { message: assistantMessage }   // LLM starts responding
+ ├─ message_update { message: partial... }        // Streaming chunks
+ ├─ message_update { message: partial... }
+ ├─ message_end { message: assistantMessage }     // Complete response
+ ├─ turn_end { message, toolResults: [] }
+ └─ agent_end { messages: [...] }
+ ```
+
+ ### With Tool Calls
+
+ If the assistant calls tools, the loop continues:
+
+ ```
+ prompt("Read config.json")
+ ├─ agent_start
+ ├─ turn_start
+ ├─ message_start/end { userMessage }
+ ├─ message_start { assistantMessage with toolCall }
+ ├─ message_update...
+ ├─ message_end { assistantMessage }
+ ├─ tool_execution_start { toolCallId, toolName, args }
+ ├─ tool_execution_update { partialResult }       // If tool streams
+ ├─ tool_execution_end { toolCallId, result }
+ ├─ message_start/end { toolResultMessage }
+ ├─ turn_end { message, toolResults: [toolResult] }
+
+ ├─ turn_start                                    // Next turn
+ ├─ message_start { assistantMessage }            // LLM responds to tool result
+ ├─ message_update...
+ ├─ message_end
+ ├─ turn_end
+ └─ agent_end
+ ```
+
+ ### continue() Event Sequence
+
+ `continue()` resumes from existing context without adding a new message. Use it for retries after errors.
+
+ ```typescript
+ // After an error, retry from current state
+ await agent.continue();
+ ```
+
+ The last message in context must be `user` or `toolResult` (not `assistant`).
+
+ ### Event Types
+
+ | Event | Description |
+ |-------|-------------|
+ | `agent_start` | Agent begins processing |
+ | `agent_end` | Agent completes with all new messages |
+ | `turn_start` | New turn begins (one LLM call + tool executions) |
+ | `turn_end` | Turn completes with assistant message and tool results |
+ | `message_start` | Any message begins (user, assistant, toolResult) |
+ | `message_update` | **Assistant only.** Includes `assistantMessageEvent` with delta |
+ | `message_end` | Message completes |
+ | `tool_execution_start` | Tool begins |
+ | `tool_execution_update` | Tool streams progress |
+ | `tool_execution_end` | Tool completes |
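A minimal console subscriber covering the table above (a sketch; field access follows the event shapes shown in the sequence diagrams):

```typescript
agent.subscribe((event) => {
  switch (event.type) {
    case "message_update":
      // Only assistant messages stream; print each new text chunk
      if (event.assistantMessageEvent.type === "text_delta") {
        process.stdout.write(event.assistantMessageEvent.delta);
      }
      break;
    case "tool_execution_start":
      console.log(`\n[tool] ${event.toolName}`, event.args);
      break;
    case "tool_execution_end":
      console.log(`[tool] ${event.toolName} done`);
      break;
    case "agent_end":
      console.log(`\n${event.messages.length} new message(s)`);
      break;
  }
});
```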
+
+ ## Agent Options
+
+ ```typescript
+ const agent = new Agent({
+   // Initial state
+   initialState: {
+     systemPrompt: string,
+     model: Model<any>,
+     thinkingLevel: "off" | "minimal" | "low" | "medium" | "high" | "xhigh",
+     tools: AgentTool<any>[],
+     messages: AgentMessage[],
+   },
+
+   // Convert AgentMessage[] to LLM Message[] (required for custom message types)
+   convertToLlm: (messages) => messages.filter(...),
+
+   // Transform context before convertToLlm (for pruning, compaction)
+   transformContext: async (messages, signal) => pruneOldMessages(messages),
+
+   // How to handle queued messages: "one-at-a-time" (default) or "all"
+   queueMode: "one-at-a-time",
+
+   // Custom stream function (for proxy backends)
+   streamFn: streamProxy,
+
+   // Dynamic API key resolution (for expiring OAuth tokens)
+   getApiKey: async (provider) => refreshToken(),
+ });
+ ```
+
+ ## Agent State
 
  ```typescript
  interface AgentState {
    systemPrompt: string;
    model: Model<any>;
-   thinkingLevel: ThinkingLevel; // 'off' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
+   thinkingLevel: ThinkingLevel;
    tools: AgentTool<any>[];
-   messages: AppMessage[];
+   messages: AgentMessage[];
    isStreaming: boolean;
-   streamMessage: Message | null;
+   streamMessage: AgentMessage | null; // Current partial during streaming
    pendingToolCalls: Set<string>;
    error?: string;
  }
  ```
 
- ### Events
+ Access via `agent.state`. During streaming, `streamMessage` contains the partial assistant message.
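A sketch of a redraw-on-event loop (`render` is a hypothetical UI helper, not part of the package):

```typescript
agent.subscribe(() => {
  if (agent.state.isStreaming && agent.state.streamMessage) {
    render(agent.state.streamMessage); // hypothetical helper: draw the in-progress assistant message
  }
  if (agent.state.error) {
    console.error(agent.state.error);
  }
});
```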
 
- Events provide fine-grained lifecycle information:
+ ## Methods
 
- | Event | Description |
- |-------|-------------|
- | `agent_start` | Agent begins processing |
- | `agent_end` | Agent completes, contains all generated messages |
- | `turn_start` | New turn begins (one LLM response + tool executions) |
- | `turn_end` | Turn completes with assistant message and tool results |
- | `message_start` | Message begins (user, assistant, or toolResult) |
- | `message_update` | Assistant message streaming update |
- | `message_end` | Message completes |
- | `tool_execution_start` | Tool begins execution |
- | `tool_execution_update` | Tool streams progress (e.g., bash output) |
- | `tool_execution_end` | Tool completes with result |
+ ### Prompting
+
+ ```typescript
+ // Text prompt
+ await agent.prompt("Hello");
+
+ // With images
+ await agent.prompt("What's in this image?", [
+   { type: "image", data: base64Data, mimeType: "image/jpeg" }
+ ]);
 
- ### Transports
+ // AgentMessage directly
+ await agent.prompt({ role: "user", content: "Hello", timestamp: Date.now() });
 
- Transports abstract LLM communication:
+ // Continue from current context (last message must be user or toolResult)
+ await agent.continue();
+ ```
 
- - **`ProviderTransport`**: Direct API calls using `@mariozechner/pi-ai`
- - **`AppTransport`**: Proxy through a backend server (for browser apps)
+ ### State Management
 
  ```typescript
- // Direct provider access (Node.js)
- const agent = new Agent({
-   transport: new ProviderTransport({
-     apiKey: process.env.ANTHROPIC_API_KEY
-   })
- });
+ agent.setSystemPrompt("New prompt");
+ agent.setModel(getModel("openai", "gpt-4o"));
+ agent.setThinkingLevel("medium");
+ agent.setTools([myTool]);
+ agent.replaceMessages(newMessages);
+ agent.appendMessage(message);
+ agent.clearMessages();
+ agent.reset(); // Clear everything
+ ```
 
- // Via proxy (browser)
- const agent = new Agent({
-   transport: new AppTransport({
-     endpoint: '/api/agent',
-     headers: { 'Authorization': 'Bearer ...' }
-   })
+ ### Control
+
+ ```typescript
+ agent.abort(); // Cancel current operation
+ await agent.waitForIdle(); // Wait for completion
+ ```
+
+ ### Events
+
+ ```typescript
+ const unsubscribe = agent.subscribe((event) => {
+   console.log(event.type);
  });
+ unsubscribe();
  ```
 
  ## Message Queue
 
- Queue messages to inject at the next turn:
+ Queue messages to inject during tool execution (for user interruptions):
 
  ```typescript
- // Queue mode: 'all' or 'one-at-a-time'
- agent.setQueueMode('one-at-a-time');
-
- // Queue a message while agent is streaming
- await agent.queueMessage({
-   role: 'user',
-   content: 'Additional context...',
-   timestamp: Date.now()
+ agent.setQueueMode("one-at-a-time");
+
+ // While agent is running tools
+ agent.queueMessage({
+   role: "user",
+   content: "Stop! Do this instead.",
+   timestamp: Date.now(),
  });
  ```
 
- ## Attachments
+ When queued messages are detected after a tool completes:
+ 1. Remaining tools are skipped with error results
+ 2. Queued message is injected
+ 3. LLM responds to the interruption
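A sketch of an interruption in practice; the run is deliberately not awaited so the queue call lands while tools execute:

```typescript
// Kick off a long tool-heavy run without awaiting it
const run = agent.prompt("Refactor the whole repo");

// User changes their mind while tools are executing
agent.queueMessage({
  role: "user",
  content: "Stop! Only refactor src/utils.ts.",
  timestamp: Date.now(),
});

await run; // remaining tools are skipped; the LLM answers the queued message
```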
+
+ ## Custom Message Types
 
- User messages can include attachments:
+ Extend `AgentMessage` via declaration merging:
 
  ```typescript
- await agent.prompt('What is in this image?', [{
-   id: 'img1',
-   type: 'image',
-   fileName: 'photo.jpg',
-   mimeType: 'image/jpeg',
-   size: 102400,
-   content: base64ImageData
- }]);
+ declare module "@mariozechner/pi-agent" {
+   interface CustomAgentMessages {
+     notification: { role: "notification"; text: string; timestamp: number };
+   }
+ }
+
+ // Now valid
+ const msg: AgentMessage = { role: "notification", text: "Info", timestamp: Date.now() };
  ```
 
- ## Custom Message Types
+ Handle custom types in `convertToLlm`:
+
+ ```typescript
+ const agent = new Agent({
+   convertToLlm: (messages) => messages.flatMap(m => {
+     if (m.role === "notification") return []; // Filter out
+     return [m];
+   }),
+ });
+ ```
+
+ ## Tools
 
- Extend `AppMessage` for app-specific messages via declaration merging:
+ Define tools using `AgentTool`:
 
  ```typescript
- declare module '@mariozechner/pi-agent-core' {
-   interface CustomMessages {
-     artifact: { role: 'artifact'; code: string; language: string };
+ import { Type } from "@sinclair/typebox";
+
+ const readFileTool: AgentTool = {
+   name: "read_file",
+   label: "Read File", // For UI display
+   description: "Read a file's contents",
+   parameters: Type.Object({
+     path: Type.String({ description: "File path" }),
+   }),
+   execute: async (toolCallId, params, signal, onUpdate) => {
+     const content = await fs.readFile(params.path, "utf-8");
+
+     // Optional: stream progress
+     onUpdate?.({ content: [{ type: "text", text: "Reading..." }], details: {} });
+
+     return {
+       content: [{ type: "text", text: content }],
+       details: { path: params.path, size: content.length },
+     };
+   },
+ };
+
+ agent.setTools([readFileTool]);
+ ```
+
+ ### Error Handling
+
+ **Throw an error** when a tool fails. Do not return error messages as content.
+
+ ```typescript
+ execute: async (toolCallId, params, signal, onUpdate) => {
+   if (!fs.existsSync(params.path)) {
+     throw new Error(`File not found: ${params.path}`);
    }
+   // Return content only on success
+   return { content: [{ type: "text", text: "..." }] };
+ }
+ ```
+
+ Thrown errors are caught by the agent and reported to the LLM as tool errors with `isError: true`.
+
+ ## Proxy Usage
+
+ For browser apps that proxy through a backend:
+
+ ```typescript
+ import { Agent, streamProxy } from "@mariozechner/pi-agent";
+
+ const agent = new Agent({
+   streamFn: (model, context, options) =>
+     streamProxy(model, context, {
+       ...options,
+       authToken: "...",
+       proxyUrl: "https://your-server.com",
+     }),
+ });
+ ```
+
+ ## Low-Level API
+
+ For direct control without the Agent class:
+
+ ```typescript
+ import { agentLoop, agentLoopContinue } from "@mariozechner/pi-agent";
+
+ const context: AgentContext = {
+   systemPrompt: "You are helpful.",
+   messages: [],
+   tools: [],
+ };
+
+ const config: AgentLoopConfig = {
+   model: getModel("openai", "gpt-4o"),
+   convertToLlm: (msgs) => msgs.filter(m => ["user", "assistant", "toolResult"].includes(m.role)),
+ };
+
+ const userMessage = { role: "user", content: "Hello", timestamp: Date.now() };
+
+ for await (const event of agentLoop([userMessage], context, config)) {
+   console.log(event.type);
  }
 
- // Now AppMessage includes your custom type
- const msg: AppMessage = { role: 'artifact', code: '...', language: 'typescript' };
- ```
-
- ## API Reference
-
- ### Agent Methods
-
- | Method | Description |
- |--------|-------------|
- | `prompt(text, attachments?)` | Send a user prompt |
- | `continue()` | Continue from current context (for retry after overflow) |
- | `abort()` | Abort current operation |
- | `waitForIdle()` | Returns promise that resolves when agent is idle |
- | `reset()` | Clear all messages and state |
- | `subscribe(fn)` | Subscribe to events, returns unsubscribe function |
- | `queueMessage(msg)` | Queue message for next turn |
- | `clearMessageQueue()` | Clear queued messages |
-
- ### State Mutators
-
- | Method | Description |
- |--------|-------------|
- | `setSystemPrompt(v)` | Update system prompt |
- | `setModel(m)` | Switch model |
- | `setThinkingLevel(l)` | Set reasoning level |
- | `setQueueMode(m)` | Set queue mode ('all' or 'one-at-a-time') |
- | `setTools(t)` | Update available tools |
- | `replaceMessages(ms)` | Replace all messages |
- | `appendMessage(m)` | Append a message |
- | `clearMessages()` | Clear all messages |
+ // Continue from existing context
+ for await (const event of agentLoopContinue(context, config)) {
+   console.log(event.type);
+ }
+ ```
 
  ## License
 
package/dist/agent-loop.d.ts ADDED
@@ -0,0 +1,21 @@
+ /**
+  * Agent loop that works with AgentMessage throughout.
+  * Transforms to Message[] only at the LLM call boundary.
+  */
+ import { EventStream } from "@mariozechner/pi-ai";
+ import type { AgentContext, AgentEvent, AgentLoopConfig, AgentMessage, StreamFn } from "./types.js";
+ /**
+  * Start an agent loop with a new prompt message.
+  * The prompt is added to the context and events are emitted for it.
+  */
+ export declare function agentLoop(prompts: AgentMessage[], context: AgentContext, config: AgentLoopConfig, signal?: AbortSignal, streamFn?: StreamFn): EventStream<AgentEvent, AgentMessage[]>;
+ /**
+  * Continue an agent loop from the current context without adding a new message.
+  * Used for retries - context already has user message or tool results.
+  *
+  * **Important:** The last message in context must convert to a `user` or `toolResult` message
+  * via `convertToLlm`. If it doesn't, the LLM provider will reject the request.
+  * This cannot be validated here since `convertToLlm` is only called once per turn.
+  */
+ export declare function agentLoopContinue(context: AgentContext, config: AgentLoopConfig, signal?: AbortSignal, streamFn?: StreamFn): EventStream<AgentEvent, AgentMessage[]>;
+ //# sourceMappingURL=agent-loop.d.ts.map
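Continuing the Low-Level API example from the README above, the returned `EventStream` can be iterated and then resolved. This sketch assumes `result()` resolves to the final `AgentMessage[]`, matching how the loop implementation ends the stream with `stream.end(newMessages)`:

```typescript
const stream = agentLoop([userMessage], context, config);

for await (const event of stream) {
  if (event.type === "tool_execution_start") {
    console.log(`tool: ${event.toolName}`);
  }
}

// After iteration, collect everything the run appended to the context
const newMessages = await stream.result();
console.log(`${newMessages.length} new message(s)`);
```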
package/dist/agent-loop.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"agent-loop.d.ts","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAGN,WAAW,EAIX,MAAM,qBAAqB,CAAC;AAC7B,OAAO,KAAK,EACX,YAAY,EACZ,UAAU,EACV,eAAe,EACf,YAAY,EAGZ,QAAQ,EACR,MAAM,YAAY,CAAC;AAEpB;;;GAGG;AACH,wBAAgB,SAAS,CACxB,OAAO,EAAE,YAAY,EAAE,EACvB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAqBzC;AAED;;;;;;;GAOG;AACH,wBAAgB,iBAAiB,CAChC,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAsBzC","sourcesContent":["/**\n * Agent loop that works with AgentMessage throughout.\n * Transforms to Message[] only at the LLM call boundary.\n */\n\nimport {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@mariozechner/pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\n/**\n * Start an agent loop with a new prompt message.\n * The prompt is added to the context and events are emitted for it.\n */\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [...prompts];\n\t\tconst currentContext: AgentContext = {\n\t\t\t...context,\n\t\t\tmessages: [...context.messages, ...prompts],\n\t\t};\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tfor (const prompt of prompts) {\n\t\t\tstream.push({ type: \"message_start\", message: prompt });\n\t\t\tstream.push({ type: \"message_end\", message: prompt });\n\t\t}\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\n/**\n * Continue an agent loop from the current context without adding a new message.\n * Used for retries - context already has user message or tool results.\n *\n * **Important:** The last message in context must convert to a `user` or `toolResult` message\n * via `convertToLlm`. 
If it doesn't, the LLM provider will reject the request.\n * This cannot be validated here since `convertToLlm` is only called once per turn.\n */\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [];\n\t\tconst currentContext: AgentContext = { ...context };\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? event.messages : []),\n\t);\n}\n\n/**\n * Main loop logic shared by agentLoop and agentLoopContinue.\n */\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet hasMoreToolCalls = true;\n\tlet firstTurn = true;\n\tlet queuedMessages: AgentMessage[] = (await config.getQueuedMessages?.()) || [];\n\tlet queuedAfterTools: AgentMessage[] | null = null;\n\n\twhile (hasMoreToolCalls || queuedMessages.length > 0) {\n\t\tif (!firstTurn) {\n\t\t\tstream.push({ type: \"turn_start\" });\n\t\t} else {\n\t\t\tfirstTurn = false;\n\t\t}\n\n\t\t// Process queued messages (inject before next assistant response)\n\t\tif (queuedMessages.length > 0) {\n\t\t\tfor (const message of queuedMessages) {\n\t\t\t\tstream.push({ type: \"message_start\", message });\n\t\t\t\tstream.push({ type: \"message_end\", message });\n\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\tnewMessages.push(message);\n\t\t\t}\n\t\t\tqueuedMessages = [];\n\t\t}\n\n\t\t// Stream assistant response\n\t\tconst message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);\n\t\tnewMessages.push(message);\n\n\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\tstream.push({ type: \"turn_end\", message, toolResults: [] });\n\t\t\tstream.push({ type: \"agent_end\", messages: newMessages });\n\t\t\tstream.end(newMessages);\n\t\t\treturn;\n\t\t}\n\n\t\t// Check for tool calls\n\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\thasMoreToolCalls = toolCalls.length > 0;\n\n\t\tconst toolResults: ToolResultMessage[] = [];\n\t\tif (hasMoreToolCalls) {\n\t\t\tconst toolExecution = await executeToolCalls(\n\t\t\t\tcurrentContext.tools,\n\t\t\t\tmessage,\n\t\t\t\tsignal,\n\t\t\t\tstream,\n\t\t\t\tconfig.getQueuedMessages,\n\t\t\t);\n\t\t\ttoolResults.push(...toolExecution.toolResults);\n\t\t\tqueuedAfterTools = toolExecution.queuedMessages ?? 
null;\n\n\t\t\tfor (const result of toolResults) {\n\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\tnewMessages.push(result);\n\t\t\t}\n\t\t}\n\n\t\tstream.push({ type: \"turn_end\", message, toolResults });\n\n\t\t// Get queued messages after turn completes\n\t\tif (queuedAfterTools && queuedAfterTools.length > 0) {\n\t\t\tqueuedMessages = queuedAfterTools;\n\t\t\tqueuedAfterTools = null;\n\t\t} else {\n\t\t\tqueuedMessages = (await config.getQueuedMessages?.()) || [];\n\t\t}\n\t}\n\n\tstream.push({ type: \"agent_end\", messages: newMessages });\n\tstream.end(newMessages);\n}\n\n/**\n * Stream an assistant response from the LLM.\n * This is where AgentMessage[] gets transformed to Message[] for the LLM.\n */\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\t// Apply context transform if configured (AgentMessage[] → AgentMessage[])\n\tlet messages = context.messages;\n\tif (config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\n\t// Convert to LLM-compatible messages (AgentMessage[] → Message[])\n\tconst llmMessages = await config.convertToLlm(messages);\n\n\t// Build LLM context\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\n\tconst streamFunction = streamFn || streamSimple;\n\n\t// Resolve API key (important for expiring tokens)\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tstream.push({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message: { ...finalMessage } });\n\t\t\t\t}\n\t\t\t\tstream.push({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn await response.result();\n}\n\n/**\n * Execute tool calls from an assistant message.\n */\nasync function executeToolCalls(\n\ttools: AgentTool<any>[] 
| undefined,\n\tassistantMessage: AssistantMessage,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetQueuedMessages?: AgentLoopConfig[\"getQueuedMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; queuedMessages?: AgentMessage[] }> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tconst results: ToolResultMessage[] = [];\n\tlet queuedMessages: AgentMessage[] | undefined;\n\n\tfor (let index = 0; index < toolCalls.length; index++) {\n\t\tconst toolCall = toolCalls[index];\n\t\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tlet result: AgentToolResult<any>;\n\t\tlet isError = false;\n\n\t\ttry {\n\t\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\n\t\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\n\t\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\t\tstream.push({\n\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\ttoolName: toolCall.name,\n\t\t\t\t\targs: toolCall.arguments,\n\t\t\t\t\tpartialResult,\n\t\t\t\t});\n\t\t\t});\n\t\t} catch (e) {\n\t\t\tresult = {\n\t\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? e.message : String(e) }],\n\t\t\t\tdetails: {},\n\t\t\t};\n\t\t\tisError = true;\n\t\t}\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_end\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tresult,\n\t\t\tisError,\n\t\t});\n\n\t\tconst toolResultMessage: ToolResultMessage = {\n\t\t\trole: \"toolResult\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tcontent: result.content,\n\t\t\tdetails: result.details,\n\t\t\tisError,\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\tresults.push(toolResultMessage);\n\t\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\t\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\t\t// Check for queued messages - skip remaining tools if user interrupted\n\t\tif (getQueuedMessages) {\n\t\t\tconst queued = await getQueuedMessages();\n\t\t\tif (queued.length > 0) {\n\t\t\t\tqueuedMessages = queued;\n\t\t\t\tconst remainingCalls = toolCalls.slice(index + 1);\n\t\t\t\tfor (const skipped of remainingCalls) {\n\t\t\t\t\tresults.push(skipToolCall(skipped, stream));\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { toolResults: results, queuedMessages };\n}\n\nfunction skipToolCall(\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): ToolResultMessage {\n\tconst result: AgentToolResult<any> = {\n\t\tcontent: [{ type: \"text\", text: \"Skipped due to queued user message.\" }],\n\t\tdetails: {},\n\t};\n\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: toolCall.arguments,\n\t});\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError: true,\n\t});\n\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: {},\n\t\tisError: true,\n\t\ttimestamp: Date.now(),\n\t};\n\n\tstream.push({ type: 
\"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\treturn toolResultMessage;\n}\n"]}