@f5xc-salesdemos/pi-agent-core 14.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,319 @@
1
+ # Changelog
2
+
3
+ ## [Unreleased]
4
+
5
+ ## [14.0.1] - 2026-04-08
6
+ ### Added
7
+
8
+ - Added `onAssistantMessageEvent` callback option to inspect assistant streaming events before they are emitted, enabling abort decisions before buffered events continue flowing
9
+ - Added `setAssistantMessageEventInterceptor()` method to dynamically set or update the assistant message event interceptor
10
+
11
+ ## [13.13.0] - 2026-03-18
12
+
13
+ ### Added
14
+
15
+ - Added `startup.checkUpdate` setting (enabled by default); set it to `false` to skip the update check on agent initialization
16
+
17
+ ## [13.12.7] - 2026-03-16
18
+
19
+ ### Added
20
+
21
+ - Added overload for `prompt()` method accepting a string input with optional options parameter
22
+
23
+ ### Fixed
24
+
25
+ - Fixed stale forced toolChoice being passed to provider after tools are refreshed mid-turn
26
+
27
+ ## [13.9.16] - 2026-03-10
28
+ ### Added
29
+
30
+ - Added `onPayload` option to `AgentOptions` to inspect or replace provider payloads before they are sent
31
+
32
+ ## [13.9.3] - 2026-03-07
33
+
34
+ ### Added
35
+
36
+ - Exported `ThinkingLevel` selector constants and types for configuring agent reasoning behavior
37
+ - Added `inherit` thinking level option to defer reasoning configuration to higher-level selectors
38
+ - Added `serviceTier` option to configure service tier for agent requests
39
+
40
+ ### Changed
41
+
42
+ - Changed `thinkingLevel` from required string to optional `Effort` type, allowing undefined state
43
+ - Updated `setThinkingLevel()` method to accept `Effort | undefined` instead of `ThinkingLevel` string
44
+
45
+ ## [13.4.0] - 2026-03-01
46
+ ### Added
47
+
48
+ - Added `getToolChoice` option to dynamically override tool choice per LLM call
49
+
50
+ ## [13.3.8] - 2026-02-28
51
+ ### Changed
52
+
53
+ - Changed intent field name from `agent__intent` to `_i` in tool schemas
54
+
55
+ ### Fixed
56
+
57
+ - Fixed synthetic tool result text formatting so aborted/error tool results no longer emit `Tool execution was aborted.: Request was aborted` style punctuation.
58
+ ## [13.3.7] - 2026-02-27
59
+ ### Added
60
+
61
+ - Added `lenientArgValidation` option to tools to allow graceful handling of argument validation errors by passing raw arguments to execute() instead of returning an error to the LLM
62
+
63
+ ## [13.3.1] - 2026-02-26
64
+ ### Added
65
+
66
+ - Added `topP`, `topK`, `minP`, `presencePenalty`, and `repetitionPenalty` options to `AgentOptions` for fine-grained sampling control
67
+ - Added getter and setter properties for sampling parameters on the `Agent` class to allow runtime configuration
68
+
69
+ ## [13.1.0] - 2026-02-23
70
+
71
+ ### Changed
72
+
73
+ - Removed per-tool `agent__intent` field description from injected schema to reduce token usage; intent format is now documented once in the system prompt instead of repeated in every tool definition
74
+ ## [12.19.0] - 2026-02-22
75
+ ### Changed
76
+
77
+ - Updated tool result messages to include error details when tool execution fails
78
+
79
+ ## [12.14.0] - 2026-02-19
80
+
81
+ ### Added
82
+
83
+ - Added `intentTracing` option to enable intent goal extraction from tool calls, allowing models to specify high-level goals via a required `_intent` field that is automatically injected into tool schemas and stripped from arguments before execution
84
+
85
+ ## [12.11.0] - 2026-02-19
86
+
87
+ ### Added
88
+
89
+ - Exported `AgentBusyError` exception class for handling concurrent agent operations
90
+
91
+ ### Changed
92
+
93
+ - Agent now throws `AgentBusyError` instead of generic `Error` when attempting concurrent operations
94
+
95
+ ## [12.8.0] - 2026-02-16
96
+
97
+ ### Added
98
+
99
+ - Added `transformToolCallArguments` option to `AgentOptions` and `AgentLoopConfig` for transforming tool call arguments before execution (e.g. secret deobfuscation)
100
+
101
+ ## [12.2.0] - 2026-02-13
102
+
103
+ ### Added
104
+
105
+ - Added `providerSessionState` option to share provider state map for session-scoped transport and session caches
106
+ - Added `preferWebsockets` option to hint that websocket transport should be preferred when supported by the provider implementation
107
+
108
+ ## [11.10.0] - 2026-02-10
109
+
110
+ ### Added
111
+
112
+ - Added `temperature` option to `AgentOptions` to control LLM sampling temperature
113
+ - Added `temperature` getter and setter to `Agent` class for runtime configuration
114
+
115
+ ## [11.6.0] - 2026-02-07
116
+
117
+ ### Added
118
+
119
+ - Added `hasQueuedMessages()` method to check for pending steering/follow-up messages
120
+ - Resume queued steering and follow-up messages from `continue()` after auto-compaction
121
+
122
+ ### Changed
123
+
124
+ - Extracted `dequeueSteeringMessages()` and `dequeueFollowUpMessages()` from inline config callbacks
125
+ - Added `skipInitialSteeringPoll` option to `_runLoop()` for correct queue resume ordering
126
+
127
+ ## [11.3.0] - 2026-02-06
128
+
129
+ ### Added
130
+
131
+ - Added `maxRetryDelayMs` option to AgentOptions to cap server-requested retry delays, allowing higher-level retry logic to handle long waits with user visibility
132
+
133
+ ### Changed
134
+
135
+ - Updated ThinkingLevel documentation to include support for gpt-5.3 and gpt-5.3-codex models with 'xhigh' thinking level
136
+
137
+ ## [11.2.0] - 2026-02-05
138
+
139
+ ### Fixed
140
+
141
+ - Fixed handling of aborted requests to properly throw abort errors when stream terminates without a terminal event
142
+
143
+ ## [10.5.0] - 2026-02-04
144
+
145
+ ### Added
146
+
147
+ - Added `concurrency` option to `AgentTool` to control tool scheduling: "shared" (default, runs in parallel) or "exclusive" (runs alone)
148
+ - Implemented parallel execution of shared tools within a single agent turn for improved performance
149
+
150
+ ### Changed
151
+
152
+ - Refactored tool execution to support concurrent scheduling with proper interrupt handling and steering message checks
153
+
154
+ ## [9.2.2] - 2026-01-31
155
+
156
+ ### Added
157
+
158
+ - Added toolChoice option to AgentPromptOptions for controlling tool selection
159
+
160
+ ## [8.2.0] - 2026-01-24
161
+
162
+ ### Changed
163
+
164
+ - Updated TypeScript configuration to use a dedicated tsconfig.publish.json for publish-time builds
165
+
166
+ ## [8.0.0] - 2026-01-23
167
+
168
+ ### Added
169
+
170
+ - Added `nonAbortable` option to tools to ignore abort signals during execution
171
+
172
+ ## [6.8.0] - 2026-01-20
173
+
174
+ ### Changed
175
+
176
+ - Updated proxy stream processing to use utility function for reading lines
177
+
178
+ ## [6.2.0] - 2026-01-19
179
+
180
+ ### Added
181
+
182
+ - Enhanced getToolContext to receive tool call batch information including batchId, index, total count, and tool call details
183
+
184
+ ## [5.6.7] - 2026-01-18
185
+
186
+ ### Fixed
187
+
188
+ - Added proper tool result messages for tool calls that are aborted or error out
189
+ - Ensured tool_use/tool_result pairing is maintained when tool execution fails
190
+
191
+ ## [4.6.0] - 2026-01-12
192
+
193
+ ### Changed
194
+
195
+ - Modified assistant message handling to split messages around tool results for improved readability when using Cursor tools
196
+
197
+ ### Fixed
198
+
199
+ - Fixed tool result ordering in Cursor mode by buffering results and emitting them at the correct position within assistant messages
200
+
201
+ ## [4.3.0] - 2026-01-11
202
+
203
+ ### Added
204
+
205
+ - Added `cursorExecHandlers` and `cursorOnToolResult` options for local tool execution with cursor-based streaming
206
+ - Added `emitExternalEvent` method to allow external event injection into the agent state
207
+
208
+ ## [4.0.0] - 2026-01-10
209
+
210
+ ### Added
211
+
212
+ - Added `popLastSteer()` and `popLastFollowUp()` methods to remove and return the last queued message (LIFO) for dequeue operations
213
+ - `thinkingBudgets` option on `Agent` and `AgentOptions` to customize token budgets per thinking level
214
+ - `sessionId` option on `Agent` to forward session identifiers to LLM providers for session-based caching
215
+
216
+ ### Fixed
217
+
218
+ - `minimal` thinking level now maps to `minimal` reasoning effort instead of being treated as `low`
219
+
220
+ ## [3.33.0] - 2026-01-08
221
+
222
+ ### Fixed
223
+
224
+ - Ensured aborted assistant responses always include an error message for callers.
225
+ - Filtered thinking blocks from Cerebras request context to keep multi-turn prompts compatible.
226
+
227
+ ## [3.21.0] - 2026-01-06
228
+
229
+ ### Changed
230
+
231
+ - Switched from the local workspace copy of `pi-ai` to the upstream published `@oh-my-pi/pi-ai` package
232
+
233
+ ### Added
234
+
235
+ - Added `sessionId` option for provider caching (e.g., OpenAI Codex session-based prompt caching)
236
+ - Added `sessionId` getter/setter on Agent class for runtime session switching
237
+
238
+ ## [3.20.0] - 2026-01-06
239
+
240
+ ### Breaking Changes
241
+
242
+ - Replaced `queueMessage`/`queueMode` with steering + follow-up queues: use `steer`, `setSteeringMode`, and `getSteeringMode` for mid-run interruptions, and `followUp`, `setFollowUpMode`, and `getFollowUpMode` for post-turn messages
243
+ - Agent loop callbacks now use `getSteeringMessages` and `getFollowUpMessages` instead of `getQueuedMessages`
244
+
245
+ ### Added
246
+
247
+ - Added follow-up message queue support so new user messages can continue a run after the agent would otherwise stop
248
+ - Added `RenderResultOptions.spinnerFrame` for animated tool-result rendering
249
+
250
+ ### Changed
251
+
252
+ - `prompt()` and `continue()` now throw when the agent is already streaming; use steering or follow-up queues instead
253
+
254
+ ## [3.4.1337] - 2026-01-03
255
+
256
+ ### Added
257
+
258
+ - Added `popMessage()` method to Agent class for removing and retrieving the last message
259
+ - Added abort signal checks during response streaming for faster interruption handling
260
+
261
+ ### Fixed
262
+
263
+ - Fixed abort handling to properly return aborted message state when stream is interrupted mid-response
264
+
265
+ ## [1.341.0] - 2026-01-03
266
+
267
+ ### Added
268
+
269
+ - Added `interruptMode` option to control when queued messages interrupt tool execution.
270
+ - Implemented "immediate" mode (default) to check queue after each tool and interrupt remaining tools.
271
+ - Implemented "wait" mode to defer queue processing until the entire turn completes.
272
+ - Added getter and setter methods for `interruptMode` on Agent class.
273
+
274
+ ## [1.337.1] - 2026-01-02
275
+
276
+ ### Changed
277
+
278
+ - Forked to @oh-my-pi scope with unified versioning across all packages
279
+
280
+ ## [1.337.0] - 2026-01-02
281
+
282
+ Initial release under @oh-my-pi scope. See previous releases at [badlogic/pi-mono](https://github.com/badlogic/pi-mono).
283
+
284
+ ## [0.31.0] - 2026-01-02
285
+
286
+ ### Breaking Changes
287
+
288
+ - **Transport abstraction removed**: `ProviderTransport`, `AppTransport`, and `AgentTransport` interface have been removed. Use the `streamFn` option directly for custom streaming implementations.
289
+
290
+ - **Agent options renamed**:
291
+ - `transport` → removed (use `streamFn` instead)
292
+ - `messageTransformer` → `convertToLlm`
293
+ - `preprocessor` → `transformContext`
294
+
295
+ - **`AppMessage` renamed to `AgentMessage`**: All references to `AppMessage` have been renamed to `AgentMessage` for consistency.
296
+
297
+ - **`CustomMessages` renamed to `CustomAgentMessages`**: The declaration merging interface has been renamed.
298
+
299
+ - **`UserMessageWithAttachments` and `Attachment` types removed**: Attachment handling is now the responsibility of the `convertToLlm` function.
300
+
301
+ - **Agent loop moved from `@oh-my-pi/pi-ai`**: The `agentLoop`, `agentLoopContinue`, and related types have moved to this package. Import from `@oh-my-pi/pi-agent` instead.
302
+
303
+ ### Added
304
+
305
+ - `streamFn` option on `Agent` for custom stream implementations. Default uses `streamSimple` from pi-ai.
306
+
307
+ - `streamProxy()` utility function for browser apps that need to proxy LLM calls through a backend server. Replaces the removed `AppTransport`.
308
+
309
+ - `getApiKey` option for dynamic API key resolution (useful for expiring OAuth tokens like GitHub Copilot).
310
+
311
+ - `agentLoop()` and `agentLoopContinue()` low-level functions for running the agent loop without the `Agent` class wrapper.
312
+
313
+ - New exported types: `AgentLoopConfig`, `AgentContext`, `AgentTool`, `AgentToolResult`, `AgentToolUpdateCallback`, `StreamFn`.
314
+
315
+ ### Changed
316
+
317
+ - `Agent` constructor now has all options optional (empty options use defaults).
318
+
319
+ - `queueMessage()` is now synchronous (no longer returns a Promise).
package/README.md ADDED
@@ -0,0 +1,375 @@
1
+ # @f5xc-salesdemos/pi-agent
2
+
3
+ Stateful agent with tool execution and event streaming. Built on `@f5xc-salesdemos/pi-ai`.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @f5xc-salesdemos/pi-agent
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ```typescript
14
+ import { Agent } from "@f5xc-salesdemos/pi-agent";
15
+ import { getModel } from "@f5xc-salesdemos/pi-ai";
16
+
17
+ const agent = new Agent({
18
+ initialState: {
19
+ systemPrompt: "You are a helpful assistant.",
20
+ model: getModel("anthropic", "claude-sonnet-4-20250514"),
21
+ },
22
+ });
23
+
24
+ agent.subscribe((event) => {
25
+ if (event.type === "message_update" && event.assistantMessageEvent.type === "text_delta") {
26
+ // Stream just the new text chunk
27
+ process.stdout.write(event.assistantMessageEvent.delta);
28
+ }
29
+ });
30
+
31
+ await agent.prompt("Hello!");
32
+ ```
33
+
34
+ ## Core Concepts
35
+
36
+ ### AgentMessage vs LLM Message
37
+
38
+ The agent works with `AgentMessage`, a flexible type that can include:
39
+
40
+ - Standard LLM messages (`user`, `assistant`, `toolResult`)
41
+ - Custom app-specific message types via declaration merging
42
+
43
+ LLMs only understand `user`, `assistant`, and `toolResult`. The `convertToLlm` function bridges this gap by filtering and transforming messages before each LLM call.
44
+
45
+ ### Message Flow
46
+
47
+ ```
48
+ AgentMessage[] → transformContext() → AgentMessage[] → convertToLlm() → Message[] → LLM
49
+ (optional) (required)
50
+ ```
51
+
52
+ 1. **transformContext**: Prune old messages, inject external context
53
+ 2. **convertToLlm**: Filter out UI-only messages, convert custom types to LLM format
54
+
55
+ ## Event Flow
56
+
57
+ The agent emits events for UI updates. Understanding the event sequence helps build responsive interfaces.
58
+
59
+ ### prompt() Event Sequence
60
+
61
+ When you call `prompt("Hello")`:
62
+
63
+ ```
64
+ prompt("Hello")
65
+ ├─ agent_start
66
+ ├─ turn_start
67
+ ├─ message_start { message: userMessage } // Your prompt
68
+ ├─ message_end { message: userMessage }
69
+ ├─ message_start { message: assistantMessage } // LLM starts responding
70
+ ├─ message_update { message: partial... } // Streaming chunks
71
+ ├─ message_update { message: partial... }
72
+ ├─ message_end { message: assistantMessage } // Complete response
73
+ ├─ turn_end { message, toolResults: [] }
74
+ └─ agent_end { messages: [...] }
75
+ ```
76
+
77
+ ### With Tool Calls
78
+
79
+ If the assistant calls tools, the loop continues:
80
+
81
+ ```
82
+ prompt("Read config.json")
83
+ ├─ agent_start
84
+ ├─ turn_start
85
+ ├─ message_start/end { userMessage }
86
+ ├─ message_start { assistantMessage with toolCall }
87
+ ├─ message_update...
88
+ ├─ message_end { assistantMessage }
89
+ ├─ tool_execution_start { toolCallId, toolName, args }
90
+ ├─ tool_execution_update { partialResult } // If tool streams
91
+ ├─ tool_execution_end { toolCallId, result }
92
+ ├─ message_start/end { toolResultMessage }
93
+ ├─ turn_end { message, toolResults: [toolResult] }
94
+
95
+ ├─ turn_start // Next turn
96
+ ├─ message_start { assistantMessage } // LLM responds to tool result
97
+ ├─ message_update...
98
+ ├─ message_end
99
+ ├─ turn_end
100
+ └─ agent_end
101
+ ```
102
+
103
+ ### continue() Event Sequence
104
+
105
+ `continue()` resumes from existing context without adding a new message. Use it for retries after errors.
106
+
107
+ ```typescript
108
+ // After an error, retry from current state
109
+ await agent.continue();
110
+ ```
111
+
112
+ The last message in context must be `user` or `toolResult` (not `assistant`).
113
+
114
+ ### Event Types
115
+
116
+ | Event | Description |
117
+ | ----------------------- | --------------------------------------------------------------- |
118
+ | `agent_start` | Agent begins processing |
119
+ | `agent_end` | Agent completes with all new messages |
120
+ | `turn_start` | New turn begins (one LLM call + tool executions) |
121
+ | `turn_end` | Turn completes with assistant message and tool results |
122
+ | `message_start` | Any message begins (user, assistant, toolResult) |
123
+ | `message_update` | **Assistant only.** Includes `assistantMessageEvent` with delta |
124
+ | `message_end` | Message completes |
125
+ | `tool_execution_start` | Tool begins |
126
+ | `tool_execution_update` | Tool streams progress |
127
+ | `tool_execution_end` | Tool completes |
128
+
129
+ ## Agent Options
130
+
131
+ ```typescript
132
+ const agent = new Agent({
133
+ // Initial state
134
+ initialState: {
135
+ systemPrompt: string,
136
+ model: Model,
137
+ thinkingLevel: "off" | "minimal" | "low" | "medium" | "high" | "xhigh",
138
+ tools: AgentTool<any>[],
139
+ messages: AgentMessage[],
140
+ },
141
+
142
+ // Convert AgentMessage[] to LLM Message[] (required for custom message types)
143
+ convertToLlm: (messages) => messages.filter(...),
144
+
145
+ // Transform context before convertToLlm (for pruning, compaction)
146
+ transformContext: async (messages, signal) => pruneOldMessages(messages),
147
+
148
+ // How to handle queued messages: "one-at-a-time" (default) or "all"
149
+ queueMode: "one-at-a-time",
150
+
151
+ // Custom stream function (for proxy backends)
152
+ streamFn: streamProxy,
153
+
154
+ // Dynamic API key resolution (for expiring OAuth tokens)
155
+ getApiKey: async (provider) => refreshToken(),
156
+
157
+ // Tool execution context (late-bound UI/session access)
158
+ getToolContext: () => ({ /* app-defined */ }),
159
+ });
160
+ ```
161
+
162
+ ## Agent State
163
+
164
+ ```typescript
165
+ interface AgentState {
166
+ systemPrompt: string;
167
+ model: Model;
168
+ thinkingLevel: ThinkingLevel;
169
+ tools: AgentTool<any>[];
170
+ messages: AgentMessage[];
171
+ isStreaming: boolean;
172
+ streamMessage: AgentMessage | null; // Current partial during streaming
173
+ pendingToolCalls: Set<string>;
174
+ error?: string;
175
+ }
176
+ ```
177
+
178
+ Access via `agent.state`. During streaming, `streamMessage` contains the partial assistant message.
179
+
180
+ ## Methods
181
+
182
+ ### Prompting
183
+
184
+ ```typescript
185
+ // Text prompt
186
+ await agent.prompt("Hello");
187
+
188
+ // With images
189
+ await agent.prompt("What's in this image?", [{ type: "image", data: base64Data, mimeType: "image/jpeg" }]);
190
+
191
+ // AgentMessage directly
192
+ await agent.prompt({ role: "user", content: "Hello", timestamp: Date.now() });
193
+
194
+ // Continue from current context (last message must be user or toolResult)
195
+ await agent.continue();
196
+ ```
197
+
198
+ ### State Management
199
+
200
+ ```typescript
201
+ agent.setSystemPrompt("New prompt");
202
+ agent.setModel(getModel("openai", "gpt-4o"));
203
+ agent.setThinkingLevel("medium");
204
+ agent.setTools([myTool]);
205
+ agent.replaceMessages(newMessages);
206
+ agent.appendMessage(message);
207
+ agent.clearMessages();
208
+ agent.reset(); // Clear everything
209
+ ```
210
+
211
+ ### Control
212
+
213
+ ```typescript
214
+ agent.abort(); // Cancel current operation
215
+ await agent.waitForIdle(); // Wait for completion
216
+ ```
217
+
218
+ ### Events
219
+
220
+ ```typescript
221
+ const unsubscribe = agent.subscribe((event) => {
222
+ console.log(event.type);
223
+ });
224
+ unsubscribe();
225
+ ```
226
+
227
+ ## Steering & Follow-up
228
+
229
+ Queue messages to inject during tool execution (steering) or after the agent would otherwise stop (follow-up):
230
+
231
+ ```typescript
232
+ agent.setSteeringMode("one-at-a-time");
233
+ agent.setInterruptMode("immediate");
234
+
235
+ // While agent is running tools
236
+ agent.steer({
237
+ role: "user",
238
+ content: "Stop! Do this instead.",
239
+ timestamp: Date.now(),
240
+ });
241
+
242
+ // Queue a follow-up to run after the current turn completes
243
+ agent.followUp({
244
+ role: "user",
245
+ content: "After that, summarize the changes.",
246
+ timestamp: Date.now(),
247
+ });
248
+ ```
249
+
250
+ Steering messages are checked after each tool call by default. Set `interruptMode` to `"wait"` to defer
251
+ steering until the current turn completes.
252
+
253
+ ## Custom Message Types
254
+
255
+ Extend `AgentMessage` via declaration merging:
256
+
257
+ ```typescript
258
+ declare module "@f5xc-salesdemos/pi-agent" {
259
+ interface CustomAgentMessages {
260
+ notification: { role: "notification"; text: string; timestamp: number };
261
+ }
262
+ }
263
+
264
+ // Now valid
265
+ const msg: AgentMessage = { role: "notification", text: "Info", timestamp: Date.now() };
266
+ ```
267
+
268
+ Handle custom types in `convertToLlm`:
269
+
270
+ ```typescript
271
+ const agent = new Agent({
272
+ convertToLlm: (messages) =>
273
+ messages.flatMap((m) => {
274
+ if (m.role === "notification") return []; // Filter out
275
+ return [m];
276
+ }),
277
+ });
278
+ ```
279
+
280
+ ## Tools
281
+
282
+ Define tools using `AgentTool`:
283
+
284
+ ```typescript
285
+ import { Type } from "@sinclair/typebox";
286
+
287
+ const readFileTool: AgentTool = {
288
+ name: "read_file",
289
+ label: "Read File", // For UI display
290
+ description: "Read a file's contents",
291
+ parameters: Type.Object({
292
+ path: Type.String({ description: "File path" }),
293
+ }),
294
+ execute: async (toolCallId, params, signal, onUpdate, context) => {
295
+ const content = await fs.readFile(params.path, "utf-8");
296
+
297
+ // Optional: stream progress
298
+ onUpdate?.({ content: [{ type: "text", text: "Reading..." }], details: {} });
299
+
300
+ return {
301
+ content: [{ type: "text", text: content }],
302
+ details: { path: params.path, size: content.length },
303
+ };
304
+ },
305
+ };
306
+
307
+ agent.setTools([readFileTool]);
308
+ ```
309
+
310
+ ### Error Handling
311
+
312
+ **Throw an error** when a tool fails. Do not return error messages as content.
313
+
314
+ ```typescript
315
+ execute: async (toolCallId, params, signal, onUpdate) => {
316
+ if (!fs.existsSync(params.path)) {
317
+ throw new Error(`File not found: ${params.path}`);
318
+ }
319
+ // Return content only on success
320
+ return { content: [{ type: "text", text: "..." }] };
321
+ };
322
+ ```
323
+
324
+ Thrown errors are caught by the agent and reported to the LLM as tool errors with `isError: true`.
325
+
326
+ ## Proxy Usage
327
+
328
+ For browser apps that proxy through a backend:
329
+
330
+ ```typescript
331
+ import { Agent, streamProxy } from "@f5xc-salesdemos/pi-agent";
332
+
333
+ const agent = new Agent({
334
+ streamFn: (model, context, options) =>
335
+ streamProxy(model, context, {
336
+ ...options,
337
+ authToken: "...",
338
+ proxyUrl: "https://your-server.com",
339
+ }),
340
+ });
341
+ ```
342
+
343
+ ## Low-Level API
344
+
345
+ For direct control without the Agent class:
346
+
347
+ ```typescript
348
+ import { agentLoop, agentLoopContinue } from "@f5xc-salesdemos/pi-agent";
349
+
350
+ const context: AgentContext = {
351
+ systemPrompt: "You are helpful.",
352
+ messages: [],
353
+ tools: [],
354
+ };
355
+
356
+ const config: AgentLoopConfig = {
357
+ model: getModel("openai", "gpt-4o"),
358
+ convertToLlm: (msgs) => msgs.filter((m) => ["user", "assistant", "toolResult"].includes(m.role)),
359
+ };
360
+
361
+ const userMessage = { role: "user", content: "Hello", timestamp: Date.now() };
362
+
363
+ for await (const event of agentLoop([userMessage], context, config)) {
364
+ console.log(event.type);
365
+ }
366
+
367
+ // Continue from existing context
368
+ for await (const event of agentLoopContinue(context, config)) {
369
+ console.log(event.type);
370
+ }
371
+ ```
372
+
373
+ ## License
374
+
375
+ MIT