@oh-my-pi/pi-agent-core 1.337.0
- package/README.md +370 -0
- package/package.json +40 -0
- package/src/agent-loop.ts +409 -0
- package/src/agent.ts +386 -0
- package/src/index.ts +8 -0
- package/src/proxy.ts +339 -0
- package/src/types.ts +210 -0
package/src/proxy.ts
ADDED
@@ -0,0 +1,339 @@
/**
 * Proxy stream function for apps that route LLM calls through a server.
 * The server manages auth and proxies requests to LLM providers.
 */

import {
  type AssistantMessage,
  type AssistantMessageEvent,
  type Context,
  EventStream,
  type Model,
  type SimpleStreamOptions,
  type StopReason,
  type ToolCall,
} from "@oh-my-pi/pi-ai";
import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";

// Event stream that resolves to the final assistant message (on "done") or the error message (on "error")
class ProxyMessageEventStream extends EventStream<AssistantMessageEvent, AssistantMessage> {
  constructor() {
    super(
      (event) => event.type === "done" || event.type === "error",
      (event) => {
        if (event.type === "done") return event.message;
        if (event.type === "error") return event.error;
        throw new Error("Unexpected event type");
      },
    );
  }
}

/**
 * Proxy event types - server sends these with partial field stripped to reduce bandwidth.
 */
export type ProxyAssistantMessageEvent =
  | { type: "start" }
  | { type: "text_start"; contentIndex: number }
  | { type: "text_delta"; contentIndex: number; delta: string }
  | { type: "text_end"; contentIndex: number; contentSignature?: string }
  | { type: "thinking_start"; contentIndex: number }
  | { type: "thinking_delta"; contentIndex: number; delta: string }
  | { type: "thinking_end"; contentIndex: number; contentSignature?: string }
  | { type: "toolcall_start"; contentIndex: number; id: string; toolName: string }
  | { type: "toolcall_delta"; contentIndex: number; delta: string }
  | { type: "toolcall_end"; contentIndex: number }
  | {
      type: "done";
      reason: Extract<StopReason, "stop" | "length" | "toolUse">;
      usage: AssistantMessage["usage"];
    }
  | {
      type: "error";
      reason: Extract<StopReason, "aborted" | "error">;
      errorMessage?: string;
      usage: AssistantMessage["usage"];
    };
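
// Example wire format (editor's note, inferred from the SSE parsing in
// streamProxy below): the server responds with newline-delimited frames, each
// "data: " line carrying one JSON-encoded ProxyAssistantMessageEvent, e.g.
//
//   data: {"type":"start"}
//   data: {"type":"text_start","contentIndex":0}
//   data: {"type":"text_delta","contentIndex":0,"delta":"Hello"}
//   data: {"type":"done","reason":"stop","usage":{...}}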

export interface ProxyStreamOptions extends SimpleStreamOptions {
  /** Auth token for the proxy server */
  authToken: string;
  /** Proxy server URL (e.g., "https://genai.example.com") */
  proxyUrl: string;
}

/**
 * Stream function that proxies through a server instead of calling LLM providers directly.
 * The server strips the partial field from delta events to reduce bandwidth.
 * We reconstruct the partial message client-side.
 *
 * Use this as the `streamFn` option when creating an Agent that needs to go through a proxy.
 *
 * @example
 * ```typescript
 * const agent = new Agent({
 *   streamFn: async (model, context, options) =>
 *     streamProxy(model, context, {
 *       ...options,
 *       authToken: await getAuthToken(),
 *       proxyUrl: "https://genai.example.com",
 *     }),
 * });
 * ```
 */
export function streamProxy(model: Model<any>, context: Context, options: ProxyStreamOptions): ProxyMessageEventStream {
  const stream = new ProxyMessageEventStream();

  (async () => {
    // Initialize the partial message that we'll build up from events
    const partial: AssistantMessage = {
      role: "assistant",
      stopReason: "stop",
      content: [],
      api: model.api,
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
      },
      timestamp: Date.now(),
    };

    let reader: ReadableStreamDefaultReader<Uint8Array> | undefined;

    const abortHandler = () => {
      if (reader) {
        reader.cancel("Request aborted by user").catch(() => {});
      }
    };

    if (options.signal) {
      options.signal.addEventListener("abort", abortHandler);
    }

    try {
      const response = await fetch(`${options.proxyUrl}/api/stream`, {
        method: "POST",
        headers: {
          Authorization: `Bearer ${options.authToken}`,
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          model,
          context,
          options: {
            temperature: options.temperature,
            maxTokens: options.maxTokens,
            reasoning: options.reasoning,
          },
        }),
        signal: options.signal,
      });

      if (!response.ok) {
        let errorMessage = `Proxy error: ${response.status} ${response.statusText}`;
        try {
          const errorData = (await response.json()) as { error?: string };
          if (errorData.error) {
            errorMessage = `Proxy error: ${errorData.error}`;
          }
        } catch {
          // Couldn't parse error response
        }
        throw new Error(errorMessage);
      }

      reader = response.body!.getReader() as ReadableStreamDefaultReader<Uint8Array>;
      const decoder = new TextDecoder();
      let buffer = "";

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        if (options.signal?.aborted) {
          throw new Error("Request aborted by user");
        }

        // SSE frames are newline-delimited; keep any trailing partial line in the buffer
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";

        for (const line of lines) {
          if (line.startsWith("data: ")) {
            const data = line.slice(6).trim();
            if (data) {
              const proxyEvent = JSON.parse(data) as ProxyAssistantMessageEvent;
              const event = processProxyEvent(proxyEvent, partial);
              if (event) {
                stream.push(event);
              }
            }
          }
        }
      }

      if (options.signal?.aborted) {
        throw new Error("Request aborted by user");
      }

      stream.end();
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      const reason = options.signal?.aborted ? "aborted" : "error";
      partial.stopReason = reason;
      partial.errorMessage = errorMessage;
      stream.push({
        type: "error",
        reason,
        error: partial,
      });
      stream.end();
    } finally {
      if (options.signal) {
        options.signal.removeEventListener("abort", abortHandler);
      }
    }
  })();

  return stream;
}
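
// Editor's sketch: consuming the stream outside an Agent. NOTE: this assumes
// EventStream from @oh-my-pi/pi-ai is async-iterable, which this file does not
// confirm; treat the loop as illustrative pseudocode.
//
//   const stream = streamProxy(model, context, { ...opts, authToken, proxyUrl });
//   for await (const event of stream) {
//     if (event.type === "text_delta") process.stdout.write(event.delta);
//   }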

/**
 * Process a proxy event and update the partial message.
 */
function processProxyEvent(
  proxyEvent: ProxyAssistantMessageEvent,
  partial: AssistantMessage,
): AssistantMessageEvent | undefined {
  switch (proxyEvent.type) {
    case "start":
      return { type: "start", partial };

    case "text_start":
      partial.content[proxyEvent.contentIndex] = { type: "text", text: "" };
      return { type: "text_start", contentIndex: proxyEvent.contentIndex, partial };

    case "text_delta": {
      const content = partial.content[proxyEvent.contentIndex];
      if (content?.type === "text") {
        content.text += proxyEvent.delta;
        return {
          type: "text_delta",
          contentIndex: proxyEvent.contentIndex,
          delta: proxyEvent.delta,
          partial,
        };
      }
      throw new Error("Received text_delta for non-text content");
    }

    case "text_end": {
      const content = partial.content[proxyEvent.contentIndex];
      if (content?.type === "text") {
        content.textSignature = proxyEvent.contentSignature;
        return {
          type: "text_end",
          contentIndex: proxyEvent.contentIndex,
          content: content.text,
          partial,
        };
      }
      throw new Error("Received text_end for non-text content");
    }

    case "thinking_start":
      partial.content[proxyEvent.contentIndex] = { type: "thinking", thinking: "" };
      return { type: "thinking_start", contentIndex: proxyEvent.contentIndex, partial };

    case "thinking_delta": {
      const content = partial.content[proxyEvent.contentIndex];
      if (content?.type === "thinking") {
        content.thinking += proxyEvent.delta;
        return {
          type: "thinking_delta",
          contentIndex: proxyEvent.contentIndex,
          delta: proxyEvent.delta,
          partial,
        };
      }
      throw new Error("Received thinking_delta for non-thinking content");
    }

    case "thinking_end": {
      const content = partial.content[proxyEvent.contentIndex];
      if (content?.type === "thinking") {
        content.thinkingSignature = proxyEvent.contentSignature;
        return {
          type: "thinking_end",
          contentIndex: proxyEvent.contentIndex,
          content: content.thinking,
          partial,
        };
      }
      throw new Error("Received thinking_end for non-thinking content");
    }

    case "toolcall_start":
      partial.content[proxyEvent.contentIndex] = {
        type: "toolCall",
        id: proxyEvent.id,
        name: proxyEvent.toolName,
        arguments: {},
        partialJson: "",
      } satisfies ToolCall & { partialJson: string } as ToolCall;
      return { type: "toolcall_start", contentIndex: proxyEvent.contentIndex, partial };

    case "toolcall_delta": {
      const content = partial.content[proxyEvent.contentIndex];
      if (content?.type === "toolCall") {
        (content as any).partialJson += proxyEvent.delta;
        content.arguments = parseStreamingJson((content as any).partialJson) || {};
        partial.content[proxyEvent.contentIndex] = { ...content }; // Trigger reactivity
        return {
          type: "toolcall_delta",
          contentIndex: proxyEvent.contentIndex,
          delta: proxyEvent.delta,
          partial,
        };
      }
      throw new Error("Received toolcall_delta for non-toolCall content");
    }

    case "toolcall_end": {
      const content = partial.content[proxyEvent.contentIndex];
      if (content?.type === "toolCall") {
        delete (content as any).partialJson;
        return {
          type: "toolcall_end",
          contentIndex: proxyEvent.contentIndex,
          toolCall: content,
          partial,
        };
      }
      return undefined;
    }

    case "done":
      partial.stopReason = proxyEvent.reason;
      partial.usage = proxyEvent.usage;
      return { type: "done", reason: proxyEvent.reason, message: partial };

    case "error":
      partial.stopReason = proxyEvent.reason;
      partial.errorMessage = proxyEvent.errorMessage;
      partial.usage = proxyEvent.usage;
      return { type: "error", reason: proxyEvent.reason, error: partial };

    default: {
      const _exhaustiveCheck: never = proxyEvent;
      console.warn(`Unhandled proxy event type: ${(proxyEvent as any).type}`);
      return undefined;
    }
  }
}
package/src/types.ts
ADDED
@@ -0,0 +1,210 @@
import type {
  AssistantMessageEvent,
  ImageContent,
  Message,
  Model,
  SimpleStreamOptions,
  streamSimple,
  TextContent,
  Tool,
  ToolResultMessage,
} from "@oh-my-pi/pi-ai";
import type { Static, TSchema } from "@sinclair/typebox";

/** Stream function - can return sync or Promise for async config lookup */
export type StreamFn = (
  ...args: Parameters<typeof streamSimple>
) => ReturnType<typeof streamSimple> | Promise<ReturnType<typeof streamSimple>>;
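
// Editor's sketch: a StreamFn that routes through the proxy from ./proxy.ts,
// mirroring the @example in that file (getAuthToken and the URL are placeholders;
// whether streamProxy's return type satisfies StreamFn exactly is not shown here):
//
//   const proxyStreamFn: StreamFn = async (model, context, options) =>
//     streamProxy(model, context, {
//       ...options,
//       authToken: await getAuthToken(),
//       proxyUrl: "https://genai.example.com",
//     });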

/**
 * Configuration for the agent loop.
 */
export interface AgentLoopConfig extends SimpleStreamOptions {
  model: Model<any>;

  /**
   * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.
   *
   * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage
   * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications,
   * status messages) should be filtered out.
   *
   * @example
   * ```typescript
   * convertToLlm: (messages) => messages.flatMap(m => {
   *   if (m.role === "hookMessage") {
   *     // Convert custom message to user message
   *     return [{ role: "user", content: m.content, timestamp: m.timestamp }];
   *   }
   *   if (m.role === "notification") {
   *     // Filter out UI-only messages
   *     return [];
   *   }
   *   // Pass through standard LLM messages
   *   return [m];
   * })
   * ```
   */
  convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;

  /**
   * Optional transform applied to the context before `convertToLlm`.
   *
   * Use this for operations that work at the AgentMessage level:
   * - Context window management (pruning old messages)
   * - Injecting context from external sources
   *
   * @example
   * ```typescript
   * transformContext: async (messages) => {
   *   if (estimateTokens(messages) > MAX_TOKENS) {
   *     return pruneOldMessages(messages);
   *   }
   *   return messages;
   * }
   * ```
   */
  transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;

  /**
   * Resolves an API key dynamically for each LLM call.
   *
   * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire
   * during long-running tool execution phases.
   */
  getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;

  /**
   * Returns queued messages to inject into the conversation.
   *
   * Called after each turn to check for user interruptions or injected messages.
   * If messages are returned, they're added to the context before the next LLM call.
   */
  getQueuedMessages?: () => Promise<AgentMessage[]>;

  /**
   * Provides tool execution context, resolved per tool call.
   * Use for late-bound UI or session state access.
   */
  getToolContext?: () => AgentToolContext | undefined;
}
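
// Editor's sketch: a minimal AgentLoopConfig. The `model` value is a placeholder
// (real Model instances come from @oh-my-pi/pi-ai), and whether any further
// SimpleStreamOptions fields are required is not shown in this diff.
//
//   const config: AgentLoopConfig = {
//     model,
//     convertToLlm: (messages) =>
//       messages.filter((m): m is Message =>
//         m.role === "user" || m.role === "assistant" || m.role === "toolResult"),
//   };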

/**
 * Thinking/reasoning level for models that support it.
 * Note: "xhigh" is only supported by OpenAI gpt-5.1-codex-max, gpt-5.2, and gpt-5.2-codex models.
 */
export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh";

/**
 * Extensible interface for custom app messages.
 * Apps can extend via declaration merging:
 *
 * @example
 * ```typescript
 * declare module "@oh-my-pi/agent" {
 *   interface CustomAgentMessages {
 *     artifact: ArtifactMessage;
 *     notification: NotificationMessage;
 *   }
 * }
 * ```
 */
export interface CustomAgentMessages {
  // Empty by default - apps extend via declaration merging
}

/**
 * AgentMessage: Union of LLM messages + custom messages.
 * This abstraction allows apps to add custom message types while maintaining
 * type safety and compatibility with the base LLM messages.
 */
export type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];

/**
 * Agent state containing all configuration and conversation data.
 */
export interface AgentState {
  systemPrompt: string;
  model: Model<any>;
  thinkingLevel: ThinkingLevel;
  tools: AgentTool<any>[];
  messages: AgentMessage[]; // Can include attachments + custom message types
  isStreaming: boolean;
  streamMessage: AgentMessage | null;
  pendingToolCalls: Set<string>;
  error?: string;
}

export interface AgentToolResult<T> {
  // Content blocks supporting text and images
  content: (TextContent | ImageContent)[];
  // Details to be displayed in a UI or logged
  details: T;
}

// Callback for streaming tool execution updates
export type AgentToolUpdateCallback<T = any> = (partialResult: AgentToolResult<T>) => void;

/** Options passed to renderResult */
export interface RenderResultOptions {
  /** Whether the result view is expanded */
  expanded: boolean;
  /** Whether this is a partial/streaming result */
  isPartial: boolean;
}

/**
 * Context passed to tool execution.
 * Apps can extend via declaration merging.
 */
export interface AgentToolContext {
  // Empty by default - apps extend via declaration merging
}

// AgentTool extends Tool but adds the execute function
export interface AgentTool<TParameters extends TSchema = TSchema, TDetails = any, TTheme = unknown>
  extends Tool<TParameters> {
  // A human-readable label for the tool to be displayed in UI
  label: string;
  execute: (
    toolCallId: string,
    params: Static<TParameters>,
    signal?: AbortSignal,
    onUpdate?: AgentToolUpdateCallback<TDetails>,
    context?: AgentToolContext,
  ) => Promise<AgentToolResult<TDetails>>;

  /** Optional custom rendering for tool call display (returns UI component) */
  renderCall?: (args: Static<TParameters>, theme: TTheme) => unknown;

  /** Optional custom rendering for tool result display (returns UI component) */
  renderResult?: (result: AgentToolResult<TDetails>, options: RenderResultOptions, theme: TTheme) => unknown;
}

// AgentContext is like Context but uses AgentTool
export interface AgentContext {
  systemPrompt: string;
  messages: AgentMessage[];
  tools?: AgentTool<any>[];
}
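
// Editor's sketch of a minimal AgentTool. Assumes Tool<T> from @oh-my-pi/pi-ai
// carries the usual name/description/parameters fields; this diff does not show them.
//
//   import { Type } from "@sinclair/typebox";
//
//   const params = Type.Object({ text: Type.String() });
//   const echoTool: AgentTool<typeof params, string> = {
//     name: "echo",
//     label: "Echo",
//     description: "Echoes its input back to the model",
//     parameters: params,
//     execute: async (toolCallId, { text }) => ({
//       content: [{ type: "text", text }],
//       details: text,
//     }),
//   };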

/**
 * Events emitted by the Agent for UI updates.
 * These events provide fine-grained lifecycle information for messages, turns, and tool executions.
 */
export type AgentEvent =
  // Agent lifecycle
  | { type: "agent_start" }
  | { type: "agent_end"; messages: AgentMessage[] }
  // Turn lifecycle - a turn is one assistant response + any tool calls/results
  | { type: "turn_start" }
  | { type: "turn_end"; message: AgentMessage; toolResults: ToolResultMessage[] }
  // Message lifecycle - emitted for user, assistant, and toolResult messages
  | { type: "message_start"; message: AgentMessage }
  // Only emitted for assistant messages during streaming
  | { type: "message_update"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent }
  | { type: "message_end"; message: AgentMessage }
  // Tool execution lifecycle
  | { type: "tool_execution_start"; toolCallId: string; toolName: string; args: any }
  | { type: "tool_execution_update"; toolCallId: string; toolName: string; args: any; partialResult: any }
  | { type: "tool_execution_end"; toolCallId: string; toolName: string; result: any; isError: boolean };