@f5xc-salesdemos/pi-agent-core 14.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +319 -0
- package/README.md +375 -0
- package/package.json +63 -0
- package/src/agent-loop.ts +690 -0
- package/src/agent.ts +1019 -0
- package/src/index.ts +10 -0
- package/src/proxy.ts +322 -0
- package/src/thinking.ts +19 -0
- package/src/types.ts +292 -0
package/src/index.ts
ADDED
package/src/proxy.ts
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Proxy stream function for apps that route LLM calls through a server.
|
|
3
|
+
* The server manages auth and proxies requests to LLM providers.
|
|
4
|
+
*/
|
|
5
|
+
import {
|
|
6
|
+
type AssistantMessage,
|
|
7
|
+
type AssistantMessageEvent,
|
|
8
|
+
type Context,
|
|
9
|
+
EventStream,
|
|
10
|
+
type Model,
|
|
11
|
+
type SimpleStreamOptions,
|
|
12
|
+
type StopReason,
|
|
13
|
+
type ToolCall,
|
|
14
|
+
} from "@f5xc-salesdemos/pi-ai";
|
|
15
|
+
import { parseStreamingJson } from "@f5xc-salesdemos/pi-ai/utils/json-parse";
|
|
16
|
+
import { readSseJson } from "@f5xc-salesdemos/pi-utils";
|
|
17
|
+
|
|
18
|
+
// Create stream class matching ProxyMessageEventStream
|
|
19
|
+
class ProxyMessageEventStream extends EventStream<AssistantMessageEvent, AssistantMessage> {
|
|
20
|
+
constructor() {
|
|
21
|
+
super(
|
|
22
|
+
event => event.type === "done" || event.type === "error",
|
|
23
|
+
event => {
|
|
24
|
+
if (event.type === "done") return event.message;
|
|
25
|
+
if (event.type === "error") return event.error;
|
|
26
|
+
throw new Error("Unexpected event type");
|
|
27
|
+
},
|
|
28
|
+
);
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
/**
 * Proxy event types - server sends these with partial field stripped to reduce bandwidth.
 * The client rebuilds the partial AssistantMessage locally (see processProxyEvent).
 */
export type ProxyAssistantMessageEvent =
	| { type: "start" }
	// Text content block lifecycle.
	| { type: "text_start"; contentIndex: number }
	| { type: "text_delta"; contentIndex: number; delta: string }
	| { type: "text_end"; contentIndex: number; contentSignature?: string }
	// Thinking (reasoning) content block lifecycle.
	| { type: "thinking_start"; contentIndex: number }
	| { type: "thinking_delta"; contentIndex: number; delta: string }
	| { type: "thinking_end"; contentIndex: number; contentSignature?: string }
	// Tool call lifecycle; deltas carry raw JSON argument fragments that are
	// accumulated and incrementally parsed on the client.
	| { type: "toolcall_start"; contentIndex: number; id: string; toolName: string }
	| { type: "toolcall_delta"; contentIndex: number; delta: string }
	| { type: "toolcall_end"; contentIndex: number }
	// Terminal event: the model finished normally.
	| {
			type: "done";
			reason: Extract<StopReason, "stop" | "length" | "toolUse">;
			usage: AssistantMessage["usage"];
	  }
	// Terminal event: the request was aborted or failed.
	| {
			type: "error";
			reason: Extract<StopReason, "aborted" | "error">;
			errorMessage?: string;
			usage: AssistantMessage["usage"];
	  };
|
|
57
|
+
|
|
58
|
+
/** Options for streamProxy: standard stream options plus proxy connection details. */
export interface ProxyStreamOptions extends SimpleStreamOptions {
	/** Auth token for the proxy server (sent as a Bearer token in the Authorization header) */
	authToken: string;
	/** Proxy server URL (e.g., "https://genai.example.com") */
	proxyUrl: string;
}
|
|
64
|
+
|
|
65
|
+
/**
|
|
66
|
+
* Stream function that proxies through a server instead of calling LLM providers directly.
|
|
67
|
+
* The server strips the partial field from delta events to reduce bandwidth.
|
|
68
|
+
* We reconstruct the partial message client-side.
|
|
69
|
+
*
|
|
70
|
+
* Use this as the `streamFn` option when creating an Agent that needs to go through a proxy.
|
|
71
|
+
*
|
|
72
|
+
* @example
|
|
73
|
+
* ```typescript
|
|
74
|
+
* const agent = new Agent({
|
|
75
|
+
* streamFn: (model, context, options) =>
|
|
76
|
+
* streamProxy(model, context, {
|
|
77
|
+
* ...options,
|
|
78
|
+
* authToken: await getAuthToken(),
|
|
79
|
+
* proxyUrl: "https://genai.example.com",
|
|
80
|
+
* }),
|
|
81
|
+
* });
|
|
82
|
+
* ```
|
|
83
|
+
*/
|
|
84
|
+
export function streamProxy(model: Model, context: Context, options: ProxyStreamOptions): ProxyMessageEventStream {
|
|
85
|
+
const stream = new ProxyMessageEventStream();
|
|
86
|
+
|
|
87
|
+
(async () => {
|
|
88
|
+
// Initialize the partial message that we'll build up from events
|
|
89
|
+
const partial: AssistantMessage = {
|
|
90
|
+
role: "assistant",
|
|
91
|
+
stopReason: "stop",
|
|
92
|
+
content: [],
|
|
93
|
+
api: model.api,
|
|
94
|
+
provider: model.provider,
|
|
95
|
+
model: model.id,
|
|
96
|
+
usage: {
|
|
97
|
+
input: 0,
|
|
98
|
+
output: 0,
|
|
99
|
+
cacheRead: 0,
|
|
100
|
+
cacheWrite: 0,
|
|
101
|
+
totalTokens: 0,
|
|
102
|
+
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
|
|
103
|
+
},
|
|
104
|
+
timestamp: Date.now(),
|
|
105
|
+
};
|
|
106
|
+
|
|
107
|
+
let response: Response | null = null;
|
|
108
|
+
const abortHandler = () => {
|
|
109
|
+
const body = response?.body;
|
|
110
|
+
if (body) {
|
|
111
|
+
body.cancel("Request aborted by user").catch(() => {});
|
|
112
|
+
}
|
|
113
|
+
};
|
|
114
|
+
if (options.signal) {
|
|
115
|
+
options.signal.addEventListener("abort", abortHandler, { once: true });
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
try {
|
|
119
|
+
response = await fetch(`${options.proxyUrl}/api/stream`, {
|
|
120
|
+
method: "POST",
|
|
121
|
+
headers: {
|
|
122
|
+
Authorization: `Bearer ${options.authToken}`,
|
|
123
|
+
"Content-Type": "application/json",
|
|
124
|
+
},
|
|
125
|
+
body: JSON.stringify({
|
|
126
|
+
model,
|
|
127
|
+
context,
|
|
128
|
+
options: {
|
|
129
|
+
temperature: options.temperature,
|
|
130
|
+
topP: options.topP,
|
|
131
|
+
topK: options.topK,
|
|
132
|
+
minP: options.minP,
|
|
133
|
+
presencePenalty: options.presencePenalty,
|
|
134
|
+
repetitionPenalty: options.repetitionPenalty,
|
|
135
|
+
maxTokens: options.maxTokens,
|
|
136
|
+
reasoning: options.reasoning,
|
|
137
|
+
},
|
|
138
|
+
}),
|
|
139
|
+
signal: options.signal,
|
|
140
|
+
});
|
|
141
|
+
|
|
142
|
+
if (!response.ok) {
|
|
143
|
+
let errorMessage = `Proxy error: ${response.status} ${response.statusText}`;
|
|
144
|
+
try {
|
|
145
|
+
const errorData = (await response.json()) as { error?: string };
|
|
146
|
+
if (errorData.error) {
|
|
147
|
+
errorMessage = `Proxy error: ${errorData.error}`;
|
|
148
|
+
}
|
|
149
|
+
} catch {
|
|
150
|
+
// Couldn't parse error response
|
|
151
|
+
}
|
|
152
|
+
throw new Error(errorMessage);
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
let sawTerminalEvent = false;
|
|
156
|
+
for await (const event of readSseJson<ProxyAssistantMessageEvent>(
|
|
157
|
+
response.body as ReadableStream<Uint8Array>,
|
|
158
|
+
options.signal,
|
|
159
|
+
)) {
|
|
160
|
+
const parsedEvent = processProxyEvent(event, partial);
|
|
161
|
+
if (parsedEvent) {
|
|
162
|
+
if (parsedEvent.type === "done" || parsedEvent.type === "error") {
|
|
163
|
+
sawTerminalEvent = true;
|
|
164
|
+
}
|
|
165
|
+
stream.push(parsedEvent);
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
if (options.signal?.aborted && !sawTerminalEvent) {
|
|
170
|
+
const reason = options.signal.reason;
|
|
171
|
+
throw reason instanceof Error ? reason : new Error(String(reason ?? "Request aborted"));
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
stream.end();
|
|
175
|
+
} catch (error) {
|
|
176
|
+
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
177
|
+
const reason = options.signal?.aborted ? "aborted" : "error";
|
|
178
|
+
partial.stopReason = reason;
|
|
179
|
+
partial.errorMessage = errorMessage;
|
|
180
|
+
stream.push({
|
|
181
|
+
type: "error",
|
|
182
|
+
reason,
|
|
183
|
+
error: partial,
|
|
184
|
+
});
|
|
185
|
+
stream.end();
|
|
186
|
+
} finally {
|
|
187
|
+
if (options.signal) {
|
|
188
|
+
options.signal.removeEventListener("abort", abortHandler);
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
})();
|
|
192
|
+
|
|
193
|
+
return stream;
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
/**
 * Process a proxy event and update the partial message.
 *
 * Mutates `partial` in place (content blocks, usage, stopReason, errorMessage)
 * and returns the corresponding full AssistantMessageEvent with the reconstructed
 * `partial` attached, or `undefined` when the event should not be forwarded
 * downstream (only toolcall_end for a non-toolCall block; all other index
 * mismatches throw).
 */
function processProxyEvent(
	proxyEvent: ProxyAssistantMessageEvent,
	partial: AssistantMessage,
): AssistantMessageEvent | undefined {
	switch (proxyEvent.type) {
		case "start":
			return { type: "start", partial };

		case "text_start":
			// Open a fresh text block at the announced index.
			partial.content[proxyEvent.contentIndex] = { type: "text", text: "" };
			return { type: "text_start", contentIndex: proxyEvent.contentIndex, partial };

		case "text_delta": {
			const content = partial.content[proxyEvent.contentIndex];
			if (content?.type === "text") {
				// Accumulate the delta into the local copy the server stripped out.
				content.text += proxyEvent.delta;
				return {
					type: "text_delta",
					contentIndex: proxyEvent.contentIndex,
					delta: proxyEvent.delta,
					partial,
				};
			}
			throw new Error("Received text_delta for non-text content");
		}

		case "text_end": {
			const content = partial.content[proxyEvent.contentIndex];
			if (content?.type === "text") {
				// Signature is optional; undefined simply clears it.
				content.textSignature = proxyEvent.contentSignature;
				return {
					type: "text_end",
					contentIndex: proxyEvent.contentIndex,
					content: content.text,
					partial,
				};
			}
			throw new Error("Received text_end for non-text content");
		}

		case "thinking_start":
			// Open a fresh thinking (reasoning) block at the announced index.
			partial.content[proxyEvent.contentIndex] = { type: "thinking", thinking: "" };
			return { type: "thinking_start", contentIndex: proxyEvent.contentIndex, partial };

		case "thinking_delta": {
			const content = partial.content[proxyEvent.contentIndex];
			if (content?.type === "thinking") {
				content.thinking += proxyEvent.delta;
				return {
					type: "thinking_delta",
					contentIndex: proxyEvent.contentIndex,
					delta: proxyEvent.delta,
					partial,
				};
			}
			throw new Error("Received thinking_delta for non-thinking content");
		}

		case "thinking_end": {
			const content = partial.content[proxyEvent.contentIndex];
			if (content?.type === "thinking") {
				content.thinkingSignature = proxyEvent.contentSignature;
				return {
					type: "thinking_end",
					contentIndex: proxyEvent.contentIndex,
					content: content.thinking,
					partial,
				};
			}
			throw new Error("Received thinking_end for non-thinking content");
		}

		case "toolcall_start":
			// Track the raw streamed JSON in a transient `partialJson` field on the
			// block; it is deleted again in toolcall_end before the block is emitted.
			partial.content[proxyEvent.contentIndex] = {
				type: "toolCall",
				id: proxyEvent.id,
				name: proxyEvent.toolName,
				arguments: {},
				partialJson: "",
			} satisfies ToolCall & { partialJson: string } as ToolCall;
			return { type: "toolcall_start", contentIndex: proxyEvent.contentIndex, partial };

		case "toolcall_delta": {
			const content = partial.content[proxyEvent.contentIndex];
			if (content?.type === "toolCall") {
				// Append the raw fragment, then best-effort parse the incomplete JSON
				// so `arguments` stays usable while streaming.
				(content as any).partialJson += proxyEvent.delta;
				content.arguments = parseStreamingJson((content as any).partialJson) || {};
				partial.content[proxyEvent.contentIndex] = { ...content }; // Trigger reactivity
				return {
					type: "toolcall_delta",
					contentIndex: proxyEvent.contentIndex,
					delta: proxyEvent.delta,
					partial,
				};
			}
			throw new Error("Received toolcall_delta for non-toolCall content");
		}

		case "toolcall_end": {
			const content = partial.content[proxyEvent.contentIndex];
			if (content?.type === "toolCall") {
				// Drop the transient streaming field before handing the block out.
				delete (content as any).partialJson;
				return {
					type: "toolcall_end",
					contentIndex: proxyEvent.contentIndex,
					toolCall: content,
					partial,
				};
			}
			// NOTE: unlike the delta/end cases above, a mismatched toolcall_end is
			// silently dropped rather than thrown.
			return undefined;
		}

		case "done":
			partial.stopReason = proxyEvent.reason;
			partial.usage = proxyEvent.usage;
			return { type: "done", reason: proxyEvent.reason, message: partial };

		case "error":
			partial.stopReason = proxyEvent.reason;
			partial.errorMessage = proxyEvent.errorMessage;
			partial.usage = proxyEvent.usage;
			return { type: "error", reason: proxyEvent.reason, error: partial };
	}
}
|
package/src/thinking.ts
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { Effort } from "@f5xc-salesdemos/pi-ai";

/**
 * Agent-local thinking selector.
 *
 * `off` disables reasoning, while `inherit` defers to a higher-level selector.
 * The remaining values map directly onto the pi-ai `Effort` levels.
 */
export const ThinkingLevel = {
	Inherit: "inherit",
	Off: "off",
	Minimal: Effort.Minimal,
	Low: Effort.Low,
	Medium: Effort.Medium,
	High: Effort.High,
	XHigh: Effort.XHigh,
} as const;

/** Union of all selector values, including "inherit". */
export type ThinkingLevel = (typeof ThinkingLevel)[keyof typeof ThinkingLevel];
/** A thinking level after "inherit" has been resolved to a concrete choice. */
export type ResolvedThinkingLevel = Exclude<ThinkingLevel, "inherit">;
|
package/src/types.ts
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
AssistantMessage,
|
|
3
|
+
AssistantMessageEvent,
|
|
4
|
+
AssistantMessageEventStream,
|
|
5
|
+
Effort,
|
|
6
|
+
ImageContent,
|
|
7
|
+
Message,
|
|
8
|
+
Model,
|
|
9
|
+
SimpleStreamOptions,
|
|
10
|
+
streamSimple,
|
|
11
|
+
TextContent,
|
|
12
|
+
Tool,
|
|
13
|
+
ToolChoice,
|
|
14
|
+
ToolResultMessage,
|
|
15
|
+
} from "@f5xc-salesdemos/pi-ai";
|
|
16
|
+
import type { Static, TSchema } from "@sinclair/typebox";
|
|
17
|
+
|
|
18
|
+
/**
 * Stream function - can return sync or Promise for async config lookup.
 * Shares its parameter list with pi-ai's `streamSimple`, so any conforming
 * implementation (e.g. streamProxy wrappers) can be substituted.
 */
export type StreamFn = (
	...args: Parameters<typeof streamSimple>
) => AssistantMessageEventStream | Promise<AssistantMessageEventStream>;
|
|
22
|
+
|
|
23
|
+
/**
 * Configuration for the agent loop.
 */
export interface AgentLoopConfig extends SimpleStreamOptions {
	/** Model used for LLM calls made by the loop. */
	model: Model;

	/**
	 * When to interrupt tool execution for steering messages.
	 * - "immediate" = check after each tool call (default)
	 * - "wait" = defer steering until the current turn completes
	 */
	interruptMode?: "immediate" | "wait";

	/**
	 * Optional session identifier forwarded to LLM providers.
	 * Used by providers that support session-based caching (e.g., OpenAI Codex).
	 */
	sessionId?: string;

	/**
	 * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.
	 *
	 * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage
	 * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications,
	 * status messages) should be filtered out.
	 *
	 * @example
	 * ```typescript
	 * convertToLlm: (messages) => messages.flatMap(m => {
	 *   if (m.role === "custom") {
	 *     // Convert custom message to user message
	 *     return [{ role: "user", content: m.content, timestamp: m.timestamp }];
	 *   }
	 *   if (m.role === "notification") {
	 *     // Filter out UI-only messages
	 *     return [];
	 *   }
	 *   // Pass through standard LLM messages
	 *   return [m];
	 * })
	 * ```
	 */
	convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;

	/**
	 * Optional transform applied to the context before `convertToLlm`.
	 *
	 * Use this for operations that work at the AgentMessage level:
	 * - Context window management (pruning old messages)
	 * - Injecting context from external sources
	 *
	 * @example
	 * ```typescript
	 * transformContext: async (messages) => {
	 *   if (estimateTokens(messages) > MAX_TOKENS) {
	 *     return pruneOldMessages(messages);
	 *   }
	 *   return messages;
	 * }
	 * ```
	 */
	transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;

	/**
	 * Resolves an API key dynamically for each LLM call.
	 *
	 * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire
	 * during long-running tool execution phases.
	 */
	getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;

	/**
	 * Returns steering messages to inject into the conversation mid-run.
	 *
	 * Called after each tool execution to check for user interruptions unless interruptMode is "wait".
	 * If messages are returned, remaining tool calls are skipped and
	 * these messages are added to the context before the next LLM call.
	 */
	getSteeringMessages?: () => Promise<AgentMessage[]>;

	/**
	 * Returns follow-up messages to process after the agent would otherwise stop.
	 *
	 * Called when the agent has no more tool calls and no steering messages.
	 * If messages are returned, they're added to the context and the agent
	 * continues with another turn.
	 */
	getFollowUpMessages?: () => Promise<AgentMessage[]>;

	/**
	 * Provides tool execution context, resolved per tool call.
	 * Use for late-bound UI or session state access.
	 */
	getToolContext?: (toolCall?: ToolCallContext) => AgentToolContext | undefined;

	/**
	 * Refreshes prompt/tool context from live session state before each model call.
	 * Use this when tool availability or the system prompt can change mid-turn.
	 */
	syncContextBeforeModelCall?: (context: AgentContext) => void | Promise<void>;

	/**
	 * Optional transform applied to tool call arguments before execution.
	 * Use for deobfuscating secrets or rewriting arguments.
	 */
	transformToolCallArguments?: (args: Record<string, unknown>, toolName: string) => Record<string, unknown>;

	/**
	 * Enable intent tracing for tool calls.
	 * When enabled, the harness injects an `_i: string` field into tool schemas sent to the model,
	 * then strips `_i` from arguments before executing tools.
	 */
	intentTracing?: boolean;

	/**
	 * Inspect assistant streaming events before they are published to the outer agent event stream.
	 * Callers may abort synchronously to stop consuming buffered provider events.
	 */
	onAssistantMessageEvent?: (message: AssistantMessage, event: AssistantMessageEvent) => void;

	/**
	 * Dynamic tool choice override, resolved per LLM call.
	 * When set and returns a value, overrides the static `toolChoice`.
	 */
	getToolChoice?: () => ToolChoice | undefined;
}
|
|
149
|
+
|
|
150
|
+
/** Position of a single tool call within the batch of calls from one assistant turn. */
export interface ToolCallContext {
	/** Identifier shared by all tool calls in the same batch. */
	batchId: string;
	/** Position of this call within the batch. */
	index: number;
	/** Total number of tool calls in the batch. */
	total: number;
	/** All tool calls in the batch, in order. */
	toolCalls: Array<{ id: string; name: string }>;
}

/**
 * Extensible interface for custom app messages.
 * Apps can extend via declaration merging:
 *
 * @example
 * ```typescript
 * declare module "@f5xc-salesdemos/agent" {
 *   interface CustomAgentMessages {
 *     artifact: ArtifactMessage;
 *     notification: NotificationMessage;
 *   }
 * }
 * ```
 */
export interface CustomAgentMessages {
	// Empty by default - apps extend via declaration merging
}

/**
 * AgentMessage: Union of LLM messages + custom messages.
 * This abstraction allows apps to add custom message types while maintaining
 * type safety and compatibility with the base LLM messages.
 */
export type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];
|
|
181
|
+
|
|
182
|
+
/**
 * Agent state containing all configuration and conversation data.
 */
export interface AgentState {
	/** System prompt used for LLM calls. */
	systemPrompt: string;
	/** Currently configured model. */
	model: Model;
	/** Reasoning effort level, if set. */
	thinkingLevel?: Effort;
	/** Tools available to the agent. */
	tools: AgentTool<any>[];
	messages: AgentMessage[]; // Can include attachments + custom message types
	/** Whether an assistant response is currently streaming. */
	isStreaming: boolean;
	/** The in-flight streaming message, or null when none. */
	streamMessage: AgentMessage | null;
	/** IDs of tool calls that have not completed yet. */
	pendingToolCalls: Set<string>;
	/** Last error message, if any. */
	error?: string;
}

/** Result returned by a tool's execute function. */
export interface AgentToolResult<T = any, _TInput = unknown> {
	// Content blocks supporting text and images
	content: (TextContent | ImageContent)[];
	// Details to be displayed in a UI or logged
	details?: T;
}

// Callback for streaming tool execution updates
export type AgentToolUpdateCallback<T = any, TInput = unknown> = (partialResult: AgentToolResult<T, TInput>) => void;
|
|
206
|
+
|
|
207
|
+
/** Options passed to renderResult */
export interface RenderResultOptions {
	/** Whether the result view is expanded */
	expanded: boolean;
	/** Whether this is a partial/streaming result */
	isPartial: boolean;
	/** Current spinner frame index for animated elements (optional) */
	spinnerFrame?: number;
}

/**
 * Context passed to tool execution.
 * Apps can extend via declaration merging.
 */
export interface AgentToolContext {
	// Empty by default - apps extend via declaration merging
}
|
|
224
|
+
|
|
225
|
+
/**
 * Signature of a tool's execute function.
 * `this` is bound to the owning AgentTool so implementations can read their own
 * metadata. `onUpdate` streams partial results; `context` is the app-provided
 * execution context (see AgentToolContext).
 */
export type AgentToolExecFn<TParameters extends TSchema = TSchema, TDetails = any, TTheme = unknown> = (
	this: AgentTool<TParameters, TDetails, TTheme>,
	toolCallId: string,
	params: Static<TParameters>,
	signal?: AbortSignal,
	onUpdate?: AgentToolUpdateCallback<TDetails, TParameters>,
	context?: AgentToolContext,
) => Promise<AgentToolResult<TDetails, TParameters>>;

// AgentTool extends Tool but adds the execute function
export interface AgentTool<TParameters extends TSchema = TSchema, TDetails = any, TTheme = unknown>
	extends Tool<TParameters> {
	// A human-readable label for the tool to be displayed in UI
	label: string;
	/** If true, tool is excluded unless explicitly listed in --tools or agent's tools field */
	hidden?: boolean;
	/** If true, tool can stage a pending action that requires explicit resolution via the resolve tool. */
	deferrable?: boolean;
	/** If true, tool execution ignores abort signals (runs to completion) */
	nonAbortable?: boolean;
	/**
	 * Concurrency mode for tool scheduling when multiple calls are in one turn.
	 * - "shared": can run alongside other shared tools (default)
	 * - "exclusive": runs alone; other tools wait until it finishes
	 */
	concurrency?: "shared" | "exclusive";
	/** If true, argument validation errors are non-fatal: raw args are passed to execute() instead of returning an error to the LLM. */
	lenientArgValidation?: boolean;
	/** The tool's implementation; see AgentToolExecFn. */
	execute: AgentToolExecFn<TParameters, TDetails, TTheme>;

	/** Optional custom rendering for tool call display (returns UI component) */
	renderCall?: (args: Static<TParameters>, options: RenderResultOptions, theme: TTheme) => unknown;

	/** Optional custom rendering for tool result display (returns UI component) */
	renderResult?: (
		result: AgentToolResult<TDetails, TParameters>,
		options: RenderResultOptions,
		theme: TTheme,
	) => unknown;
}
|
|
265
|
+
|
|
266
|
+
// AgentContext is like Context but uses AgentTool and AgentMessage
export interface AgentContext {
	/** System prompt for the next LLM call. */
	systemPrompt: string;
	/** Full agent-level conversation history. */
	messages: AgentMessage[];
	/** Tools currently available, if any. */
	tools?: AgentTool<any>[];
}

/**
 * Events emitted by the Agent for UI updates.
 * These events provide fine-grained lifecycle information for messages, turns, and tool executions.
 */
export type AgentEvent =
	// Agent lifecycle
	| { type: "agent_start" }
	| { type: "agent_end"; messages: AgentMessage[] }
	// Turn lifecycle - a turn is one assistant response + any tool calls/results
	| { type: "turn_start" }
	| { type: "turn_end"; message: AgentMessage; toolResults: ToolResultMessage[] }
	// Message lifecycle - emitted for user, assistant, and toolResult messages
	| { type: "message_start"; message: AgentMessage }
	// Only emitted for assistant messages during streaming
	| { type: "message_update"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent }
	| { type: "message_end"; message: AgentMessage }
	// Tool execution lifecycle
	| { type: "tool_execution_start"; toolCallId: string; toolName: string; args: any; intent?: string }
	| { type: "tool_execution_update"; toolCallId: string; toolName: string; args: any; partialResult: any }
	| { type: "tool_execution_end"; toolCallId: string; toolName: string; result: any; isError?: boolean };