@oh-my-pi/pi-agent-core 1.337.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/agent.ts ADDED
@@ -0,0 +1,386 @@
+ /**
+  * Agent class that uses the agent-loop directly.
+  * No transport abstraction; calls streamSimple via the loop.
+  */
+
+ import {
+   getModel,
+   type ImageContent,
+   type Message,
+   type Model,
+   type ReasoningEffort,
+   streamSimple,
+   type TextContent,
+ } from "@oh-my-pi/pi-ai";
+ import { agentLoop, agentLoopContinue } from "./agent-loop.js";
+ import type {
+   AgentContext,
+   AgentEvent,
+   AgentLoopConfig,
+   AgentMessage,
+   AgentState,
+   AgentTool,
+   AgentToolContext,
+   StreamFn,
+   ThinkingLevel,
+ } from "./types.js";
+
+ /**
+  * Default convertToLlm: keep only LLM-compatible message roles (user, assistant, toolResult).
+  */
+ function defaultConvertToLlm(messages: AgentMessage[]): Message[] {
+   return messages.filter((m) => m.role === "user" || m.role === "assistant" || m.role === "toolResult");
+ }
+
+ export interface AgentOptions {
+   initialState?: Partial<AgentState>;
+
+   /**
+    * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.
+    * The default keeps only user/assistant/toolResult messages.
+    */
+   convertToLlm?: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;
+
+   /**
+    * Optional transform applied to the context before convertToLlm.
+    * Use for context pruning, injecting external context, etc.
+    */
+   transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;
+
+   /**
+    * Queue mode: "all" sends all queued messages at once; "one-at-a-time" sends one per turn.
+    */
+   queueMode?: "all" | "one-at-a-time";
+
+   /**
+    * Custom stream function (for proxy backends, etc.). Defaults to streamSimple.
+    */
+   streamFn?: StreamFn;
+
+   /**
+    * Resolves an API key dynamically for each LLM call.
+    * Useful for expiring tokens (e.g., GitHub Copilot OAuth).
+    */
+   getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
+
+   /**
+    * Provides tool execution context, resolved per tool call.
+    */
+   getToolContext?: () => AgentToolContext | undefined;
+ }
+
+ export class Agent {
+   private _state: AgentState = {
+     systemPrompt: "",
+     model: getModel("google", "gemini-2.5-flash-lite-preview-06-17"),
+     thinkingLevel: "off",
+     tools: [],
+     messages: [],
+     isStreaming: false,
+     streamMessage: null,
+     pendingToolCalls: new Set<string>(),
+     error: undefined,
+   };
+
+   private listeners = new Set<(e: AgentEvent) => void>();
+   private abortController?: AbortController;
+   private convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;
+   private transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;
+   private messageQueue: AgentMessage[] = [];
+   private queueMode: "all" | "one-at-a-time";
+   public streamFn: StreamFn;
+   public getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
+   private getToolContext?: () => AgentToolContext | undefined;
+   private runningPrompt?: Promise<void>;
+   private resolveRunningPrompt?: () => void;
+
+   constructor(opts: AgentOptions = {}) {
+     this._state = { ...this._state, ...opts.initialState };
+     this.convertToLlm = opts.convertToLlm || defaultConvertToLlm;
+     this.transformContext = opts.transformContext;
+     this.queueMode = opts.queueMode || "one-at-a-time";
+     this.streamFn = opts.streamFn || streamSimple;
+     this.getApiKey = opts.getApiKey;
+     this.getToolContext = opts.getToolContext;
+   }
+
+   get state(): AgentState {
+     return this._state;
+   }
+
+   subscribe(fn: (e: AgentEvent) => void): () => void {
+     this.listeners.add(fn);
+     return () => this.listeners.delete(fn);
+   }
+
+   // State mutators
+   setSystemPrompt(v: string) {
+     this._state.systemPrompt = v;
+   }
+
+   setModel(m: Model<any>) {
+     this._state.model = m;
+   }
+
+   setThinkingLevel(l: ThinkingLevel) {
+     this._state.thinkingLevel = l;
+   }
+
+   setQueueMode(mode: "all" | "one-at-a-time") {
+     this.queueMode = mode;
+   }
+
+   getQueueMode(): "all" | "one-at-a-time" {
+     return this.queueMode;
+   }
+
+   setTools(t: AgentTool<any>[]) {
+     this._state.tools = t;
+   }
+
+   replaceMessages(ms: AgentMessage[]) {
+     this._state.messages = ms.slice();
+   }
+
+   appendMessage(m: AgentMessage) {
+     this._state.messages = [...this._state.messages, m];
+   }
+
+   queueMessage(m: AgentMessage) {
+     this.messageQueue.push(m);
+   }
+
+   clearMessageQueue() {
+     this.messageQueue = [];
+   }
+
+   clearMessages() {
+     this._state.messages = [];
+   }
+
+   abort() {
+     this.abortController?.abort();
+   }
+
+   waitForIdle(): Promise<void> {
+     return this.runningPrompt ?? Promise.resolve();
+   }
+
+   reset() {
+     this._state.messages = [];
+     this._state.isStreaming = false;
+     this._state.streamMessage = null;
+     this._state.pendingToolCalls = new Set<string>();
+     this._state.error = undefined;
+     this.messageQueue = [];
+   }
+
+   /** Send a prompt, either as plain text (with optional images) or as one or more AgentMessages. */
+   async prompt(message: AgentMessage | AgentMessage[]): Promise<void>;
+   async prompt(input: string, images?: ImageContent[]): Promise<void>;
+   async prompt(input: string | AgentMessage | AgentMessage[], images?: ImageContent[]) {
+     const model = this._state.model;
+     if (!model) throw new Error("No model configured");
+
+     let msgs: AgentMessage[];
+
+     if (Array.isArray(input)) {
+       msgs = input;
+     } else if (typeof input === "string") {
+       const content: Array<TextContent | ImageContent> = [{ type: "text", text: input }];
+       if (images && images.length > 0) {
+         content.push(...images);
+       }
+       msgs = [
+         {
+           role: "user",
+           content,
+           timestamp: Date.now(),
+         },
+       ];
+     } else {
+       msgs = [input];
+     }
+
+     await this._runLoop(msgs);
+   }
+
+   /** Continue from current context (for retry after overflow) */
+   async continue() {
+     const messages = this._state.messages;
+     if (messages.length === 0) {
+       throw new Error("No messages to continue from");
+     }
+     if (messages[messages.length - 1].role === "assistant") {
+       throw new Error("Cannot continue from message role: assistant");
+     }
+
+     await this._runLoop(undefined);
+   }
+
+   /**
+    * Run the agent loop.
+    * If messages are provided, starts a new conversation turn with those messages.
+    * Otherwise, continues from existing context.
+    */
+   private async _runLoop(messages?: AgentMessage[]) {
+     const model = this._state.model;
+     if (!model) throw new Error("No model configured");
+
+     this.runningPrompt = new Promise<void>((resolve) => {
+       this.resolveRunningPrompt = resolve;
+     });
+
+     this.abortController = new AbortController();
+     this._state.isStreaming = true;
+     this._state.streamMessage = null;
+     this._state.error = undefined;
+
+     const reasoning: ReasoningEffort | undefined =
+       this._state.thinkingLevel === "off"
+         ? undefined
+         : this._state.thinkingLevel === "minimal"
+           ? "low"
+           : (this._state.thinkingLevel as ReasoningEffort);
+
+     const context: AgentContext = {
+       systemPrompt: this._state.systemPrompt,
+       messages: this._state.messages.slice(),
+       tools: this._state.tools,
+     };
+
+     const config: AgentLoopConfig = {
+       model,
+       reasoning,
+       convertToLlm: this.convertToLlm,
+       transformContext: this.transformContext,
+       getApiKey: this.getApiKey,
+       getToolContext: this.getToolContext,
+       getQueuedMessages: async () => {
+         if (this.queueMode === "one-at-a-time") {
+           if (this.messageQueue.length > 0) {
+             const first = this.messageQueue[0];
+             this.messageQueue = this.messageQueue.slice(1);
+             return [first];
+           }
+           return [];
+         } else {
+           const queued = this.messageQueue.slice();
+           this.messageQueue = [];
+           return queued;
+         }
+       },
+     };
+
+     let partial: AgentMessage | null = null;
+
+     try {
+       const stream = messages
+         ? agentLoop(messages, context, config, this.abortController.signal, this.streamFn)
+         : agentLoopContinue(context, config, this.abortController.signal, this.streamFn);
+
+       for await (const event of stream) {
+         // Update internal state based on events
+         switch (event.type) {
+           case "message_start":
+           case "message_update":
+             partial = event.message;
+             this._state.streamMessage = event.message;
+             break;
+
+           case "message_end":
+             partial = null;
+             this._state.streamMessage = null;
+             this.appendMessage(event.message);
+             break;
+
+           case "tool_execution_start": {
+             const s = new Set(this._state.pendingToolCalls);
+             s.add(event.toolCallId);
+             this._state.pendingToolCalls = s;
+             break;
+           }
+
+           case "tool_execution_end": {
+             const s = new Set(this._state.pendingToolCalls);
+             s.delete(event.toolCallId);
+             this._state.pendingToolCalls = s;
+             break;
+           }
+
+           case "turn_end":
+             if (event.message.role === "assistant" && (event.message as any).errorMessage) {
+               this._state.error = (event.message as any).errorMessage;
+             }
+             break;
+
+           case "agent_end":
+             this._state.isStreaming = false;
+             this._state.streamMessage = null;
+             break;
+         }
+
+         // Emit to listeners
+         this.emit(event);
+       }
+
+       // Handle any remaining partial message
+       if (partial && partial.role === "assistant" && partial.content.length > 0) {
+         const hasContent = partial.content.some(
+           (c) =>
+             (c.type === "thinking" && c.thinking.trim().length > 0) ||
+             (c.type === "text" && c.text.trim().length > 0) ||
+             (c.type === "toolCall" && c.name.trim().length > 0),
+         );
+         if (hasContent) {
+           this.appendMessage(partial);
+         } else if (this.abortController?.signal.aborted) {
+           throw new Error("Request was aborted");
+         }
+       }
+     } catch (err: any) {
+       const errorMsg: AgentMessage = {
+         role: "assistant",
+         content: [{ type: "text", text: "" }],
+         api: model.api,
+         provider: model.provider,
+         model: model.id,
+         usage: {
+           input: 0,
+           output: 0,
+           cacheRead: 0,
+           cacheWrite: 0,
+           totalTokens: 0,
+           cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+         },
+         stopReason: this.abortController?.signal.aborted ? "aborted" : "error",
+         errorMessage: err?.message || String(err),
+         timestamp: Date.now(),
+       } as AgentMessage;
+
+       this.appendMessage(errorMsg);
+       this._state.error = err?.message || String(err);
+       this.emit({ type: "agent_end", messages: [errorMsg] });
+     } finally {
+       this._state.isStreaming = false;
+       this._state.streamMessage = null;
+       this._state.pendingToolCalls = new Set<string>();
+       this.abortController = undefined;
+       this.resolveRunningPrompt?.();
+       this.runningPrompt = undefined;
+       this.resolveRunningPrompt = undefined;
+     }
+   }
+
+   private emit(e: AgentEvent) {
+     for (const listener of this.listeners) {
+       listener(e);
+     }
+   }
+ }
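
For orientation, a minimal usage sketch of the Agent class above. This is not part of the package diff; it assumes the AgentMessage shape that prompt() constructs and the event names handled in _runLoop, and the environment-variable key lookup is purely illustrative.

import { Agent } from "@oh-my-pi/pi-agent-core";

const agent = new Agent({
  queueMode: "all",
  // Illustrative: resolve a key from the environment per provider.
  getApiKey: (provider) => process.env[`${provider.toUpperCase()}_API_KEY`],
});

agent.setSystemPrompt("You are a concise assistant.");

const unsubscribe = agent.subscribe((event) => {
  if (event.type === "message_end") console.log(event.message);
});

// Start a turn without awaiting it, queue a follow-up message for the loop
// to drain via getQueuedMessages, then wait for the run to finish.
void agent.prompt("Summarize the README.");
agent.queueMessage({
  role: "user",
  content: [{ type: "text", text: "Also list open TODOs." }],
  timestamp: Date.now(),
});
await agent.waitForIdle();
unsubscribe();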
package/src/index.ts ADDED
@@ -0,0 +1,8 @@
+ // Core Agent
+ export * from "./agent.js";
+ // Loop functions
+ export * from "./agent-loop.js";
+ // Proxy utilities
+ export * from "./proxy.js";
+ // Types
+ export * from "./types.js";