@lenylvt/pi-agent-core 0.64.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,631 @@
1
+ /**
2
+ * Agent loop that works with AgentMessage throughout.
3
+ * Transforms to Message[] only at the LLM call boundary.
4
+ */
5
+
6
+ import {
7
+ type AssistantMessage,
8
+ type Context,
9
+ EventStream,
10
+ streamSimple,
11
+ type ToolResultMessage,
12
+ validateToolArguments,
13
+ } from "@lenylvt/pi-ai";
14
+ import type {
15
+ AgentContext,
16
+ AgentEvent,
17
+ AgentLoopConfig,
18
+ AgentMessage,
19
+ AgentTool,
20
+ AgentToolCall,
21
+ AgentToolResult,
22
+ StreamFn,
23
+ } from "./types.js";
24
+
/** Sink that receives every AgentEvent from the loop; may be sync or async (the loop awaits each call). */
export type AgentEventSink = (event: AgentEvent) => Promise<void> | void;
26
+
27
+ /**
28
+ * Start an agent loop with a new prompt message.
29
+ * The prompt is added to the context and events are emitted for it.
30
+ */
31
+ export function agentLoop(
32
+ prompts: AgentMessage[],
33
+ context: AgentContext,
34
+ config: AgentLoopConfig,
35
+ signal?: AbortSignal,
36
+ streamFn?: StreamFn,
37
+ ): EventStream<AgentEvent, AgentMessage[]> {
38
+ const stream = createAgentStream();
39
+
40
+ void runAgentLoop(
41
+ prompts,
42
+ context,
43
+ config,
44
+ async (event) => {
45
+ stream.push(event);
46
+ },
47
+ signal,
48
+ streamFn,
49
+ ).then((messages) => {
50
+ stream.end(messages);
51
+ });
52
+
53
+ return stream;
54
+ }
55
+
56
+ /**
57
+ * Continue an agent loop from the current context without adding a new message.
58
+ * Used for retries - context already has user message or tool results.
59
+ *
60
+ * **Important:** The last message in context must convert to a `user` or `toolResult` message
61
+ * via `convertToLlm`. If it doesn't, the LLM provider will reject the request.
62
+ * This cannot be validated here since `convertToLlm` is only called once per turn.
63
+ */
64
+ export function agentLoopContinue(
65
+ context: AgentContext,
66
+ config: AgentLoopConfig,
67
+ signal?: AbortSignal,
68
+ streamFn?: StreamFn,
69
+ ): EventStream<AgentEvent, AgentMessage[]> {
70
+ if (context.messages.length === 0) {
71
+ throw new Error("Cannot continue: no messages in context");
72
+ }
73
+
74
+ if (context.messages[context.messages.length - 1].role === "assistant") {
75
+ throw new Error("Cannot continue from message role: assistant");
76
+ }
77
+
78
+ const stream = createAgentStream();
79
+
80
+ void runAgentLoopContinue(
81
+ context,
82
+ config,
83
+ async (event) => {
84
+ stream.push(event);
85
+ },
86
+ signal,
87
+ streamFn,
88
+ ).then((messages) => {
89
+ stream.end(messages);
90
+ });
91
+
92
+ return stream;
93
+ }
94
+
95
+ export async function runAgentLoop(
96
+ prompts: AgentMessage[],
97
+ context: AgentContext,
98
+ config: AgentLoopConfig,
99
+ emit: AgentEventSink,
100
+ signal?: AbortSignal,
101
+ streamFn?: StreamFn,
102
+ ): Promise<AgentMessage[]> {
103
+ const newMessages: AgentMessage[] = [...prompts];
104
+ const currentContext: AgentContext = {
105
+ ...context,
106
+ messages: [...context.messages, ...prompts],
107
+ };
108
+
109
+ await emit({ type: "agent_start" });
110
+ await emit({ type: "turn_start" });
111
+ for (const prompt of prompts) {
112
+ await emit({ type: "message_start", message: prompt });
113
+ await emit({ type: "message_end", message: prompt });
114
+ }
115
+
116
+ await runLoop(currentContext, newMessages, config, signal, emit, streamFn);
117
+ return newMessages;
118
+ }
119
+
120
+ export async function runAgentLoopContinue(
121
+ context: AgentContext,
122
+ config: AgentLoopConfig,
123
+ emit: AgentEventSink,
124
+ signal?: AbortSignal,
125
+ streamFn?: StreamFn,
126
+ ): Promise<AgentMessage[]> {
127
+ if (context.messages.length === 0) {
128
+ throw new Error("Cannot continue: no messages in context");
129
+ }
130
+
131
+ if (context.messages[context.messages.length - 1].role === "assistant") {
132
+ throw new Error("Cannot continue from message role: assistant");
133
+ }
134
+
135
+ const newMessages: AgentMessage[] = [];
136
+ const currentContext: AgentContext = { ...context };
137
+
138
+ await emit({ type: "agent_start" });
139
+ await emit({ type: "turn_start" });
140
+
141
+ await runLoop(currentContext, newMessages, config, signal, emit, streamFn);
142
+ return newMessages;
143
+ }
144
+
145
+ function createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {
146
+ return new EventStream<AgentEvent, AgentMessage[]>(
147
+ (event: AgentEvent) => event.type === "agent_end",
148
+ (event: AgentEvent) => (event.type === "agent_end" ? event.messages : []),
149
+ );
150
+ }
151
+
/**
 * Main loop logic shared by agentLoop and agentLoopContinue.
 *
 * Mutates `currentContext.messages` in place and appends every newly produced
 * message (steering prompts, assistant messages, tool results) to `newMessages`.
 *
 * @param currentContext Working context; messages are appended as the loop runs.
 * @param newMessages Accumulator for every message produced in this run.
 * @param config Loop configuration (hooks, steering/follow-up sources, LLM settings).
 * @param signal Optional abort signal forwarded to the LLM call and tools.
 * @param emit Sink that receives every AgentEvent; each call is awaited.
 * @param streamFn Optional override for the LLM streaming function.
 */
async function runLoop(
  currentContext: AgentContext,
  newMessages: AgentMessage[],
  config: AgentLoopConfig,
  signal: AbortSignal | undefined,
  emit: AgentEventSink,
  streamFn?: StreamFn,
): Promise<void> {
  // Callers emit the first turn_start themselves, so the first iteration skips it.
  let firstTurn = true;
  // Check for steering messages at start (user may have typed while waiting)
  let pendingMessages: AgentMessage[] = (await config.getSteeringMessages?.()) || [];

  // Outer loop: continues when queued follow-up messages arrive after agent would stop
  while (true) {
    let hasMoreToolCalls = true;

    // Inner loop: process tool calls and steering messages
    while (hasMoreToolCalls || pendingMessages.length > 0) {
      if (!firstTurn) {
        await emit({ type: "turn_start" });
      } else {
        firstTurn = false;
      }

      // Process pending messages (inject before next assistant response)
      if (pendingMessages.length > 0) {
        for (const message of pendingMessages) {
          await emit({ type: "message_start", message });
          await emit({ type: "message_end", message });
          currentContext.messages.push(message);
          newMessages.push(message);
        }
        pendingMessages = [];
      }

      // Stream assistant response
      const message = await streamAssistantResponse(currentContext, config, signal, emit, streamFn);
      newMessages.push(message);

      // Errors and aborts terminate the run immediately: no tools, no follow-ups.
      if (message.stopReason === "error" || message.stopReason === "aborted") {
        await emit({ type: "turn_end", message, toolResults: [] });
        await emit({ type: "agent_end", messages: newMessages });
        return;
      }

      // Check for tool calls
      const toolCalls = message.content.filter((c) => c.type === "toolCall");
      hasMoreToolCalls = toolCalls.length > 0;

      const toolResults: ToolResultMessage[] = [];
      if (hasMoreToolCalls) {
        toolResults.push(...(await executeToolCalls(currentContext, message, config, signal, emit)));

        // Tool results join the conversation so the next LLM call sees them.
        for (const result of toolResults) {
          currentContext.messages.push(result);
          newMessages.push(result);
        }
      }

      await emit({ type: "turn_end", message, toolResults });

      // Pick up steering typed during this turn; non-empty keeps the inner loop going.
      pendingMessages = (await config.getSteeringMessages?.()) || [];
    }

    // Agent would stop here. Check for follow-up messages.
    const followUpMessages = (await config.getFollowUpMessages?.()) || [];
    if (followUpMessages.length > 0) {
      // Set as pending so inner loop processes them
      pendingMessages = followUpMessages;
      continue;
    }

    // No more messages, exit
    break;
  }

  await emit({ type: "agent_end", messages: newMessages });
}
233
+
/**
 * Stream an assistant response from the LLM.
 * This is where AgentMessage[] gets transformed to Message[] for the LLM.
 *
 * The in-progress assistant message is kept as the LAST element of
 * `context.messages` and replaced in place on every stream event, so observers
 * of the context always see the current partial state.
 *
 * @returns The final AssistantMessage (also appended/replaced into the context).
 */
async function streamAssistantResponse(
  context: AgentContext,
  config: AgentLoopConfig,
  signal: AbortSignal | undefined,
  emit: AgentEventSink,
  streamFn?: StreamFn,
): Promise<AssistantMessage> {
  // Apply context transform if configured (AgentMessage[] → AgentMessage[])
  let messages = context.messages;
  if (config.transformContext) {
    messages = await config.transformContext(messages, signal);
  }

  // Convert to LLM-compatible messages (AgentMessage[] → Message[])
  const llmMessages = await config.convertToLlm(messages);

  // Build LLM context
  const llmContext: Context = {
    systemPrompt: context.systemPrompt,
    messages: llmMessages,
    tools: context.tools,
  };

  const streamFunction = streamFn || streamSimple;

  // Resolve API key (important for expiring tokens)
  const resolvedApiKey =
    (config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;

  const response = await streamFunction(config.model, llmContext, {
    ...config,
    apiKey: resolvedApiKey,
    signal,
  });

  // partialMessage tracks the in-flight assistant message; addedPartial records
  // whether it was pushed onto context.messages (so "done" replaces vs. pushes).
  let partialMessage: AssistantMessage | null = null;
  let addedPartial = false;

  for await (const event of response) {
    switch (event.type) {
      case "start":
        partialMessage = event.partial;
        context.messages.push(partialMessage);
        addedPartial = true;
        // Shallow copies are emitted so listeners can't mutate the live message.
        await emit({ type: "message_start", message: { ...partialMessage } });
        break;

      case "text_start":
      case "text_delta":
      case "text_end":
      case "thinking_start":
      case "thinking_delta":
      case "thinking_end":
      case "toolcall_start":
      case "toolcall_delta":
      case "toolcall_end":
        // Only meaningful after "start"; replace the last context entry in place.
        if (partialMessage) {
          partialMessage = event.partial;
          context.messages[context.messages.length - 1] = partialMessage;
          await emit({
            type: "message_update",
            assistantMessageEvent: event,
            message: { ...partialMessage },
          });
        }
        break;

      case "done":
      case "error": {
        const finalMessage = await response.result();
        if (addedPartial) {
          context.messages[context.messages.length - 1] = finalMessage;
        } else {
          context.messages.push(finalMessage);
        }
        // If no "start" event ever arrived, message_start was never emitted.
        if (!addedPartial) {
          await emit({ type: "message_start", message: { ...finalMessage } });
        }
        await emit({ type: "message_end", message: finalMessage });
        return finalMessage;
      }
    }
  }

  // Fallback: the stream ended without a "done"/"error" event. Finalize the
  // message the same way so the context and event sequence stay consistent.
  const finalMessage = await response.result();
  if (addedPartial) {
    context.messages[context.messages.length - 1] = finalMessage;
  } else {
    context.messages.push(finalMessage);
    await emit({ type: "message_start", message: { ...finalMessage } });
  }
  await emit({ type: "message_end", message: finalMessage });
  return finalMessage;
}
332
+
333
+ /**
334
+ * Execute tool calls from an assistant message.
335
+ */
336
+ async function executeToolCalls(
337
+ currentContext: AgentContext,
338
+ assistantMessage: AssistantMessage,
339
+ config: AgentLoopConfig,
340
+ signal: AbortSignal | undefined,
341
+ emit: AgentEventSink,
342
+ ): Promise<ToolResultMessage[]> {
343
+ const toolCalls = assistantMessage.content.filter((c) => c.type === "toolCall");
344
+ if (config.toolExecution === "sequential") {
345
+ return executeToolCallsSequential(currentContext, assistantMessage, toolCalls, config, signal, emit);
346
+ }
347
+ return executeToolCallsParallel(currentContext, assistantMessage, toolCalls, config, signal, emit);
348
+ }
349
+
350
+ async function executeToolCallsSequential(
351
+ currentContext: AgentContext,
352
+ assistantMessage: AssistantMessage,
353
+ toolCalls: AgentToolCall[],
354
+ config: AgentLoopConfig,
355
+ signal: AbortSignal | undefined,
356
+ emit: AgentEventSink,
357
+ ): Promise<ToolResultMessage[]> {
358
+ const results: ToolResultMessage[] = [];
359
+
360
+ for (const toolCall of toolCalls) {
361
+ await emit({
362
+ type: "tool_execution_start",
363
+ toolCallId: toolCall.id,
364
+ toolName: toolCall.name,
365
+ args: toolCall.arguments,
366
+ });
367
+
368
+ const preparation = await prepareToolCall(currentContext, assistantMessage, toolCall, config, signal);
369
+ if (preparation.kind === "immediate") {
370
+ results.push(await emitToolCallOutcome(toolCall, preparation.result, preparation.isError, emit));
371
+ } else {
372
+ const executed = await executePreparedToolCall(preparation, signal, emit);
373
+ results.push(
374
+ await finalizeExecutedToolCall(
375
+ currentContext,
376
+ assistantMessage,
377
+ preparation,
378
+ executed,
379
+ config,
380
+ signal,
381
+ emit,
382
+ ),
383
+ );
384
+ }
385
+ }
386
+
387
+ return results;
388
+ }
389
+
/**
 * Run tool calls concurrently: every runnable call is started before any result
 * is awaited, and results are awaited/finalized in the original call order so
 * the output ordering is deterministic.
 */
async function executeToolCallsParallel(
  currentContext: AgentContext,
  assistantMessage: AssistantMessage,
  toolCalls: AgentToolCall[],
  config: AgentLoopConfig,
  signal: AbortSignal | undefined,
  emit: AgentEventSink,
): Promise<ToolResultMessage[]> {
  const results: ToolResultMessage[] = [];
  const runnableCalls: PreparedToolCall[] = [];

  // Preparation (lookup, validation, beforeToolCall) stays sequential; calls
  // that fail preparation yield an immediate result and are never executed.
  for (const toolCall of toolCalls) {
    await emit({
      type: "tool_execution_start",
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      args: toolCall.arguments,
    });

    const preparation = await prepareToolCall(currentContext, assistantMessage, toolCall, config, signal);
    if (preparation.kind === "immediate") {
      results.push(await emitToolCallOutcome(toolCall, preparation.result, preparation.isError, emit));
    } else {
      runnableCalls.push(preparation);
    }
  }

  // Start all executions without awaiting so they run concurrently.
  const runningCalls = runnableCalls.map((prepared) => ({
    prepared,
    execution: executePreparedToolCall(prepared, signal, emit),
  }));

  // Collect in call order (not completion order) to keep results deterministic.
  for (const running of runningCalls) {
    const executed = await running.execution;
    results.push(
      await finalizeExecutedToolCall(
        currentContext,
        assistantMessage,
        running.prepared,
        executed,
        config,
        signal,
        emit,
      ),
    );
  }

  return results;
}
439
+
// A tool call that passed lookup, validation, and the beforeToolCall hook and
// is ready to execute.
type PreparedToolCall = {
  kind: "prepared";
  toolCall: AgentToolCall;
  tool: AgentTool<any>;
  // Validated arguments as returned by validateToolArguments.
  args: unknown;
};

// A result produced without running the tool (unknown tool, validation
// failure, or blocked by beforeToolCall).
type ImmediateToolCallOutcome = {
  kind: "immediate";
  result: AgentToolResult<any>;
  isError: boolean;
};

// The raw outcome of executing a prepared tool call, before afterToolCall
// overrides are applied.
type ExecutedToolCallOutcome = {
  result: AgentToolResult<any>;
  isError: boolean;
};
457
+
458
+ function prepareToolCallArguments(tool: AgentTool<any>, toolCall: AgentToolCall): AgentToolCall {
459
+ if (!tool.prepareArguments) {
460
+ return toolCall;
461
+ }
462
+ const preparedArguments = tool.prepareArguments(toolCall.arguments);
463
+ if (preparedArguments === toolCall.arguments) {
464
+ return toolCall;
465
+ }
466
+ return {
467
+ ...toolCall,
468
+ arguments: preparedArguments as Record<string, any>,
469
+ };
470
+ }
471
+
472
+ async function prepareToolCall(
473
+ currentContext: AgentContext,
474
+ assistantMessage: AssistantMessage,
475
+ toolCall: AgentToolCall,
476
+ config: AgentLoopConfig,
477
+ signal: AbortSignal | undefined,
478
+ ): Promise<PreparedToolCall | ImmediateToolCallOutcome> {
479
+ const tool = currentContext.tools?.find((t) => t.name === toolCall.name);
480
+ if (!tool) {
481
+ return {
482
+ kind: "immediate",
483
+ result: createErrorToolResult(`Tool ${toolCall.name} not found`),
484
+ isError: true,
485
+ };
486
+ }
487
+
488
+ try {
489
+ const preparedToolCall = prepareToolCallArguments(tool, toolCall);
490
+ const validatedArgs = validateToolArguments(tool, preparedToolCall);
491
+ if (config.beforeToolCall) {
492
+ const beforeResult = await config.beforeToolCall(
493
+ {
494
+ assistantMessage,
495
+ toolCall,
496
+ args: validatedArgs,
497
+ context: currentContext,
498
+ },
499
+ signal,
500
+ );
501
+ if (beforeResult?.block) {
502
+ return {
503
+ kind: "immediate",
504
+ result: createErrorToolResult(beforeResult.reason || "Tool execution was blocked"),
505
+ isError: true,
506
+ };
507
+ }
508
+ }
509
+ return {
510
+ kind: "prepared",
511
+ toolCall,
512
+ tool,
513
+ args: validatedArgs,
514
+ };
515
+ } catch (error) {
516
+ return {
517
+ kind: "immediate",
518
+ result: createErrorToolResult(error instanceof Error ? error.message : String(error)),
519
+ isError: true,
520
+ };
521
+ }
522
+ }
523
+
/**
 * Execute a prepared tool call and normalize the outcome.
 * Never rejects: a throwing tool is converted into an error result with
 * isError=true. Streaming updates from the tool are forwarded as
 * tool_execution_update events.
 */
async function executePreparedToolCall(
  prepared: PreparedToolCall,
  signal: AbortSignal | undefined,
  emit: AgentEventSink,
): Promise<ExecutedToolCallOutcome> {
  // Update-event promises are buffered rather than awaited inline so a slow
  // sink cannot stall the tool's progress callback; they are flushed below.
  const updateEvents: Promise<void>[] = [];

  try {
    const result = await prepared.tool.execute(
      prepared.toolCall.id,
      prepared.args as never,
      signal,
      (partialResult) => {
        // Promise.resolve normalizes sync sinks (which return void) to promises.
        updateEvents.push(
          Promise.resolve(
            emit({
              type: "tool_execution_update",
              toolCallId: prepared.toolCall.id,
              toolName: prepared.toolCall.name,
              args: prepared.toolCall.arguments,
              partialResult,
            }),
          ),
        );
      },
    );
    // Ensure every update event has been delivered before reporting the result.
    await Promise.all(updateEvents);
    return { result, isError: false };
  } catch (error) {
    // Flush pending update events even when the tool failed.
    await Promise.all(updateEvents);
    return {
      result: createErrorToolResult(error instanceof Error ? error.message : String(error)),
      isError: true,
    };
  }
}
560
+
561
+ async function finalizeExecutedToolCall(
562
+ currentContext: AgentContext,
563
+ assistantMessage: AssistantMessage,
564
+ prepared: PreparedToolCall,
565
+ executed: ExecutedToolCallOutcome,
566
+ config: AgentLoopConfig,
567
+ signal: AbortSignal | undefined,
568
+ emit: AgentEventSink,
569
+ ): Promise<ToolResultMessage> {
570
+ let result = executed.result;
571
+ let isError = executed.isError;
572
+
573
+ if (config.afterToolCall) {
574
+ const afterResult = await config.afterToolCall(
575
+ {
576
+ assistantMessage,
577
+ toolCall: prepared.toolCall,
578
+ args: prepared.args,
579
+ result,
580
+ isError,
581
+ context: currentContext,
582
+ },
583
+ signal,
584
+ );
585
+ if (afterResult) {
586
+ result = {
587
+ content: afterResult.content ?? result.content,
588
+ details: afterResult.details ?? result.details,
589
+ };
590
+ isError = afterResult.isError ?? isError;
591
+ }
592
+ }
593
+
594
+ return await emitToolCallOutcome(prepared.toolCall, result, isError, emit);
595
+ }
596
+
597
+ function createErrorToolResult(message: string): AgentToolResult<any> {
598
+ return {
599
+ content: [{ type: "text", text: message }],
600
+ details: {},
601
+ };
602
+ }
603
+
604
+ async function emitToolCallOutcome(
605
+ toolCall: AgentToolCall,
606
+ result: AgentToolResult<any>,
607
+ isError: boolean,
608
+ emit: AgentEventSink,
609
+ ): Promise<ToolResultMessage> {
610
+ await emit({
611
+ type: "tool_execution_end",
612
+ toolCallId: toolCall.id,
613
+ toolName: toolCall.name,
614
+ result,
615
+ isError,
616
+ });
617
+
618
+ const toolResultMessage: ToolResultMessage = {
619
+ role: "toolResult",
620
+ toolCallId: toolCall.id,
621
+ toolName: toolCall.name,
622
+ content: result.content,
623
+ details: result.details,
624
+ isError,
625
+ timestamp: Date.now(),
626
+ };
627
+
628
+ await emit({ type: "message_start", message: toolResultMessage });
629
+ await emit({ type: "message_end", message: toolResultMessage });
630
+ return toolResultMessage;
631
+ }