@mariozechner/pi-agent-core 0.31.0 → 0.32.0

This diff reflects the content of publicly available package versions as published to their respective public registries and is provided for informational purposes only.
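
The substantive change in this release is in the agent loop: the single getQueuedMessages hook on AgentLoopConfig is split into getSteeringMessages (checked before each assistant turn and after every tool call, where a non-empty result skips the remaining tool calls) and getFollowUpMessages (checked when the loop would otherwise stop, where a non-empty result starts another round of turns on the same stream). Below is a minimal sketch of wiring up both hooks; the queue helpers and baseConfig are assumptions, the types are assumed to be re-exported from the package root, and the exact hook signatures are inferred only from how the loop awaits them.

```ts
import type { AgentLoopConfig, AgentMessage } from "@mariozechner/pi-agent-core";

// Hypothetical in-memory queues that a UI layer pushes user input into.
const steeringQueue: AgentMessage[] = [];
const followUpQueue: AgentMessage[] = [];

// Empty a queue and return whatever was in it.
const drain = (queue: AgentMessage[]): AgentMessage[] => queue.splice(0, queue.length);

// Only the two new hooks are shown; the rest of the config (model, convertToLlm,
// apiKey, ...) is assumed to already exist as `baseConfig`.
declare const baseConfig: AgentLoopConfig;

const config: AgentLoopConfig = {
	...baseConfig,
	// Checked before each assistant turn and after every tool call; a non-empty
	// result skips the remaining tool calls and is injected before the next turn.
	getSteeringMessages: async () => drain(steeringQueue),
	// Checked once the loop would otherwise stop; a non-empty result keeps the
	// same agent stream alive for another round of turns.
	getFollowUpMessages: async () => drain(followUpQueue),
};
```
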
@@ -1 +1 @@
- {"version":3,"file":"agent-loop.d.ts","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAGN,WAAW,EAIX,MAAM,qBAAqB,CAAC;AAC7B,OAAO,KAAK,EACX,YAAY,EACZ,UAAU,EACV,eAAe,EACf,YAAY,EAGZ,QAAQ,EACR,MAAM,YAAY,CAAC;AAEpB;;;GAGG;AACH,wBAAgB,SAAS,CACxB,OAAO,EAAE,YAAY,EAAE,EACvB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAqBzC;AAED;;;;;;;GAOG;AACH,wBAAgB,iBAAiB,CAChC,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAsBzC","sourcesContent":["/**\n * Agent loop that works with AgentMessage throughout.\n * Transforms to Message[] only at the LLM call boundary.\n */\n\nimport {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@mariozechner/pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\n/**\n * Start an agent loop with a new prompt message.\n * The prompt is added to the context and events are emitted for it.\n */\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [...prompts];\n\t\tconst currentContext: AgentContext = {\n\t\t\t...context,\n\t\t\tmessages: [...context.messages, ...prompts],\n\t\t};\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tfor (const prompt of prompts) {\n\t\t\tstream.push({ type: \"message_start\", message: prompt });\n\t\t\tstream.push({ type: \"message_end\", message: prompt });\n\t\t}\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\n/**\n * Continue an agent loop from the current context without adding a new message.\n * Used for retries - context already has user message or tool results.\n *\n * **Important:** The last message in context must convert to a `user` or `toolResult` message\n * via `convertToLlm`. 
If it doesn't, the LLM provider will reject the request.\n * This cannot be validated here since `convertToLlm` is only called once per turn.\n */\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [];\n\t\tconst currentContext: AgentContext = { ...context };\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? event.messages : []),\n\t);\n}\n\n/**\n * Main loop logic shared by agentLoop and agentLoopContinue.\n */\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet hasMoreToolCalls = true;\n\tlet firstTurn = true;\n\tlet queuedMessages: AgentMessage[] = (await config.getQueuedMessages?.()) || [];\n\tlet queuedAfterTools: AgentMessage[] | null = null;\n\n\twhile (hasMoreToolCalls || queuedMessages.length > 0) {\n\t\tif (!firstTurn) {\n\t\t\tstream.push({ type: \"turn_start\" });\n\t\t} else {\n\t\t\tfirstTurn = false;\n\t\t}\n\n\t\t// Process queued messages (inject before next assistant response)\n\t\tif (queuedMessages.length > 0) {\n\t\t\tfor (const message of queuedMessages) {\n\t\t\t\tstream.push({ type: \"message_start\", message });\n\t\t\t\tstream.push({ type: \"message_end\", message });\n\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\tnewMessages.push(message);\n\t\t\t}\n\t\t\tqueuedMessages = [];\n\t\t}\n\n\t\t// Stream assistant response\n\t\tconst message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);\n\t\tnewMessages.push(message);\n\n\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\tstream.push({ type: \"turn_end\", message, toolResults: [] });\n\t\t\tstream.push({ type: \"agent_end\", messages: newMessages });\n\t\t\tstream.end(newMessages);\n\t\t\treturn;\n\t\t}\n\n\t\t// Check for tool calls\n\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\thasMoreToolCalls = toolCalls.length > 0;\n\n\t\tconst toolResults: ToolResultMessage[] = [];\n\t\tif (hasMoreToolCalls) {\n\t\t\tconst toolExecution = await executeToolCalls(\n\t\t\t\tcurrentContext.tools,\n\t\t\t\tmessage,\n\t\t\t\tsignal,\n\t\t\t\tstream,\n\t\t\t\tconfig.getQueuedMessages,\n\t\t\t);\n\t\t\ttoolResults.push(...toolExecution.toolResults);\n\t\t\tqueuedAfterTools = toolExecution.queuedMessages ?? 
null;\n\n\t\t\tfor (const result of toolResults) {\n\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\tnewMessages.push(result);\n\t\t\t}\n\t\t}\n\n\t\tstream.push({ type: \"turn_end\", message, toolResults });\n\n\t\t// Get queued messages after turn completes\n\t\tif (queuedAfterTools && queuedAfterTools.length > 0) {\n\t\t\tqueuedMessages = queuedAfterTools;\n\t\t\tqueuedAfterTools = null;\n\t\t} else {\n\t\t\tqueuedMessages = (await config.getQueuedMessages?.()) || [];\n\t\t}\n\t}\n\n\tstream.push({ type: \"agent_end\", messages: newMessages });\n\tstream.end(newMessages);\n}\n\n/**\n * Stream an assistant response from the LLM.\n * This is where AgentMessage[] gets transformed to Message[] for the LLM.\n */\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\t// Apply context transform if configured (AgentMessage[] → AgentMessage[])\n\tlet messages = context.messages;\n\tif (config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\n\t// Convert to LLM-compatible messages (AgentMessage[] → Message[])\n\tconst llmMessages = await config.convertToLlm(messages);\n\n\t// Build LLM context\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\n\tconst streamFunction = streamFn || streamSimple;\n\n\t// Resolve API key (important for expiring tokens)\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tstream.push({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message: { ...finalMessage } });\n\t\t\t\t}\n\t\t\t\tstream.push({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn await response.result();\n}\n\n/**\n * Execute tool calls from an assistant message.\n */\nasync function executeToolCalls(\n\ttools: AgentTool<any>[] 
| undefined,\n\tassistantMessage: AssistantMessage,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetQueuedMessages?: AgentLoopConfig[\"getQueuedMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; queuedMessages?: AgentMessage[] }> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tconst results: ToolResultMessage[] = [];\n\tlet queuedMessages: AgentMessage[] | undefined;\n\n\tfor (let index = 0; index < toolCalls.length; index++) {\n\t\tconst toolCall = toolCalls[index];\n\t\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tlet result: AgentToolResult<any>;\n\t\tlet isError = false;\n\n\t\ttry {\n\t\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\n\t\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\n\t\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\t\tstream.push({\n\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\ttoolName: toolCall.name,\n\t\t\t\t\targs: toolCall.arguments,\n\t\t\t\t\tpartialResult,\n\t\t\t\t});\n\t\t\t});\n\t\t} catch (e) {\n\t\t\tresult = {\n\t\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? e.message : String(e) }],\n\t\t\t\tdetails: {},\n\t\t\t};\n\t\t\tisError = true;\n\t\t}\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_end\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tresult,\n\t\t\tisError,\n\t\t});\n\n\t\tconst toolResultMessage: ToolResultMessage = {\n\t\t\trole: \"toolResult\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tcontent: result.content,\n\t\t\tdetails: result.details,\n\t\t\tisError,\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\tresults.push(toolResultMessage);\n\t\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\t\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\t\t// Check for queued messages - skip remaining tools if user interrupted\n\t\tif (getQueuedMessages) {\n\t\t\tconst queued = await getQueuedMessages();\n\t\t\tif (queued.length > 0) {\n\t\t\t\tqueuedMessages = queued;\n\t\t\t\tconst remainingCalls = toolCalls.slice(index + 1);\n\t\t\t\tfor (const skipped of remainingCalls) {\n\t\t\t\t\tresults.push(skipToolCall(skipped, stream));\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { toolResults: results, queuedMessages };\n}\n\nfunction skipToolCall(\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): ToolResultMessage {\n\tconst result: AgentToolResult<any> = {\n\t\tcontent: [{ type: \"text\", text: \"Skipped due to queued user message.\" }],\n\t\tdetails: {},\n\t};\n\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: toolCall.arguments,\n\t});\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError: true,\n\t});\n\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: {},\n\t\tisError: true,\n\t\ttimestamp: Date.now(),\n\t};\n\n\tstream.push({ type: 
\"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\treturn toolResultMessage;\n}\n"]}
+ {"version":3,"file":"agent-loop.d.ts","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAGN,WAAW,EAIX,MAAM,qBAAqB,CAAC;AAC7B,OAAO,KAAK,EACX,YAAY,EACZ,UAAU,EACV,eAAe,EACf,YAAY,EAGZ,QAAQ,EACR,MAAM,YAAY,CAAC;AAEpB;;;GAGG;AACH,wBAAgB,SAAS,CACxB,OAAO,EAAE,YAAY,EAAE,EACvB,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAqBzC;AAED;;;;;;;GAOG;AACH,wBAAgB,iBAAiB,CAChC,OAAO,EAAE,YAAY,EACrB,MAAM,EAAE,eAAe,EACvB,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,QAAQ,GACjB,WAAW,CAAC,UAAU,EAAE,YAAY,EAAE,CAAC,CAsBzC","sourcesContent":["/**\n * Agent loop that works with AgentMessage throughout.\n * Transforms to Message[] only at the LLM call boundary.\n */\n\nimport {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@mariozechner/pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\n/**\n * Start an agent loop with a new prompt message.\n * The prompt is added to the context and events are emitted for it.\n */\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [...prompts];\n\t\tconst currentContext: AgentContext = {\n\t\t\t...context,\n\t\t\tmessages: [...context.messages, ...prompts],\n\t\t};\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tfor (const prompt of prompts) {\n\t\t\tstream.push({ type: \"message_start\", message: prompt });\n\t\t\tstream.push({ type: \"message_end\", message: prompt });\n\t\t}\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\n/**\n * Continue an agent loop from the current context without adding a new message.\n * Used for retries - context already has user message or tool results.\n *\n * **Important:** The last message in context must convert to a `user` or `toolResult` message\n * via `convertToLlm`. 
If it doesn't, the LLM provider will reject the request.\n * This cannot be validated here since `convertToLlm` is only called once per turn.\n */\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [];\n\t\tconst currentContext: AgentContext = { ...context };\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? event.messages : []),\n\t);\n}\n\n/**\n * Main loop logic shared by agentLoop and agentLoopContinue.\n */\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet firstTurn = true;\n\t// Check for steering messages at start (user may have typed while waiting)\n\tlet pendingMessages: AgentMessage[] = (await config.getSteeringMessages?.()) || [];\n\n\t// Outer loop: continues when queued follow-up messages arrive after agent would stop\n\twhile (true) {\n\t\tlet hasMoreToolCalls = true;\n\t\tlet steeringAfterTools: AgentMessage[] | null = null;\n\n\t\t// Inner loop: process tool calls and steering messages\n\t\twhile (hasMoreToolCalls || pendingMessages.length > 0) {\n\t\t\tif (!firstTurn) {\n\t\t\t\tstream.push({ type: \"turn_start\" });\n\t\t\t} else {\n\t\t\t\tfirstTurn = false;\n\t\t\t}\n\n\t\t\t// Process pending messages (inject before next assistant response)\n\t\t\tif (pendingMessages.length > 0) {\n\t\t\t\tfor (const message of pendingMessages) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message });\n\t\t\t\t\tstream.push({ type: \"message_end\", message });\n\t\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\t\tnewMessages.push(message);\n\t\t\t\t}\n\t\t\t\tpendingMessages = [];\n\t\t\t}\n\n\t\t\t// Stream assistant response\n\t\t\tconst message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);\n\t\t\tnewMessages.push(message);\n\n\t\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\t\tstream.push({ type: \"turn_end\", message, toolResults: [] });\n\t\t\t\tstream.push({ type: \"agent_end\", messages: newMessages });\n\t\t\t\tstream.end(newMessages);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Check for tool calls\n\t\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\t\thasMoreToolCalls = toolCalls.length > 0;\n\n\t\t\tconst toolResults: ToolResultMessage[] = [];\n\t\t\tif (hasMoreToolCalls) {\n\t\t\t\tconst toolExecution = await 
executeToolCalls(\n\t\t\t\t\tcurrentContext.tools,\n\t\t\t\t\tmessage,\n\t\t\t\t\tsignal,\n\t\t\t\t\tstream,\n\t\t\t\t\tconfig.getSteeringMessages,\n\t\t\t\t);\n\t\t\t\ttoolResults.push(...toolExecution.toolResults);\n\t\t\t\tsteeringAfterTools = toolExecution.steeringMessages ?? null;\n\n\t\t\t\tfor (const result of toolResults) {\n\t\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\t\tnewMessages.push(result);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tstream.push({ type: \"turn_end\", message, toolResults });\n\n\t\t\t// Get steering messages after turn completes\n\t\t\tif (steeringAfterTools && steeringAfterTools.length > 0) {\n\t\t\t\tpendingMessages = steeringAfterTools;\n\t\t\t\tsteeringAfterTools = null;\n\t\t\t} else {\n\t\t\t\tpendingMessages = (await config.getSteeringMessages?.()) || [];\n\t\t\t}\n\t\t}\n\n\t\t// Agent would stop here. Check for follow-up messages.\n\t\tconst followUpMessages = (await config.getFollowUpMessages?.()) || [];\n\t\tif (followUpMessages.length > 0) {\n\t\t\t// Set as pending so inner loop processes them\n\t\t\tpendingMessages = followUpMessages;\n\t\t\tcontinue;\n\t\t}\n\n\t\t// No more messages, exit\n\t\tbreak;\n\t}\n\n\tstream.push({ type: \"agent_end\", messages: newMessages });\n\tstream.end(newMessages);\n}\n\n/**\n * Stream an assistant response from the LLM.\n * This is where AgentMessage[] gets transformed to Message[] for the LLM.\n */\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\t// Apply context transform if configured (AgentMessage[] → AgentMessage[])\n\tlet messages = context.messages;\n\tif (config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\n\t// Convert to LLM-compatible messages (AgentMessage[] → Message[])\n\tconst llmMessages = await config.convertToLlm(messages);\n\n\t// Build LLM context\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\n\tconst streamFunction = streamFn || streamSimple;\n\n\t// Resolve API key (important for expiring tokens)\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? 
await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tstream.push({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message: { ...finalMessage } });\n\t\t\t\t}\n\t\t\t\tstream.push({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn await response.result();\n}\n\n/**\n * Execute tool calls from an assistant message.\n */\nasync function executeToolCalls(\n\ttools: AgentTool<any>[] | undefined,\n\tassistantMessage: AssistantMessage,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetSteeringMessages?: AgentLoopConfig[\"getSteeringMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; steeringMessages?: AgentMessage[] }> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tconst results: ToolResultMessage[] = [];\n\tlet steeringMessages: AgentMessage[] | undefined;\n\n\tfor (let index = 0; index < toolCalls.length; index++) {\n\t\tconst toolCall = toolCalls[index];\n\t\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tlet result: AgentToolResult<any>;\n\t\tlet isError = false;\n\n\t\ttry {\n\t\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\n\t\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\n\t\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\t\tstream.push({\n\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\ttoolName: toolCall.name,\n\t\t\t\t\targs: toolCall.arguments,\n\t\t\t\t\tpartialResult,\n\t\t\t\t});\n\t\t\t});\n\t\t} catch (e) {\n\t\t\tresult = {\n\t\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? 
e.message : String(e) }],\n\t\t\t\tdetails: {},\n\t\t\t};\n\t\t\tisError = true;\n\t\t}\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_end\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tresult,\n\t\t\tisError,\n\t\t});\n\n\t\tconst toolResultMessage: ToolResultMessage = {\n\t\t\trole: \"toolResult\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tcontent: result.content,\n\t\t\tdetails: result.details,\n\t\t\tisError,\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\tresults.push(toolResultMessage);\n\t\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\t\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\t\t// Check for steering messages - skip remaining tools if user interrupted\n\t\tif (getSteeringMessages) {\n\t\t\tconst steering = await getSteeringMessages();\n\t\t\tif (steering.length > 0) {\n\t\t\t\tsteeringMessages = steering;\n\t\t\t\tconst remainingCalls = toolCalls.slice(index + 1);\n\t\t\t\tfor (const skipped of remainingCalls) {\n\t\t\t\t\tresults.push(skipToolCall(skipped, stream));\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { toolResults: results, steeringMessages };\n}\n\nfunction skipToolCall(\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): ToolResultMessage {\n\tconst result: AgentToolResult<any> = {\n\t\tcontent: [{ type: \"text\", text: \"Skipped due to queued user message.\" }],\n\t\tdetails: {},\n\t};\n\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: toolCall.arguments,\n\t});\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError: true,\n\t});\n\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: {},\n\t\tisError: true,\n\t\ttimestamp: Date.now(),\n\t};\n\n\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\treturn toolResultMessage;\n}\n"]}
@@ -57,58 +57,72 @@ function createAgentStream() {
  * Main loop logic shared by agentLoop and agentLoopContinue.
  */
  async function runLoop(currentContext, newMessages, config, signal, stream, streamFn) {
- let hasMoreToolCalls = true;
  let firstTurn = true;
- let queuedMessages = (await config.getQueuedMessages?.()) || [];
- let queuedAfterTools = null;
- while (hasMoreToolCalls || queuedMessages.length > 0) {
- if (!firstTurn) {
- stream.push({ type: "turn_start" });
- }
- else {
- firstTurn = false;
- }
- // Process queued messages (inject before next assistant response)
- if (queuedMessages.length > 0) {
- for (const message of queuedMessages) {
- stream.push({ type: "message_start", message });
- stream.push({ type: "message_end", message });
- currentContext.messages.push(message);
- newMessages.push(message);
+ // Check for steering messages at start (user may have typed while waiting)
+ let pendingMessages = (await config.getSteeringMessages?.()) || [];
+ // Outer loop: continues when queued follow-up messages arrive after agent would stop
+ while (true) {
+ let hasMoreToolCalls = true;
+ let steeringAfterTools = null;
+ // Inner loop: process tool calls and steering messages
+ while (hasMoreToolCalls || pendingMessages.length > 0) {
+ if (!firstTurn) {
+ stream.push({ type: "turn_start" });
  }
- queuedMessages = [];
- }
- // Stream assistant response
- const message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);
- newMessages.push(message);
- if (message.stopReason === "error" || message.stopReason === "aborted") {
- stream.push({ type: "turn_end", message, toolResults: [] });
- stream.push({ type: "agent_end", messages: newMessages });
- stream.end(newMessages);
- return;
- }
- // Check for tool calls
- const toolCalls = message.content.filter((c) => c.type === "toolCall");
- hasMoreToolCalls = toolCalls.length > 0;
- const toolResults = [];
- if (hasMoreToolCalls) {
- const toolExecution = await executeToolCalls(currentContext.tools, message, signal, stream, config.getQueuedMessages);
- toolResults.push(...toolExecution.toolResults);
- queuedAfterTools = toolExecution.queuedMessages ?? null;
- for (const result of toolResults) {
- currentContext.messages.push(result);
- newMessages.push(result);
+ else {
+ firstTurn = false;
+ }
+ // Process pending messages (inject before next assistant response)
+ if (pendingMessages.length > 0) {
+ for (const message of pendingMessages) {
+ stream.push({ type: "message_start", message });
+ stream.push({ type: "message_end", message });
+ currentContext.messages.push(message);
+ newMessages.push(message);
+ }
+ pendingMessages = [];
+ }
+ // Stream assistant response
+ const message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);
+ newMessages.push(message);
+ if (message.stopReason === "error" || message.stopReason === "aborted") {
+ stream.push({ type: "turn_end", message, toolResults: [] });
+ stream.push({ type: "agent_end", messages: newMessages });
+ stream.end(newMessages);
+ return;
+ }
+ // Check for tool calls
+ const toolCalls = message.content.filter((c) => c.type === "toolCall");
+ hasMoreToolCalls = toolCalls.length > 0;
+ const toolResults = [];
+ if (hasMoreToolCalls) {
+ const toolExecution = await executeToolCalls(currentContext.tools, message, signal, stream, config.getSteeringMessages);
+ toolResults.push(...toolExecution.toolResults);
+ steeringAfterTools = toolExecution.steeringMessages ?? null;
+ for (const result of toolResults) {
+ currentContext.messages.push(result);
+ newMessages.push(result);
+ }
+ }
+ stream.push({ type: "turn_end", message, toolResults });
+ // Get steering messages after turn completes
+ if (steeringAfterTools && steeringAfterTools.length > 0) {
+ pendingMessages = steeringAfterTools;
+ steeringAfterTools = null;
+ }
+ else {
+ pendingMessages = (await config.getSteeringMessages?.()) || [];
  }
  }
- stream.push({ type: "turn_end", message, toolResults });
- // Get queued messages after turn completes
- if (queuedAfterTools && queuedAfterTools.length > 0) {
- queuedMessages = queuedAfterTools;
- queuedAfterTools = null;
- }
- else {
- queuedMessages = (await config.getQueuedMessages?.()) || [];
+ // Agent would stop here. Check for follow-up messages.
+ const followUpMessages = (await config.getFollowUpMessages?.()) || [];
+ if (followUpMessages.length > 0) {
+ // Set as pending so inner loop processes them
+ pendingMessages = followUpMessages;
+ continue;
  }
+ // No more messages, exit
+ break;
  }
  stream.push({ type: "agent_end", messages: newMessages });
  stream.end(newMessages);
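
Because follow-up messages re-enter the outer loop instead of ending the run, one agentLoop call can now cover several rounds of turns while still emitting a single agent_start/agent_end pair. A rough consumption sketch, assuming the returned EventStream is async-iterable the same way the loop iterates the LLM stream, and with the context, config, and prompt left as placeholders:

```ts
import { agentLoop } from "@mariozechner/pi-agent-core";
import type { AgentContext, AgentLoopConfig, AgentMessage } from "@mariozechner/pi-agent-core";

// Placeholders; building a real context, config, and prompt is out of scope here.
declare const context: AgentContext;
declare const config: AgentLoopConfig;
declare const prompt: AgentMessage;

const stream = agentLoop([prompt], context, config);

// Count turns across all rounds; follow-up messages extend this same run
// rather than starting a new one.
let turns = 0;
for await (const event of stream) {
	if (event.type === "turn_start") turns++;
	if (event.type === "agent_end") {
		console.log(`run finished after ${turns} turns with ${event.messages.length} new messages`);
	}
}
```
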
@@ -190,10 +204,10 @@ async function streamAssistantResponse(context, config, signal, stream, streamFn
  /**
  * Execute tool calls from an assistant message.
  */
- async function executeToolCalls(tools, assistantMessage, signal, stream, getQueuedMessages) {
+ async function executeToolCalls(tools, assistantMessage, signal, stream, getSteeringMessages) {
  const toolCalls = assistantMessage.content.filter((c) => c.type === "toolCall");
  const results = [];
- let queuedMessages;
+ let steeringMessages;
  for (let index = 0; index < toolCalls.length; index++) {
  const toolCall = toolCalls[index];
  const tool = tools?.find((t) => t.name === toolCall.name);
@@ -245,11 +259,11 @@ async function executeToolCalls(tools, assistantMessage, signal, stream, getQueu
  results.push(toolResultMessage);
  stream.push({ type: "message_start", message: toolResultMessage });
  stream.push({ type: "message_end", message: toolResultMessage });
- // Check for queued messages - skip remaining tools if user interrupted
- if (getQueuedMessages) {
- const queued = await getQueuedMessages();
- if (queued.length > 0) {
- queuedMessages = queued;
+ // Check for steering messages - skip remaining tools if user interrupted
+ if (getSteeringMessages) {
+ const steering = await getSteeringMessages();
+ if (steering.length > 0) {
+ steeringMessages = steering;
  const remainingCalls = toolCalls.slice(index + 1);
  for (const skipped of remainingCalls) {
  results.push(skipToolCall(skipped, stream));
@@ -258,7 +272,7 @@ async function executeToolCalls(tools, assistantMessage, signal, stream, getQueu
  }
  }
  }
- return { toolResults: results, queuedMessages };
+ return { toolResults: results, steeringMessages };
  }
  function skipToolCall(toolCall, stream) {
  const result = {
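
The executeToolCalls hunks above also show what happens when steering input arrives while tools are still running: the remaining tool calls from that assistant message are not executed, but each one still gets a toolResult with isError: true and the text "Skipped due to queued user message." so the conversation stays well-formed for the next LLM call. A small sketch of spotting those skipped calls from the emitted events, under the same async-iteration and export assumptions as the earlier sketches:

```ts
import type { AgentEvent } from "@mariozechner/pi-agent-core";

// Collect the names of tool calls that were skipped because steering input
// arrived mid-execution. `events` stands in for the stream returned by agentLoop.
async function collectSkippedTools(events: AsyncIterable<AgentEvent>): Promise<string[]> {
	const skipped: string[] = [];
	for await (const event of events) {
		if (event.type !== "tool_execution_end" || !event.isError) continue;
		const text = event.result.content
			.map((c) => (c.type === "text" ? c.text : ""))
			.join(" ");
		if (text.includes("Skipped due to queued user message")) {
			skipped.push(event.toolName);
		}
	}
	return skipped;
}
```
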
@@ -1 +1 @@
- {"version":3,"file":"agent-loop.js","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAGN,WAAW,EACX,YAAY,EAEZ,qBAAqB,GACrB,MAAM,qBAAqB,CAAC;AAW7B;;;GAGG;AACH,MAAM,UAAU,SAAS,CACxB,OAAuB,EACvB,OAAqB,EACrB,MAAuB,EACvB,MAAoB,EACpB,QAAmB,EACuB;IAC1C,MAAM,MAAM,GAAG,iBAAiB,EAAE,CAAC;IAEnC,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,WAAW,GAAmB,CAAC,GAAG,OAAO,CAAC,CAAC;QACjD,MAAM,cAAc,GAAiB;YACpC,GAAG,OAAO;YACV,QAAQ,EAAE,CAAC,GAAG,OAAO,CAAC,QAAQ,EAAE,GAAG,OAAO,CAAC;SAC3C,CAAC;QAEF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC,CAAC;QACrC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;QACpC,KAAK,MAAM,MAAM,IAAI,OAAO,EAAE,CAAC;YAC9B,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YACxD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;QACvD,CAAC;QAED,MAAM,OAAO,CAAC,cAAc,EAAE,WAAW,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,CAAC,CAAC;IAAA,CAC7E,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,iBAAiB,CAChC,OAAqB,EACrB,MAAuB,EACvB,MAAoB,EACpB,QAAmB,EACuB;IAC1C,IAAI,OAAO,CAAC,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;QACnC,MAAM,IAAI,KAAK,CAAC,yCAAyC,CAAC,CAAC;IAC5D,CAAC;IAED,IAAI,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;QACxE,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;IACjE,CAAC;IAED,MAAM,MAAM,GAAG,iBAAiB,EAAE,CAAC;IAEnC,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,WAAW,GAAmB,EAAE,CAAC;QACvC,MAAM,cAAc,GAAiB,EAAE,GAAG,OAAO,EAAE,CAAC;QAEpD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC,CAAC;QACrC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;QAEpC,MAAM,OAAO,CAAC,cAAc,EAAE,WAAW,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,CAAC,CAAC;IAAA,CAC7E,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,iBAAiB,GAA4C;IACrE,OAAO,IAAI,WAAW,CACrB,CAAC,KAAiB,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,KAAK,WAAW,EACjD,CAAC,KAAiB,EAAE,EAAE,CAAC,CAAC,KAAK,CAAC,IAAI,KAAK,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,CACzE,CAAC;AAAA,CACF;AAED;;GAEG;AACH,KAAK,UAAU,OAAO,CACrB,cAA4B,EAC5B,WAA2B,EAC3B,MAAuB,EACvB,MAA+B,EAC/B,MAA+C,EAC/C,QAAmB,EACH;IAChB,IAAI,gBAAgB,GAAG,IAAI,CAAC;IAC5B,IAAI,SAAS,GAAG,IAAI,CAAC;IACrB,IAAI,cAAc,GAAmB,CAAC,MAAM,MAAM,CAAC,iBAAiB,EAAE,EAAE,CAAC,IAAI,EAAE,CAAC;IAChF,IAAI,gBAAgB,GAA0B,IAAI,CAAC;IAEnD,OAAO,gBAAgB,IAAI,cAAc,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QACtD,IAAI,CAAC,SAAS,EAAE,CAAC;YAChB,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;QACrC,CAAC;aAAM,CAAC;YACP,SAAS,GAAG,KAAK,CAAC;QACnB,CAAC;QAED,kEAAkE;QAClE,IAAI,cAAc,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YAC/B,KAAK,MAAM,OAAO,IAAI,cAAc,EAAE,CAAC;gBACtC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,CAAC,CAAC;gBAChD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,CAAC,CAAC;gBAC9C,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;gBACtC,WAAW,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YAC3B,CAAC;YACD,cAAc,GAAG,EAAE,CAAC;QACrB,CAAC;QAED,4BAA4B;QAC5B,MAAM,OAAO,GAAG,MAAM,uBAAuB,CAAC,cAAc,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,CAAC,CAAC;QAChG,WAAW,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QAE1B,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,UAAU,KAAK,SAAS,EAAE,CAAC;YACxE,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,UAAU,EAAE,OAAO,EAAE,WAAW,EAAE,EAAE,EAAE,CAAC,CAAC;YAC5D,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC;YAC1D,MAAM,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC;YACxB,OAAO;QACR,CAAC;QAED,uBAAuB;QACvB,MAAM,SAAS,GAAG,OAAO,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAC,CAAC;QACvE,gBAAgB,GAAG,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC;QAExC,MAAM,WAAW,GAAwB,EAAE,CAAC;QAC5C,IAAI,gBAAgB,EAAE,CAAC
;YACtB,MAAM,aAAa,GAAG,MAAM,gBAAgB,CAC3C,cAAc,CAAC,KAAK,EACpB,OAAO,EACP,MAAM,EACN,MAAM,EACN,MAAM,CAAC,iBAAiB,CACxB,CAAC;YACF,WAAW,CAAC,IAAI,CAAC,GAAG,aAAa,CAAC,WAAW,CAAC,CAAC;YAC/C,gBAAgB,GAAG,aAAa,CAAC,cAAc,IAAI,IAAI,CAAC;YAExD,KAAK,MAAM,MAAM,IAAI,WAAW,EAAE,CAAC;gBAClC,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;gBACrC,WAAW,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;YAC1B,CAAC;QACF,CAAC;QAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,UAAU,EAAE,OAAO,EAAE,WAAW,EAAE,CAAC,CAAC;QAExD,2CAA2C;QAC3C,IAAI,gBAAgB,IAAI,gBAAgB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACrD,cAAc,GAAG,gBAAgB,CAAC;YAClC,gBAAgB,GAAG,IAAI,CAAC;QACzB,CAAC;aAAM,CAAC;YACP,cAAc,GAAG,CAAC,MAAM,MAAM,CAAC,iBAAiB,EAAE,EAAE,CAAC,IAAI,EAAE,CAAC;QAC7D,CAAC;IACF,CAAC;IAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC;IAC1D,MAAM,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC;AAAA,CACxB;AAED;;;GAGG;AACH,KAAK,UAAU,uBAAuB,CACrC,OAAqB,EACrB,MAAuB,EACvB,MAA+B,EAC/B,MAA+C,EAC/C,QAAmB,EACS;IAC5B,4EAA0E;IAC1E,IAAI,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAC;IAChC,IAAI,MAAM,CAAC,gBAAgB,EAAE,CAAC;QAC7B,QAAQ,GAAG,MAAM,MAAM,CAAC,gBAAgB,CAAC,QAAQ,EAAE,MAAM,CAAC,CAAC;IAC5D,CAAC;IAED,oEAAkE;IAClE,MAAM,WAAW,GAAG,MAAM,MAAM,CAAC,YAAY,CAAC,QAAQ,CAAC,CAAC;IAExD,oBAAoB;IACpB,MAAM,UAAU,GAAY;QAC3B,YAAY,EAAE,OAAO,CAAC,YAAY;QAClC,QAAQ,EAAE,WAAW;QACrB,KAAK,EAAE,OAAO,CAAC,KAAK;KACpB,CAAC;IAEF,MAAM,cAAc,GAAG,QAAQ,IAAI,YAAY,CAAC;IAEhD,kDAAkD;IAClD,MAAM,cAAc,GACnB,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,MAAM,CAAC,SAAS,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC;IAEjG,MAAM,QAAQ,GAAG,MAAM,cAAc,CAAC,MAAM,CAAC,KAAK,EAAE,UAAU,EAAE;QAC/D,GAAG,MAAM;QACT,MAAM,EAAE,cAAc;QACtB,MAAM;KACN,CAAC,CAAC;IAEH,IAAI,cAAc,GAA4B,IAAI,CAAC;IACnD,IAAI,YAAY,GAAG,KAAK,CAAC;IAEzB,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,QAAQ,EAAE,CAAC;QACpC,QAAQ,KAAK,CAAC,IAAI,EAAE,CAAC;YACpB,KAAK,OAAO;gBACX,cAAc,GAAG,KAAK,CAAC,OAAO,CAAC;gBAC/B,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC;gBACtC,YAAY,GAAG,IAAI,CAAC;gBACpB,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,EAAE,GAAG,cAAc,EAAE,EAAE,CAAC,CAAC;gBACvE,MAAM;YAEP,KAAK,YAAY,CAAC;YAClB,KAAK,YAAY,CAAC;YAClB,KAAK,UAAU,CAAC;YAChB,KAAK,gBAAgB,CAAC;YACtB,KAAK,gBAAgB,CAAC;YACtB,KAAK,cAAc,CAAC;YACpB,KAAK,gBAAgB,CAAC;YACtB,KAAK,gBAAgB,CAAC;YACtB,KAAK,cAAc;gBAClB,IAAI,cAAc,EAAE,CAAC;oBACpB,cAAc,GAAG,KAAK,CAAC,OAAO,CAAC;oBAC/B,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,cAAc,CAAC;oBAC/D,MAAM,CAAC,IAAI,CAAC;wBACX,IAAI,EAAE,gBAAgB;wBACtB,qBAAqB,EAAE,KAAK;wBAC5B,OAAO,EAAE,EAAE,GAAG,cAAc,EAAE;qBAC9B,CAAC,CAAC;gBACJ,CAAC;gBACD,MAAM;YAEP,KAAK,MAAM,CAAC;YACZ,KAAK,OAAO,EAAE,CAAC;gBACd,MAAM,YAAY,GAAG,MAAM,QAAQ,CAAC,MAAM,EAAE,CAAC;gBAC7C,IAAI,YAAY,EAAE,CAAC;oBAClB,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,YAAY,CAAC;gBAC9D,CAAC;qBAAM,CAAC;oBACP,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gBACrC,CAAC;gBACD,IAAI,CAAC,YAAY,EAAE,CAAC;oBACnB,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,EAAE,GAAG,YAAY,EAAE,EAAE,CAAC,CAAC;gBACtE,CAAC;gBACD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,YAAY,EAAE,CAAC,CAAC;gBAC5D,OAAO,YAAY,CAAC;YACrB,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,MAAM,QAAQ,CAAC,MAAM,EAAE,CAAC;AAAA,CAC/B;AAED;;GAEG;AACH,KAAK,UAAU,gBAAgB,CAC9B,KAAmC,EACnC,gBAAkC,EAClC,MAA+B,EAC/B,MAA+C,EAC/C,iBAAwD,EACyB;IACjF,MAAM,SAAS,GAAG,gBAAgB,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAC,CAAC;IAChF,MAAM,OAAO,GAAwB,EAAE,CAAC;IACxC,IAAI,cAA0C,CAAC;IAE/C,KAAK,IAAI,KAAK,GAAG,CAAC,EAAE,KAAK,GAAG,SAAS,CAAC,MAAM,EAAE,KAAK,EAAE,EAAE,CAAC;QACvD,MAAM,QAAQ,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC;QAClC,MAAM,IAAI,
GAAG,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,QAAQ,CAAC,IAAI,CAAC,CAAC;QAE1D,MAAM,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,sBAAsB;YAC5B,UAAU,EAAE,QAAQ,CAAC,EAAE;YACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;YACvB,IAAI,EAAE,QAAQ,CAAC,SAAS;SACxB,CAAC,CAAC;QAEH,IAAI,MAA4B,CAAC;QACjC,IAAI,OAAO,GAAG,KAAK,CAAC;QAEpB,IAAI,CAAC;YACJ,IAAI,CAAC,IAAI;gBAAE,MAAM,IAAI,KAAK,CAAC,QAAQ,QAAQ,CAAC,IAAI,YAAY,CAAC,CAAC;YAE9D,MAAM,aAAa,GAAG,qBAAqB,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;YAE5D,MAAM,GAAG,MAAM,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,aAAa,EAAE,MAAM,EAAE,CAAC,aAAa,EAAE,EAAE,CAAC;gBAClF,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,uBAAuB;oBAC7B,UAAU,EAAE,QAAQ,CAAC,EAAE;oBACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;oBACvB,IAAI,EAAE,QAAQ,CAAC,SAAS;oBACxB,aAAa;iBACb,CAAC,CAAC;YAAA,CACH,CAAC,CAAC;QACJ,CAAC;QAAC,OAAO,CAAC,EAAE,CAAC;YACZ,MAAM,GAAG;gBACR,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,CAAC,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;gBAC7E,OAAO,EAAE,EAAE;aACX,CAAC;YACF,OAAO,GAAG,IAAI,CAAC;QAChB,CAAC;QAED,MAAM,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,oBAAoB;YAC1B,UAAU,EAAE,QAAQ,CAAC,EAAE;YACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;YACvB,MAAM;YACN,OAAO;SACP,CAAC,CAAC;QAEH,MAAM,iBAAiB,GAAsB;YAC5C,IAAI,EAAE,YAAY;YAClB,UAAU,EAAE,QAAQ,CAAC,EAAE;YACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;YACvB,OAAO,EAAE,MAAM,CAAC,OAAO;YACvB,OAAO,EAAE,MAAM,CAAC,OAAO;YACvB,OAAO;YACP,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;SACrB,CAAC;QAEF,OAAO,CAAC,IAAI,CAAC,iBAAiB,CAAC,CAAC;QAChC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;QACnE,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;QAEjE,uEAAuE;QACvE,IAAI,iBAAiB,EAAE,CAAC;YACvB,MAAM,MAAM,GAAG,MAAM,iBAAiB,EAAE,CAAC;YACzC,IAAI,MAAM,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACvB,cAAc,GAAG,MAAM,CAAC;gBACxB,MAAM,cAAc,GAAG,SAAS,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;gBAClD,KAAK,MAAM,OAAO,IAAI,cAAc,EAAE,CAAC;oBACtC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,OAAO,EAAE,MAAM,CAAC,CAAC,CAAC;gBAC7C,CAAC;gBACD,MAAM;YACP,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,EAAE,WAAW,EAAE,OAAO,EAAE,cAAc,EAAE,CAAC;AAAA,CAChD;AAED,SAAS,YAAY,CACpB,QAA4E,EAC5E,MAA+C,EAC3B;IACpB,MAAM,MAAM,GAAyB;QACpC,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,qCAAqC,EAAE,CAAC;QACxE,OAAO,EAAE,EAAE;KACX,CAAC;IAEF,MAAM,CAAC,IAAI,CAAC;QACX,IAAI,EAAE,sBAAsB;QAC5B,UAAU,EAAE,QAAQ,CAAC,EAAE;QACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;QACvB,IAAI,EAAE,QAAQ,CAAC,SAAS;KACxB,CAAC,CAAC;IACH,MAAM,CAAC,IAAI,CAAC;QACX,IAAI,EAAE,oBAAoB;QAC1B,UAAU,EAAE,QAAQ,CAAC,EAAE;QACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;QACvB,MAAM;QACN,OAAO,EAAE,IAAI;KACb,CAAC,CAAC;IAEH,MAAM,iBAAiB,GAAsB;QAC5C,IAAI,EAAE,YAAY;QAClB,UAAU,EAAE,QAAQ,CAAC,EAAE;QACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;QACvB,OAAO,EAAE,MAAM,CAAC,OAAO;QACvB,OAAO,EAAE,EAAE;QACX,OAAO,EAAE,IAAI;QACb,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;KACrB,CAAC;IAEF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;IACnE,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;IAEjE,OAAO,iBAAiB,CAAC;AAAA,CACzB","sourcesContent":["/**\n * Agent loop that works with AgentMessage throughout.\n * Transforms to Message[] only at the LLM call boundary.\n */\n\nimport {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@mariozechner/pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\n/**\n * Start an agent loop with a new prompt message.\n * The prompt is added to the context and events are emitted for 
it.\n */\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [...prompts];\n\t\tconst currentContext: AgentContext = {\n\t\t\t...context,\n\t\t\tmessages: [...context.messages, ...prompts],\n\t\t};\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tfor (const prompt of prompts) {\n\t\t\tstream.push({ type: \"message_start\", message: prompt });\n\t\t\tstream.push({ type: \"message_end\", message: prompt });\n\t\t}\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\n/**\n * Continue an agent loop from the current context without adding a new message.\n * Used for retries - context already has user message or tool results.\n *\n * **Important:** The last message in context must convert to a `user` or `toolResult` message\n * via `convertToLlm`. If it doesn't, the LLM provider will reject the request.\n * This cannot be validated here since `convertToLlm` is only called once per turn.\n */\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [];\n\t\tconst currentContext: AgentContext = { ...context };\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? 
event.messages : []),\n\t);\n}\n\n/**\n * Main loop logic shared by agentLoop and agentLoopContinue.\n */\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet hasMoreToolCalls = true;\n\tlet firstTurn = true;\n\tlet queuedMessages: AgentMessage[] = (await config.getQueuedMessages?.()) || [];\n\tlet queuedAfterTools: AgentMessage[] | null = null;\n\n\twhile (hasMoreToolCalls || queuedMessages.length > 0) {\n\t\tif (!firstTurn) {\n\t\t\tstream.push({ type: \"turn_start\" });\n\t\t} else {\n\t\t\tfirstTurn = false;\n\t\t}\n\n\t\t// Process queued messages (inject before next assistant response)\n\t\tif (queuedMessages.length > 0) {\n\t\t\tfor (const message of queuedMessages) {\n\t\t\t\tstream.push({ type: \"message_start\", message });\n\t\t\t\tstream.push({ type: \"message_end\", message });\n\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\tnewMessages.push(message);\n\t\t\t}\n\t\t\tqueuedMessages = [];\n\t\t}\n\n\t\t// Stream assistant response\n\t\tconst message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);\n\t\tnewMessages.push(message);\n\n\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\tstream.push({ type: \"turn_end\", message, toolResults: [] });\n\t\t\tstream.push({ type: \"agent_end\", messages: newMessages });\n\t\t\tstream.end(newMessages);\n\t\t\treturn;\n\t\t}\n\n\t\t// Check for tool calls\n\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\thasMoreToolCalls = toolCalls.length > 0;\n\n\t\tconst toolResults: ToolResultMessage[] = [];\n\t\tif (hasMoreToolCalls) {\n\t\t\tconst toolExecution = await executeToolCalls(\n\t\t\t\tcurrentContext.tools,\n\t\t\t\tmessage,\n\t\t\t\tsignal,\n\t\t\t\tstream,\n\t\t\t\tconfig.getQueuedMessages,\n\t\t\t);\n\t\t\ttoolResults.push(...toolExecution.toolResults);\n\t\t\tqueuedAfterTools = toolExecution.queuedMessages ?? 
null;\n\n\t\t\tfor (const result of toolResults) {\n\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\tnewMessages.push(result);\n\t\t\t}\n\t\t}\n\n\t\tstream.push({ type: \"turn_end\", message, toolResults });\n\n\t\t// Get queued messages after turn completes\n\t\tif (queuedAfterTools && queuedAfterTools.length > 0) {\n\t\t\tqueuedMessages = queuedAfterTools;\n\t\t\tqueuedAfterTools = null;\n\t\t} else {\n\t\t\tqueuedMessages = (await config.getQueuedMessages?.()) || [];\n\t\t}\n\t}\n\n\tstream.push({ type: \"agent_end\", messages: newMessages });\n\tstream.end(newMessages);\n}\n\n/**\n * Stream an assistant response from the LLM.\n * This is where AgentMessage[] gets transformed to Message[] for the LLM.\n */\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\t// Apply context transform if configured (AgentMessage[] → AgentMessage[])\n\tlet messages = context.messages;\n\tif (config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\n\t// Convert to LLM-compatible messages (AgentMessage[] → Message[])\n\tconst llmMessages = await config.convertToLlm(messages);\n\n\t// Build LLM context\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\n\tconst streamFunction = streamFn || streamSimple;\n\n\t// Resolve API key (important for expiring tokens)\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tstream.push({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message: { ...finalMessage } });\n\t\t\t\t}\n\t\t\t\tstream.push({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn await response.result();\n}\n\n/**\n * Execute tool calls from an assistant message.\n */\nasync function executeToolCalls(\n\ttools: AgentTool<any>[] 
| undefined,\n\tassistantMessage: AssistantMessage,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetQueuedMessages?: AgentLoopConfig[\"getQueuedMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; queuedMessages?: AgentMessage[] }> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tconst results: ToolResultMessage[] = [];\n\tlet queuedMessages: AgentMessage[] | undefined;\n\n\tfor (let index = 0; index < toolCalls.length; index++) {\n\t\tconst toolCall = toolCalls[index];\n\t\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tlet result: AgentToolResult<any>;\n\t\tlet isError = false;\n\n\t\ttry {\n\t\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\n\t\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\n\t\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\t\tstream.push({\n\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\ttoolName: toolCall.name,\n\t\t\t\t\targs: toolCall.arguments,\n\t\t\t\t\tpartialResult,\n\t\t\t\t});\n\t\t\t});\n\t\t} catch (e) {\n\t\t\tresult = {\n\t\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? e.message : String(e) }],\n\t\t\t\tdetails: {},\n\t\t\t};\n\t\t\tisError = true;\n\t\t}\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_end\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tresult,\n\t\t\tisError,\n\t\t});\n\n\t\tconst toolResultMessage: ToolResultMessage = {\n\t\t\trole: \"toolResult\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tcontent: result.content,\n\t\t\tdetails: result.details,\n\t\t\tisError,\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\tresults.push(toolResultMessage);\n\t\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\t\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\t\t// Check for queued messages - skip remaining tools if user interrupted\n\t\tif (getQueuedMessages) {\n\t\t\tconst queued = await getQueuedMessages();\n\t\t\tif (queued.length > 0) {\n\t\t\t\tqueuedMessages = queued;\n\t\t\t\tconst remainingCalls = toolCalls.slice(index + 1);\n\t\t\t\tfor (const skipped of remainingCalls) {\n\t\t\t\t\tresults.push(skipToolCall(skipped, stream));\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { toolResults: results, queuedMessages };\n}\n\nfunction skipToolCall(\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): ToolResultMessage {\n\tconst result: AgentToolResult<any> = {\n\t\tcontent: [{ type: \"text\", text: \"Skipped due to queued user message.\" }],\n\t\tdetails: {},\n\t};\n\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: toolCall.arguments,\n\t});\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError: true,\n\t});\n\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: {},\n\t\tisError: true,\n\t\ttimestamp: Date.now(),\n\t};\n\n\tstream.push({ type: 
\"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\treturn toolResultMessage;\n}\n"]}
+ {"version":3,"file":"agent-loop.js","sourceRoot":"","sources":["../src/agent-loop.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAGN,WAAW,EACX,YAAY,EAEZ,qBAAqB,GACrB,MAAM,qBAAqB,CAAC;AAW7B;;;GAGG;AACH,MAAM,UAAU,SAAS,CACxB,OAAuB,EACvB,OAAqB,EACrB,MAAuB,EACvB,MAAoB,EACpB,QAAmB,EACuB;IAC1C,MAAM,MAAM,GAAG,iBAAiB,EAAE,CAAC;IAEnC,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,WAAW,GAAmB,CAAC,GAAG,OAAO,CAAC,CAAC;QACjD,MAAM,cAAc,GAAiB;YACpC,GAAG,OAAO;YACV,QAAQ,EAAE,CAAC,GAAG,OAAO,CAAC,QAAQ,EAAE,GAAG,OAAO,CAAC;SAC3C,CAAC;QAEF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC,CAAC;QACrC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;QACpC,KAAK,MAAM,MAAM,IAAI,OAAO,EAAE,CAAC;YAC9B,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;YACxD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC,CAAC;QACvD,CAAC;QAED,MAAM,OAAO,CAAC,cAAc,EAAE,WAAW,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,CAAC,CAAC;IAAA,CAC7E,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,iBAAiB,CAChC,OAAqB,EACrB,MAAuB,EACvB,MAAoB,EACpB,QAAmB,EACuB;IAC1C,IAAI,OAAO,CAAC,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;QACnC,MAAM,IAAI,KAAK,CAAC,yCAAyC,CAAC,CAAC;IAC5D,CAAC;IAED,IAAI,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;QACxE,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;IACjE,CAAC;IAED,MAAM,MAAM,GAAG,iBAAiB,EAAE,CAAC;IAEnC,CAAC,KAAK,IAAI,EAAE,CAAC;QACZ,MAAM,WAAW,GAAmB,EAAE,CAAC;QACvC,MAAM,cAAc,GAAiB,EAAE,GAAG,OAAO,EAAE,CAAC;QAEpD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC,CAAC;QACrC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;QAEpC,MAAM,OAAO,CAAC,cAAc,EAAE,WAAW,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,CAAC,CAAC;IAAA,CAC7E,CAAC,EAAE,CAAC;IAEL,OAAO,MAAM,CAAC;AAAA,CACd;AAED,SAAS,iBAAiB,GAA4C;IACrE,OAAO,IAAI,WAAW,CACrB,CAAC,KAAiB,EAAE,EAAE,CAAC,KAAK,CAAC,IAAI,KAAK,WAAW,EACjD,CAAC,KAAiB,EAAE,EAAE,CAAC,CAAC,KAAK,CAAC,IAAI,KAAK,WAAW,CAAC,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,EAAE,CAAC,CACzE,CAAC;AAAA,CACF;AAED;;GAEG;AACH,KAAK,UAAU,OAAO,CACrB,cAA4B,EAC5B,WAA2B,EAC3B,MAAuB,EACvB,MAA+B,EAC/B,MAA+C,EAC/C,QAAmB,EACH;IAChB,IAAI,SAAS,GAAG,IAAI,CAAC;IACrB,2EAA2E;IAC3E,IAAI,eAAe,GAAmB,CAAC,MAAM,MAAM,CAAC,mBAAmB,EAAE,EAAE,CAAC,IAAI,EAAE,CAAC;IAEnF,qFAAqF;IACrF,OAAO,IAAI,EAAE,CAAC;QACb,IAAI,gBAAgB,GAAG,IAAI,CAAC;QAC5B,IAAI,kBAAkB,GAA0B,IAAI,CAAC;QAErD,uDAAuD;QACvD,OAAO,gBAAgB,IAAI,eAAe,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACvD,IAAI,CAAC,SAAS,EAAE,CAAC;gBAChB,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;YACrC,CAAC;iBAAM,CAAC;gBACP,SAAS,GAAG,KAAK,CAAC;YACnB,CAAC;YAED,mEAAmE;YACnE,IAAI,eAAe,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAChC,KAAK,MAAM,OAAO,IAAI,eAAe,EAAE,CAAC;oBACvC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,CAAC,CAAC;oBAChD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,CAAC,CAAC;oBAC9C,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;oBACtC,WAAW,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;gBAC3B,CAAC;gBACD,eAAe,GAAG,EAAE,CAAC;YACtB,CAAC;YAED,4BAA4B;YAC5B,MAAM,OAAO,GAAG,MAAM,uBAAuB,CAAC,cAAc,EAAE,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,CAAC,CAAC;YAChG,WAAW,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;YAE1B,IAAI,OAAO,CAAC,UAAU,KAAK,OAAO,IAAI,OAAO,CAAC,UAAU,KAAK,SAAS,EAAE,CAAC;gBACxE,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,UAAU,EAAE,OAAO,EAAE,WAAW,EAAE,EAAE,EAAE,CAAC,CAAC;gBAC5D,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC;gBAC1D,MAAM,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC;gBACxB,OAAO;YACR,CAAC;YAED,uBAAuB;YACvB,MAAM,SAAS,GAAG,OAAO,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAC,CAAC;YACvE,gBAAgB,GAAG,SAAS,CAAC,MAAM,
GAAG,CAAC,CAAC;YAExC,MAAM,WAAW,GAAwB,EAAE,CAAC;YAC5C,IAAI,gBAAgB,EAAE,CAAC;gBACtB,MAAM,aAAa,GAAG,MAAM,gBAAgB,CAC3C,cAAc,CAAC,KAAK,EACpB,OAAO,EACP,MAAM,EACN,MAAM,EACN,MAAM,CAAC,mBAAmB,CAC1B,CAAC;gBACF,WAAW,CAAC,IAAI,CAAC,GAAG,aAAa,CAAC,WAAW,CAAC,CAAC;gBAC/C,kBAAkB,GAAG,aAAa,CAAC,gBAAgB,IAAI,IAAI,CAAC;gBAE5D,KAAK,MAAM,MAAM,IAAI,WAAW,EAAE,CAAC;oBAClC,cAAc,CAAC,QAAQ,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;oBACrC,WAAW,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;gBAC1B,CAAC;YACF,CAAC;YAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,UAAU,EAAE,OAAO,EAAE,WAAW,EAAE,CAAC,CAAC;YAExD,6CAA6C;YAC7C,IAAI,kBAAkB,IAAI,kBAAkB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACzD,eAAe,GAAG,kBAAkB,CAAC;gBACrC,kBAAkB,GAAG,IAAI,CAAC;YAC3B,CAAC;iBAAM,CAAC;gBACP,eAAe,GAAG,CAAC,MAAM,MAAM,CAAC,mBAAmB,EAAE,EAAE,CAAC,IAAI,EAAE,CAAC;YAChE,CAAC;QACF,CAAC;QAED,uDAAuD;QACvD,MAAM,gBAAgB,GAAG,CAAC,MAAM,MAAM,CAAC,mBAAmB,EAAE,EAAE,CAAC,IAAI,EAAE,CAAC;QACtE,IAAI,gBAAgB,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YACjC,8CAA8C;YAC9C,eAAe,GAAG,gBAAgB,CAAC;YACnC,SAAS;QACV,CAAC;QAED,yBAAyB;QACzB,MAAM;IACP,CAAC;IAED,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,QAAQ,EAAE,WAAW,EAAE,CAAC,CAAC;IAC1D,MAAM,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC;AAAA,CACxB;AAED;;;GAGG;AACH,KAAK,UAAU,uBAAuB,CACrC,OAAqB,EACrB,MAAuB,EACvB,MAA+B,EAC/B,MAA+C,EAC/C,QAAmB,EACS;IAC5B,4EAA0E;IAC1E,IAAI,QAAQ,GAAG,OAAO,CAAC,QAAQ,CAAC;IAChC,IAAI,MAAM,CAAC,gBAAgB,EAAE,CAAC;QAC7B,QAAQ,GAAG,MAAM,MAAM,CAAC,gBAAgB,CAAC,QAAQ,EAAE,MAAM,CAAC,CAAC;IAC5D,CAAC;IAED,oEAAkE;IAClE,MAAM,WAAW,GAAG,MAAM,MAAM,CAAC,YAAY,CAAC,QAAQ,CAAC,CAAC;IAExD,oBAAoB;IACpB,MAAM,UAAU,GAAY;QAC3B,YAAY,EAAE,OAAO,CAAC,YAAY;QAClC,QAAQ,EAAE,WAAW;QACrB,KAAK,EAAE,OAAO,CAAC,KAAK;KACpB,CAAC;IAEF,MAAM,cAAc,GAAG,QAAQ,IAAI,YAAY,CAAC;IAEhD,kDAAkD;IAClD,MAAM,cAAc,GACnB,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,MAAM,MAAM,CAAC,SAAS,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC;IAEjG,MAAM,QAAQ,GAAG,MAAM,cAAc,CAAC,MAAM,CAAC,KAAK,EAAE,UAAU,EAAE;QAC/D,GAAG,MAAM;QACT,MAAM,EAAE,cAAc;QACtB,MAAM;KACN,CAAC,CAAC;IAEH,IAAI,cAAc,GAA4B,IAAI,CAAC;IACnD,IAAI,YAAY,GAAG,KAAK,CAAC;IAEzB,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,QAAQ,EAAE,CAAC;QACpC,QAAQ,KAAK,CAAC,IAAI,EAAE,CAAC;YACpB,KAAK,OAAO;gBACX,cAAc,GAAG,KAAK,CAAC,OAAO,CAAC;gBAC/B,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,cAAc,CAAC,CAAC;gBACtC,YAAY,GAAG,IAAI,CAAC;gBACpB,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,EAAE,GAAG,cAAc,EAAE,EAAE,CAAC,CAAC;gBACvE,MAAM;YAEP,KAAK,YAAY,CAAC;YAClB,KAAK,YAAY,CAAC;YAClB,KAAK,UAAU,CAAC;YAChB,KAAK,gBAAgB,CAAC;YACtB,KAAK,gBAAgB,CAAC;YACtB,KAAK,cAAc,CAAC;YACpB,KAAK,gBAAgB,CAAC;YACtB,KAAK,gBAAgB,CAAC;YACtB,KAAK,cAAc;gBAClB,IAAI,cAAc,EAAE,CAAC;oBACpB,cAAc,GAAG,KAAK,CAAC,OAAO,CAAC;oBAC/B,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,cAAc,CAAC;oBAC/D,MAAM,CAAC,IAAI,CAAC;wBACX,IAAI,EAAE,gBAAgB;wBACtB,qBAAqB,EAAE,KAAK;wBAC5B,OAAO,EAAE,EAAE,GAAG,cAAc,EAAE;qBAC9B,CAAC,CAAC;gBACJ,CAAC;gBACD,MAAM;YAEP,KAAK,MAAM,CAAC;YACZ,KAAK,OAAO,EAAE,CAAC;gBACd,MAAM,YAAY,GAAG,MAAM,QAAQ,CAAC,MAAM,EAAE,CAAC;gBAC7C,IAAI,YAAY,EAAE,CAAC;oBAClB,OAAO,CAAC,QAAQ,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,YAAY,CAAC;gBAC9D,CAAC;qBAAM,CAAC;oBACP,OAAO,CAAC,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;gBACrC,CAAC;gBACD,IAAI,CAAC,YAAY,EAAE,CAAC;oBACnB,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,EAAE,GAAG,YAAY,EAAE,EAAE,CAAC,CAAC;gBACtE,CAAC;gBACD,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,YAAY,EAAE,CAAC,CAAC;gBAC5D,OAAO,YAAY,CAAC;YACrB,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,MAAM,QAAQ,CAAC,MAAM,EAAE,CAAC;AAAA,CAC/B;AAED;;GAEG;AACH,KAAK,UAAU,gBAAgB,CAC9B,KAAmC,EACnC,gBAAkC,EAClC,MAA+B,EAC/B,MAA+C,EAC/C,mBAA4D,E
ACuB;IACnF,MAAM,SAAS,GAAG,gBAAgB,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,CAAC,CAAC;IAChF,MAAM,OAAO,GAAwB,EAAE,CAAC;IACxC,IAAI,gBAA4C,CAAC;IAEjD,KAAK,IAAI,KAAK,GAAG,CAAC,EAAE,KAAK,GAAG,SAAS,CAAC,MAAM,EAAE,KAAK,EAAE,EAAE,CAAC;QACvD,MAAM,QAAQ,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC;QAClC,MAAM,IAAI,GAAG,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,QAAQ,CAAC,IAAI,CAAC,CAAC;QAE1D,MAAM,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,sBAAsB;YAC5B,UAAU,EAAE,QAAQ,CAAC,EAAE;YACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;YACvB,IAAI,EAAE,QAAQ,CAAC,SAAS;SACxB,CAAC,CAAC;QAEH,IAAI,MAA4B,CAAC;QACjC,IAAI,OAAO,GAAG,KAAK,CAAC;QAEpB,IAAI,CAAC;YACJ,IAAI,CAAC,IAAI;gBAAE,MAAM,IAAI,KAAK,CAAC,QAAQ,QAAQ,CAAC,IAAI,YAAY,CAAC,CAAC;YAE9D,MAAM,aAAa,GAAG,qBAAqB,CAAC,IAAI,EAAE,QAAQ,CAAC,CAAC;YAE5D,MAAM,GAAG,MAAM,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,EAAE,aAAa,EAAE,MAAM,EAAE,CAAC,aAAa,EAAE,EAAE,CAAC;gBAClF,MAAM,CAAC,IAAI,CAAC;oBACX,IAAI,EAAE,uBAAuB;oBAC7B,UAAU,EAAE,QAAQ,CAAC,EAAE;oBACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;oBACvB,IAAI,EAAE,QAAQ,CAAC,SAAS;oBACxB,aAAa;iBACb,CAAC,CAAC;YAAA,CACH,CAAC,CAAC;QACJ,CAAC;QAAC,OAAO,CAAC,EAAE,CAAC;YACZ,MAAM,GAAG;gBACR,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,CAAC,YAAY,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;gBAC7E,OAAO,EAAE,EAAE;aACX,CAAC;YACF,OAAO,GAAG,IAAI,CAAC;QAChB,CAAC;QAED,MAAM,CAAC,IAAI,CAAC;YACX,IAAI,EAAE,oBAAoB;YAC1B,UAAU,EAAE,QAAQ,CAAC,EAAE;YACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;YACvB,MAAM;YACN,OAAO;SACP,CAAC,CAAC;QAEH,MAAM,iBAAiB,GAAsB;YAC5C,IAAI,EAAE,YAAY;YAClB,UAAU,EAAE,QAAQ,CAAC,EAAE;YACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;YACvB,OAAO,EAAE,MAAM,CAAC,OAAO;YACvB,OAAO,EAAE,MAAM,CAAC,OAAO;YACvB,OAAO;YACP,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;SACrB,CAAC;QAEF,OAAO,CAAC,IAAI,CAAC,iBAAiB,CAAC,CAAC;QAChC,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;QACnE,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;QAEjE,yEAAyE;QACzE,IAAI,mBAAmB,EAAE,CAAC;YACzB,MAAM,QAAQ,GAAG,MAAM,mBAAmB,EAAE,CAAC;YAC7C,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACzB,gBAAgB,GAAG,QAAQ,CAAC;gBAC5B,MAAM,cAAc,GAAG,SAAS,CAAC,KAAK,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;gBAClD,KAAK,MAAM,OAAO,IAAI,cAAc,EAAE,CAAC;oBACtC,OAAO,CAAC,IAAI,CAAC,YAAY,CAAC,OAAO,EAAE,MAAM,CAAC,CAAC,CAAC;gBAC7C,CAAC;gBACD,MAAM;YACP,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,EAAE,WAAW,EAAE,OAAO,EAAE,gBAAgB,EAAE,CAAC;AAAA,CAClD;AAED,SAAS,YAAY,CACpB,QAA4E,EAC5E,MAA+C,EAC3B;IACpB,MAAM,MAAM,GAAyB;QACpC,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,qCAAqC,EAAE,CAAC;QACxE,OAAO,EAAE,EAAE;KACX,CAAC;IAEF,MAAM,CAAC,IAAI,CAAC;QACX,IAAI,EAAE,sBAAsB;QAC5B,UAAU,EAAE,QAAQ,CAAC,EAAE;QACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;QACvB,IAAI,EAAE,QAAQ,CAAC,SAAS;KACxB,CAAC,CAAC;IACH,MAAM,CAAC,IAAI,CAAC;QACX,IAAI,EAAE,oBAAoB;QAC1B,UAAU,EAAE,QAAQ,CAAC,EAAE;QACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;QACvB,MAAM;QACN,OAAO,EAAE,IAAI;KACb,CAAC,CAAC;IAEH,MAAM,iBAAiB,GAAsB;QAC5C,IAAI,EAAE,YAAY;QAClB,UAAU,EAAE,QAAQ,CAAC,EAAE;QACvB,QAAQ,EAAE,QAAQ,CAAC,IAAI;QACvB,OAAO,EAAE,MAAM,CAAC,OAAO;QACvB,OAAO,EAAE,EAAE;QACX,OAAO,EAAE,IAAI;QACb,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;KACrB,CAAC;IAEF,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,eAAe,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;IACnE,MAAM,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,aAAa,EAAE,OAAO,EAAE,iBAAiB,EAAE,CAAC,CAAC;IAEjE,OAAO,iBAAiB,CAAC;AAAA,CACzB","sourcesContent":["/**\n * Agent loop that works with AgentMessage throughout.\n * Transforms to Message[] only at the LLM call boundary.\n */\n\nimport {\n\ttype AssistantMessage,\n\ttype Context,\n\tEventStream,\n\tstreamSimple,\n\ttype 
ToolResultMessage,\n\tvalidateToolArguments,\n} from \"@mariozechner/pi-ai\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentTool,\n\tAgentToolResult,\n\tStreamFn,\n} from \"./types.js\";\n\n/**\n * Start an agent loop with a new prompt message.\n * The prompt is added to the context and events are emitted for it.\n */\nexport function agentLoop(\n\tprompts: AgentMessage[],\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [...prompts];\n\t\tconst currentContext: AgentContext = {\n\t\t\t...context,\n\t\t\tmessages: [...context.messages, ...prompts],\n\t\t};\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\t\tfor (const prompt of prompts) {\n\t\t\tstream.push({ type: \"message_start\", message: prompt });\n\t\t\tstream.push({ type: \"message_end\", message: prompt });\n\t\t}\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\n/**\n * Continue an agent loop from the current context without adding a new message.\n * Used for retries - context already has user message or tool results.\n *\n * **Important:** The last message in context must convert to a `user` or `toolResult` message\n * via `convertToLlm`. If it doesn't, the LLM provider will reject the request.\n * This cannot be validated here since `convertToLlm` is only called once per turn.\n */\nexport function agentLoopContinue(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal?: AbortSignal,\n\tstreamFn?: StreamFn,\n): EventStream<AgentEvent, AgentMessage[]> {\n\tif (context.messages.length === 0) {\n\t\tthrow new Error(\"Cannot continue: no messages in context\");\n\t}\n\n\tif (context.messages[context.messages.length - 1].role === \"assistant\") {\n\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t}\n\n\tconst stream = createAgentStream();\n\n\t(async () => {\n\t\tconst newMessages: AgentMessage[] = [];\n\t\tconst currentContext: AgentContext = { ...context };\n\n\t\tstream.push({ type: \"agent_start\" });\n\t\tstream.push({ type: \"turn_start\" });\n\n\t\tawait runLoop(currentContext, newMessages, config, signal, stream, streamFn);\n\t})();\n\n\treturn stream;\n}\n\nfunction createAgentStream(): EventStream<AgentEvent, AgentMessage[]> {\n\treturn new EventStream<AgentEvent, AgentMessage[]>(\n\t\t(event: AgentEvent) => event.type === \"agent_end\",\n\t\t(event: AgentEvent) => (event.type === \"agent_end\" ? 
event.messages : []),\n\t);\n}\n\n/**\n * Main loop logic shared by agentLoop and agentLoopContinue.\n */\nasync function runLoop(\n\tcurrentContext: AgentContext,\n\tnewMessages: AgentMessage[],\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<void> {\n\tlet firstTurn = true;\n\t// Check for steering messages at start (user may have typed while waiting)\n\tlet pendingMessages: AgentMessage[] = (await config.getSteeringMessages?.()) || [];\n\n\t// Outer loop: continues when queued follow-up messages arrive after agent would stop\n\twhile (true) {\n\t\tlet hasMoreToolCalls = true;\n\t\tlet steeringAfterTools: AgentMessage[] | null = null;\n\n\t\t// Inner loop: process tool calls and steering messages\n\t\twhile (hasMoreToolCalls || pendingMessages.length > 0) {\n\t\t\tif (!firstTurn) {\n\t\t\t\tstream.push({ type: \"turn_start\" });\n\t\t\t} else {\n\t\t\t\tfirstTurn = false;\n\t\t\t}\n\n\t\t\t// Process pending messages (inject before next assistant response)\n\t\t\tif (pendingMessages.length > 0) {\n\t\t\t\tfor (const message of pendingMessages) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message });\n\t\t\t\t\tstream.push({ type: \"message_end\", message });\n\t\t\t\t\tcurrentContext.messages.push(message);\n\t\t\t\t\tnewMessages.push(message);\n\t\t\t\t}\n\t\t\t\tpendingMessages = [];\n\t\t\t}\n\n\t\t\t// Stream assistant response\n\t\t\tconst message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn);\n\t\t\tnewMessages.push(message);\n\n\t\t\tif (message.stopReason === \"error\" || message.stopReason === \"aborted\") {\n\t\t\t\tstream.push({ type: \"turn_end\", message, toolResults: [] });\n\t\t\t\tstream.push({ type: \"agent_end\", messages: newMessages });\n\t\t\t\tstream.end(newMessages);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Check for tool calls\n\t\t\tconst toolCalls = message.content.filter((c) => c.type === \"toolCall\");\n\t\t\thasMoreToolCalls = toolCalls.length > 0;\n\n\t\t\tconst toolResults: ToolResultMessage[] = [];\n\t\t\tif (hasMoreToolCalls) {\n\t\t\t\tconst toolExecution = await executeToolCalls(\n\t\t\t\t\tcurrentContext.tools,\n\t\t\t\t\tmessage,\n\t\t\t\t\tsignal,\n\t\t\t\t\tstream,\n\t\t\t\t\tconfig.getSteeringMessages,\n\t\t\t\t);\n\t\t\t\ttoolResults.push(...toolExecution.toolResults);\n\t\t\t\tsteeringAfterTools = toolExecution.steeringMessages ?? null;\n\n\t\t\t\tfor (const result of toolResults) {\n\t\t\t\t\tcurrentContext.messages.push(result);\n\t\t\t\t\tnewMessages.push(result);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tstream.push({ type: \"turn_end\", message, toolResults });\n\n\t\t\t// Get steering messages after turn completes\n\t\t\tif (steeringAfterTools && steeringAfterTools.length > 0) {\n\t\t\t\tpendingMessages = steeringAfterTools;\n\t\t\t\tsteeringAfterTools = null;\n\t\t\t} else {\n\t\t\t\tpendingMessages = (await config.getSteeringMessages?.()) || [];\n\t\t\t}\n\t\t}\n\n\t\t// Agent would stop here. 
Check for follow-up messages.\n\t\tconst followUpMessages = (await config.getFollowUpMessages?.()) || [];\n\t\tif (followUpMessages.length > 0) {\n\t\t\t// Set as pending so inner loop processes them\n\t\t\tpendingMessages = followUpMessages;\n\t\t\tcontinue;\n\t\t}\n\n\t\t// No more messages, exit\n\t\tbreak;\n\t}\n\n\tstream.push({ type: \"agent_end\", messages: newMessages });\n\tstream.end(newMessages);\n}\n\n/**\n * Stream an assistant response from the LLM.\n * This is where AgentMessage[] gets transformed to Message[] for the LLM.\n */\nasync function streamAssistantResponse(\n\tcontext: AgentContext,\n\tconfig: AgentLoopConfig,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tstreamFn?: StreamFn,\n): Promise<AssistantMessage> {\n\t// Apply context transform if configured (AgentMessage[] → AgentMessage[])\n\tlet messages = context.messages;\n\tif (config.transformContext) {\n\t\tmessages = await config.transformContext(messages, signal);\n\t}\n\n\t// Convert to LLM-compatible messages (AgentMessage[] → Message[])\n\tconst llmMessages = await config.convertToLlm(messages);\n\n\t// Build LLM context\n\tconst llmContext: Context = {\n\t\tsystemPrompt: context.systemPrompt,\n\t\tmessages: llmMessages,\n\t\ttools: context.tools,\n\t};\n\n\tconst streamFunction = streamFn || streamSimple;\n\n\t// Resolve API key (important for expiring tokens)\n\tconst resolvedApiKey =\n\t\t(config.getApiKey ? await config.getApiKey(config.model.provider) : undefined) || config.apiKey;\n\n\tconst response = await streamFunction(config.model, llmContext, {\n\t\t...config,\n\t\tapiKey: resolvedApiKey,\n\t\tsignal,\n\t});\n\n\tlet partialMessage: AssistantMessage | null = null;\n\tlet addedPartial = false;\n\n\tfor await (const event of response) {\n\t\tswitch (event.type) {\n\t\t\tcase \"start\":\n\t\t\t\tpartialMessage = event.partial;\n\t\t\t\tcontext.messages.push(partialMessage);\n\t\t\t\taddedPartial = true;\n\t\t\t\tstream.push({ type: \"message_start\", message: { ...partialMessage } });\n\t\t\t\tbreak;\n\n\t\t\tcase \"text_start\":\n\t\t\tcase \"text_delta\":\n\t\t\tcase \"text_end\":\n\t\t\tcase \"thinking_start\":\n\t\t\tcase \"thinking_delta\":\n\t\t\tcase \"thinking_end\":\n\t\t\tcase \"toolcall_start\":\n\t\t\tcase \"toolcall_delta\":\n\t\t\tcase \"toolcall_end\":\n\t\t\t\tif (partialMessage) {\n\t\t\t\t\tpartialMessage = event.partial;\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = partialMessage;\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"message_update\",\n\t\t\t\t\t\tassistantMessageEvent: event,\n\t\t\t\t\t\tmessage: { ...partialMessage },\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase \"done\":\n\t\t\tcase \"error\": {\n\t\t\t\tconst finalMessage = await response.result();\n\t\t\t\tif (addedPartial) {\n\t\t\t\t\tcontext.messages[context.messages.length - 1] = finalMessage;\n\t\t\t\t} else {\n\t\t\t\t\tcontext.messages.push(finalMessage);\n\t\t\t\t}\n\t\t\t\tif (!addedPartial) {\n\t\t\t\t\tstream.push({ type: \"message_start\", message: { ...finalMessage } });\n\t\t\t\t}\n\t\t\t\tstream.push({ type: \"message_end\", message: finalMessage });\n\t\t\t\treturn finalMessage;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn await response.result();\n}\n\n/**\n * Execute tool calls from an assistant message.\n */\nasync function executeToolCalls(\n\ttools: AgentTool<any>[] | undefined,\n\tassistantMessage: AssistantMessage,\n\tsignal: AbortSignal | undefined,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n\tgetSteeringMessages?: 
AgentLoopConfig[\"getSteeringMessages\"],\n): Promise<{ toolResults: ToolResultMessage[]; steeringMessages?: AgentMessage[] }> {\n\tconst toolCalls = assistantMessage.content.filter((c) => c.type === \"toolCall\");\n\tconst results: ToolResultMessage[] = [];\n\tlet steeringMessages: AgentMessage[] | undefined;\n\n\tfor (let index = 0; index < toolCalls.length; index++) {\n\t\tconst toolCall = toolCalls[index];\n\t\tconst tool = tools?.find((t) => t.name === toolCall.name);\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_start\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\targs: toolCall.arguments,\n\t\t});\n\n\t\tlet result: AgentToolResult<any>;\n\t\tlet isError = false;\n\n\t\ttry {\n\t\t\tif (!tool) throw new Error(`Tool ${toolCall.name} not found`);\n\n\t\t\tconst validatedArgs = validateToolArguments(tool, toolCall);\n\n\t\t\tresult = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => {\n\t\t\t\tstream.push({\n\t\t\t\t\ttype: \"tool_execution_update\",\n\t\t\t\t\ttoolCallId: toolCall.id,\n\t\t\t\t\ttoolName: toolCall.name,\n\t\t\t\t\targs: toolCall.arguments,\n\t\t\t\t\tpartialResult,\n\t\t\t\t});\n\t\t\t});\n\t\t} catch (e) {\n\t\t\tresult = {\n\t\t\t\tcontent: [{ type: \"text\", text: e instanceof Error ? e.message : String(e) }],\n\t\t\t\tdetails: {},\n\t\t\t};\n\t\t\tisError = true;\n\t\t}\n\n\t\tstream.push({\n\t\t\ttype: \"tool_execution_end\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tresult,\n\t\t\tisError,\n\t\t});\n\n\t\tconst toolResultMessage: ToolResultMessage = {\n\t\t\trole: \"toolResult\",\n\t\t\ttoolCallId: toolCall.id,\n\t\t\ttoolName: toolCall.name,\n\t\t\tcontent: result.content,\n\t\t\tdetails: result.details,\n\t\t\tisError,\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\tresults.push(toolResultMessage);\n\t\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\t\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\t\t// Check for steering messages - skip remaining tools if user interrupted\n\t\tif (getSteeringMessages) {\n\t\t\tconst steering = await getSteeringMessages();\n\t\t\tif (steering.length > 0) {\n\t\t\t\tsteeringMessages = steering;\n\t\t\t\tconst remainingCalls = toolCalls.slice(index + 1);\n\t\t\t\tfor (const skipped of remainingCalls) {\n\t\t\t\t\tresults.push(skipToolCall(skipped, stream));\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn { toolResults: results, steeringMessages };\n}\n\nfunction skipToolCall(\n\ttoolCall: Extract<AssistantMessage[\"content\"][number], { type: \"toolCall\" }>,\n\tstream: EventStream<AgentEvent, AgentMessage[]>,\n): ToolResultMessage {\n\tconst result: AgentToolResult<any> = {\n\t\tcontent: [{ type: \"text\", text: \"Skipped due to queued user message.\" }],\n\t\tdetails: {},\n\t};\n\n\tstream.push({\n\t\ttype: \"tool_execution_start\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\targs: toolCall.arguments,\n\t});\n\tstream.push({\n\t\ttype: \"tool_execution_end\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tresult,\n\t\tisError: true,\n\t});\n\n\tconst toolResultMessage: ToolResultMessage = {\n\t\trole: \"toolResult\",\n\t\ttoolCallId: toolCall.id,\n\t\ttoolName: toolCall.name,\n\t\tcontent: result.content,\n\t\tdetails: {},\n\t\tisError: true,\n\t\ttimestamp: Date.now(),\n\t};\n\n\tstream.push({ type: \"message_start\", message: toolResultMessage });\n\tstream.push({ type: \"message_end\", message: toolResultMessage });\n\n\treturn 
toolResultMessage;\n}\n"]}
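The sourcesContent embedded above shows the reworked loop: runLoop now nests an inner loop (assistant turns, tool calls, steering messages) inside an outer loop that only consults getFollowUpMessages once the agent would otherwise stop, and executeToolCalls skips any remaining tool calls as soon as steering arrives. A minimal sketch of driving agentLoop directly with both hooks; the root-level re-exports, the queue arrays, and the prompt literal are illustrative assumptions, not part of this diff:

```typescript
import { getModel } from "@mariozechner/pi-ai";
// Assumes the package re-exports these from its root index.
import { agentLoop } from "@mariozechner/pi-agent-core";
import type { AgentContext, AgentLoopConfig, AgentMessage } from "@mariozechner/pi-agent-core";

// Illustrative stand-ins for the Agent class's steeringQueue/followUpQueue.
const steering: AgentMessage[] = [];
const followUps: AgentMessage[] = [];

const context: AgentContext = { systemPrompt: "You are a helpful agent.", messages: [], tools: [] };

const config: AgentLoopConfig = {
	model: getModel("google", "gemini-2.5-flash-lite-preview-06-17"),
	// Same filter as the package's defaultConvertToLlm.
	convertToLlm: (messages) =>
		messages.filter((m) => m.role === "user" || m.role === "assistant" || m.role === "toolResult"),
	// Polled between turns and after each tool call; a non-empty result skips remaining tools.
	getSteeringMessages: async () => steering.splice(0, steering.length),
	// Polled only when the agent has no tool calls or steering messages left.
	getFollowUpMessages: async () => followUps.splice(0, followUps.length),
};

const prompt: AgentMessage = { role: "user", content: [{ type: "text", text: "Hello" }], timestamp: Date.now() };
const stream = agentLoop([prompt], context, config);
for await (const event of stream) {
	if (event.type === "message_end") console.log(event.message.role);
}
```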
package/dist/agent.d.ts CHANGED
@@ -17,9 +17,13 @@ export interface AgentOptions {
      */
     transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;
     /**
-     * Queue mode: "all" = send all queued messages at once, "one-at-a-time" = one per turn
+     * Steering mode: "all" = send all steering messages at once, "one-at-a-time" = one per turn
      */
-    queueMode?: "all" | "one-at-a-time";
+    steeringMode?: "all" | "one-at-a-time";
+    /**
+     * Follow-up mode: "all" = send all follow-up messages at once, "one-at-a-time" = one per turn
+     */
+    followUpMode?: "all" | "one-at-a-time";
     /**
      * Custom stream function (for proxy backends, etc.). Default uses streamSimple.
      */
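queueMode is gone; the two replacement options are independent and, per the constructor change in dist/agent.js below, both default to "one-at-a-time". A construction sketch (the root-level Agent export is assumed):

```typescript
import { Agent } from "@mariozechner/pi-agent-core";

const agent = new Agent({
	steeringMode: "all",           // deliver every pending steering message before the next assistant turn
	followUpMode: "one-at-a-time", // hand follow-ups to the loop one per check
});
```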
@@ -36,8 +40,10 @@ export declare class Agent {
     private abortController?;
     private convertToLlm;
     private transformContext?;
-    private messageQueue;
-    private queueMode;
+    private steeringQueue;
+    private followUpQueue;
+    private steeringMode;
+    private followUpMode;
     streamFn: StreamFn;
     getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
     private runningPrompt?;
@@ -48,13 +54,26 @@ export declare class Agent {
     setSystemPrompt(v: string): void;
     setModel(m: Model<any>): void;
     setThinkingLevel(l: ThinkingLevel): void;
-    setQueueMode(mode: "all" | "one-at-a-time"): void;
-    getQueueMode(): "all" | "one-at-a-time";
+    setSteeringMode(mode: "all" | "one-at-a-time"): void;
+    getSteeringMode(): "all" | "one-at-a-time";
+    setFollowUpMode(mode: "all" | "one-at-a-time"): void;
+    getFollowUpMode(): "all" | "one-at-a-time";
     setTools(t: AgentTool<any>[]): void;
     replaceMessages(ms: AgentMessage[]): void;
     appendMessage(m: AgentMessage): void;
-    queueMessage(m: AgentMessage): void;
-    clearMessageQueue(): void;
+    /**
+     * Queue a steering message to interrupt the agent mid-run.
+     * Delivered after current tool execution, skips remaining tools.
+     */
+    steer(m: AgentMessage): void;
+    /**
+     * Queue a follow-up message to be processed after the agent finishes.
+     * Delivered only when agent has no more tool calls or steering messages.
+     */
+    followUp(m: AgentMessage): void;
+    clearSteeringQueue(): void;
+    clearFollowUpQueue(): void;
+    clearAllQueues(): void;
     clearMessages(): void;
     abort(): void;
     waitForIdle(): Promise<void>;
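queueMessage()/clearMessageQueue() are replaced by two queues with different delivery points: steer() interrupts a running agent, followUp() waits until it is done. A usage sketch (the message texts are illustrative):

```typescript
const run = agent.prompt("Refactor the parser and run the tests.");

// Delivered after the currently executing tool finishes; remaining tool calls
// from that assistant message are skipped.
agent.steer({ role: "user", content: [{ type: "text", text: "Skip the tests for now." }], timestamp: Date.now() });

// Delivered only once the agent has no more tool calls or steering messages.
agent.followUp({ role: "user", content: [{ type: "text", text: "Summarize what changed." }], timestamp: Date.now() });

await run; // or: await agent.waitForIdle();
agent.clearAllQueues(); // drop anything still queued
```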
@@ -1 +1 @@
- {"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../src/agent.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAEN,KAAK,YAAY,EACjB,KAAK,OAAO,EACZ,KAAK,KAAK,EAIV,MAAM,qBAAqB,CAAC;AAE7B,OAAO,KAAK,EAEX,UAAU,EAEV,YAAY,EACZ,UAAU,EACV,SAAS,EACT,QAAQ,EACR,aAAa,EACb,MAAM,YAAY,CAAC;AASpB,MAAM,WAAW,YAAY;IAC5B,YAAY,CAAC,EAAE,OAAO,CAAC,UAAU,CAAC,CAAC;IAEnC;;;OAGG;IACH,YAAY,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,KAAK,OAAO,EAAE,GAAG,OAAO,CAAC,OAAO,EAAE,CAAC,CAAC;IAE5E;;;OAGG;IACH,gBAAgB,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,EAAE,MAAM,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;IAE/F;;OAEG;IACH,SAAS,CAAC,EAAE,KAAK,GAAG,eAAe,CAAC;IAEpC;;OAEG;IACH,QAAQ,CAAC,EAAE,QAAQ,CAAC;IAEpB;;;OAGG;IACH,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;CACnF;AAED,qBAAa,KAAK;IACjB,OAAO,CAAC,MAAM,CAUZ;IAEF,OAAO,CAAC,SAAS,CAAsC;IACvD,OAAO,CAAC,eAAe,CAAC,CAAkB;IAC1C,OAAO,CAAC,YAAY,CAA+D;IACnF,OAAO,CAAC,gBAAgB,CAAC,CAA8E;IACvG,OAAO,CAAC,YAAY,CAAsB;IAC1C,OAAO,CAAC,SAAS,CAA0B;IACpC,QAAQ,EAAE,QAAQ,CAAC;IACnB,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;IAC1F,OAAO,CAAC,aAAa,CAAC,CAAgB;IACtC,OAAO,CAAC,oBAAoB,CAAC,CAAa;IAE1C,YAAY,IAAI,GAAE,YAAiB,EAOlC;IAED,IAAI,KAAK,IAAI,UAAU,CAEtB;IAED,SAAS,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,UAAU,KAAK,IAAI,GAAG,MAAM,IAAI,CAGjD;IAGD,eAAe,CAAC,CAAC,EAAE,MAAM,QAExB;IAED,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,GAAG,CAAC,QAErB;IAED,gBAAgB,CAAC,CAAC,EAAE,aAAa,QAEhC;IAED,YAAY,CAAC,IAAI,EAAE,KAAK,GAAG,eAAe,QAEzC;IAED,YAAY,IAAI,KAAK,GAAG,eAAe,CAEtC;IAED,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,GAAG,CAAC,EAAE,QAE3B;IAED,eAAe,CAAC,EAAE,EAAE,YAAY,EAAE,QAEjC;IAED,aAAa,CAAC,CAAC,EAAE,YAAY,QAE5B;IAED,YAAY,CAAC,CAAC,EAAE,YAAY,QAE3B;IAED,iBAAiB,SAEhB;IAED,aAAa,SAEZ;IAED,KAAK,SAEJ;IAED,WAAW,IAAI,OAAO,CAAC,IAAI,CAAC,CAE3B;IAED,KAAK,SAOJ;IAED,yCAAyC;IACnC,MAAM,CAAC,OAAO,EAAE,YAAY,GAAG,YAAY,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC9D,MAAM,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,YAAY,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IA4BpE,+DAA+D;IACzD,QAAQ,kBAUb;YAOa,QAAQ;IA0JtB,OAAO,CAAC,IAAI;CAKZ","sourcesContent":["/**\n * Agent class that uses the agent-loop directly.\n * No transport abstraction - calls streamSimple via the loop.\n */\n\nimport {\n\tgetModel,\n\ttype ImageContent,\n\ttype Message,\n\ttype Model,\n\ttype ReasoningEffort,\n\tstreamSimple,\n\ttype TextContent,\n} from \"@mariozechner/pi-ai\";\nimport { agentLoop, agentLoopContinue } from \"./agent-loop.js\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentState,\n\tAgentTool,\n\tStreamFn,\n\tThinkingLevel,\n} from \"./types.js\";\n\n/**\n * Default convertToLlm: Keep only LLM-compatible messages, convert attachments.\n */\nfunction defaultConvertToLlm(messages: AgentMessage[]): Message[] {\n\treturn messages.filter((m) => m.role === \"user\" || m.role === \"assistant\" || m.role === \"toolResult\");\n}\n\nexport interface AgentOptions {\n\tinitialState?: Partial<AgentState>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t * Default filters to user/assistant/toolResult and converts attachments.\n\t */\n\tconvertToLlm?: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to context before convertToLlm.\n\t * Use for context pruning, injecting external context, etc.\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Queue mode: \"all\" = send all queued messages at once, 
\"one-at-a-time\" = one per turn\n\t */\n\tqueueMode?: \"all\" | \"one-at-a-time\";\n\n\t/**\n\t * Custom stream function (for proxy backends, etc.). Default uses streamSimple.\n\t */\n\tstreamFn?: StreamFn;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t * Useful for expiring tokens (e.g., GitHub Copilot OAuth).\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n}\n\nexport class Agent {\n\tprivate _state: AgentState = {\n\t\tsystemPrompt: \"\",\n\t\tmodel: getModel(\"google\", \"gemini-2.5-flash-lite-preview-06-17\"),\n\t\tthinkingLevel: \"off\",\n\t\ttools: [],\n\t\tmessages: [],\n\t\tisStreaming: false,\n\t\tstreamMessage: null,\n\t\tpendingToolCalls: new Set<string>(),\n\t\terror: undefined,\n\t};\n\n\tprivate listeners = new Set<(e: AgentEvent) => void>();\n\tprivate abortController?: AbortController;\n\tprivate convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\tprivate transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\tprivate messageQueue: AgentMessage[] = [];\n\tprivate queueMode: \"all\" | \"one-at-a-time\";\n\tpublic streamFn: StreamFn;\n\tpublic getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\tprivate runningPrompt?: Promise<void>;\n\tprivate resolveRunningPrompt?: () => void;\n\n\tconstructor(opts: AgentOptions = {}) {\n\t\tthis._state = { ...this._state, ...opts.initialState };\n\t\tthis.convertToLlm = opts.convertToLlm || defaultConvertToLlm;\n\t\tthis.transformContext = opts.transformContext;\n\t\tthis.queueMode = opts.queueMode || \"one-at-a-time\";\n\t\tthis.streamFn = opts.streamFn || streamSimple;\n\t\tthis.getApiKey = opts.getApiKey;\n\t}\n\n\tget state(): AgentState {\n\t\treturn this._state;\n\t}\n\n\tsubscribe(fn: (e: AgentEvent) => void): () => void {\n\t\tthis.listeners.add(fn);\n\t\treturn () => this.listeners.delete(fn);\n\t}\n\n\t// State mutators\n\tsetSystemPrompt(v: string) {\n\t\tthis._state.systemPrompt = v;\n\t}\n\n\tsetModel(m: Model<any>) {\n\t\tthis._state.model = m;\n\t}\n\n\tsetThinkingLevel(l: ThinkingLevel) {\n\t\tthis._state.thinkingLevel = l;\n\t}\n\n\tsetQueueMode(mode: \"all\" | \"one-at-a-time\") {\n\t\tthis.queueMode = mode;\n\t}\n\n\tgetQueueMode(): \"all\" | \"one-at-a-time\" {\n\t\treturn this.queueMode;\n\t}\n\n\tsetTools(t: AgentTool<any>[]) {\n\t\tthis._state.tools = t;\n\t}\n\n\treplaceMessages(ms: AgentMessage[]) {\n\t\tthis._state.messages = ms.slice();\n\t}\n\n\tappendMessage(m: AgentMessage) {\n\t\tthis._state.messages = [...this._state.messages, m];\n\t}\n\n\tqueueMessage(m: AgentMessage) {\n\t\tthis.messageQueue.push(m);\n\t}\n\n\tclearMessageQueue() {\n\t\tthis.messageQueue = [];\n\t}\n\n\tclearMessages() {\n\t\tthis._state.messages = [];\n\t}\n\n\tabort() {\n\t\tthis.abortController?.abort();\n\t}\n\n\twaitForIdle(): Promise<void> {\n\t\treturn this.runningPrompt ?? 
Promise.resolve();\n\t}\n\n\treset() {\n\t\tthis._state.messages = [];\n\t\tthis._state.isStreaming = false;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\tthis._state.error = undefined;\n\t\tthis.messageQueue = [];\n\t}\n\n\t/** Send a prompt with an AgentMessage */\n\tasync prompt(message: AgentMessage | AgentMessage[]): Promise<void>;\n\tasync prompt(input: string, images?: ImageContent[]): Promise<void>;\n\tasync prompt(input: string | AgentMessage | AgentMessage[], images?: ImageContent[]) {\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tlet msgs: AgentMessage[];\n\n\t\tif (Array.isArray(input)) {\n\t\t\tmsgs = input;\n\t\t} else if (typeof input === \"string\") {\n\t\t\tconst content: Array<TextContent | ImageContent> = [{ type: \"text\", text: input }];\n\t\t\tif (images && images.length > 0) {\n\t\t\t\tcontent.push(...images);\n\t\t\t}\n\t\t\tmsgs = [\n\t\t\t\t{\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent,\n\t\t\t\t\ttimestamp: Date.now(),\n\t\t\t\t},\n\t\t\t];\n\t\t} else {\n\t\t\tmsgs = [input];\n\t\t}\n\n\t\tawait this._runLoop(msgs);\n\t}\n\n\t/** Continue from current context (for retry after overflow) */\n\tasync continue() {\n\t\tconst messages = this._state.messages;\n\t\tif (messages.length === 0) {\n\t\t\tthrow new Error(\"No messages to continue from\");\n\t\t}\n\t\tif (messages[messages.length - 1].role === \"assistant\") {\n\t\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t\t}\n\n\t\tawait this._runLoop(undefined);\n\t}\n\n\t/**\n\t * Run the agent loop.\n\t * If messages are provided, starts a new conversation turn with those messages.\n\t * Otherwise, continues from existing context.\n\t */\n\tprivate async _runLoop(messages?: AgentMessage[]) {\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tthis.runningPrompt = new Promise<void>((resolve) => {\n\t\t\tthis.resolveRunningPrompt = resolve;\n\t\t});\n\n\t\tthis.abortController = new AbortController();\n\t\tthis._state.isStreaming = true;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.error = undefined;\n\n\t\tconst reasoning: ReasoningEffort | undefined =\n\t\t\tthis._state.thinkingLevel === \"off\"\n\t\t\t\t? undefined\n\t\t\t\t: this._state.thinkingLevel === \"minimal\"\n\t\t\t\t\t? \"low\"\n\t\t\t\t\t: (this._state.thinkingLevel as ReasoningEffort);\n\n\t\tconst context: AgentContext = {\n\t\t\tsystemPrompt: this._state.systemPrompt,\n\t\t\tmessages: this._state.messages.slice(),\n\t\t\ttools: this._state.tools,\n\t\t};\n\n\t\tconst config: AgentLoopConfig = {\n\t\t\tmodel,\n\t\t\treasoning,\n\t\t\tconvertToLlm: this.convertToLlm,\n\t\t\ttransformContext: this.transformContext,\n\t\t\tgetApiKey: this.getApiKey,\n\t\t\tgetQueuedMessages: async () => {\n\t\t\t\tif (this.queueMode === \"one-at-a-time\") {\n\t\t\t\t\tif (this.messageQueue.length > 0) {\n\t\t\t\t\t\tconst first = this.messageQueue[0];\n\t\t\t\t\t\tthis.messageQueue = this.messageQueue.slice(1);\n\t\t\t\t\t\treturn [first];\n\t\t\t\t\t}\n\t\t\t\t\treturn [];\n\t\t\t\t} else {\n\t\t\t\t\tconst queued = this.messageQueue.slice();\n\t\t\t\t\tthis.messageQueue = [];\n\t\t\t\t\treturn queued;\n\t\t\t\t}\n\t\t\t},\n\t\t};\n\n\t\tlet partial: AgentMessage | null = null;\n\n\t\ttry {\n\t\t\tconst stream = messages\n\t\t\t\t? 
agentLoop(messages, context, config, this.abortController.signal, this.streamFn)\n\t\t\t\t: agentLoopContinue(context, config, this.abortController.signal, this.streamFn);\n\n\t\t\tfor await (const event of stream) {\n\t\t\t\t// Update internal state based on events\n\t\t\t\tswitch (event.type) {\n\t\t\t\t\tcase \"message_start\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_update\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_end\":\n\t\t\t\t\t\tpartial = null;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tthis.appendMessage(event.message);\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"tool_execution_start\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.add(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"tool_execution_end\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.delete(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"turn_end\":\n\t\t\t\t\t\tif (event.message.role === \"assistant\" && (event.message as any).errorMessage) {\n\t\t\t\t\t\t\tthis._state.error = (event.message as any).errorMessage;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"agent_end\":\n\t\t\t\t\t\tthis._state.isStreaming = false;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\t// Emit to listeners\n\t\t\t\tthis.emit(event);\n\t\t\t}\n\n\t\t\t// Handle any remaining partial message\n\t\t\tif (partial && partial.role === \"assistant\" && partial.content.length > 0) {\n\t\t\t\tconst onlyEmpty = !partial.content.some(\n\t\t\t\t\t(c) =>\n\t\t\t\t\t\t(c.type === \"thinking\" && c.thinking.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"text\" && c.text.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"toolCall\" && c.name.trim().length > 0),\n\t\t\t\t);\n\t\t\t\tif (!onlyEmpty) {\n\t\t\t\t\tthis.appendMessage(partial);\n\t\t\t\t} else {\n\t\t\t\t\tif (this.abortController?.signal.aborted) {\n\t\t\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (err: any) {\n\t\t\tconst errorMsg: AgentMessage = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: [{ type: \"text\", text: \"\" }],\n\t\t\t\tapi: model.api,\n\t\t\t\tprovider: model.provider,\n\t\t\t\tmodel: model.id,\n\t\t\t\tusage: {\n\t\t\t\t\tinput: 0,\n\t\t\t\t\toutput: 0,\n\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\ttotalTokens: 0,\n\t\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t\t},\n\t\t\t\tstopReason: this.abortController?.signal.aborted ? 
\"aborted\" : \"error\",\n\t\t\t\terrorMessage: err?.message || String(err),\n\t\t\t\ttimestamp: Date.now(),\n\t\t\t} as AgentMessage;\n\n\t\t\tthis.appendMessage(errorMsg);\n\t\t\tthis._state.error = err?.message || String(err);\n\t\t\tthis.emit({ type: \"agent_end\", messages: [errorMsg] });\n\t\t} finally {\n\t\t\tthis._state.isStreaming = false;\n\t\t\tthis._state.streamMessage = null;\n\t\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\t\tthis.abortController = undefined;\n\t\t\tthis.resolveRunningPrompt?.();\n\t\t\tthis.runningPrompt = undefined;\n\t\t\tthis.resolveRunningPrompt = undefined;\n\t\t}\n\t}\n\n\tprivate emit(e: AgentEvent) {\n\t\tfor (const listener of this.listeners) {\n\t\t\tlistener(e);\n\t\t}\n\t}\n}\n"]}
+ {"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../src/agent.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAEN,KAAK,YAAY,EACjB,KAAK,OAAO,EACZ,KAAK,KAAK,EAIV,MAAM,qBAAqB,CAAC;AAE7B,OAAO,KAAK,EAEX,UAAU,EAEV,YAAY,EACZ,UAAU,EACV,SAAS,EACT,QAAQ,EACR,aAAa,EACb,MAAM,YAAY,CAAC;AASpB,MAAM,WAAW,YAAY;IAC5B,YAAY,CAAC,EAAE,OAAO,CAAC,UAAU,CAAC,CAAC;IAEnC;;;OAGG;IACH,YAAY,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,KAAK,OAAO,EAAE,GAAG,OAAO,CAAC,OAAO,EAAE,CAAC,CAAC;IAE5E;;;OAGG;IACH,gBAAgB,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,EAAE,MAAM,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;IAE/F;;OAEG;IACH,YAAY,CAAC,EAAE,KAAK,GAAG,eAAe,CAAC;IAEvC;;OAEG;IACH,YAAY,CAAC,EAAE,KAAK,GAAG,eAAe,CAAC;IAEvC;;OAEG;IACH,QAAQ,CAAC,EAAE,QAAQ,CAAC;IAEpB;;;OAGG;IACH,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;CACnF;AAED,qBAAa,KAAK;IACjB,OAAO,CAAC,MAAM,CAUZ;IAEF,OAAO,CAAC,SAAS,CAAsC;IACvD,OAAO,CAAC,eAAe,CAAC,CAAkB;IAC1C,OAAO,CAAC,YAAY,CAA+D;IACnF,OAAO,CAAC,gBAAgB,CAAC,CAA8E;IACvG,OAAO,CAAC,aAAa,CAAsB;IAC3C,OAAO,CAAC,aAAa,CAAsB;IAC3C,OAAO,CAAC,YAAY,CAA0B;IAC9C,OAAO,CAAC,YAAY,CAA0B;IACvC,QAAQ,EAAE,QAAQ,CAAC;IACnB,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;IAC1F,OAAO,CAAC,aAAa,CAAC,CAAgB;IACtC,OAAO,CAAC,oBAAoB,CAAC,CAAa;IAE1C,YAAY,IAAI,GAAE,YAAiB,EAQlC;IAED,IAAI,KAAK,IAAI,UAAU,CAEtB;IAED,SAAS,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,UAAU,KAAK,IAAI,GAAG,MAAM,IAAI,CAGjD;IAGD,eAAe,CAAC,CAAC,EAAE,MAAM,QAExB;IAED,QAAQ,CAAC,CAAC,EAAE,KAAK,CAAC,GAAG,CAAC,QAErB;IAED,gBAAgB,CAAC,CAAC,EAAE,aAAa,QAEhC;IAED,eAAe,CAAC,IAAI,EAAE,KAAK,GAAG,eAAe,QAE5C;IAED,eAAe,IAAI,KAAK,GAAG,eAAe,CAEzC;IAED,eAAe,CAAC,IAAI,EAAE,KAAK,GAAG,eAAe,QAE5C;IAED,eAAe,IAAI,KAAK,GAAG,eAAe,CAEzC;IAED,QAAQ,CAAC,CAAC,EAAE,SAAS,CAAC,GAAG,CAAC,EAAE,QAE3B;IAED,eAAe,CAAC,EAAE,EAAE,YAAY,EAAE,QAEjC;IAED,aAAa,CAAC,CAAC,EAAE,YAAY,QAE5B;IAED;;;OAGG;IACH,KAAK,CAAC,CAAC,EAAE,YAAY,QAEpB;IAED;;;OAGG;IACH,QAAQ,CAAC,CAAC,EAAE,YAAY,QAEvB;IAED,kBAAkB,SAEjB;IAED,kBAAkB,SAEjB;IAED,cAAc,SAGb;IAED,aAAa,SAEZ;IAED,KAAK,SAEJ;IAED,WAAW,IAAI,OAAO,CAAC,IAAI,CAAC,CAE3B;IAED,KAAK,SAQJ;IAED,yCAAyC;IACnC,MAAM,CAAC,OAAO,EAAE,YAAY,GAAG,YAAY,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC9D,MAAM,CAAC,KAAK,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,YAAY,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAkCpE,+DAA+D;IACzD,QAAQ,kBAcb;YAOa,QAAQ;IAwKtB,OAAO,CAAC,IAAI;CAKZ","sourcesContent":["/**\n * Agent class that uses the agent-loop directly.\n * No transport abstraction - calls streamSimple via the loop.\n */\n\nimport {\n\tgetModel,\n\ttype ImageContent,\n\ttype Message,\n\ttype Model,\n\ttype ReasoningEffort,\n\tstreamSimple,\n\ttype TextContent,\n} from \"@mariozechner/pi-ai\";\nimport { agentLoop, agentLoopContinue } from \"./agent-loop.js\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentState,\n\tAgentTool,\n\tStreamFn,\n\tThinkingLevel,\n} from \"./types.js\";\n\n/**\n * Default convertToLlm: Keep only LLM-compatible messages, convert attachments.\n */\nfunction defaultConvertToLlm(messages: AgentMessage[]): Message[] {\n\treturn messages.filter((m) => m.role === \"user\" || m.role === \"assistant\" || m.role === \"toolResult\");\n}\n\nexport interface AgentOptions {\n\tinitialState?: Partial<AgentState>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t * Default filters to user/assistant/toolResult and converts attachments.\n\t */\n\tconvertToLlm?: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied 
to context before convertToLlm.\n\t * Use for context pruning, injecting external context, etc.\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Steering mode: \"all\" = send all steering messages at once, \"one-at-a-time\" = one per turn\n\t */\n\tsteeringMode?: \"all\" | \"one-at-a-time\";\n\n\t/**\n\t * Follow-up mode: \"all\" = send all follow-up messages at once, \"one-at-a-time\" = one per turn\n\t */\n\tfollowUpMode?: \"all\" | \"one-at-a-time\";\n\n\t/**\n\t * Custom stream function (for proxy backends, etc.). Default uses streamSimple.\n\t */\n\tstreamFn?: StreamFn;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t * Useful for expiring tokens (e.g., GitHub Copilot OAuth).\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n}\n\nexport class Agent {\n\tprivate _state: AgentState = {\n\t\tsystemPrompt: \"\",\n\t\tmodel: getModel(\"google\", \"gemini-2.5-flash-lite-preview-06-17\"),\n\t\tthinkingLevel: \"off\",\n\t\ttools: [],\n\t\tmessages: [],\n\t\tisStreaming: false,\n\t\tstreamMessage: null,\n\t\tpendingToolCalls: new Set<string>(),\n\t\terror: undefined,\n\t};\n\n\tprivate listeners = new Set<(e: AgentEvent) => void>();\n\tprivate abortController?: AbortController;\n\tprivate convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\tprivate transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\tprivate steeringQueue: AgentMessage[] = [];\n\tprivate followUpQueue: AgentMessage[] = [];\n\tprivate steeringMode: \"all\" | \"one-at-a-time\";\n\tprivate followUpMode: \"all\" | \"one-at-a-time\";\n\tpublic streamFn: StreamFn;\n\tpublic getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\tprivate runningPrompt?: Promise<void>;\n\tprivate resolveRunningPrompt?: () => void;\n\n\tconstructor(opts: AgentOptions = {}) {\n\t\tthis._state = { ...this._state, ...opts.initialState };\n\t\tthis.convertToLlm = opts.convertToLlm || defaultConvertToLlm;\n\t\tthis.transformContext = opts.transformContext;\n\t\tthis.steeringMode = opts.steeringMode || \"one-at-a-time\";\n\t\tthis.followUpMode = opts.followUpMode || \"one-at-a-time\";\n\t\tthis.streamFn = opts.streamFn || streamSimple;\n\t\tthis.getApiKey = opts.getApiKey;\n\t}\n\n\tget state(): AgentState {\n\t\treturn this._state;\n\t}\n\n\tsubscribe(fn: (e: AgentEvent) => void): () => void {\n\t\tthis.listeners.add(fn);\n\t\treturn () => this.listeners.delete(fn);\n\t}\n\n\t// State mutators\n\tsetSystemPrompt(v: string) {\n\t\tthis._state.systemPrompt = v;\n\t}\n\n\tsetModel(m: Model<any>) {\n\t\tthis._state.model = m;\n\t}\n\n\tsetThinkingLevel(l: ThinkingLevel) {\n\t\tthis._state.thinkingLevel = l;\n\t}\n\n\tsetSteeringMode(mode: \"all\" | \"one-at-a-time\") {\n\t\tthis.steeringMode = mode;\n\t}\n\n\tgetSteeringMode(): \"all\" | \"one-at-a-time\" {\n\t\treturn this.steeringMode;\n\t}\n\n\tsetFollowUpMode(mode: \"all\" | \"one-at-a-time\") {\n\t\tthis.followUpMode = mode;\n\t}\n\n\tgetFollowUpMode(): \"all\" | \"one-at-a-time\" {\n\t\treturn this.followUpMode;\n\t}\n\n\tsetTools(t: AgentTool<any>[]) {\n\t\tthis._state.tools = t;\n\t}\n\n\treplaceMessages(ms: AgentMessage[]) {\n\t\tthis._state.messages = ms.slice();\n\t}\n\n\tappendMessage(m: AgentMessage) {\n\t\tthis._state.messages = [...this._state.messages, m];\n\t}\n\n\t/**\n\t * Queue a steering message to interrupt the agent mid-run.\n\t * Delivered after 
current tool execution, skips remaining tools.\n\t */\n\tsteer(m: AgentMessage) {\n\t\tthis.steeringQueue.push(m);\n\t}\n\n\t/**\n\t * Queue a follow-up message to be processed after the agent finishes.\n\t * Delivered only when agent has no more tool calls or steering messages.\n\t */\n\tfollowUp(m: AgentMessage) {\n\t\tthis.followUpQueue.push(m);\n\t}\n\n\tclearSteeringQueue() {\n\t\tthis.steeringQueue = [];\n\t}\n\n\tclearFollowUpQueue() {\n\t\tthis.followUpQueue = [];\n\t}\n\n\tclearAllQueues() {\n\t\tthis.steeringQueue = [];\n\t\tthis.followUpQueue = [];\n\t}\n\n\tclearMessages() {\n\t\tthis._state.messages = [];\n\t}\n\n\tabort() {\n\t\tthis.abortController?.abort();\n\t}\n\n\twaitForIdle(): Promise<void> {\n\t\treturn this.runningPrompt ?? Promise.resolve();\n\t}\n\n\treset() {\n\t\tthis._state.messages = [];\n\t\tthis._state.isStreaming = false;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\tthis._state.error = undefined;\n\t\tthis.steeringQueue = [];\n\t\tthis.followUpQueue = [];\n\t}\n\n\t/** Send a prompt with an AgentMessage */\n\tasync prompt(message: AgentMessage | AgentMessage[]): Promise<void>;\n\tasync prompt(input: string, images?: ImageContent[]): Promise<void>;\n\tasync prompt(input: string | AgentMessage | AgentMessage[], images?: ImageContent[]) {\n\t\tif (this._state.isStreaming) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Agent is already processing a prompt. Use steer() or followUp() to queue messages, or wait for completion.\",\n\t\t\t);\n\t\t}\n\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tlet msgs: AgentMessage[];\n\n\t\tif (Array.isArray(input)) {\n\t\t\tmsgs = input;\n\t\t} else if (typeof input === \"string\") {\n\t\t\tconst content: Array<TextContent | ImageContent> = [{ type: \"text\", text: input }];\n\t\t\tif (images && images.length > 0) {\n\t\t\t\tcontent.push(...images);\n\t\t\t}\n\t\t\tmsgs = [\n\t\t\t\t{\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent,\n\t\t\t\t\ttimestamp: Date.now(),\n\t\t\t\t},\n\t\t\t];\n\t\t} else {\n\t\t\tmsgs = [input];\n\t\t}\n\n\t\tawait this._runLoop(msgs);\n\t}\n\n\t/** Continue from current context (for retry after overflow) */\n\tasync continue() {\n\t\tif (this._state.isStreaming) {\n\t\t\tthrow new Error(\"Agent is already processing. Wait for completion before continuing.\");\n\t\t}\n\n\t\tconst messages = this._state.messages;\n\t\tif (messages.length === 0) {\n\t\t\tthrow new Error(\"No messages to continue from\");\n\t\t}\n\t\tif (messages[messages.length - 1].role === \"assistant\") {\n\t\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t\t}\n\n\t\tawait this._runLoop(undefined);\n\t}\n\n\t/**\n\t * Run the agent loop.\n\t * If messages are provided, starts a new conversation turn with those messages.\n\t * Otherwise, continues from existing context.\n\t */\n\tprivate async _runLoop(messages?: AgentMessage[]) {\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tthis.runningPrompt = new Promise<void>((resolve) => {\n\t\t\tthis.resolveRunningPrompt = resolve;\n\t\t});\n\n\t\tthis.abortController = new AbortController();\n\t\tthis._state.isStreaming = true;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.error = undefined;\n\n\t\tconst reasoning: ReasoningEffort | undefined =\n\t\t\tthis._state.thinkingLevel === \"off\"\n\t\t\t\t? undefined\n\t\t\t\t: this._state.thinkingLevel === \"minimal\"\n\t\t\t\t\t? 
\"low\"\n\t\t\t\t\t: (this._state.thinkingLevel as ReasoningEffort);\n\n\t\tconst context: AgentContext = {\n\t\t\tsystemPrompt: this._state.systemPrompt,\n\t\t\tmessages: this._state.messages.slice(),\n\t\t\ttools: this._state.tools,\n\t\t};\n\n\t\tconst config: AgentLoopConfig = {\n\t\t\tmodel,\n\t\t\treasoning,\n\t\t\tconvertToLlm: this.convertToLlm,\n\t\t\ttransformContext: this.transformContext,\n\t\t\tgetApiKey: this.getApiKey,\n\t\t\tgetSteeringMessages: async () => {\n\t\t\t\tif (this.steeringMode === \"one-at-a-time\") {\n\t\t\t\t\tif (this.steeringQueue.length > 0) {\n\t\t\t\t\t\tconst first = this.steeringQueue[0];\n\t\t\t\t\t\tthis.steeringQueue = this.steeringQueue.slice(1);\n\t\t\t\t\t\treturn [first];\n\t\t\t\t\t}\n\t\t\t\t\treturn [];\n\t\t\t\t} else {\n\t\t\t\t\tconst steering = this.steeringQueue.slice();\n\t\t\t\t\tthis.steeringQueue = [];\n\t\t\t\t\treturn steering;\n\t\t\t\t}\n\t\t\t},\n\t\t\tgetFollowUpMessages: async () => {\n\t\t\t\tif (this.followUpMode === \"one-at-a-time\") {\n\t\t\t\t\tif (this.followUpQueue.length > 0) {\n\t\t\t\t\t\tconst first = this.followUpQueue[0];\n\t\t\t\t\t\tthis.followUpQueue = this.followUpQueue.slice(1);\n\t\t\t\t\t\treturn [first];\n\t\t\t\t\t}\n\t\t\t\t\treturn [];\n\t\t\t\t} else {\n\t\t\t\t\tconst followUp = this.followUpQueue.slice();\n\t\t\t\t\tthis.followUpQueue = [];\n\t\t\t\t\treturn followUp;\n\t\t\t\t}\n\t\t\t},\n\t\t};\n\n\t\tlet partial: AgentMessage | null = null;\n\n\t\ttry {\n\t\t\tconst stream = messages\n\t\t\t\t? agentLoop(messages, context, config, this.abortController.signal, this.streamFn)\n\t\t\t\t: agentLoopContinue(context, config, this.abortController.signal, this.streamFn);\n\n\t\t\tfor await (const event of stream) {\n\t\t\t\t// Update internal state based on events\n\t\t\t\tswitch (event.type) {\n\t\t\t\t\tcase \"message_start\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_update\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_end\":\n\t\t\t\t\t\tpartial = null;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tthis.appendMessage(event.message);\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"tool_execution_start\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.add(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"tool_execution_end\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.delete(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"turn_end\":\n\t\t\t\t\t\tif (event.message.role === \"assistant\" && (event.message as any).errorMessage) {\n\t\t\t\t\t\t\tthis._state.error = (event.message as any).errorMessage;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"agent_end\":\n\t\t\t\t\t\tthis._state.isStreaming = false;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\t// Emit to listeners\n\t\t\t\tthis.emit(event);\n\t\t\t}\n\n\t\t\t// Handle any remaining partial message\n\t\t\tif (partial && partial.role === \"assistant\" && partial.content.length > 0) {\n\t\t\t\tconst onlyEmpty = !partial.content.some(\n\t\t\t\t\t(c) =>\n\t\t\t\t\t\t(c.type === \"thinking\" && c.thinking.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"text\" && c.text.trim().length > 0) 
||\n\t\t\t\t\t\t(c.type === \"toolCall\" && c.name.trim().length > 0),\n\t\t\t\t);\n\t\t\t\tif (!onlyEmpty) {\n\t\t\t\t\tthis.appendMessage(partial);\n\t\t\t\t} else {\n\t\t\t\t\tif (this.abortController?.signal.aborted) {\n\t\t\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (err: any) {\n\t\t\tconst errorMsg: AgentMessage = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: [{ type: \"text\", text: \"\" }],\n\t\t\t\tapi: model.api,\n\t\t\t\tprovider: model.provider,\n\t\t\t\tmodel: model.id,\n\t\t\t\tusage: {\n\t\t\t\t\tinput: 0,\n\t\t\t\t\toutput: 0,\n\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\ttotalTokens: 0,\n\t\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t\t},\n\t\t\t\tstopReason: this.abortController?.signal.aborted ? \"aborted\" : \"error\",\n\t\t\t\terrorMessage: err?.message || String(err),\n\t\t\t\ttimestamp: Date.now(),\n\t\t\t} as AgentMessage;\n\n\t\t\tthis.appendMessage(errorMsg);\n\t\t\tthis._state.error = err?.message || String(err);\n\t\t\tthis.emit({ type: \"agent_end\", messages: [errorMsg] });\n\t\t} finally {\n\t\t\tthis._state.isStreaming = false;\n\t\t\tthis._state.streamMessage = null;\n\t\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\t\tthis.abortController = undefined;\n\t\t\tthis.resolveRunningPrompt?.();\n\t\t\tthis.runningPrompt = undefined;\n\t\t\tthis.resolveRunningPrompt = undefined;\n\t\t}\n\t}\n\n\tprivate emit(e: AgentEvent) {\n\t\tfor (const listener of this.listeners) {\n\t\t\tlistener(e);\n\t\t}\n\t}\n}\n"]}
package/dist/agent.js CHANGED
@@ -26,8 +26,10 @@ export class Agent {
     abortController;
     convertToLlm;
     transformContext;
-    messageQueue = [];
-    queueMode;
+    steeringQueue = [];
+    followUpQueue = [];
+    steeringMode;
+    followUpMode;
     streamFn;
     getApiKey;
     runningPrompt;
@@ -36,7 +38,8 @@ export class Agent {
         this._state = { ...this._state, ...opts.initialState };
         this.convertToLlm = opts.convertToLlm || defaultConvertToLlm;
         this.transformContext = opts.transformContext;
-        this.queueMode = opts.queueMode || "one-at-a-time";
+        this.steeringMode = opts.steeringMode || "one-at-a-time";
+        this.followUpMode = opts.followUpMode || "one-at-a-time";
         this.streamFn = opts.streamFn || streamSimple;
         this.getApiKey = opts.getApiKey;
     }
@@ -57,11 +60,17 @@ export class Agent {
     setThinkingLevel(l) {
         this._state.thinkingLevel = l;
     }
-    setQueueMode(mode) {
-        this.queueMode = mode;
+    setSteeringMode(mode) {
+        this.steeringMode = mode;
     }
-    getQueueMode() {
-        return this.queueMode;
+    getSteeringMode() {
+        return this.steeringMode;
+    }
+    setFollowUpMode(mode) {
+        this.followUpMode = mode;
+    }
+    getFollowUpMode() {
+        return this.followUpMode;
     }
     setTools(t) {
         this._state.tools = t;
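Both modes can also be switched at runtime through the new accessors; a brief sketch:

```typescript
agent.setSteeringMode("all");             // flush the whole steering queue on the next check
agent.setFollowUpMode("one-at-a-time");
console.log(agent.getSteeringMode(), agent.getFollowUpMode()); // "all" "one-at-a-time"
```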
@@ -72,11 +81,29 @@ export class Agent {
     appendMessage(m) {
         this._state.messages = [...this._state.messages, m];
     }
-    queueMessage(m) {
-        this.messageQueue.push(m);
+    /**
+     * Queue a steering message to interrupt the agent mid-run.
+     * Delivered after current tool execution, skips remaining tools.
+     */
+    steer(m) {
+        this.steeringQueue.push(m);
+    }
+    /**
+     * Queue a follow-up message to be processed after the agent finishes.
+     * Delivered only when agent has no more tool calls or steering messages.
+     */
+    followUp(m) {
+        this.followUpQueue.push(m);
+    }
+    clearSteeringQueue() {
+        this.steeringQueue = [];
+    }
+    clearFollowUpQueue() {
+        this.followUpQueue = [];
     }
-    clearMessageQueue() {
-        this.messageQueue = [];
+    clearAllQueues() {
+        this.steeringQueue = [];
+        this.followUpQueue = [];
     }
     clearMessages() {
         this._state.messages = [];
@@ -93,9 +120,13 @@ export class Agent {
         this._state.streamMessage = null;
         this._state.pendingToolCalls = new Set();
         this._state.error = undefined;
-        this.messageQueue = [];
+        this.steeringQueue = [];
+        this.followUpQueue = [];
     }
     async prompt(input, images) {
+        if (this._state.isStreaming) {
+            throw new Error("Agent is already processing a prompt. Use steer() or followUp() to queue messages, or wait for completion.");
+        }
         const model = this._state.model;
         if (!model)
             throw new Error("No model configured");
@@ -123,6 +154,9 @@ export class Agent {
     }
     /** Continue from current context (for retry after overflow) */
     async continue() {
+        if (this._state.isStreaming) {
+            throw new Error("Agent is already processing. Wait for completion before continuing.");
+        }
         const messages = this._state.messages;
         if (messages.length === 0) {
             throw new Error("No messages to continue from");
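prompt() and continue() now throw while a run is in flight, so callers that previously fired a second prompt() to queue input should check state and route through the queues instead. A sketch:

```typescript
if (agent.state.isStreaming) {
	// Busy: queue instead of starting a new run.
	agent.followUp({ role: "user", content: [{ type: "text", text: "Also update the README." }], timestamp: Date.now() });
} else {
	await agent.prompt("Also update the README.");
}
```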
@@ -164,19 +198,34 @@ export class Agent {
             convertToLlm: this.convertToLlm,
             transformContext: this.transformContext,
             getApiKey: this.getApiKey,
-            getQueuedMessages: async () => {
-                if (this.queueMode === "one-at-a-time") {
-                    if (this.messageQueue.length > 0) {
-                        const first = this.messageQueue[0];
-                        this.messageQueue = this.messageQueue.slice(1);
+            getSteeringMessages: async () => {
+                if (this.steeringMode === "one-at-a-time") {
+                    if (this.steeringQueue.length > 0) {
+                        const first = this.steeringQueue[0];
+                        this.steeringQueue = this.steeringQueue.slice(1);
+                        return [first];
+                    }
+                    return [];
+                }
+                else {
+                    const steering = this.steeringQueue.slice();
+                    this.steeringQueue = [];
+                    return steering;
+                }
+            },
+            getFollowUpMessages: async () => {
+                if (this.followUpMode === "one-at-a-time") {
+                    if (this.followUpQueue.length > 0) {
+                        const first = this.followUpQueue[0];
+                        this.followUpQueue = this.followUpQueue.slice(1);
                         return [first];
                     }
                     return [];
                 }
                 else {
-                    const queued = this.messageQueue.slice();
-                    this.messageQueue = [];
-                    return queued;
+                    const followUp = this.followUpQueue.slice();
+                    this.followUpQueue = [];
+                    return followUp;
                 }
             },
         };
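getSteeringMessages and getFollowUpMessages share one drain pattern, differing only in which queue and mode they consult: "one-at-a-time" yields a single message per check, "all" empties the queue. A standalone sketch of that pattern (the helper name is illustrative; the package reassigns the arrays rather than mutating them):

```typescript
type DrainMode = "all" | "one-at-a-time";

function drain<T>(queue: T[], mode: DrainMode): T[] {
	if (mode === "one-at-a-time") {
		return queue.length > 0 ? [queue.shift() as T] : [];
	}
	return queue.splice(0, queue.length); // "all": hand over everything at once
}

// drain(steeringQueue, steeringMode)  ~ getSteeringMessages()
// drain(followUpQueue, followUpMode)  ~ getFollowUpMessages()
```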
package/dist/agent.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"agent.js","sourceRoot":"","sources":["../src/agent.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EACN,QAAQ,EAKR,YAAY,GAEZ,MAAM,qBAAqB,CAAC;AAC7B,OAAO,EAAE,SAAS,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AAY/D;;GAEG;AACH,SAAS,mBAAmB,CAAC,QAAwB,EAAa;IACjE,OAAO,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,CAAC,CAAC,IAAI,KAAK,WAAW,IAAI,CAAC,CAAC,IAAI,KAAK,YAAY,CAAC,CAAC;AAAA,CACtG;AAkCD,MAAM,OAAO,KAAK;IACT,MAAM,GAAe;QAC5B,YAAY,EAAE,EAAE;QAChB,KAAK,EAAE,QAAQ,CAAC,QAAQ,EAAE,qCAAqC,CAAC;QAChE,aAAa,EAAE,KAAK;QACpB,KAAK,EAAE,EAAE;QACT,QAAQ,EAAE,EAAE;QACZ,WAAW,EAAE,KAAK;QAClB,aAAa,EAAE,IAAI;QACnB,gBAAgB,EAAE,IAAI,GAAG,EAAU;QACnC,KAAK,EAAE,SAAS;KAChB,CAAC;IAEM,SAAS,GAAG,IAAI,GAAG,EAA2B,CAAC;IAC/C,eAAe,CAAmB;IAClC,YAAY,CAA+D;IAC3E,gBAAgB,CAA+E;IAC/F,YAAY,GAAmB,EAAE,CAAC;IAClC,SAAS,CAA0B;IACpC,QAAQ,CAAW;IACnB,SAAS,CAA0E;IAClF,aAAa,CAAiB;IAC9B,oBAAoB,CAAc;IAE1C,YAAY,IAAI,GAAiB,EAAE,EAAE;QACpC,IAAI,CAAC,MAAM,GAAG,EAAE,GAAG,IAAI,CAAC,MAAM,EAAE,GAAG,IAAI,CAAC,YAAY,EAAE,CAAC;QACvD,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY,IAAI,mBAAmB,CAAC;QAC7D,IAAI,CAAC,gBAAgB,GAAG,IAAI,CAAC,gBAAgB,CAAC;QAC9C,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS,IAAI,eAAe,CAAC;QACnD,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,YAAY,CAAC;QAC9C,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS,CAAC;IAAA,CAChC;IAED,IAAI,KAAK,GAAe;QACvB,OAAO,IAAI,CAAC,MAAM,CAAC;IAAA,CACnB;IAED,SAAS,CAAC,EAA2B,EAAc;QAClD,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC;QACvB,OAAO,GAAG,EAAE,CAAC,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC;IAAA,CACvC;IAED,iBAAiB;IACjB,eAAe,CAAC,CAAS,EAAE;QAC1B,IAAI,CAAC,MAAM,CAAC,YAAY,GAAG,CAAC,CAAC;IAAA,CAC7B;IAED,QAAQ,CAAC,CAAa,EAAE;QACvB,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,CAAC,CAAC;IAAA,CACtB;IAED,gBAAgB,CAAC,CAAgB,EAAE;QAClC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,CAAC,CAAC;IAAA,CAC9B;IAED,YAAY,CAAC,IAA6B,EAAE;QAC3C,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC;IAAA,CACtB;IAED,YAAY,GAA4B;QACvC,OAAO,IAAI,CAAC,SAAS,CAAC;IAAA,CACtB;IAED,QAAQ,CAAC,CAAmB,EAAE;QAC7B,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,CAAC,CAAC;IAAA,CACtB;IAED,eAAe,CAAC,EAAkB,EAAE;QACnC,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,EAAE,CAAC,KAAK,EAAE,CAAC;IAAA,CAClC;IAED,aAAa,CAAC,CAAe,EAAE;QAC9B,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;IAAA,CACpD;IAED,YAAY,CAAC,CAAe,EAAE;QAC7B,IAAI,CAAC,YAAY,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAAA,CAC1B;IAED,iBAAiB,GAAG;QACnB,IAAI,CAAC,YAAY,GAAG,EAAE,CAAC;IAAA,CACvB;IAED,aAAa,GAAG;QACf,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,EAAE,CAAC;IAAA,CAC1B;IAED,KAAK,GAAG;QACP,IAAI,CAAC,eAAe,EAAE,KAAK,EAAE,CAAC;IAAA,CAC9B;IAED,WAAW,GAAkB;QAC5B,OAAO,IAAI,CAAC,aAAa,IAAI,OAAO,CAAC,OAAO,EAAE,CAAC;IAAA,CAC/C;IAED,KAAK,GAAG;QACP,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,EAAE,CAAC;QAC1B,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,KAAK,CAAC;QAChC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;QACjC,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,IAAI,GAAG,EAAU,CAAC;QACjD,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,SAAS,CAAC;QAC9B,IAAI,CAAC,YAAY,GAAG,EAAE,CAAC;IAAA,CACvB;IAKD,KAAK,CAAC,MAAM,CAAC,KAA6C,EAAE,MAAuB,EAAE;QACpF,MAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC;QAChC,IAAI,CAAC,KAAK;YAAE,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;QAEnD,IAAI,IAAoB,CAAC;QAEzB,IAAI,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC;YAC1B,IAAI,GAAG,KAAK,CAAC;QACd,CAAC;aAAM,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE,CAAC;YACtC,MAAM,OAAO,GAAsC,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC;YACnF,IAAI,MAAM,IAAI,MAAM,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACjC,OAAO,CAAC,IAAI,CAAC,GAAG,MAAM,CAAC,CAAC;YACzB,CAAC;YACD,IAAI,GAAG;gBACN;oBACC,IAAI,EAAE,MAAM;oBACZ,OAAO;oBACP,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;iBACrB;aACD,CAAC;QACH,CAAC;aAAM,CAAC;YACP,IAAI,GAAG,CAAC,KAAK,CAAC,
CAAC;QAChB,CAAC;QAED,MAAM,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC;IAAA,CAC1B;IAED,+DAA+D;IAC/D,KAAK,CAAC,QAAQ,GAAG;QAChB,MAAM,QAAQ,GAAG,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC;QACtC,IAAI,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC3B,MAAM,IAAI,KAAK,CAAC,8BAA8B,CAAC,CAAC;QACjD,CAAC;QACD,IAAI,QAAQ,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACxD,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;QACjE,CAAC;QAED,MAAM,IAAI,CAAC,QAAQ,CAAC,SAAS,CAAC,CAAC;IAAA,CAC/B;IAED;;;;OAIG;IACK,KAAK,CAAC,QAAQ,CAAC,QAAyB,EAAE;QACjD,MAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC;QAChC,IAAI,CAAC,KAAK;YAAE,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;QAEnD,IAAI,CAAC,aAAa,GAAG,IAAI,OAAO,CAAO,CAAC,OAAO,EAAE,EAAE,CAAC;YACnD,IAAI,CAAC,oBAAoB,GAAG,OAAO,CAAC;QAAA,CACpC,CAAC,CAAC;QAEH,IAAI,CAAC,eAAe,GAAG,IAAI,eAAe,EAAE,CAAC;QAC7C,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,IAAI,CAAC;QAC/B,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;QACjC,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,SAAS,CAAC;QAE9B,MAAM,SAAS,GACd,IAAI,CAAC,MAAM,CAAC,aAAa,KAAK,KAAK;YAClC,CAAC,CAAC,SAAS;YACX,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,aAAa,KAAK,SAAS;gBACxC,CAAC,CAAC,KAAK;gBACP,CAAC,CAAE,IAAI,CAAC,MAAM,CAAC,aAAiC,CAAC;QAEpD,MAAM,OAAO,GAAiB;YAC7B,YAAY,EAAE,IAAI,CAAC,MAAM,CAAC,YAAY;YACtC,QAAQ,EAAE,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,EAAE;YACtC,KAAK,EAAE,IAAI,CAAC,MAAM,CAAC,KAAK;SACxB,CAAC;QAEF,MAAM,MAAM,GAAoB;YAC/B,KAAK;YACL,SAAS;YACT,YAAY,EAAE,IAAI,CAAC,YAAY;YAC/B,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,SAAS,EAAE,IAAI,CAAC,SAAS;YACzB,iBAAiB,EAAE,KAAK,IAAI,EAAE,CAAC;gBAC9B,IAAI,IAAI,CAAC,SAAS,KAAK,eAAe,EAAE,CAAC;oBACxC,IAAI,IAAI,CAAC,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;wBAClC,MAAM,KAAK,GAAG,IAAI,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;wBACnC,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;wBAC/C,OAAO,CAAC,KAAK,CAAC,CAAC;oBAChB,CAAC;oBACD,OAAO,EAAE,CAAC;gBACX,CAAC;qBAAM,CAAC;oBACP,MAAM,MAAM,GAAG,IAAI,CAAC,YAAY,CAAC,KAAK,EAAE,CAAC;oBACzC,IAAI,CAAC,YAAY,GAAG,EAAE,CAAC;oBACvB,OAAO,MAAM,CAAC;gBACf,CAAC;YAAA,CACD;SACD,CAAC;QAEF,IAAI,OAAO,GAAwB,IAAI,CAAC;QAExC,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,QAAQ;gBACtB,CAAC,CAAC,SAAS,CAAC,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,CAAC;gBAClF,CAAC,CAAC,iBAAiB,CAAC,OAAO,EAAE,MAAM,EAAE,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;YAElF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,MAAM,EAAE,CAAC;gBAClC,wCAAwC;gBACxC,QAAQ,KAAK,CAAC,IAAI,EAAE,CAAC;oBACpB,KAAK,eAAe;wBACnB,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;wBACxB,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,KAAK,CAAC,OAAO,CAAC;wBAC1C,MAAM;oBAEP,KAAK,gBAAgB;wBACpB,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;wBACxB,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,KAAK,CAAC,OAAO,CAAC;wBAC1C,MAAM;oBAEP,KAAK,aAAa;wBACjB,OAAO,GAAG,IAAI,CAAC;wBACf,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;wBACjC,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;wBAClC,MAAM;oBAEP,KAAK,sBAAsB,EAAE,CAAC;wBAC7B,MAAM,CAAC,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,CAAC;wBAChD,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;wBACxB,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,CAAC,CAAC;wBACjC,MAAM;oBACP,CAAC;oBAED,KAAK,oBAAoB,EAAE,CAAC;wBAC3B,MAAM,CAAC,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,CAAC;wBAChD,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;wBAC3B,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,CAAC,CAAC;wBACjC,MAAM;oBACP,CAAC;oBAED,KAAK,UAAU;wBACd,IAAI,KAAK,CAAC,OAAO,CAAC,IAAI,KAAK,WAAW,IAAK,KAAK,CAAC,OAAe,CAAC,YAAY,EAAE,CAAC;4BAC/E,IAAI,CAAC,MAAM,CAAC,KAAK,GAAI,KAAK,CAAC,OAAe,CAAC,YAAY,CAAC;wBACzD,CAAC;wBACD,MAAM;oBAEP,KAAK,WAAW;wBACf,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,KAAK,CAAC;wBAChC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;wBACjC,MAAM;gBACR,CAAC;gBAED,oBAAoB;gBACpB,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;YAClB,CA
AC;YAED,uCAAuC;YACvC,IAAI,OAAO,IAAI,OAAO,CAAC,IAAI,KAAK,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC3E,MAAM,SAAS,GAAG,CAAC,OAAO,CAAC,OAAO,CAAC,IAAI,CACtC,CAAC,CAAC,EAAE,EAAE,CACL,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,IAAI,CAAC,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC;oBACvD,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC;oBAC/C,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC,CACpD,CAAC;gBACF,IAAI,CAAC,SAAS,EAAE,CAAC;oBAChB,IAAI,CAAC,aAAa,CAAC,OAAO,CAAC,CAAC;gBAC7B,CAAC;qBAAM,CAAC;oBACP,IAAI,IAAI,CAAC,eAAe,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC;wBAC1C,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;oBACxC,CAAC;gBACF,CAAC;YACF,CAAC;QACF,CAAC;QAAC,OAAO,GAAQ,EAAE,CAAC;YACnB,MAAM,QAAQ,GAAiB;gBAC9B,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,EAAE,EAAE,CAAC;gBACrC,GAAG,EAAE,KAAK,CAAC,GAAG;gBACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;gBACxB,KAAK,EAAE,KAAK,CAAC,EAAE;gBACf,KAAK,EAAE;oBACN,KAAK,EAAE,CAAC;oBACR,MAAM,EAAE,CAAC;oBACT,SAAS,EAAE,CAAC;oBACZ,UAAU,EAAE,CAAC;oBACb,WAAW,EAAE,CAAC;oBACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;iBACpE;gBACD,UAAU,EAAE,IAAI,CAAC,eAAe,EAAE,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO;gBACtE,YAAY,EAAE,GAAG,EAAE,OAAO,IAAI,MAAM,CAAC,GAAG,CAAC;gBACzC,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;aACL,CAAC;YAElB,IAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;YAC7B,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,GAAG,EAAE,OAAO,IAAI,MAAM,CAAC,GAAG,CAAC,CAAC;YAChD,IAAI,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC;QACxD,CAAC;gBAAS,CAAC;YACV,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,KAAK,CAAC;YAChC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;YACjC,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,IAAI,GAAG,EAAU,CAAC;YACjD,IAAI,CAAC,eAAe,GAAG,SAAS,CAAC;YACjC,IAAI,CAAC,oBAAoB,EAAE,EAAE,CAAC;YAC9B,IAAI,CAAC,aAAa,GAAG,SAAS,CAAC;YAC/B,IAAI,CAAC,oBAAoB,GAAG,SAAS,CAAC;QACvC,CAAC;IAAA,CACD;IAEO,IAAI,CAAC,CAAa,EAAE;QAC3B,KAAK,MAAM,QAAQ,IAAI,IAAI,CAAC,SAAS,EAAE,CAAC;YACvC,QAAQ,CAAC,CAAC,CAAC,CAAC;QACb,CAAC;IAAA,CACD;CACD","sourcesContent":["/**\n * Agent class that uses the agent-loop directly.\n * No transport abstraction - calls streamSimple via the loop.\n */\n\nimport {\n\tgetModel,\n\ttype ImageContent,\n\ttype Message,\n\ttype Model,\n\ttype ReasoningEffort,\n\tstreamSimple,\n\ttype TextContent,\n} from \"@mariozechner/pi-ai\";\nimport { agentLoop, agentLoopContinue } from \"./agent-loop.js\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentState,\n\tAgentTool,\n\tStreamFn,\n\tThinkingLevel,\n} from \"./types.js\";\n\n/**\n * Default convertToLlm: Keep only LLM-compatible messages, convert attachments.\n */\nfunction defaultConvertToLlm(messages: AgentMessage[]): Message[] {\n\treturn messages.filter((m) => m.role === \"user\" || m.role === \"assistant\" || m.role === \"toolResult\");\n}\n\nexport interface AgentOptions {\n\tinitialState?: Partial<AgentState>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t * Default filters to user/assistant/toolResult and converts attachments.\n\t */\n\tconvertToLlm?: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to context before convertToLlm.\n\t * Use for context pruning, injecting external context, etc.\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Queue mode: \"all\" = send all queued 
messages at once, \"one-at-a-time\" = one per turn\n\t */\n\tqueueMode?: \"all\" | \"one-at-a-time\";\n\n\t/**\n\t * Custom stream function (for proxy backends, etc.). Default uses streamSimple.\n\t */\n\tstreamFn?: StreamFn;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t * Useful for expiring tokens (e.g., GitHub Copilot OAuth).\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n}\n\nexport class Agent {\n\tprivate _state: AgentState = {\n\t\tsystemPrompt: \"\",\n\t\tmodel: getModel(\"google\", \"gemini-2.5-flash-lite-preview-06-17\"),\n\t\tthinkingLevel: \"off\",\n\t\ttools: [],\n\t\tmessages: [],\n\t\tisStreaming: false,\n\t\tstreamMessage: null,\n\t\tpendingToolCalls: new Set<string>(),\n\t\terror: undefined,\n\t};\n\n\tprivate listeners = new Set<(e: AgentEvent) => void>();\n\tprivate abortController?: AbortController;\n\tprivate convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\tprivate transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\tprivate messageQueue: AgentMessage[] = [];\n\tprivate queueMode: \"all\" | \"one-at-a-time\";\n\tpublic streamFn: StreamFn;\n\tpublic getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\tprivate runningPrompt?: Promise<void>;\n\tprivate resolveRunningPrompt?: () => void;\n\n\tconstructor(opts: AgentOptions = {}) {\n\t\tthis._state = { ...this._state, ...opts.initialState };\n\t\tthis.convertToLlm = opts.convertToLlm || defaultConvertToLlm;\n\t\tthis.transformContext = opts.transformContext;\n\t\tthis.queueMode = opts.queueMode || \"one-at-a-time\";\n\t\tthis.streamFn = opts.streamFn || streamSimple;\n\t\tthis.getApiKey = opts.getApiKey;\n\t}\n\n\tget state(): AgentState {\n\t\treturn this._state;\n\t}\n\n\tsubscribe(fn: (e: AgentEvent) => void): () => void {\n\t\tthis.listeners.add(fn);\n\t\treturn () => this.listeners.delete(fn);\n\t}\n\n\t// State mutators\n\tsetSystemPrompt(v: string) {\n\t\tthis._state.systemPrompt = v;\n\t}\n\n\tsetModel(m: Model<any>) {\n\t\tthis._state.model = m;\n\t}\n\n\tsetThinkingLevel(l: ThinkingLevel) {\n\t\tthis._state.thinkingLevel = l;\n\t}\n\n\tsetQueueMode(mode: \"all\" | \"one-at-a-time\") {\n\t\tthis.queueMode = mode;\n\t}\n\n\tgetQueueMode(): \"all\" | \"one-at-a-time\" {\n\t\treturn this.queueMode;\n\t}\n\n\tsetTools(t: AgentTool<any>[]) {\n\t\tthis._state.tools = t;\n\t}\n\n\treplaceMessages(ms: AgentMessage[]) {\n\t\tthis._state.messages = ms.slice();\n\t}\n\n\tappendMessage(m: AgentMessage) {\n\t\tthis._state.messages = [...this._state.messages, m];\n\t}\n\n\tqueueMessage(m: AgentMessage) {\n\t\tthis.messageQueue.push(m);\n\t}\n\n\tclearMessageQueue() {\n\t\tthis.messageQueue = [];\n\t}\n\n\tclearMessages() {\n\t\tthis._state.messages = [];\n\t}\n\n\tabort() {\n\t\tthis.abortController?.abort();\n\t}\n\n\twaitForIdle(): Promise<void> {\n\t\treturn this.runningPrompt ?? 
Promise.resolve();\n\t}\n\n\treset() {\n\t\tthis._state.messages = [];\n\t\tthis._state.isStreaming = false;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\tthis._state.error = undefined;\n\t\tthis.messageQueue = [];\n\t}\n\n\t/** Send a prompt with an AgentMessage */\n\tasync prompt(message: AgentMessage | AgentMessage[]): Promise<void>;\n\tasync prompt(input: string, images?: ImageContent[]): Promise<void>;\n\tasync prompt(input: string | AgentMessage | AgentMessage[], images?: ImageContent[]) {\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tlet msgs: AgentMessage[];\n\n\t\tif (Array.isArray(input)) {\n\t\t\tmsgs = input;\n\t\t} else if (typeof input === \"string\") {\n\t\t\tconst content: Array<TextContent | ImageContent> = [{ type: \"text\", text: input }];\n\t\t\tif (images && images.length > 0) {\n\t\t\t\tcontent.push(...images);\n\t\t\t}\n\t\t\tmsgs = [\n\t\t\t\t{\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent,\n\t\t\t\t\ttimestamp: Date.now(),\n\t\t\t\t},\n\t\t\t];\n\t\t} else {\n\t\t\tmsgs = [input];\n\t\t}\n\n\t\tawait this._runLoop(msgs);\n\t}\n\n\t/** Continue from current context (for retry after overflow) */\n\tasync continue() {\n\t\tconst messages = this._state.messages;\n\t\tif (messages.length === 0) {\n\t\t\tthrow new Error(\"No messages to continue from\");\n\t\t}\n\t\tif (messages[messages.length - 1].role === \"assistant\") {\n\t\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t\t}\n\n\t\tawait this._runLoop(undefined);\n\t}\n\n\t/**\n\t * Run the agent loop.\n\t * If messages are provided, starts a new conversation turn with those messages.\n\t * Otherwise, continues from existing context.\n\t */\n\tprivate async _runLoop(messages?: AgentMessage[]) {\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tthis.runningPrompt = new Promise<void>((resolve) => {\n\t\t\tthis.resolveRunningPrompt = resolve;\n\t\t});\n\n\t\tthis.abortController = new AbortController();\n\t\tthis._state.isStreaming = true;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.error = undefined;\n\n\t\tconst reasoning: ReasoningEffort | undefined =\n\t\t\tthis._state.thinkingLevel === \"off\"\n\t\t\t\t? undefined\n\t\t\t\t: this._state.thinkingLevel === \"minimal\"\n\t\t\t\t\t? \"low\"\n\t\t\t\t\t: (this._state.thinkingLevel as ReasoningEffort);\n\n\t\tconst context: AgentContext = {\n\t\t\tsystemPrompt: this._state.systemPrompt,\n\t\t\tmessages: this._state.messages.slice(),\n\t\t\ttools: this._state.tools,\n\t\t};\n\n\t\tconst config: AgentLoopConfig = {\n\t\t\tmodel,\n\t\t\treasoning,\n\t\t\tconvertToLlm: this.convertToLlm,\n\t\t\ttransformContext: this.transformContext,\n\t\t\tgetApiKey: this.getApiKey,\n\t\t\tgetQueuedMessages: async () => {\n\t\t\t\tif (this.queueMode === \"one-at-a-time\") {\n\t\t\t\t\tif (this.messageQueue.length > 0) {\n\t\t\t\t\t\tconst first = this.messageQueue[0];\n\t\t\t\t\t\tthis.messageQueue = this.messageQueue.slice(1);\n\t\t\t\t\t\treturn [first];\n\t\t\t\t\t}\n\t\t\t\t\treturn [];\n\t\t\t\t} else {\n\t\t\t\t\tconst queued = this.messageQueue.slice();\n\t\t\t\t\tthis.messageQueue = [];\n\t\t\t\t\treturn queued;\n\t\t\t\t}\n\t\t\t},\n\t\t};\n\n\t\tlet partial: AgentMessage | null = null;\n\n\t\ttry {\n\t\t\tconst stream = messages\n\t\t\t\t? 
agentLoop(messages, context, config, this.abortController.signal, this.streamFn)\n\t\t\t\t: agentLoopContinue(context, config, this.abortController.signal, this.streamFn);\n\n\t\t\tfor await (const event of stream) {\n\t\t\t\t// Update internal state based on events\n\t\t\t\tswitch (event.type) {\n\t\t\t\t\tcase \"message_start\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_update\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_end\":\n\t\t\t\t\t\tpartial = null;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tthis.appendMessage(event.message);\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"tool_execution_start\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.add(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"tool_execution_end\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.delete(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"turn_end\":\n\t\t\t\t\t\tif (event.message.role === \"assistant\" && (event.message as any).errorMessage) {\n\t\t\t\t\t\t\tthis._state.error = (event.message as any).errorMessage;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"agent_end\":\n\t\t\t\t\t\tthis._state.isStreaming = false;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\t// Emit to listeners\n\t\t\t\tthis.emit(event);\n\t\t\t}\n\n\t\t\t// Handle any remaining partial message\n\t\t\tif (partial && partial.role === \"assistant\" && partial.content.length > 0) {\n\t\t\t\tconst onlyEmpty = !partial.content.some(\n\t\t\t\t\t(c) =>\n\t\t\t\t\t\t(c.type === \"thinking\" && c.thinking.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"text\" && c.text.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"toolCall\" && c.name.trim().length > 0),\n\t\t\t\t);\n\t\t\t\tif (!onlyEmpty) {\n\t\t\t\t\tthis.appendMessage(partial);\n\t\t\t\t} else {\n\t\t\t\t\tif (this.abortController?.signal.aborted) {\n\t\t\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (err: any) {\n\t\t\tconst errorMsg: AgentMessage = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: [{ type: \"text\", text: \"\" }],\n\t\t\t\tapi: model.api,\n\t\t\t\tprovider: model.provider,\n\t\t\t\tmodel: model.id,\n\t\t\t\tusage: {\n\t\t\t\t\tinput: 0,\n\t\t\t\t\toutput: 0,\n\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\ttotalTokens: 0,\n\t\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t\t},\n\t\t\t\tstopReason: this.abortController?.signal.aborted ? 
\"aborted\" : \"error\",\n\t\t\t\terrorMessage: err?.message || String(err),\n\t\t\t\ttimestamp: Date.now(),\n\t\t\t} as AgentMessage;\n\n\t\t\tthis.appendMessage(errorMsg);\n\t\t\tthis._state.error = err?.message || String(err);\n\t\t\tthis.emit({ type: \"agent_end\", messages: [errorMsg] });\n\t\t} finally {\n\t\t\tthis._state.isStreaming = false;\n\t\t\tthis._state.streamMessage = null;\n\t\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\t\tthis.abortController = undefined;\n\t\t\tthis.resolveRunningPrompt?.();\n\t\t\tthis.runningPrompt = undefined;\n\t\t\tthis.resolveRunningPrompt = undefined;\n\t\t}\n\t}\n\n\tprivate emit(e: AgentEvent) {\n\t\tfor (const listener of this.listeners) {\n\t\t\tlistener(e);\n\t\t}\n\t}\n}\n"]}
+ {"version":3,"file":"agent.js","sourceRoot":"","sources":["../src/agent.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EACN,QAAQ,EAKR,YAAY,GAEZ,MAAM,qBAAqB,CAAC;AAC7B,OAAO,EAAE,SAAS,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AAY/D;;GAEG;AACH,SAAS,mBAAmB,CAAC,QAAwB,EAAa;IACjE,OAAO,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,CAAC,CAAC,IAAI,KAAK,WAAW,IAAI,CAAC,CAAC,IAAI,KAAK,YAAY,CAAC,CAAC;AAAA,CACtG;AAuCD,MAAM,OAAO,KAAK;IACT,MAAM,GAAe;QAC5B,YAAY,EAAE,EAAE;QAChB,KAAK,EAAE,QAAQ,CAAC,QAAQ,EAAE,qCAAqC,CAAC;QAChE,aAAa,EAAE,KAAK;QACpB,KAAK,EAAE,EAAE;QACT,QAAQ,EAAE,EAAE;QACZ,WAAW,EAAE,KAAK;QAClB,aAAa,EAAE,IAAI;QACnB,gBAAgB,EAAE,IAAI,GAAG,EAAU;QACnC,KAAK,EAAE,SAAS;KAChB,CAAC;IAEM,SAAS,GAAG,IAAI,GAAG,EAA2B,CAAC;IAC/C,eAAe,CAAmB;IAClC,YAAY,CAA+D;IAC3E,gBAAgB,CAA+E;IAC/F,aAAa,GAAmB,EAAE,CAAC;IACnC,aAAa,GAAmB,EAAE,CAAC;IACnC,YAAY,CAA0B;IACtC,YAAY,CAA0B;IACvC,QAAQ,CAAW;IACnB,SAAS,CAA0E;IAClF,aAAa,CAAiB;IAC9B,oBAAoB,CAAc;IAE1C,YAAY,IAAI,GAAiB,EAAE,EAAE;QACpC,IAAI,CAAC,MAAM,GAAG,EAAE,GAAG,IAAI,CAAC,MAAM,EAAE,GAAG,IAAI,CAAC,YAAY,EAAE,CAAC;QACvD,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY,IAAI,mBAAmB,CAAC;QAC7D,IAAI,CAAC,gBAAgB,GAAG,IAAI,CAAC,gBAAgB,CAAC;QAC9C,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY,IAAI,eAAe,CAAC;QACzD,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,YAAY,IAAI,eAAe,CAAC;QACzD,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,YAAY,CAAC;QAC9C,IAAI,CAAC,SAAS,GAAG,IAAI,CAAC,SAAS,CAAC;IAAA,CAChC;IAED,IAAI,KAAK,GAAe;QACvB,OAAO,IAAI,CAAC,MAAM,CAAC;IAAA,CACnB;IAED,SAAS,CAAC,EAA2B,EAAc;QAClD,IAAI,CAAC,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC;QACvB,OAAO,GAAG,EAAE,CAAC,IAAI,CAAC,SAAS,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC;IAAA,CACvC;IAED,iBAAiB;IACjB,eAAe,CAAC,CAAS,EAAE;QAC1B,IAAI,CAAC,MAAM,CAAC,YAAY,GAAG,CAAC,CAAC;IAAA,CAC7B;IAED,QAAQ,CAAC,CAAa,EAAE;QACvB,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,CAAC,CAAC;IAAA,CACtB;IAED,gBAAgB,CAAC,CAAgB,EAAE;QAClC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,CAAC,CAAC;IAAA,CAC9B;IAED,eAAe,CAAC,IAA6B,EAAE;QAC9C,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC;IAAA,CACzB;IAED,eAAe,GAA4B;QAC1C,OAAO,IAAI,CAAC,YAAY,CAAC;IAAA,CACzB;IAED,eAAe,CAAC,IAA6B,EAAE;QAC9C,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC;IAAA,CACzB;IAED,eAAe,GAA4B;QAC1C,OAAO,IAAI,CAAC,YAAY,CAAC;IAAA,CACzB;IAED,QAAQ,CAAC,CAAmB,EAAE;QAC7B,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,CAAC,CAAC;IAAA,CACtB;IAED,eAAe,CAAC,EAAkB,EAAE;QACnC,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,EAAE,CAAC,KAAK,EAAE,CAAC;IAAA,CAClC;IAED,aAAa,CAAC,CAAe,EAAE;QAC9B,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;IAAA,CACpD;IAED;;;OAGG;IACH,KAAK,CAAC,CAAe,EAAE;QACtB,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAAA,CAC3B;IAED;;;OAGG;IACH,QAAQ,CAAC,CAAe,EAAE;QACzB,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IAAA,CAC3B;IAED,kBAAkB,GAAG;QACpB,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;IAAA,CACxB;IAED,kBAAkB,GAAG;QACpB,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;IAAA,CACxB;IAED,cAAc,GAAG;QAChB,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;QACxB,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;IAAA,CACxB;IAED,aAAa,GAAG;QACf,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,EAAE,CAAC;IAAA,CAC1B;IAED,KAAK,GAAG;QACP,IAAI,CAAC,eAAe,EAAE,KAAK,EAAE,CAAC;IAAA,CAC9B;IAED,WAAW,GAAkB;QAC5B,OAAO,IAAI,CAAC,aAAa,IAAI,OAAO,CAAC,OAAO,EAAE,CAAC;IAAA,CAC/C;IAED,KAAK,GAAG;QACP,IAAI,CAAC,MAAM,CAAC,QAAQ,GAAG,EAAE,CAAC;QAC1B,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,KAAK,CAAC;QAChC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;QACjC,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,IAAI,GAAG,EAAU,CAAC;QACjD,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,SAAS,CAAC;QAC9B,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;QACxB,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;IAAA,CACxB;IAKD,KAAK,CAAC,MAAM,CAAC,KAA6C,EAAE,MAAuB,EAAE;QACpF,IAAI,IAAI,CAAC,MAAM,CAAC,WAAW,EAAE,CAAC;YAC7B,MAAM,IAAI,KAAK,CACd,4GA
A4G,CAC5G,CAAC;QACH,CAAC;QAED,MAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC;QAChC,IAAI,CAAC,KAAK;YAAE,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;QAEnD,IAAI,IAAoB,CAAC;QAEzB,IAAI,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,EAAE,CAAC;YAC1B,IAAI,GAAG,KAAK,CAAC;QACd,CAAC;aAAM,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE,CAAC;YACtC,MAAM,OAAO,GAAsC,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,CAAC,CAAC;YACnF,IAAI,MAAM,IAAI,MAAM,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBACjC,OAAO,CAAC,IAAI,CAAC,GAAG,MAAM,CAAC,CAAC;YACzB,CAAC;YACD,IAAI,GAAG;gBACN;oBACC,IAAI,EAAE,MAAM;oBACZ,OAAO;oBACP,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;iBACrB;aACD,CAAC;QACH,CAAC;aAAM,CAAC;YACP,IAAI,GAAG,CAAC,KAAK,CAAC,CAAC;QAChB,CAAC;QAED,MAAM,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC;IAAA,CAC1B;IAED,+DAA+D;IAC/D,KAAK,CAAC,QAAQ,GAAG;QAChB,IAAI,IAAI,CAAC,MAAM,CAAC,WAAW,EAAE,CAAC;YAC7B,MAAM,IAAI,KAAK,CAAC,qEAAqE,CAAC,CAAC;QACxF,CAAC;QAED,MAAM,QAAQ,GAAG,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC;QACtC,IAAI,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC3B,MAAM,IAAI,KAAK,CAAC,8BAA8B,CAAC,CAAC;QACjD,CAAC;QACD,IAAI,QAAQ,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACxD,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;QACjE,CAAC;QAED,MAAM,IAAI,CAAC,QAAQ,CAAC,SAAS,CAAC,CAAC;IAAA,CAC/B;IAED;;;;OAIG;IACK,KAAK,CAAC,QAAQ,CAAC,QAAyB,EAAE;QACjD,MAAM,KAAK,GAAG,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC;QAChC,IAAI,CAAC,KAAK;YAAE,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;QAEnD,IAAI,CAAC,aAAa,GAAG,IAAI,OAAO,CAAO,CAAC,OAAO,EAAE,EAAE,CAAC;YACnD,IAAI,CAAC,oBAAoB,GAAG,OAAO,CAAC;QAAA,CACpC,CAAC,CAAC;QAEH,IAAI,CAAC,eAAe,GAAG,IAAI,eAAe,EAAE,CAAC;QAC7C,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,IAAI,CAAC;QAC/B,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;QACjC,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,SAAS,CAAC;QAE9B,MAAM,SAAS,GACd,IAAI,CAAC,MAAM,CAAC,aAAa,KAAK,KAAK;YAClC,CAAC,CAAC,SAAS;YACX,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,aAAa,KAAK,SAAS;gBACxC,CAAC,CAAC,KAAK;gBACP,CAAC,CAAE,IAAI,CAAC,MAAM,CAAC,aAAiC,CAAC;QAEpD,MAAM,OAAO,GAAiB;YAC7B,YAAY,EAAE,IAAI,CAAC,MAAM,CAAC,YAAY;YACtC,QAAQ,EAAE,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,EAAE;YACtC,KAAK,EAAE,IAAI,CAAC,MAAM,CAAC,KAAK;SACxB,CAAC;QAEF,MAAM,MAAM,GAAoB;YAC/B,KAAK;YACL,SAAS;YACT,YAAY,EAAE,IAAI,CAAC,YAAY;YAC/B,gBAAgB,EAAE,IAAI,CAAC,gBAAgB;YACvC,SAAS,EAAE,IAAI,CAAC,SAAS;YACzB,mBAAmB,EAAE,KAAK,IAAI,EAAE,CAAC;gBAChC,IAAI,IAAI,CAAC,YAAY,KAAK,eAAe,EAAE,CAAC;oBAC3C,IAAI,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;wBACnC,MAAM,KAAK,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC;wBACpC,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;wBACjD,OAAO,CAAC,KAAK,CAAC,CAAC;oBAChB,CAAC;oBACD,OAAO,EAAE,CAAC;gBACX,CAAC;qBAAM,CAAC;oBACP,MAAM,QAAQ,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,EAAE,CAAC;oBAC5C,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;oBACxB,OAAO,QAAQ,CAAC;gBACjB,CAAC;YAAA,CACD;YACD,mBAAmB,EAAE,KAAK,IAAI,EAAE,CAAC;gBAChC,IAAI,IAAI,CAAC,YAAY,KAAK,eAAe,EAAE,CAAC;oBAC3C,IAAI,IAAI,CAAC,aAAa,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;wBACnC,MAAM,KAAK,GAAG,IAAI,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC;wBACpC,IAAI,CAAC,aAAa,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;wBACjD,OAAO,CAAC,KAAK,CAAC,CAAC;oBAChB,CAAC;oBACD,OAAO,EAAE,CAAC;gBACX,CAAC;qBAAM,CAAC;oBACP,MAAM,QAAQ,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,EAAE,CAAC;oBAC5C,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC;oBACxB,OAAO,QAAQ,CAAC;gBACjB,CAAC;YAAA,CACD;SACD,CAAC;QAEF,IAAI,OAAO,GAAwB,IAAI,CAAC;QAExC,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,QAAQ;gBACtB,CAAC,CAAC,SAAS,CAAC,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,CAAC;gBAClF,CAAC,CAAC,iBAAiB,CAAC,OAAO,EAAE,MAAM,EAAE,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,CAAC,CAAC;YAElF,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,MAAM,EAAE,CAAC;gBAClC,wCAAwC;gBACxC,QAAQ,KAAK,CAAC,IAAI,EAAE,CAAC;oBACpB
,KAAK,eAAe;wBACnB,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;wBACxB,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,KAAK,CAAC,OAAO,CAAC;wBAC1C,MAAM;oBAEP,KAAK,gBAAgB;wBACpB,OAAO,GAAG,KAAK,CAAC,OAAO,CAAC;wBACxB,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,KAAK,CAAC,OAAO,CAAC;wBAC1C,MAAM;oBAEP,KAAK,aAAa;wBACjB,OAAO,GAAG,IAAI,CAAC;wBACf,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;wBACjC,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;wBAClC,MAAM;oBAEP,KAAK,sBAAsB,EAAE,CAAC;wBAC7B,MAAM,CAAC,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,CAAC;wBAChD,CAAC,CAAC,GAAG,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;wBACxB,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,CAAC,CAAC;wBACjC,MAAM;oBACP,CAAC;oBAED,KAAK,oBAAoB,EAAE,CAAC;wBAC3B,MAAM,CAAC,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,CAAC;wBAChD,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,UAAU,CAAC,CAAC;wBAC3B,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,CAAC,CAAC;wBACjC,MAAM;oBACP,CAAC;oBAED,KAAK,UAAU;wBACd,IAAI,KAAK,CAAC,OAAO,CAAC,IAAI,KAAK,WAAW,IAAK,KAAK,CAAC,OAAe,CAAC,YAAY,EAAE,CAAC;4BAC/E,IAAI,CAAC,MAAM,CAAC,KAAK,GAAI,KAAK,CAAC,OAAe,CAAC,YAAY,CAAC;wBACzD,CAAC;wBACD,MAAM;oBAEP,KAAK,WAAW;wBACf,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,KAAK,CAAC;wBAChC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;wBACjC,MAAM;gBACR,CAAC;gBAED,oBAAoB;gBACpB,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;YAClB,CAAC;YAED,uCAAuC;YACvC,IAAI,OAAO,IAAI,OAAO,CAAC,IAAI,KAAK,WAAW,IAAI,OAAO,CAAC,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;gBAC3E,MAAM,SAAS,GAAG,CAAC,OAAO,CAAC,OAAO,CAAC,IAAI,CACtC,CAAC,CAAC,EAAE,EAAE,CACL,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,IAAI,CAAC,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC;oBACvD,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC;oBAC/C,CAAC,CAAC,CAAC,IAAI,KAAK,UAAU,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,EAAE,CAAC,MAAM,GAAG,CAAC,CAAC,CACpD,CAAC;gBACF,IAAI,CAAC,SAAS,EAAE,CAAC;oBAChB,IAAI,CAAC,aAAa,CAAC,OAAO,CAAC,CAAC;gBAC7B,CAAC;qBAAM,CAAC;oBACP,IAAI,IAAI,CAAC,eAAe,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC;wBAC1C,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;oBACxC,CAAC;gBACF,CAAC;YACF,CAAC;QACF,CAAC;QAAC,OAAO,GAAQ,EAAE,CAAC;YACnB,MAAM,QAAQ,GAAiB;gBAC9B,IAAI,EAAE,WAAW;gBACjB,OAAO,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,IAAI,EAAE,EAAE,EAAE,CAAC;gBACrC,GAAG,EAAE,KAAK,CAAC,GAAG;gBACd,QAAQ,EAAE,KAAK,CAAC,QAAQ;gBACxB,KAAK,EAAE,KAAK,CAAC,EAAE;gBACf,KAAK,EAAE;oBACN,KAAK,EAAE,CAAC;oBACR,MAAM,EAAE,CAAC;oBACT,SAAS,EAAE,CAAC;oBACZ,UAAU,EAAE,CAAC;oBACb,WAAW,EAAE,CAAC;oBACd,IAAI,EAAE,EAAE,KAAK,EAAE,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE;iBACpE;gBACD,UAAU,EAAE,IAAI,CAAC,eAAe,EAAE,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO;gBACtE,YAAY,EAAE,GAAG,EAAE,OAAO,IAAI,MAAM,CAAC,GAAG,CAAC;gBACzC,SAAS,EAAE,IAAI,CAAC,GAAG,EAAE;aACL,CAAC;YAElB,IAAI,CAAC,aAAa,CAAC,QAAQ,CAAC,CAAC;YAC7B,IAAI,CAAC,MAAM,CAAC,KAAK,GAAG,GAAG,EAAE,OAAO,IAAI,MAAM,CAAC,GAAG,CAAC,CAAC;YAChD,IAAI,CAAC,IAAI,CAAC,EAAE,IAAI,EAAE,WAAW,EAAE,QAAQ,EAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,CAAC;QACxD,CAAC;gBAAS,CAAC;YACV,IAAI,CAAC,MAAM,CAAC,WAAW,GAAG,KAAK,CAAC;YAChC,IAAI,CAAC,MAAM,CAAC,aAAa,GAAG,IAAI,CAAC;YACjC,IAAI,CAAC,MAAM,CAAC,gBAAgB,GAAG,IAAI,GAAG,EAAU,CAAC;YACjD,IAAI,CAAC,eAAe,GAAG,SAAS,CAAC;YACjC,IAAI,CAAC,oBAAoB,EAAE,EAAE,CAAC;YAC9B,IAAI,CAAC,aAAa,GAAG,SAAS,CAAC;YAC/B,IAAI,CAAC,oBAAoB,GAAG,SAAS,CAAC;QACvC,CAAC;IAAA,CACD;IAEO,IAAI,CAAC,CAAa,EAAE;QAC3B,KAAK,MAAM,QAAQ,IAAI,IAAI,CAAC,SAAS,EAAE,CAAC;YACvC,QAAQ,CAAC,CAAC,CAAC,CAAC;QACb,CAAC;IAAA,CACD;CACD","sourcesContent":["/**\n * Agent class that uses the agent-loop directly.\n * No transport abstraction - calls streamSimple via the loop.\n */\n\nimport {\n\tgetModel,\n\ttype ImageContent,\n\ttype Message,\n\ttype 
Model,\n\ttype ReasoningEffort,\n\tstreamSimple,\n\ttype TextContent,\n} from \"@mariozechner/pi-ai\";\nimport { agentLoop, agentLoopContinue } from \"./agent-loop.js\";\nimport type {\n\tAgentContext,\n\tAgentEvent,\n\tAgentLoopConfig,\n\tAgentMessage,\n\tAgentState,\n\tAgentTool,\n\tStreamFn,\n\tThinkingLevel,\n} from \"./types.js\";\n\n/**\n * Default convertToLlm: Keep only LLM-compatible messages, convert attachments.\n */\nfunction defaultConvertToLlm(messages: AgentMessage[]): Message[] {\n\treturn messages.filter((m) => m.role === \"user\" || m.role === \"assistant\" || m.role === \"toolResult\");\n}\n\nexport interface AgentOptions {\n\tinitialState?: Partial<AgentState>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t * Default filters to user/assistant/toolResult and converts attachments.\n\t */\n\tconvertToLlm?: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to context before convertToLlm.\n\t * Use for context pruning, injecting external context, etc.\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Steering mode: \"all\" = send all steering messages at once, \"one-at-a-time\" = one per turn\n\t */\n\tsteeringMode?: \"all\" | \"one-at-a-time\";\n\n\t/**\n\t * Follow-up mode: \"all\" = send all follow-up messages at once, \"one-at-a-time\" = one per turn\n\t */\n\tfollowUpMode?: \"all\" | \"one-at-a-time\";\n\n\t/**\n\t * Custom stream function (for proxy backends, etc.). Default uses streamSimple.\n\t */\n\tstreamFn?: StreamFn;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t * Useful for expiring tokens (e.g., GitHub Copilot OAuth).\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n}\n\nexport class Agent {\n\tprivate _state: AgentState = {\n\t\tsystemPrompt: \"\",\n\t\tmodel: getModel(\"google\", \"gemini-2.5-flash-lite-preview-06-17\"),\n\t\tthinkingLevel: \"off\",\n\t\ttools: [],\n\t\tmessages: [],\n\t\tisStreaming: false,\n\t\tstreamMessage: null,\n\t\tpendingToolCalls: new Set<string>(),\n\t\terror: undefined,\n\t};\n\n\tprivate listeners = new Set<(e: AgentEvent) => void>();\n\tprivate abortController?: AbortController;\n\tprivate convertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\tprivate transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\tprivate steeringQueue: AgentMessage[] = [];\n\tprivate followUpQueue: AgentMessage[] = [];\n\tprivate steeringMode: \"all\" | \"one-at-a-time\";\n\tprivate followUpMode: \"all\" | \"one-at-a-time\";\n\tpublic streamFn: StreamFn;\n\tpublic getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\tprivate runningPrompt?: Promise<void>;\n\tprivate resolveRunningPrompt?: () => void;\n\n\tconstructor(opts: AgentOptions = {}) {\n\t\tthis._state = { ...this._state, ...opts.initialState };\n\t\tthis.convertToLlm = opts.convertToLlm || defaultConvertToLlm;\n\t\tthis.transformContext = opts.transformContext;\n\t\tthis.steeringMode = opts.steeringMode || \"one-at-a-time\";\n\t\tthis.followUpMode = opts.followUpMode || \"one-at-a-time\";\n\t\tthis.streamFn = opts.streamFn || streamSimple;\n\t\tthis.getApiKey = opts.getApiKey;\n\t}\n\n\tget state(): AgentState {\n\t\treturn this._state;\n\t}\n\n\tsubscribe(fn: (e: AgentEvent) => void): () => void {\n\t\tthis.listeners.add(fn);\n\t\treturn () => 
this.listeners.delete(fn);\n\t}\n\n\t// State mutators\n\tsetSystemPrompt(v: string) {\n\t\tthis._state.systemPrompt = v;\n\t}\n\n\tsetModel(m: Model<any>) {\n\t\tthis._state.model = m;\n\t}\n\n\tsetThinkingLevel(l: ThinkingLevel) {\n\t\tthis._state.thinkingLevel = l;\n\t}\n\n\tsetSteeringMode(mode: \"all\" | \"one-at-a-time\") {\n\t\tthis.steeringMode = mode;\n\t}\n\n\tgetSteeringMode(): \"all\" | \"one-at-a-time\" {\n\t\treturn this.steeringMode;\n\t}\n\n\tsetFollowUpMode(mode: \"all\" | \"one-at-a-time\") {\n\t\tthis.followUpMode = mode;\n\t}\n\n\tgetFollowUpMode(): \"all\" | \"one-at-a-time\" {\n\t\treturn this.followUpMode;\n\t}\n\n\tsetTools(t: AgentTool<any>[]) {\n\t\tthis._state.tools = t;\n\t}\n\n\treplaceMessages(ms: AgentMessage[]) {\n\t\tthis._state.messages = ms.slice();\n\t}\n\n\tappendMessage(m: AgentMessage) {\n\t\tthis._state.messages = [...this._state.messages, m];\n\t}\n\n\t/**\n\t * Queue a steering message to interrupt the agent mid-run.\n\t * Delivered after current tool execution, skips remaining tools.\n\t */\n\tsteer(m: AgentMessage) {\n\t\tthis.steeringQueue.push(m);\n\t}\n\n\t/**\n\t * Queue a follow-up message to be processed after the agent finishes.\n\t * Delivered only when agent has no more tool calls or steering messages.\n\t */\n\tfollowUp(m: AgentMessage) {\n\t\tthis.followUpQueue.push(m);\n\t}\n\n\tclearSteeringQueue() {\n\t\tthis.steeringQueue = [];\n\t}\n\n\tclearFollowUpQueue() {\n\t\tthis.followUpQueue = [];\n\t}\n\n\tclearAllQueues() {\n\t\tthis.steeringQueue = [];\n\t\tthis.followUpQueue = [];\n\t}\n\n\tclearMessages() {\n\t\tthis._state.messages = [];\n\t}\n\n\tabort() {\n\t\tthis.abortController?.abort();\n\t}\n\n\twaitForIdle(): Promise<void> {\n\t\treturn this.runningPrompt ?? Promise.resolve();\n\t}\n\n\treset() {\n\t\tthis._state.messages = [];\n\t\tthis._state.isStreaming = false;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\tthis._state.error = undefined;\n\t\tthis.steeringQueue = [];\n\t\tthis.followUpQueue = [];\n\t}\n\n\t/** Send a prompt with an AgentMessage */\n\tasync prompt(message: AgentMessage | AgentMessage[]): Promise<void>;\n\tasync prompt(input: string, images?: ImageContent[]): Promise<void>;\n\tasync prompt(input: string | AgentMessage | AgentMessage[], images?: ImageContent[]) {\n\t\tif (this._state.isStreaming) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Agent is already processing a prompt. Use steer() or followUp() to queue messages, or wait for completion.\",\n\t\t\t);\n\t\t}\n\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tlet msgs: AgentMessage[];\n\n\t\tif (Array.isArray(input)) {\n\t\t\tmsgs = input;\n\t\t} else if (typeof input === \"string\") {\n\t\t\tconst content: Array<TextContent | ImageContent> = [{ type: \"text\", text: input }];\n\t\t\tif (images && images.length > 0) {\n\t\t\t\tcontent.push(...images);\n\t\t\t}\n\t\t\tmsgs = [\n\t\t\t\t{\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tcontent,\n\t\t\t\t\ttimestamp: Date.now(),\n\t\t\t\t},\n\t\t\t];\n\t\t} else {\n\t\t\tmsgs = [input];\n\t\t}\n\n\t\tawait this._runLoop(msgs);\n\t}\n\n\t/** Continue from current context (for retry after overflow) */\n\tasync continue() {\n\t\tif (this._state.isStreaming) {\n\t\t\tthrow new Error(\"Agent is already processing. 
Wait for completion before continuing.\");\n\t\t}\n\n\t\tconst messages = this._state.messages;\n\t\tif (messages.length === 0) {\n\t\t\tthrow new Error(\"No messages to continue from\");\n\t\t}\n\t\tif (messages[messages.length - 1].role === \"assistant\") {\n\t\t\tthrow new Error(\"Cannot continue from message role: assistant\");\n\t\t}\n\n\t\tawait this._runLoop(undefined);\n\t}\n\n\t/**\n\t * Run the agent loop.\n\t * If messages are provided, starts a new conversation turn with those messages.\n\t * Otherwise, continues from existing context.\n\t */\n\tprivate async _runLoop(messages?: AgentMessage[]) {\n\t\tconst model = this._state.model;\n\t\tif (!model) throw new Error(\"No model configured\");\n\n\t\tthis.runningPrompt = new Promise<void>((resolve) => {\n\t\t\tthis.resolveRunningPrompt = resolve;\n\t\t});\n\n\t\tthis.abortController = new AbortController();\n\t\tthis._state.isStreaming = true;\n\t\tthis._state.streamMessage = null;\n\t\tthis._state.error = undefined;\n\n\t\tconst reasoning: ReasoningEffort | undefined =\n\t\t\tthis._state.thinkingLevel === \"off\"\n\t\t\t\t? undefined\n\t\t\t\t: this._state.thinkingLevel === \"minimal\"\n\t\t\t\t\t? \"low\"\n\t\t\t\t\t: (this._state.thinkingLevel as ReasoningEffort);\n\n\t\tconst context: AgentContext = {\n\t\t\tsystemPrompt: this._state.systemPrompt,\n\t\t\tmessages: this._state.messages.slice(),\n\t\t\ttools: this._state.tools,\n\t\t};\n\n\t\tconst config: AgentLoopConfig = {\n\t\t\tmodel,\n\t\t\treasoning,\n\t\t\tconvertToLlm: this.convertToLlm,\n\t\t\ttransformContext: this.transformContext,\n\t\t\tgetApiKey: this.getApiKey,\n\t\t\tgetSteeringMessages: async () => {\n\t\t\t\tif (this.steeringMode === \"one-at-a-time\") {\n\t\t\t\t\tif (this.steeringQueue.length > 0) {\n\t\t\t\t\t\tconst first = this.steeringQueue[0];\n\t\t\t\t\t\tthis.steeringQueue = this.steeringQueue.slice(1);\n\t\t\t\t\t\treturn [first];\n\t\t\t\t\t}\n\t\t\t\t\treturn [];\n\t\t\t\t} else {\n\t\t\t\t\tconst steering = this.steeringQueue.slice();\n\t\t\t\t\tthis.steeringQueue = [];\n\t\t\t\t\treturn steering;\n\t\t\t\t}\n\t\t\t},\n\t\t\tgetFollowUpMessages: async () => {\n\t\t\t\tif (this.followUpMode === \"one-at-a-time\") {\n\t\t\t\t\tif (this.followUpQueue.length > 0) {\n\t\t\t\t\t\tconst first = this.followUpQueue[0];\n\t\t\t\t\t\tthis.followUpQueue = this.followUpQueue.slice(1);\n\t\t\t\t\t\treturn [first];\n\t\t\t\t\t}\n\t\t\t\t\treturn [];\n\t\t\t\t} else {\n\t\t\t\t\tconst followUp = this.followUpQueue.slice();\n\t\t\t\t\tthis.followUpQueue = [];\n\t\t\t\t\treturn followUp;\n\t\t\t\t}\n\t\t\t},\n\t\t};\n\n\t\tlet partial: AgentMessage | null = null;\n\n\t\ttry {\n\t\t\tconst stream = messages\n\t\t\t\t? 
agentLoop(messages, context, config, this.abortController.signal, this.streamFn)\n\t\t\t\t: agentLoopContinue(context, config, this.abortController.signal, this.streamFn);\n\n\t\t\tfor await (const event of stream) {\n\t\t\t\t// Update internal state based on events\n\t\t\t\tswitch (event.type) {\n\t\t\t\t\tcase \"message_start\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_update\":\n\t\t\t\t\t\tpartial = event.message;\n\t\t\t\t\t\tthis._state.streamMessage = event.message;\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"message_end\":\n\t\t\t\t\t\tpartial = null;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tthis.appendMessage(event.message);\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"tool_execution_start\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.add(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"tool_execution_end\": {\n\t\t\t\t\t\tconst s = new Set(this._state.pendingToolCalls);\n\t\t\t\t\t\ts.delete(event.toolCallId);\n\t\t\t\t\t\tthis._state.pendingToolCalls = s;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tcase \"turn_end\":\n\t\t\t\t\t\tif (event.message.role === \"assistant\" && (event.message as any).errorMessage) {\n\t\t\t\t\t\t\tthis._state.error = (event.message as any).errorMessage;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak;\n\n\t\t\t\t\tcase \"agent_end\":\n\t\t\t\t\t\tthis._state.isStreaming = false;\n\t\t\t\t\t\tthis._state.streamMessage = null;\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\t// Emit to listeners\n\t\t\t\tthis.emit(event);\n\t\t\t}\n\n\t\t\t// Handle any remaining partial message\n\t\t\tif (partial && partial.role === \"assistant\" && partial.content.length > 0) {\n\t\t\t\tconst onlyEmpty = !partial.content.some(\n\t\t\t\t\t(c) =>\n\t\t\t\t\t\t(c.type === \"thinking\" && c.thinking.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"text\" && c.text.trim().length > 0) ||\n\t\t\t\t\t\t(c.type === \"toolCall\" && c.name.trim().length > 0),\n\t\t\t\t);\n\t\t\t\tif (!onlyEmpty) {\n\t\t\t\t\tthis.appendMessage(partial);\n\t\t\t\t} else {\n\t\t\t\t\tif (this.abortController?.signal.aborted) {\n\t\t\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (err: any) {\n\t\t\tconst errorMsg: AgentMessage = {\n\t\t\t\trole: \"assistant\",\n\t\t\t\tcontent: [{ type: \"text\", text: \"\" }],\n\t\t\t\tapi: model.api,\n\t\t\t\tprovider: model.provider,\n\t\t\t\tmodel: model.id,\n\t\t\t\tusage: {\n\t\t\t\t\tinput: 0,\n\t\t\t\t\toutput: 0,\n\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\ttotalTokens: 0,\n\t\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t\t},\n\t\t\t\tstopReason: this.abortController?.signal.aborted ? 
\"aborted\" : \"error\",\n\t\t\t\terrorMessage: err?.message || String(err),\n\t\t\t\ttimestamp: Date.now(),\n\t\t\t} as AgentMessage;\n\n\t\t\tthis.appendMessage(errorMsg);\n\t\t\tthis._state.error = err?.message || String(err);\n\t\t\tthis.emit({ type: \"agent_end\", messages: [errorMsg] });\n\t\t} finally {\n\t\t\tthis._state.isStreaming = false;\n\t\t\tthis._state.streamMessage = null;\n\t\t\tthis._state.pendingToolCalls = new Set<string>();\n\t\t\tthis.abortController = undefined;\n\t\t\tthis.resolveRunningPrompt?.();\n\t\t\tthis.runningPrompt = undefined;\n\t\t\tthis.resolveRunningPrompt = undefined;\n\t\t}\n\t}\n\n\tprivate emit(e: AgentEvent) {\n\t\tfor (const listener of this.listeners) {\n\t\t\tlistener(e);\n\t\t}\n\t}\n}\n"]}
package/dist/types.d.ts CHANGED
@@ -57,12 +57,25 @@ export interface AgentLoopConfig extends SimpleStreamOptions {
   */
  getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
  /**
-  * Returns queued messages to inject into the conversation.
+  * Returns steering messages to inject into the conversation mid-run.
   *
-  * Called after each turn to check for user interruptions or injected messages.
-  * If messages are returned, they're added to the context before the next LLM call.
+  * Called after each tool execution to check for user interruptions.
+  * If messages are returned, remaining tool calls are skipped and
+  * these messages are added to the context before the next LLM call.
+  *
+  * Use this for "steering" the agent while it's working.
+  */
+ getSteeringMessages?: () => Promise<AgentMessage[]>;
+ /**
+  * Returns follow-up messages to process after the agent would otherwise stop.
+  *
+  * Called when the agent has no more tool calls and no steering messages.
+  * If messages are returned, they're added to the context and the agent
+  * continues with another turn.
+  *
+  * Use this for follow-up messages that should wait until the agent finishes.
   */
- getQueuedMessages?: () => Promise<AgentMessage[]>;
+ getFollowUpMessages?: () => Promise<AgentMessage[]>;
  }
  /**
  * Thinking/reasoning level for models that support it.
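For code that drives the loop directly instead of going through Agent, the same split appears as two optional callbacks on AgentLoopConfig, replacing getQueuedMessages. A sketch of wiring them to simple in-memory queues; the import specifiers assume the package root re-exports agentLoop and its types, and the queues, prompt text, and empty tool list are illustrative:

```typescript
import { getModel, type Message } from "@mariozechner/pi-ai";
import {
	agentLoop,
	type AgentContext,
	type AgentLoopConfig,
	type AgentMessage,
} from "@mariozechner/pi-agent-core";

// Illustrative in-memory queues that a UI layer would push into.
const steeringQueue: AgentMessage[] = [];
const followUpQueue: AgentMessage[] = [];

const config: AgentLoopConfig = {
	model: getModel("google", "gemini-2.5-flash-lite-preview-06-17"),
	// Mirror the package default: keep only messages the LLM understands.
	convertToLlm: (messages) =>
		messages.filter((m): m is Message => m.role === "user" || m.role === "assistant" || m.role === "toolResult"),
	// Checked after each tool execution; returning messages skips the remaining
	// tool calls and injects these before the next LLM call.
	getSteeringMessages: async () => steeringQueue.splice(0),
	// Checked only when there are no more tool calls and no steering messages;
	// returning messages triggers another turn instead of stopping.
	getFollowUpMessages: async () => followUpQueue.splice(0),
};

const context: AgentContext = { systemPrompt: "You are a helpful assistant.", messages: [], tools: [] };
const userPrompt: AgentMessage = {
	role: "user",
	content: [{ type: "text", text: "List the TODOs in this project." }],
	timestamp: Date.now(),
};

for await (const event of agentLoop([userPrompt], context, config)) {
	if (event.type === "message_end") console.log(event.message);
}
```

The Agent class wires these callbacks to its internal steering and follow-up queues according to steeringMode / followUpMode, so most applications only need the steer() / followUp() methods shown earlier.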
package/dist/types.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACX,qBAAqB,EACrB,YAAY,EACZ,OAAO,EACP,KAAK,EACL,mBAAmB,EACnB,YAAY,EACZ,WAAW,EACX,IAAI,EACJ,iBAAiB,EACjB,MAAM,qBAAqB,CAAC;AAC7B,OAAO,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAEzD,2EAA2E;AAC3E,MAAM,MAAM,QAAQ,GAAG,CACtB,GAAG,IAAI,EAAE,UAAU,CAAC,OAAO,YAAY,CAAC,KACpC,UAAU,CAAC,OAAO,YAAY,CAAC,GAAG,OAAO,CAAC,UAAU,CAAC,OAAO,YAAY,CAAC,CAAC,CAAC;AAEhF;;GAEG;AACH,MAAM,WAAW,eAAgB,SAAQ,mBAAmB;IAC3D,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC;IAElB;;;;;;;;;;;;;;;;;;;;;;OAsBG;IACH,YAAY,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,KAAK,OAAO,EAAE,GAAG,OAAO,CAAC,OAAO,EAAE,CAAC,CAAC;IAE3E;;;;;;;;;;;;;;;;OAgBG;IACH,gBAAgB,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,EAAE,MAAM,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;IAE/F;;;;;OAKG;IACH,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;IAEnF;;;;;OAKG;IACH,iBAAiB,CAAC,EAAE,MAAM,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;CAClD;AAED;;;GAGG;AACH,MAAM,MAAM,aAAa,GAAG,KAAK,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAEpF;;;;;;;;;;;;;GAaG;AACH,MAAM,WAAW,mBAAmB;CAEnC;AAED;;;;GAIG;AACH,MAAM,MAAM,YAAY,GAAG,OAAO,GAAG,mBAAmB,CAAC,MAAM,mBAAmB,CAAC,CAAC;AAEpF;;GAEG;AACH,MAAM,WAAW,UAAU;IAC1B,YAAY,EAAE,MAAM,CAAC;IACrB,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC;IAClB,aAAa,EAAE,aAAa,CAAC;IAC7B,KAAK,EAAE,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC;IACxB,QAAQ,EAAE,YAAY,EAAE,CAAC;IACzB,WAAW,EAAE,OAAO,CAAC;IACrB,aAAa,EAAE,YAAY,GAAG,IAAI,CAAC;IACnC,gBAAgB,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC;IAC9B,KAAK,CAAC,EAAE,MAAM,CAAC;CACf;AAED,MAAM,WAAW,eAAe,CAAC,CAAC;IAEjC,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IAExC,OAAO,EAAE,CAAC,CAAC;CACX;AAGD,MAAM,MAAM,uBAAuB,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,aAAa,EAAE,eAAe,CAAC,CAAC,CAAC,KAAK,IAAI,CAAC;AAG3F,MAAM,WAAW,SAAS,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO,EAAE,QAAQ,GAAG,GAAG,CAAE,SAAQ,IAAI,CAAC,WAAW,CAAC;IAE1G,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,CACR,UAAU,EAAE,MAAM,EAClB,MAAM,EAAE,MAAM,CAAC,WAAW,CAAC,EAC3B,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,uBAAuB,CAAC,QAAQ,CAAC,KACxC,OAAO,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC;CACxC;AAGD,MAAM,WAAW,YAAY;IAC5B,YAAY,EAAE,MAAM,CAAC;IACrB,QAAQ,EAAE,YAAY,EAAE,CAAC;IACzB,KAAK,CAAC,EAAE,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC;CACzB;AAED;;;GAGG;AACH,MAAM,MAAM,UAAU,GAEnB;IAAE,IAAI,EAAE,aAAa,CAAA;CAAE,GACvB;IAAE,IAAI,EAAE,WAAW,CAAC;IAAC,QAAQ,EAAE,YAAY,EAAE,CAAA;CAAE,GAE/C;IAAE,IAAI,EAAE,YAAY,CAAA;CAAE,GACtB;IAAE,IAAI,EAAE,UAAU,CAAC;IAAC,OAAO,EAAE,YAAY,CAAC;IAAC,WAAW,EAAE,iBAAiB,EAAE,CAAA;CAAE,GAE7E;IAAE,IAAI,EAAE,eAAe,CAAC;IAAC,OAAO,EAAE,YAAY,CAAA;CAAE,GAEhD;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,OAAO,EAAE,YAAY,CAAC;IAAC,qBAAqB,EAAE,qBAAqB,CAAA;CAAE,GAC/F;IAAE,IAAI,EAAE,aAAa,CAAC;IAAC,OAAO,EAAE,YAAY,CAAA;CAAE,GAE9C;IAAE,IAAI,EAAE,sBAAsB,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,GAAG,CAAA;CAAE,GACjF;IAAE,IAAI,EAAE,uBAAuB,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,GAAG,CAAC;IAAC,aAAa,EAAE,GAAG,CAAA;CAAE,GACtG;IAAE,IAAI,EAAE,oBAAoB,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,GAAG,CAAC;IAAC,OAAO,EAAE,OAAO,CAAA;CAAE,CAAC","sourcesContent":["import type {\n\tAssistantMessageEvent,\n\tImageContent,\n\tMessage,\n\tModel,\n\tSimpleStreamOptions,\n\tstreamSimple,\n\tTextContent,\n\tTool,\n\tToolResultMessage,\n} from \"@mariozechner/pi-ai\";\nimport type { Static, TSchema } from \"@sinclair/typebox\";\n\n/** Stream function - can return sync or Promise for async config lookup */\nexport type StreamFn = (\n\t...args: Parameters<typeof streamSimple>\n) => 
ReturnType<typeof streamSimple> | Promise<ReturnType<typeof streamSimple>>;\n\n/**\n * Configuration for the agent loop.\n */\nexport interface AgentLoopConfig extends SimpleStreamOptions {\n\tmodel: Model<any>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t *\n\t * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage\n\t * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications,\n\t * status messages) should be filtered out.\n\t *\n\t * @example\n\t * ```typescript\n\t * convertToLlm: (messages) => messages.flatMap(m => {\n\t * if (m.role === \"hookMessage\") {\n\t * // Convert custom message to user message\n\t * return [{ role: \"user\", content: m.content, timestamp: m.timestamp }];\n\t * }\n\t * if (m.role === \"notification\") {\n\t * // Filter out UI-only messages\n\t * return [];\n\t * }\n\t * // Pass through standard LLM messages\n\t * return [m];\n\t * })\n\t * ```\n\t */\n\tconvertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to the context before `convertToLlm`.\n\t *\n\t * Use this for operations that work at the AgentMessage level:\n\t * - Context window management (pruning old messages)\n\t * - Injecting context from external sources\n\t *\n\t * @example\n\t * ```typescript\n\t * transformContext: async (messages) => {\n\t * if (estimateTokens(messages) > MAX_TOKENS) {\n\t * return pruneOldMessages(messages);\n\t * }\n\t * return messages;\n\t * }\n\t * ```\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t *\n\t * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire\n\t * during long-running tool execution phases.\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\n\t/**\n\t * Returns queued messages to inject into the conversation.\n\t *\n\t * Called after each turn to check for user interruptions or injected messages.\n\t * If messages are returned, they're added to the context before the next LLM call.\n\t */\n\tgetQueuedMessages?: () => Promise<AgentMessage[]>;\n}\n\n/**\n * Thinking/reasoning level for models that support it.\n * Note: \"xhigh\" is only supported by OpenAI gpt-5.1-codex-max, gpt-5.2, and gpt-5.2-codex models.\n */\nexport type ThinkingLevel = \"off\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/**\n * Extensible interface for custom app messages.\n * Apps can extend via declaration merging:\n *\n * @example\n * ```typescript\n * declare module \"@mariozechner/agent\" {\n * interface CustomAgentMessages {\n * artifact: ArtifactMessage;\n * notification: NotificationMessage;\n * }\n * }\n * ```\n */\nexport interface CustomAgentMessages {\n\t// Empty by default - apps extend via declaration merging\n}\n\n/**\n * AgentMessage: Union of LLM messages + custom messages.\n * This abstraction allows apps to add custom message types while maintaining\n * type safety and compatibility with the base LLM messages.\n */\nexport type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];\n\n/**\n * Agent state containing all configuration and conversation data.\n */\nexport interface AgentState {\n\tsystemPrompt: string;\n\tmodel: Model<any>;\n\tthinkingLevel: ThinkingLevel;\n\ttools: AgentTool<any>[];\n\tmessages: AgentMessage[]; // Can include 
attachments + custom message types\n\tisStreaming: boolean;\n\tstreamMessage: AgentMessage | null;\n\tpendingToolCalls: Set<string>;\n\terror?: string;\n}\n\nexport interface AgentToolResult<T> {\n\t// Content blocks supporting text and images\n\tcontent: (TextContent | ImageContent)[];\n\t// Details to be displayed in a UI or logged\n\tdetails: T;\n}\n\n// Callback for streaming tool execution updates\nexport type AgentToolUpdateCallback<T = any> = (partialResult: AgentToolResult<T>) => void;\n\n// AgentTool extends Tool but adds the execute function\nexport interface AgentTool<TParameters extends TSchema = TSchema, TDetails = any> extends Tool<TParameters> {\n\t// A human-readable label for the tool to be displayed in UI\n\tlabel: string;\n\texecute: (\n\t\ttoolCallId: string,\n\t\tparams: Static<TParameters>,\n\t\tsignal?: AbortSignal,\n\t\tonUpdate?: AgentToolUpdateCallback<TDetails>,\n\t) => Promise<AgentToolResult<TDetails>>;\n}\n\n// AgentContext is like Context but uses AgentTool\nexport interface AgentContext {\n\tsystemPrompt: string;\n\tmessages: AgentMessage[];\n\ttools?: AgentTool<any>[];\n}\n\n/**\n * Events emitted by the Agent for UI updates.\n * These events provide fine-grained lifecycle information for messages, turns, and tool executions.\n */\nexport type AgentEvent =\n\t// Agent lifecycle\n\t| { type: \"agent_start\" }\n\t| { type: \"agent_end\"; messages: AgentMessage[] }\n\t// Turn lifecycle - a turn is one assistant response + any tool calls/results\n\t| { type: \"turn_start\" }\n\t| { type: \"turn_end\"; message: AgentMessage; toolResults: ToolResultMessage[] }\n\t// Message lifecycle - emitted for user, assistant, and toolResult messages\n\t| { type: \"message_start\"; message: AgentMessage }\n\t// Only emitted for assistant messages during streaming\n\t| { type: \"message_update\"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent }\n\t| { type: \"message_end\"; message: AgentMessage }\n\t// Tool execution lifecycle\n\t| { type: \"tool_execution_start\"; toolCallId: string; toolName: string; args: any }\n\t| { type: \"tool_execution_update\"; toolCallId: string; toolName: string; args: any; partialResult: any }\n\t| { type: \"tool_execution_end\"; toolCallId: string; toolName: string; result: any; isError: boolean };\n"]}
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACX,qBAAqB,EACrB,YAAY,EACZ,OAAO,EACP,KAAK,EACL,mBAAmB,EACnB,YAAY,EACZ,WAAW,EACX,IAAI,EACJ,iBAAiB,EACjB,MAAM,qBAAqB,CAAC;AAC7B,OAAO,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAEzD,2EAA2E;AAC3E,MAAM,MAAM,QAAQ,GAAG,CACtB,GAAG,IAAI,EAAE,UAAU,CAAC,OAAO,YAAY,CAAC,KACpC,UAAU,CAAC,OAAO,YAAY,CAAC,GAAG,OAAO,CAAC,UAAU,CAAC,OAAO,YAAY,CAAC,CAAC,CAAC;AAEhF;;GAEG;AACH,MAAM,WAAW,eAAgB,SAAQ,mBAAmB;IAC3D,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC;IAElB;;;;;;;;;;;;;;;;;;;;;;OAsBG;IACH,YAAY,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,KAAK,OAAO,EAAE,GAAG,OAAO,CAAC,OAAO,EAAE,CAAC,CAAC;IAE3E;;;;;;;;;;;;;;;;OAgBG;IACH,gBAAgB,CAAC,EAAE,CAAC,QAAQ,EAAE,YAAY,EAAE,EAAE,MAAM,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;IAE/F;;;;;OAKG;IACH,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,GAAG,SAAS,CAAC,GAAG,MAAM,GAAG,SAAS,CAAC;IAEnF;;;;;;;;OAQG;IACH,mBAAmB,CAAC,EAAE,MAAM,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;IAEpD;;;;;;;;OAQG;IACH,mBAAmB,CAAC,EAAE,MAAM,OAAO,CAAC,YAAY,EAAE,CAAC,CAAC;CACpD;AAED;;;GAGG;AACH,MAAM,MAAM,aAAa,GAAG,KAAK,GAAG,SAAS,GAAG,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,OAAO,CAAC;AAEpF;;;;;;;;;;;;;GAaG;AACH,MAAM,WAAW,mBAAmB;CAEnC;AAED;;;;GAIG;AACH,MAAM,MAAM,YAAY,GAAG,OAAO,GAAG,mBAAmB,CAAC,MAAM,mBAAmB,CAAC,CAAC;AAEpF;;GAEG;AACH,MAAM,WAAW,UAAU;IAC1B,YAAY,EAAE,MAAM,CAAC;IACrB,KAAK,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC;IAClB,aAAa,EAAE,aAAa,CAAC;IAC7B,KAAK,EAAE,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC;IACxB,QAAQ,EAAE,YAAY,EAAE,CAAC;IACzB,WAAW,EAAE,OAAO,CAAC;IACrB,aAAa,EAAE,YAAY,GAAG,IAAI,CAAC;IACnC,gBAAgB,EAAE,GAAG,CAAC,MAAM,CAAC,CAAC;IAC9B,KAAK,CAAC,EAAE,MAAM,CAAC;CACf;AAED,MAAM,WAAW,eAAe,CAAC,CAAC;IAEjC,OAAO,EAAE,CAAC,WAAW,GAAG,YAAY,CAAC,EAAE,CAAC;IAExC,OAAO,EAAE,CAAC,CAAC;CACX;AAGD,MAAM,MAAM,uBAAuB,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,aAAa,EAAE,eAAe,CAAC,CAAC,CAAC,KAAK,IAAI,CAAC;AAG3F,MAAM,WAAW,SAAS,CAAC,WAAW,SAAS,OAAO,GAAG,OAAO,EAAE,QAAQ,GAAG,GAAG,CAAE,SAAQ,IAAI,CAAC,WAAW,CAAC;IAE1G,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,CACR,UAAU,EAAE,MAAM,EAClB,MAAM,EAAE,MAAM,CAAC,WAAW,CAAC,EAC3B,MAAM,CAAC,EAAE,WAAW,EACpB,QAAQ,CAAC,EAAE,uBAAuB,CAAC,QAAQ,CAAC,KACxC,OAAO,CAAC,eAAe,CAAC,QAAQ,CAAC,CAAC,CAAC;CACxC;AAGD,MAAM,WAAW,YAAY;IAC5B,YAAY,EAAE,MAAM,CAAC;IACrB,QAAQ,EAAE,YAAY,EAAE,CAAC;IACzB,KAAK,CAAC,EAAE,SAAS,CAAC,GAAG,CAAC,EAAE,CAAC;CACzB;AAED;;;GAGG;AACH,MAAM,MAAM,UAAU,GAEnB;IAAE,IAAI,EAAE,aAAa,CAAA;CAAE,GACvB;IAAE,IAAI,EAAE,WAAW,CAAC;IAAC,QAAQ,EAAE,YAAY,EAAE,CAAA;CAAE,GAE/C;IAAE,IAAI,EAAE,YAAY,CAAA;CAAE,GACtB;IAAE,IAAI,EAAE,UAAU,CAAC;IAAC,OAAO,EAAE,YAAY,CAAC;IAAC,WAAW,EAAE,iBAAiB,EAAE,CAAA;CAAE,GAE7E;IAAE,IAAI,EAAE,eAAe,CAAC;IAAC,OAAO,EAAE,YAAY,CAAA;CAAE,GAEhD;IAAE,IAAI,EAAE,gBAAgB,CAAC;IAAC,OAAO,EAAE,YAAY,CAAC;IAAC,qBAAqB,EAAE,qBAAqB,CAAA;CAAE,GAC/F;IAAE,IAAI,EAAE,aAAa,CAAC;IAAC,OAAO,EAAE,YAAY,CAAA;CAAE,GAE9C;IAAE,IAAI,EAAE,sBAAsB,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,GAAG,CAAA;CAAE,GACjF;IAAE,IAAI,EAAE,uBAAuB,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,GAAG,CAAC;IAAC,aAAa,EAAE,GAAG,CAAA;CAAE,GACtG;IAAE,IAAI,EAAE,oBAAoB,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,GAAG,CAAC;IAAC,OAAO,EAAE,OAAO,CAAA;CAAE,CAAC","sourcesContent":["import type {\n\tAssistantMessageEvent,\n\tImageContent,\n\tMessage,\n\tModel,\n\tSimpleStreamOptions,\n\tstreamSimple,\n\tTextContent,\n\tTool,\n\tToolResultMessage,\n} from \"@mariozechner/pi-ai\";\nimport type { Static, TSchema } from \"@sinclair/typebox\";\n\n/** Stream function - can return sync or Promise for async config lookup */\nexport 
type StreamFn = (\n\t...args: Parameters<typeof streamSimple>\n) => ReturnType<typeof streamSimple> | Promise<ReturnType<typeof streamSimple>>;\n\n/**\n * Configuration for the agent loop.\n */\nexport interface AgentLoopConfig extends SimpleStreamOptions {\n\tmodel: Model<any>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t *\n\t * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage\n\t * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications,\n\t * status messages) should be filtered out.\n\t *\n\t * @example\n\t * ```typescript\n\t * convertToLlm: (messages) => messages.flatMap(m => {\n\t * if (m.role === \"hookMessage\") {\n\t * // Convert custom message to user message\n\t * return [{ role: \"user\", content: m.content, timestamp: m.timestamp }];\n\t * }\n\t * if (m.role === \"notification\") {\n\t * // Filter out UI-only messages\n\t * return [];\n\t * }\n\t * // Pass through standard LLM messages\n\t * return [m];\n\t * })\n\t * ```\n\t */\n\tconvertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to the context before `convertToLlm`.\n\t *\n\t * Use this for operations that work at the AgentMessage level:\n\t * - Context window management (pruning old messages)\n\t * - Injecting context from external sources\n\t *\n\t * @example\n\t * ```typescript\n\t * transformContext: async (messages) => {\n\t * if (estimateTokens(messages) > MAX_TOKENS) {\n\t * return pruneOldMessages(messages);\n\t * }\n\t * return messages;\n\t * }\n\t * ```\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t *\n\t * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire\n\t * during long-running tool execution phases.\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\n\t/**\n\t * Returns steering messages to inject into the conversation mid-run.\n\t *\n\t * Called after each tool execution to check for user interruptions.\n\t * If messages are returned, remaining tool calls are skipped and\n\t * these messages are added to the context before the next LLM call.\n\t *\n\t * Use this for \"steering\" the agent while it's working.\n\t */\n\tgetSteeringMessages?: () => Promise<AgentMessage[]>;\n\n\t/**\n\t * Returns follow-up messages to process after the agent would otherwise stop.\n\t *\n\t * Called when the agent has no more tool calls and no steering messages.\n\t * If messages are returned, they're added to the context and the agent\n\t * continues with another turn.\n\t *\n\t * Use this for follow-up messages that should wait until the agent finishes.\n\t */\n\tgetFollowUpMessages?: () => Promise<AgentMessage[]>;\n}\n\n/**\n * Thinking/reasoning level for models that support it.\n * Note: \"xhigh\" is only supported by OpenAI gpt-5.1-codex-max, gpt-5.2, and gpt-5.2-codex models.\n */\nexport type ThinkingLevel = \"off\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/**\n * Extensible interface for custom app messages.\n * Apps can extend via declaration merging:\n *\n * @example\n * ```typescript\n * declare module \"@mariozechner/agent\" {\n * interface CustomAgentMessages {\n * artifact: ArtifactMessage;\n * notification: NotificationMessage;\n * }\n * }\n * ```\n */\nexport interface CustomAgentMessages {\n\t// 
Empty by default - apps extend via declaration merging\n}\n\n/**\n * AgentMessage: Union of LLM messages + custom messages.\n * This abstraction allows apps to add custom message types while maintaining\n * type safety and compatibility with the base LLM messages.\n */\nexport type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];\n\n/**\n * Agent state containing all configuration and conversation data.\n */\nexport interface AgentState {\n\tsystemPrompt: string;\n\tmodel: Model<any>;\n\tthinkingLevel: ThinkingLevel;\n\ttools: AgentTool<any>[];\n\tmessages: AgentMessage[]; // Can include attachments + custom message types\n\tisStreaming: boolean;\n\tstreamMessage: AgentMessage | null;\n\tpendingToolCalls: Set<string>;\n\terror?: string;\n}\n\nexport interface AgentToolResult<T> {\n\t// Content blocks supporting text and images\n\tcontent: (TextContent | ImageContent)[];\n\t// Details to be displayed in a UI or logged\n\tdetails: T;\n}\n\n// Callback for streaming tool execution updates\nexport type AgentToolUpdateCallback<T = any> = (partialResult: AgentToolResult<T>) => void;\n\n// AgentTool extends Tool but adds the execute function\nexport interface AgentTool<TParameters extends TSchema = TSchema, TDetails = any> extends Tool<TParameters> {\n\t// A human-readable label for the tool to be displayed in UI\n\tlabel: string;\n\texecute: (\n\t\ttoolCallId: string,\n\t\tparams: Static<TParameters>,\n\t\tsignal?: AbortSignal,\n\t\tonUpdate?: AgentToolUpdateCallback<TDetails>,\n\t) => Promise<AgentToolResult<TDetails>>;\n}\n\n// AgentContext is like Context but uses AgentTool\nexport interface AgentContext {\n\tsystemPrompt: string;\n\tmessages: AgentMessage[];\n\ttools?: AgentTool<any>[];\n}\n\n/**\n * Events emitted by the Agent for UI updates.\n * These events provide fine-grained lifecycle information for messages, turns, and tool executions.\n */\nexport type AgentEvent =\n\t// Agent lifecycle\n\t| { type: \"agent_start\" }\n\t| { type: \"agent_end\"; messages: AgentMessage[] }\n\t// Turn lifecycle - a turn is one assistant response + any tool calls/results\n\t| { type: \"turn_start\" }\n\t| { type: \"turn_end\"; message: AgentMessage; toolResults: ToolResultMessage[] }\n\t// Message lifecycle - emitted for user, assistant, and toolResult messages\n\t| { type: \"message_start\"; message: AgentMessage }\n\t// Only emitted for assistant messages during streaming\n\t| { type: \"message_update\"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent }\n\t| { type: \"message_end\"; message: AgentMessage }\n\t// Tool execution lifecycle\n\t| { type: \"tool_execution_start\"; toolCallId: string; toolName: string; args: any }\n\t| { type: \"tool_execution_update\"; toolCallId: string; toolName: string; args: any; partialResult: any }\n\t| { type: \"tool_execution_end\"; toolCallId: string; toolName: string; result: any; isError: boolean };\n"]}
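The 0.32.0 `AgentLoopConfig` above adds `getSteeringMessages` (polled after each tool execution; returned messages skip the remaining tool calls) and `getFollowUpMessages` (polled once the agent has no more tool calls), replacing the former `getQueuedMessages` hook, as the types.js.map diff below shows. The sketch that follows is an assumption-laden illustration of how a host application might wire these hooks: the re-export of these types from the package index, the queue names, and the exact `role` literals used for filtering are not taken from this diff.

```typescript
// Illustrative sketch only. Assumes AgentLoopConfig and AgentMessage are
// re-exported from the package index, and that the host app supplies its own
// Model value and message queues; none of that is shown in this diff.
import type { AgentLoopConfig, AgentMessage } from "@mariozechner/pi-agent-core";
import type { Message, Model } from "@mariozechner/pi-ai";

declare const model: Model<any>; // supplied by the host application

// Hypothetical queues the host UI fills while the agent is running.
const steeringQueue: AgentMessage[] = [];
const followUpQueue: AgentMessage[] = [];

const config: AgentLoopConfig = {
	model,
	// Keep standard LLM messages, drop custom/UI-only AgentMessages.
	// (The role literals here are an assumption about the pi-ai Message union.)
	convertToLlm: (messages) =>
		messages.flatMap((m) =>
			m.role === "user" || m.role === "assistant" || m.role === "toolResult" ? [m as Message] : [],
		),
	// Polled after each tool execution; non-empty results skip remaining tool calls.
	getSteeringMessages: async () => steeringQueue.splice(0),
	// Polled when the agent would otherwise stop; non-empty results start another turn.
	getFollowUpMessages: async () => followUpQueue.splice(0),
};
```

Returning an empty array from either hook should leave that turn's behavior unchanged, per the doc comments above.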
package/dist/types.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"","sourcesContent":["import type {\n\tAssistantMessageEvent,\n\tImageContent,\n\tMessage,\n\tModel,\n\tSimpleStreamOptions,\n\tstreamSimple,\n\tTextContent,\n\tTool,\n\tToolResultMessage,\n} from \"@mariozechner/pi-ai\";\nimport type { Static, TSchema } from \"@sinclair/typebox\";\n\n/** Stream function - can return sync or Promise for async config lookup */\nexport type StreamFn = (\n\t...args: Parameters<typeof streamSimple>\n) => ReturnType<typeof streamSimple> | Promise<ReturnType<typeof streamSimple>>;\n\n/**\n * Configuration for the agent loop.\n */\nexport interface AgentLoopConfig extends SimpleStreamOptions {\n\tmodel: Model<any>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t *\n\t * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage\n\t * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications,\n\t * status messages) should be filtered out.\n\t *\n\t * @example\n\t * ```typescript\n\t * convertToLlm: (messages) => messages.flatMap(m => {\n\t * if (m.role === \"hookMessage\") {\n\t * // Convert custom message to user message\n\t * return [{ role: \"user\", content: m.content, timestamp: m.timestamp }];\n\t * }\n\t * if (m.role === \"notification\") {\n\t * // Filter out UI-only messages\n\t * return [];\n\t * }\n\t * // Pass through standard LLM messages\n\t * return [m];\n\t * })\n\t * ```\n\t */\n\tconvertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to the context before `convertToLlm`.\n\t *\n\t * Use this for operations that work at the AgentMessage level:\n\t * - Context window management (pruning old messages)\n\t * - Injecting context from external sources\n\t *\n\t * @example\n\t * ```typescript\n\t * transformContext: async (messages) => {\n\t * if (estimateTokens(messages) > MAX_TOKENS) {\n\t * return pruneOldMessages(messages);\n\t * }\n\t * return messages;\n\t * }\n\t * ```\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t *\n\t * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire\n\t * during long-running tool execution phases.\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\n\t/**\n\t * Returns queued messages to inject into the conversation.\n\t *\n\t * Called after each turn to check for user interruptions or injected messages.\n\t * If messages are returned, they're added to the context before the next LLM call.\n\t */\n\tgetQueuedMessages?: () => Promise<AgentMessage[]>;\n}\n\n/**\n * Thinking/reasoning level for models that support it.\n * Note: \"xhigh\" is only supported by OpenAI gpt-5.1-codex-max, gpt-5.2, and gpt-5.2-codex models.\n */\nexport type ThinkingLevel = \"off\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/**\n * Extensible interface for custom app messages.\n * Apps can extend via declaration merging:\n *\n * @example\n * ```typescript\n * declare module \"@mariozechner/agent\" {\n * interface CustomAgentMessages {\n * artifact: ArtifactMessage;\n * notification: NotificationMessage;\n * }\n * }\n * ```\n */\nexport interface CustomAgentMessages {\n\t// Empty by default - apps extend via declaration merging\n}\n\n/**\n * AgentMessage: 
Union of LLM messages + custom messages.\n * This abstraction allows apps to add custom message types while maintaining\n * type safety and compatibility with the base LLM messages.\n */\nexport type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];\n\n/**\n * Agent state containing all configuration and conversation data.\n */\nexport interface AgentState {\n\tsystemPrompt: string;\n\tmodel: Model<any>;\n\tthinkingLevel: ThinkingLevel;\n\ttools: AgentTool<any>[];\n\tmessages: AgentMessage[]; // Can include attachments + custom message types\n\tisStreaming: boolean;\n\tstreamMessage: AgentMessage | null;\n\tpendingToolCalls: Set<string>;\n\terror?: string;\n}\n\nexport interface AgentToolResult<T> {\n\t// Content blocks supporting text and images\n\tcontent: (TextContent | ImageContent)[];\n\t// Details to be displayed in a UI or logged\n\tdetails: T;\n}\n\n// Callback for streaming tool execution updates\nexport type AgentToolUpdateCallback<T = any> = (partialResult: AgentToolResult<T>) => void;\n\n// AgentTool extends Tool but adds the execute function\nexport interface AgentTool<TParameters extends TSchema = TSchema, TDetails = any> extends Tool<TParameters> {\n\t// A human-readable label for the tool to be displayed in UI\n\tlabel: string;\n\texecute: (\n\t\ttoolCallId: string,\n\t\tparams: Static<TParameters>,\n\t\tsignal?: AbortSignal,\n\t\tonUpdate?: AgentToolUpdateCallback<TDetails>,\n\t) => Promise<AgentToolResult<TDetails>>;\n}\n\n// AgentContext is like Context but uses AgentTool\nexport interface AgentContext {\n\tsystemPrompt: string;\n\tmessages: AgentMessage[];\n\ttools?: AgentTool<any>[];\n}\n\n/**\n * Events emitted by the Agent for UI updates.\n * These events provide fine-grained lifecycle information for messages, turns, and tool executions.\n */\nexport type AgentEvent =\n\t// Agent lifecycle\n\t| { type: \"agent_start\" }\n\t| { type: \"agent_end\"; messages: AgentMessage[] }\n\t// Turn lifecycle - a turn is one assistant response + any tool calls/results\n\t| { type: \"turn_start\" }\n\t| { type: \"turn_end\"; message: AgentMessage; toolResults: ToolResultMessage[] }\n\t// Message lifecycle - emitted for user, assistant, and toolResult messages\n\t| { type: \"message_start\"; message: AgentMessage }\n\t// Only emitted for assistant messages during streaming\n\t| { type: \"message_update\"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent }\n\t| { type: \"message_end\"; message: AgentMessage }\n\t// Tool execution lifecycle\n\t| { type: \"tool_execution_start\"; toolCallId: string; toolName: string; args: any }\n\t| { type: \"tool_execution_update\"; toolCallId: string; toolName: string; args: any; partialResult: any }\n\t| { type: \"tool_execution_end\"; toolCallId: string; toolName: string; result: any; isError: boolean };\n"]}
+ {"version":3,"file":"types.js","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"","sourcesContent":["import type {\n\tAssistantMessageEvent,\n\tImageContent,\n\tMessage,\n\tModel,\n\tSimpleStreamOptions,\n\tstreamSimple,\n\tTextContent,\n\tTool,\n\tToolResultMessage,\n} from \"@mariozechner/pi-ai\";\nimport type { Static, TSchema } from \"@sinclair/typebox\";\n\n/** Stream function - can return sync or Promise for async config lookup */\nexport type StreamFn = (\n\t...args: Parameters<typeof streamSimple>\n) => ReturnType<typeof streamSimple> | Promise<ReturnType<typeof streamSimple>>;\n\n/**\n * Configuration for the agent loop.\n */\nexport interface AgentLoopConfig extends SimpleStreamOptions {\n\tmodel: Model<any>;\n\n\t/**\n\t * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call.\n\t *\n\t * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage\n\t * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications,\n\t * status messages) should be filtered out.\n\t *\n\t * @example\n\t * ```typescript\n\t * convertToLlm: (messages) => messages.flatMap(m => {\n\t * if (m.role === \"hookMessage\") {\n\t * // Convert custom message to user message\n\t * return [{ role: \"user\", content: m.content, timestamp: m.timestamp }];\n\t * }\n\t * if (m.role === \"notification\") {\n\t * // Filter out UI-only messages\n\t * return [];\n\t * }\n\t * // Pass through standard LLM messages\n\t * return [m];\n\t * })\n\t * ```\n\t */\n\tconvertToLlm: (messages: AgentMessage[]) => Message[] | Promise<Message[]>;\n\n\t/**\n\t * Optional transform applied to the context before `convertToLlm`.\n\t *\n\t * Use this for operations that work at the AgentMessage level:\n\t * - Context window management (pruning old messages)\n\t * - Injecting context from external sources\n\t *\n\t * @example\n\t * ```typescript\n\t * transformContext: async (messages) => {\n\t * if (estimateTokens(messages) > MAX_TOKENS) {\n\t * return pruneOldMessages(messages);\n\t * }\n\t * return messages;\n\t * }\n\t * ```\n\t */\n\ttransformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise<AgentMessage[]>;\n\n\t/**\n\t * Resolves an API key dynamically for each LLM call.\n\t *\n\t * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire\n\t * during long-running tool execution phases.\n\t */\n\tgetApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;\n\n\t/**\n\t * Returns steering messages to inject into the conversation mid-run.\n\t *\n\t * Called after each tool execution to check for user interruptions.\n\t * If messages are returned, remaining tool calls are skipped and\n\t * these messages are added to the context before the next LLM call.\n\t *\n\t * Use this for \"steering\" the agent while it's working.\n\t */\n\tgetSteeringMessages?: () => Promise<AgentMessage[]>;\n\n\t/**\n\t * Returns follow-up messages to process after the agent would otherwise stop.\n\t *\n\t * Called when the agent has no more tool calls and no steering messages.\n\t * If messages are returned, they're added to the context and the agent\n\t * continues with another turn.\n\t *\n\t * Use this for follow-up messages that should wait until the agent finishes.\n\t */\n\tgetFollowUpMessages?: () => Promise<AgentMessage[]>;\n}\n\n/**\n * Thinking/reasoning level for models that support it.\n * Note: \"xhigh\" is only supported by OpenAI gpt-5.1-codex-max, gpt-5.2, and gpt-5.2-codex 
models.\n */\nexport type ThinkingLevel = \"off\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\";\n\n/**\n * Extensible interface for custom app messages.\n * Apps can extend via declaration merging:\n *\n * @example\n * ```typescript\n * declare module \"@mariozechner/agent\" {\n * interface CustomAgentMessages {\n * artifact: ArtifactMessage;\n * notification: NotificationMessage;\n * }\n * }\n * ```\n */\nexport interface CustomAgentMessages {\n\t// Empty by default - apps extend via declaration merging\n}\n\n/**\n * AgentMessage: Union of LLM messages + custom messages.\n * This abstraction allows apps to add custom message types while maintaining\n * type safety and compatibility with the base LLM messages.\n */\nexport type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages];\n\n/**\n * Agent state containing all configuration and conversation data.\n */\nexport interface AgentState {\n\tsystemPrompt: string;\n\tmodel: Model<any>;\n\tthinkingLevel: ThinkingLevel;\n\ttools: AgentTool<any>[];\n\tmessages: AgentMessage[]; // Can include attachments + custom message types\n\tisStreaming: boolean;\n\tstreamMessage: AgentMessage | null;\n\tpendingToolCalls: Set<string>;\n\terror?: string;\n}\n\nexport interface AgentToolResult<T> {\n\t// Content blocks supporting text and images\n\tcontent: (TextContent | ImageContent)[];\n\t// Details to be displayed in a UI or logged\n\tdetails: T;\n}\n\n// Callback for streaming tool execution updates\nexport type AgentToolUpdateCallback<T = any> = (partialResult: AgentToolResult<T>) => void;\n\n// AgentTool extends Tool but adds the execute function\nexport interface AgentTool<TParameters extends TSchema = TSchema, TDetails = any> extends Tool<TParameters> {\n\t// A human-readable label for the tool to be displayed in UI\n\tlabel: string;\n\texecute: (\n\t\ttoolCallId: string,\n\t\tparams: Static<TParameters>,\n\t\tsignal?: AbortSignal,\n\t\tonUpdate?: AgentToolUpdateCallback<TDetails>,\n\t) => Promise<AgentToolResult<TDetails>>;\n}\n\n// AgentContext is like Context but uses AgentTool\nexport interface AgentContext {\n\tsystemPrompt: string;\n\tmessages: AgentMessage[];\n\ttools?: AgentTool<any>[];\n}\n\n/**\n * Events emitted by the Agent for UI updates.\n * These events provide fine-grained lifecycle information for messages, turns, and tool executions.\n */\nexport type AgentEvent =\n\t// Agent lifecycle\n\t| { type: \"agent_start\" }\n\t| { type: \"agent_end\"; messages: AgentMessage[] }\n\t// Turn lifecycle - a turn is one assistant response + any tool calls/results\n\t| { type: \"turn_start\" }\n\t| { type: \"turn_end\"; message: AgentMessage; toolResults: ToolResultMessage[] }\n\t// Message lifecycle - emitted for user, assistant, and toolResult messages\n\t| { type: \"message_start\"; message: AgentMessage }\n\t// Only emitted for assistant messages during streaming\n\t| { type: \"message_update\"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent }\n\t| { type: \"message_end\"; message: AgentMessage }\n\t// Tool execution lifecycle\n\t| { type: \"tool_execution_start\"; toolCallId: string; toolName: string; args: any }\n\t| { type: \"tool_execution_update\"; toolCallId: string; toolName: string; args: any; partialResult: any }\n\t| { type: \"tool_execution_end\"; toolCallId: string; toolName: string; result: any; isError: boolean };\n"]}
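The `AgentEvent` union at the bottom of `types.ts` is unchanged between the two sides of this diff and enumerates every lifecycle notification a UI can subscribe to. As a rough sketch (it presumes `AgentEvent` is exported from the package index, which this diff does not show), a renderer can narrow on `event.type`:

```typescript
// Rough sketch of consuming AgentEvent in a host UI; assumes the type is
// exported from the package index (not shown in this diff).
import type { AgentEvent } from "@mariozechner/pi-agent-core";

function renderEvent(event: AgentEvent): void {
	switch (event.type) {
		case "agent_start":
			console.log("agent run started");
			break;
		case "message_update":
			// Only assistant messages stream incremental updates.
			console.log("assistant delta", event.assistantMessageEvent);
			break;
		case "tool_execution_start":
			console.log(`tool ${event.toolName} (${event.toolCallId}) called with`, event.args);
			break;
		case "tool_execution_update":
			console.log(`tool ${event.toolName} progress`, event.partialResult);
			break;
		case "tool_execution_end":
			console.log(`tool ${event.toolName} ${event.isError ? "failed" : "finished"}`, event.result);
			break;
		case "turn_end":
			console.log("turn produced", event.toolResults.length, "tool results");
			break;
		case "agent_end":
			console.log("run finished with", event.messages.length, "new messages");
			break;
		default:
			// turn_start, message_start, message_end are not rendered in this sketch.
			break;
	}
}
```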
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@mariozechner/pi-agent-core",
- "version": "0.31.0",
+ "version": "0.32.0",
  "description": "General-purpose agent with transport abstraction, state management, and attachment support",
  "type": "module",
  "main": "./dist/index.js",
@@ -17,8 +17,8 @@
  "prepublishOnly": "npm run clean && npm run build"
  },
  "dependencies": {
- "@mariozechner/pi-ai": "^0.31.0",
- "@mariozechner/pi-tui": "^0.31.0"
+ "@mariozechner/pi-ai": "^0.32.0",
+ "@mariozechner/pi-tui": "^0.32.0"
  },
  "keywords": [
  "ai",