@rudderjs/ai 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +461 -0
- package/boost/guidelines.md +150 -0
- package/dist/agent.d.ts +74 -0
- package/dist/agent.d.ts.map +1 -0
- package/dist/agent.js +1070 -0
- package/dist/agent.js.map +1 -0
- package/dist/attachment.d.ts +35 -0
- package/dist/attachment.d.ts.map +1 -0
- package/dist/attachment.js +121 -0
- package/dist/attachment.js.map +1 -0
- package/dist/audio.d.ts +33 -0
- package/dist/audio.d.ts.map +1 -0
- package/dist/audio.js +76 -0
- package/dist/audio.js.map +1 -0
- package/dist/cached-embedding.d.ts +14 -0
- package/dist/cached-embedding.d.ts.map +1 -0
- package/dist/cached-embedding.js +44 -0
- package/dist/cached-embedding.js.map +1 -0
- package/dist/conversation.d.ts +16 -0
- package/dist/conversation.d.ts.map +1 -0
- package/dist/conversation.js +53 -0
- package/dist/conversation.js.map +1 -0
- package/dist/facade.d.ts +53 -0
- package/dist/facade.d.ts.map +1 -0
- package/dist/facade.js +100 -0
- package/dist/facade.js.map +1 -0
- package/dist/fake.d.ts +55 -0
- package/dist/fake.d.ts.map +1 -0
- package/dist/fake.js +172 -0
- package/dist/fake.js.map +1 -0
- package/dist/image.d.ts +27 -0
- package/dist/image.d.ts.map +1 -0
- package/dist/image.js +90 -0
- package/dist/image.js.map +1 -0
- package/dist/index.d.ts +30 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +45 -0
- package/dist/index.js.map +1 -0
- package/dist/middleware.d.ts +18 -0
- package/dist/middleware.d.ts.map +1 -0
- package/dist/middleware.js +72 -0
- package/dist/middleware.js.map +1 -0
- package/dist/output.d.ts +22 -0
- package/dist/output.d.ts.map +1 -0
- package/dist/output.js +55 -0
- package/dist/output.js.map +1 -0
- package/dist/provider-tools.d.ts +60 -0
- package/dist/provider-tools.d.ts.map +1 -0
- package/dist/provider-tools.js +133 -0
- package/dist/provider-tools.js.map +1 -0
- package/dist/provider.d.ts +12 -0
- package/dist/provider.d.ts.map +1 -0
- package/dist/provider.js +94 -0
- package/dist/provider.js.map +1 -0
- package/dist/providers/anthropic.d.ts +12 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +221 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/azure.d.ts +13 -0
- package/dist/providers/azure.d.ts.map +1 -0
- package/dist/providers/azure.js +15 -0
- package/dist/providers/azure.js.map +1 -0
- package/dist/providers/deepseek.d.ts +12 -0
- package/dist/providers/deepseek.d.ts.map +1 -0
- package/dist/providers/deepseek.js +15 -0
- package/dist/providers/deepseek.js.map +1 -0
- package/dist/providers/google.d.ts +13 -0
- package/dist/providers/google.d.ts.map +1 -0
- package/dist/providers/google.js +293 -0
- package/dist/providers/google.js.map +1 -0
- package/dist/providers/groq.d.ts +12 -0
- package/dist/providers/groq.d.ts.map +1 -0
- package/dist/providers/groq.js +15 -0
- package/dist/providers/groq.js.map +1 -0
- package/dist/providers/mistral.d.ts +13 -0
- package/dist/providers/mistral.d.ts.map +1 -0
- package/dist/providers/mistral.js +46 -0
- package/dist/providers/mistral.js.map +1 -0
- package/dist/providers/ollama.d.ts +11 -0
- package/dist/providers/ollama.d.ts.map +1 -0
- package/dist/providers/ollama.js +15 -0
- package/dist/providers/ollama.js.map +1 -0
- package/dist/providers/openai.d.ts +26 -0
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/providers/openai.js +374 -0
- package/dist/providers/openai.js.map +1 -0
- package/dist/providers/xai.d.ts +12 -0
- package/dist/providers/xai.d.ts.map +1 -0
- package/dist/providers/xai.js +15 -0
- package/dist/providers/xai.js.map +1 -0
- package/dist/queue-job.d.ts +35 -0
- package/dist/queue-job.d.ts.map +1 -0
- package/dist/queue-job.js +82 -0
- package/dist/queue-job.js.map +1 -0
- package/dist/registry.d.ts +25 -0
- package/dist/registry.d.ts.map +1 -0
- package/dist/registry.js +54 -0
- package/dist/registry.js.map +1 -0
- package/dist/tool.d.ts +157 -0
- package/dist/tool.d.ts.map +1 -0
- package/dist/tool.js +134 -0
- package/dist/tool.js.map +1 -0
- package/dist/transcription.d.ts +28 -0
- package/dist/transcription.d.ts.map +1 -0
- package/dist/transcription.js +63 -0
- package/dist/transcription.js.map +1 -0
- package/dist/types.d.ts +439 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -0
- package/dist/vercel-protocol.d.ts +18 -0
- package/dist/vercel-protocol.d.ts.map +1 -0
- package/dist/vercel-protocol.js +75 -0
- package/dist/vercel-protocol.js.map +1 -0
- package/dist/zod-to-json-schema.d.ts +8 -0
- package/dist/zod-to-json-schema.d.ts.map +1 -0
- package/dist/zod-to-json-schema.js +86 -0
- package/dist/zod-to-json-schema.js.map +1 -0
- package/package.json +45 -0
package/dist/agent.js
ADDED
|
@@ -0,0 +1,1070 @@
|
|
|
1
|
+
import { AiRegistry } from './registry.js';
|
|
2
|
+
import { isPauseForClientToolsChunk, toolToSchema } from './tool.js';
|
|
3
|
+
import { attachmentsToContentParts, getMessageText } from './attachment.js';
|
|
4
|
+
import { QueuedPromptBuilder } from './queue-job.js';
|
|
5
|
+
import { runOnConfig, runOnChunk, runOnBeforeToolCall, runOnAfterToolCall, runSequential, runOnUsage, runOnAbort, runOnError, } from './middleware.js';
|
|
6
|
+
// ─── Stop Condition Combinators ──────────────────────────
|
|
7
|
+
/**
 * Stop condition: halt the agent loop once `n` steps have completed.
 *
 * @param {number} n - Step count threshold.
 * @returns A predicate over `{ steps }` that is true when `steps.length >= n`.
 */
export function stepCountIs(n) {
    return (state) => {
        const { steps } = state;
        return steps.length >= n;
    };
}
|
|
11
|
+
/**
 * Stop condition: halt when the latest step contains a call to `toolName`.
 *
 * @param {string} toolName - Name of the tool to look for.
 * @returns A predicate over `{ steps }`; false when there are no steps yet.
 */
export function hasToolCall(toolName) {
    return ({ steps }) => {
        const latest = steps[steps.length - 1];
        if (latest == null) {
            return false;
        }
        return latest.toolCalls.some(call => call.name === toolName);
    };
}
|
|
18
|
+
// ─── Agent Base Class ────────────────────────────────────
|
|
19
|
+
/**
 * Base class for agents. Subclasses provide instructions/tools; the
 * methods below are overridable configuration hooks plus the run API
 * (prompt/stream/queue) and conversation-persistence entry points.
 */
export class Agent {
    /**
     * Model string (e.g. 'anthropic/claude-sonnet-4-5').
     * `undefined` means "use the registry default".
     */
    model() {
        return undefined;
    }
    /** Failover provider/model strings tried when the primary model errors. */
    failover() {
        return [];
    }
    /** Maximum iterations for the tool loop (default: 20). */
    maxSteps() {
        return 20;
    }
    /** Stop conditions — combine with an array for OR logic. Defaults to the step cap. */
    stopWhen() {
        return stepCountIs(this.maxSteps());
    }
    /** Sampling temperature (0-1); `undefined` leaves the provider default. */
    temperature() {
        return undefined;
    }
    /** Max tokens for the response; `undefined` leaves the provider default. */
    maxTokens() {
        return undefined;
    }
    /** Run the agent with a prompt (non-streaming). */
    async prompt(input, options) {
        return runAgentLoop(this, input, options);
    }
    /** Run the agent with a prompt (streaming). */
    stream(input, options) {
        return runAgentLoopStreaming(this, input, options);
    }
    /** Queue the prompt for background execution. */
    queue(input, options) {
        return new QueuedPromptBuilder(this, input, options);
    }
    /** Set the user scope for conversation persistence. */
    forUser(userId) {
        const conversable = new ConversableAgent(this);
        return conversable.forUser(userId);
    }
    /** Continue an existing conversation. */
    continue(conversationId) {
        const conversable = new ConversableAgent(this);
        return conversable.continue(conversationId);
    }
}
|
|
55
|
+
// ─── Conversable Agent (conversation persistence) ───────
|
|
56
|
+
/**
 * Wraps an Agent to add conversation memory.
 * Created via `agent.forUser(id)` or `agent.continue(id)`.
 *
 * Before each run it loads prior messages from the registered
 * ConversationStore (or creates a new conversation), and afterwards
 * appends the new user/assistant/tool messages.
 */
export class ConversableAgent {
    /** The wrapped agent that actually runs the model loop. */
    agent;
    /** Optional user scope recorded on newly created conversations. */
    _userId;
    /** Target conversation id; created lazily on first prompt/stream when unset. */
    _conversationId;
    constructor(agent) {
        this.agent = agent;
    }
    /** Set the user scope for conversation persistence. Returns `this` for chaining. */
    forUser(userId) {
        this._userId = userId;
        return this;
    }
    /** Continue an existing conversation. Returns `this` for chaining. */
    continue(conversationId) {
        this._conversationId = conversationId;
        return this;
    }
    /**
     * Builds the messages to persist for one exchange: the user input
     * followed by each step's assistant message and its tool results
     * (results are stringified unless already strings).
     * Shared by prompt() and stream() — previously duplicated in both.
     */
    #transcript(input, response) {
        return [
            { role: 'user', content: input },
            ...response.steps.flatMap(s => {
                const msgs = [s.message];
                for (const tr of s.toolResults) {
                    const resultStr = typeof tr.result === 'string' ? tr.result : JSON.stringify(tr.result);
                    msgs.push({ role: 'tool', content: resultStr, toolCallId: tr.toolCallId });
                }
                return msgs;
            }),
        ];
    }
    /**
     * Loads the existing conversation's history, or creates a new
     * conversation (scoped to `_userId` when set) and returns [].
     * Sets `_conversationId` as a side effect on the create path.
     */
    async #loadOrCreate(store) {
        if (this._conversationId) {
            return store.load(this._conversationId);
        }
        const meta = this._userId ? { userId: this._userId } : undefined;
        this._conversationId = await store.create(undefined, meta);
        return [];
    }
    /**
     * Run the wrapped agent (non-streaming) with conversation memory.
     * @throws {Error} when no ConversationStore is registered.
     */
    async prompt(input, options) {
        const store = resolveConversationStore();
        if (!store)
            throw new Error('[RudderJS AI] No ConversationStore registered. Register one via the DI container with key "ai.conversations".');
        // Load or create conversation, then prepend any caller-supplied history.
        const prior = await this.#loadOrCreate(store);
        const history = [...prior, ...(options?.history ?? [])];
        const response = await runAgentLoop(this.agent, input, { ...options, history });
        // Persist messages
        await store.append(this._conversationId, this.#transcript(input, response));
        return { text: response.text, steps: response.steps, usage: response.usage, conversationId: this._conversationId };
    }
    /**
     * Run the wrapped agent (streaming) with conversation memory.
     * Returns `{ stream, response }`; the response promise resolves after
     * the stream completes and the transcript is persisted.
     * @throws {Error} when no ConversationStore is registered.
     */
    stream(input, options) {
        const store = resolveConversationStore();
        if (!store)
            throw new Error('[RudderJS AI] No ConversationStore registered. Register one via the DI container with key "ai.conversations".');
        // Kick off the store round-trip eagerly so it overlaps with the
        // caller attaching to the stream. (The previous version also built
        // an unused `ready` promise alongside this — removed as dead code.)
        const setupPromise = this.#loadOrCreate(store);
        let resolveResponse;
        const responsePromise = new Promise(r => { resolveResponse = r; });
        const self = this; // eslint-disable-line @typescript-eslint/no-this-alias
        async function* generateStream() {
            const loadedHistory = await setupPromise;
            const convId = self._conversationId;
            const history = [...loadedHistory, ...(options?.history ?? [])];
            const inner = runAgentLoopStreaming(self.agent, input, { ...options, history });
            for await (const chunk of inner.stream) {
                yield chunk;
            }
            const response = await inner.response;
            // Persist messages
            await store.append(convId, self.#transcript(input, response));
            const result = { text: response.text, steps: response.steps, usage: response.usage, conversationId: convId };
            resolveResponse(result);
        }
        return { stream: generateStream(), response: responsePromise };
    }
}
|
|
156
|
+
// ─── Anonymous Agent ─────────────────────────────────────
|
|
157
|
+
/**
 * Inline agent created by the `agent()` factory: configuration is
 * captured from an options object instead of subclass overrides.
 */
class AnonymousAgent extends Agent {
    _instructions;
    _tools;
    _model;
    _middleware;
    constructor(options) {
        super();
        const { instructions, tools, model, middleware } = options;
        this._instructions = instructions;
        this._tools = tools ?? [];
        this._model = model;
        this._middleware = middleware ?? [];
    }
    /** System instructions captured at construction time. */
    instructions() {
        return this._instructions;
    }
    /** Model string from options (undefined → registry default). */
    model() {
        return this._model;
    }
    /** Tools from options (defaults to none). */
    tools() {
        return this._tools;
    }
    /** Middleware from options (defaults to none). */
    middleware() {
        return this._middleware;
    }
}
|
|
174
|
+
/**
 * Create an anonymous agent inline, from either a bare instructions
 * string or a full options object.
 *
 * @example
 * const response = await agent('You are helpful.').prompt('Hello')
 *
 * @example
 * const response = await agent({
 *   instructions: 'You are a search assistant.',
 *   tools: [searchTool],
 *   model: 'anthropic/claude-sonnet-4-5',
 * }).prompt('Find users named John')
 */
export function agent(instructionsOrOptions) {
    if (typeof instructionsOrOptions === 'string') {
        return new AnonymousAgent({ instructions: instructionsOrOptions });
    }
    return new AnonymousAgent(instructionsOrOptions);
}
|
|
193
|
+
// ─── Helpers ─────────────────────────────────────────────
|
|
194
|
+
// ─── Conversation Store Registry ────────────────────────
|
|
195
|
+
// Module-private holder for the process-wide conversation store.
let registeredConversationStore;
/** Set the global conversation store (called by a service provider or manually). */
export function setConversationStore(store) {
    registeredConversationStore = store;
}
/** Returns the registered store, or undefined when none has been set. */
function resolveConversationStore() {
    return registeredConversationStore;
}
|
|
203
|
+
// ─── Helpers ─────────────────────────────────────────────
|
|
204
|
+
/** Returns the agent's tools when it exposes a `tools()` method; otherwise []. */
function getTools(a) {
    const hasToolsFn = 'tools' in a && typeof a.tools === 'function';
    if (!hasToolsFn) {
        return [];
    }
    return a.tools();
}
|
|
209
|
+
/** Returns the agent's middleware when it exposes a `middleware()` method; otherwise []. */
function getMiddleware(a) {
    const provided = 'middleware' in a && typeof a.middleware === 'function';
    return provided ? a.middleware() : [];
}
|
|
214
|
+
/**
 * Builds the per-request middleware context: identifiers, the live
 * message list, and an abort channel whose state is only readable via
 * the internal `_aborted` / `_abortReason` getters.
 */
function createMiddlewareContext(messages, model, tools, iteration) {
    const [provider] = AiRegistry.parseModelString(model);
    // Abort state lives in a closure-held object; middleware can only
    // flip it through abort(), the loop reads it via the getters.
    const abortState = { aborted: false, reason: '' };
    const requestId = (typeof crypto !== 'undefined' && crypto.randomUUID)
        ? crypto.randomUUID()
        : `req-${Date.now()}`;
    return {
        requestId,
        iteration,
        chunkIndex: 0,
        messages,
        model,
        provider,
        toolNames: tools.map(t => t.definition.name),
        abort(reason) {
            abortState.aborted = true;
            abortState.reason = reason ?? 'Aborted by middleware';
        },
        get _aborted() { return abortState.aborted; },
        get _abortReason() { return abortState.reason; },
    };
}
|
|
234
|
+
/**
 * Builds the user chat message. Plain text when there are no
 * attachments; otherwise a content-part array (text part first,
 * followed by the attachment parts).
 */
function buildUserMessage(input, attachments) {
    const hasAttachments = Boolean(attachments?.length);
    if (!hasAttachments) {
        return { role: 'user', content: input };
    }
    const content = [{ type: 'text', text: input }];
    content.push(...attachmentsToContentParts(attachments));
    return { role: 'user', content };
}
|
|
243
|
+
function buildToolSchemas(tools) {
|
|
244
|
+
return tools.filter(t => !t.definition.lazy).map(toolToSchema);
|
|
245
|
+
}
|
|
246
|
+
/** Indexes tools by their definition name (later duplicates win). */
function buildToolMap(tools) {
    return new Map(tools.map(t => [t.definition.name, t]));
}
|
|
252
|
+
/** Accumulates one step's token usage into the running total (mutates `total`). */
function addUsage(total, step) {
    const { promptTokens, completionTokens, totalTokens } = step;
    total.promptTokens += promptTokens;
    total.completionTokens += completionTokens;
    total.totalTokens += totalTokens;
}
|
|
257
|
+
/**
 * Builds the config object handed to onConfig middleware: the message
 * list plus temperature/maxTokens, each included only when defined.
 */
function buildMiddlewareConfig(messages, a) {
    const config = { messages };
    const temperature = a.temperature();
    if (temperature !== undefined) {
        config.temperature = temperature;
    }
    const maxTokens = a.maxTokens();
    if (maxTokens !== undefined) {
        config.maxTokens = maxTokens;
    }
    return config;
}
|
|
267
|
+
// ─── Agent Loop (non-streaming) ──────────────────────────
|
|
268
|
+
/**
 * Core non-streaming agent loop.
 *
 * Builds the message list (system prompt + history + user message, or a
 * caller-supplied `options.messages` override), then repeatedly calls the
 * model through the registry — with failover across `a.failover()` — and
 * executes requested server tools between model calls. The loop ends when
 * a stop condition fires, the model's finishReason is not 'tool_calls',
 * the step cap is hit, middleware aborts, or the run pauses for client
 * tools / tool approval.
 *
 * @param a       Agent-like object supplying model/tools/middleware config.
 * @param input   User prompt text.
 * @param options Optional history, attachments, messages override,
 *                toolCallStreamingMode, and whatever `resumePendingToolCalls`
 *                / `evaluateApproval` read from it (defined elsewhere in
 *                this module — not visible here).
 * @returns `{ text, steps, usage }` plus, when set: `finishReason`,
 *          `pendingClientToolCalls`, `pendingApprovalToolCall`,
 *          `resumedToolMessages`.
 */
async function runAgentLoop(a, input, options) {
    const modelString = a.model() ?? AiRegistry.getDefault();
    const tools = getTools(a);
    const middlewares = getMiddleware(a);
    const toolSchemas = buildToolSchemas(tools);
    const toolMap = buildToolMap(tools);
    // `options.messages` replaces history + user message entirely; only the
    // system prompt is prepended in that case.
    const messages = options?.messages
        ? [{ role: 'system', content: a.instructions() }, ...options.messages]
        : [
            { role: 'system', content: a.instructions() },
            ...(options?.history ?? []),
            buildUserMessage(input, options?.attachments),
        ];
    const steps = [];
    const stopConditions = normalizeStopConditions(a.stopWhen());
    const totalUsage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
    // State for client-tool-stopping and approval-stopping
    const pendingClientToolCalls = [];
    let pendingApprovalToolCall;
    let loopFinishReason;
    let stopForClientTools = false;
    let stopForApproval = false;
    let resumedToolMessages = []; // eslint-disable-line no-useless-assignment
    // Resume server tools left pending by a previous approval round-trip.
    // (Must run before middleware context creation since `messages` may grow.)
    {
        const resume = await resumePendingToolCalls({ messages, toolMap, options });
        resumedToolMessages = resume.resumed;
        if (resume.approvalStillRequired) {
            pendingApprovalToolCall = resume.approvalStillRequired;
            loopFinishReason = 'tool_approval_required';
            stopForApproval = true;
        }
    }
    // Create middleware context
    const ctx = createMiddlewareContext(messages, modelString, tools, 0);
    // onConfig — init phase (middleware may rewrite the message list in place)
    if (middlewares.length > 0) {
        const configResult = runOnConfig(middlewares, ctx, buildMiddlewareConfig(messages, a), 'init');
        if (configResult.messages)
            messages.splice(0, messages.length, ...configResult.messages);
    }
    // onStart
    if (middlewares.length > 0)
        await runSequential(middlewares, 'onStart', ctx);
    try {
        if (stopForApproval) {
            // Approval is still required from the resume — skip the model loop.
        }
        else {
            for (let iteration = 0; iteration < a.maxSteps(); iteration++) {
                ctx.iteration = iteration;
                // Check if middleware aborted
                if (ctx._aborted) {
                    await runOnAbort(middlewares, ctx, ctx._abortReason);
                    break;
                }
                // onIteration
                if (middlewares.length > 0)
                    await runSequential(middlewares, 'onIteration', ctx);
                let currentModel = modelString;
                const currentToolSchemas = toolSchemas;
                // prepareStep hook — may swap the model, the message list, or the system prompt
                if (a.prepareStep) {
                    const prep = await a.prepareStep({ stepNumber: iteration, steps, messages });
                    if (prep.model)
                        currentModel = prep.model;
                    if (prep.messages)
                        messages.splice(0, messages.length, ...prep.messages);
                    if (prep.system)
                        messages[0] = { role: 'system', content: prep.system };
                }
                // onConfig — beforeModel phase
                if (middlewares.length > 0) {
                    const configResult = runOnConfig(middlewares, ctx, buildMiddlewareConfig(messages, a), 'beforeModel');
                    if (configResult.messages)
                        messages.splice(0, messages.length, ...configResult.messages);
                }
                // Failover: try the current model first, then each distinct
                // fallback; rethrow only when the last candidate also fails.
                const failoverModels = [currentModel, ...a.failover().filter(m => m !== currentModel)];
                let response;
                let lastError;
                for (const tryModel of failoverModels) {
                    try {
                        const adapter = AiRegistry.resolve(tryModel);
                        const [, modelId] = AiRegistry.parseModelString(tryModel);
                        const reqOptions = {
                            model: modelId,
                            messages,
                            tools: currentToolSchemas.length > 0 ? currentToolSchemas : undefined,
                            temperature: a.temperature(),
                            maxTokens: a.maxTokens(),
                        };
                        response = await adapter.generate(reqOptions);
                        break;
                    }
                    catch (err) {
                        lastError = err instanceof Error ? err : new Error(String(err));
                        if (tryModel === failoverModels[failoverModels.length - 1])
                            throw lastError;
                    }
                }
                if (!response)
                    throw lastError ?? new Error('No provider available');
                addUsage(totalUsage, response.usage);
                // onUsage
                if (middlewares.length > 0)
                    await runOnUsage(middlewares, ctx, response.usage);
                const toolCalls = response.message.toolCalls ?? [];
                const toolResults = [];
                if (toolCalls.length > 0) {
                    messages.push(response.message);
                    for (const tc of toolCalls) {
                        const tool = toolMap.get(tc.name);
                        if (!tool) {
                            toolResults.push({ toolCallId: tc.id, result: `Error: Unknown tool "${tc.name}"` });
                            messages.push({ role: 'tool', content: `Error: Unknown tool "${tc.name}"`, toolCallId: tc.id });
                            continue;
                        }
                        if (!tool.execute) {
                            // Client tool — no server-side handler.
                            if (options?.toolCallStreamingMode === 'stop-on-client-tool') {
                                pendingClientToolCalls.push(tc);
                                loopFinishReason = 'client_tool_calls';
                                stopForClientTools = true;
                                continue;
                            }
                            toolResults.push({ toolCallId: tc.id, result: '[client tool — execute on client]' });
                            messages.push({ role: 'tool', content: '[client tool — execute on client]', toolCallId: tc.id });
                            continue;
                        }
                        // needsApproval enforcement
                        const approvalDecision = await evaluateApproval(tool, tc, options);
                        if (approvalDecision === 'rejected') {
                            const rejectionResult = { rejected: true, reason: 'User rejected this tool call' };
                            toolResults.push({ toolCallId: tc.id, result: rejectionResult });
                            messages.push({ role: 'tool', content: JSON.stringify(rejectionResult), toolCallId: tc.id });
                            continue;
                        }
                        if (approvalDecision === 'pending') {
                            pendingApprovalToolCall = { toolCall: tc, isClientTool: false };
                            loopFinishReason = 'tool_approval_required';
                            stopForApproval = true;
                            break;
                        }
                        // onBeforeToolCall — may skip the call, abort the run, or rewrite args
                        let toolArgs = tc.arguments;
                        if (middlewares.length > 0) {
                            const beforeResult = await runOnBeforeToolCall(middlewares, ctx, tc.name, toolArgs);
                            if (beforeResult) {
                                if (beforeResult.type === 'skip') {
                                    const resultStr = typeof beforeResult.result === 'string' ? beforeResult.result : JSON.stringify(beforeResult.result);
                                    toolResults.push({ toolCallId: tc.id, result: beforeResult.result });
                                    messages.push({ role: 'tool', content: resultStr, toolCallId: tc.id });
                                    await runOnAfterToolCall(middlewares, ctx, tc.name, toolArgs, beforeResult.result);
                                    continue;
                                }
                                if (beforeResult.type === 'abort') {
                                    await runOnAbort(middlewares, ctx, beforeResult.reason);
                                    break;
                                }
                                if (beforeResult.type === 'transformArgs') {
                                    toolArgs = beforeResult.args;
                                }
                            }
                        }
                        try {
                            // Drain generator yields silently in the non-streaming loop —
                            // the same tool definition must work in both prompt() and stream().
                            // Exception: a `pause_for_client_tools` control chunk yield
                            // halts iteration, propagates the nested calls to the parent's
                            // pending list, and skips tool_result recording (see tool.ts
                            // `pauseForClientTools` for rationale).
                            const execGen = executeMaybeStreaming(tool, toolArgs, { toolCallId: tc.id });
                            let result;
                            let paused = false;
                            while (true) {
                                const step = await execGen.next();
                                if (step.done) {
                                    result = step.value;
                                    break;
                                }
                                if (isPauseForClientToolsChunk(step.value)) {
                                    for (const pending of step.value.toolCalls) {
                                        pendingClientToolCalls.push(pending);
                                    }
                                    loopFinishReason = 'client_tool_calls';
                                    stopForClientTools = true;
                                    paused = true;
                                    break;
                                }
                                // Plain tool-update yields are silently dropped in the
                                // non-streaming loop — only the final return value matters.
                            }
                            if (paused)
                                continue; // skip toolResults + message push for this tc
                            // toolResults preserves the ORIGINAL value; only the tool message
                            // pushed onto `messages` (what the next model step sees) is
                            // narrowed by toModelOutput.
                            toolResults.push({ toolCallId: tc.id, result });
                            const resultStr = await applyToModelOutput(tool, result, middlewares.length > 0 ? (e) => runOnError(middlewares, ctx, e) : undefined);
                            messages.push({ role: 'tool', content: resultStr, toolCallId: tc.id });
                            // onAfterToolCall
                            if (middlewares.length > 0)
                                await runOnAfterToolCall(middlewares, ctx, tc.name, toolArgs, result);
                        }
                        catch (err) {
                            // Tool failures are fed back to the model as an error string
                            // rather than aborting the run.
                            const msg = err instanceof Error ? err.message : String(err);
                            toolResults.push({ toolCallId: tc.id, result: `Error: ${msg}` });
                            messages.push({ role: 'tool', content: `Error: ${msg}`, toolCallId: tc.id });
                            // onAfterToolCall (error case)
                            if (middlewares.length > 0)
                                await runOnAfterToolCall(middlewares, ctx, tc.name, toolArgs, `Error: ${msg}`);
                        }
                    }
                    // onToolPhaseComplete
                    if (middlewares.length > 0)
                        await runSequential(middlewares, 'onToolPhaseComplete', ctx);
                }
                else {
                    messages.push(response.message);
                }
                const step = {
                    message: response.message,
                    toolCalls,
                    toolResults,
                    usage: response.usage,
                    finishReason: response.finishReason,
                };
                steps.push(step);
                if (stopForClientTools || stopForApproval)
                    break;
                // Stop when any stop condition fires, or when the model did not
                // request more tool calls.
                const shouldStop = stopConditions.some(cond => cond({ steps, iteration, lastMessage: response.message }));
                if (shouldStop || response.finishReason !== 'tool_calls') {
                    break;
                }
            }
        } // close `else` (skip-loop-when-resume-needs-approval)
    }
    catch (err) {
        // onError — middleware is notified, then the error propagates to the caller.
        if (middlewares.length > 0)
            await runOnError(middlewares, ctx, err);
        throw err;
    }
    // onFinish
    if (middlewares.length > 0)
        await runSequential(middlewares, 'onFinish', ctx);
    const lastStep = steps[steps.length - 1];
    const result = {
        text: lastStep ? getMessageText(lastStep.message.content) : '',
        steps,
        usage: totalUsage,
    };
    // Optional fields are attached only when populated.
    if (loopFinishReason)
        result.finishReason = loopFinishReason;
    if (pendingClientToolCalls.length > 0)
        result.pendingClientToolCalls = pendingClientToolCalls;
    if (pendingApprovalToolCall)
        result.pendingApprovalToolCall = pendingApprovalToolCall;
    if (resumedToolMessages.length > 0)
        result.resumedToolMessages = resumedToolMessages;
    return result;
}
|
|
531
|
+
// ─── Agent Loop (streaming) ──────────────────────────────
|
|
532
|
+
function runAgentLoopStreaming(a, input, options) {
|
|
533
|
+
let resolveResponse;
|
|
534
|
+
const responsePromise = new Promise((resolve) => { resolveResponse = resolve; });
|
|
535
|
+
async function* generateStream() {
|
|
536
|
+
const modelString = a.model() ?? AiRegistry.getDefault();
|
|
537
|
+
const tools = getTools(a);
|
|
538
|
+
const middlewares = getMiddleware(a);
|
|
539
|
+
const toolSchemas = buildToolSchemas(tools);
|
|
540
|
+
const toolMap = buildToolMap(tools);
|
|
541
|
+
const messages = options?.messages
|
|
542
|
+
? [{ role: 'system', content: a.instructions() }, ...options.messages]
|
|
543
|
+
: [
|
|
544
|
+
{ role: 'system', content: a.instructions() },
|
|
545
|
+
...(options?.history ?? []),
|
|
546
|
+
buildUserMessage(input, options?.attachments),
|
|
547
|
+
];
|
|
548
|
+
const steps = [];
|
|
549
|
+
const stopConditions = normalizeStopConditions(a.stopWhen());
|
|
550
|
+
const totalUsage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
|
|
551
|
+
// State for client-tool-stopping and approval-stopping
|
|
552
|
+
const pendingClientToolCalls = [];
|
|
553
|
+
let pendingApprovalToolCall;
|
|
554
|
+
let loopFinishReason;
|
|
555
|
+
let stopForClientTools = false;
|
|
556
|
+
let stopForApproval = false;
|
|
557
|
+
let resumedToolMessages = []; // eslint-disable-line no-useless-assignment
|
|
558
|
+
// Resume server tools left pending by a previous approval round-trip.
|
|
559
|
+
{
|
|
560
|
+
const resume = await resumePendingToolCalls({ messages, toolMap, options });
|
|
561
|
+
resumedToolMessages = resume.resumed;
|
|
562
|
+
if (resume.approvalStillRequired) {
|
|
563
|
+
pendingApprovalToolCall = resume.approvalStillRequired;
|
|
564
|
+
loopFinishReason = 'tool_approval_required';
|
|
565
|
+
stopForApproval = true;
|
|
566
|
+
}
|
|
567
|
+
}
|
|
568
|
+
// Create middleware context
|
|
569
|
+
const ctx = createMiddlewareContext(messages, modelString, tools, 0);
|
|
570
|
+
// onConfig — init phase
|
|
571
|
+
if (middlewares.length > 0) {
|
|
572
|
+
const configResult = runOnConfig(middlewares, ctx, buildMiddlewareConfig(messages, a), 'init');
|
|
573
|
+
if (configResult.messages)
|
|
574
|
+
messages.splice(0, messages.length, ...configResult.messages);
|
|
575
|
+
}
|
|
576
|
+
// onStart
|
|
577
|
+
if (middlewares.length > 0)
|
|
578
|
+
await runSequential(middlewares, 'onStart', ctx);
|
|
579
|
+
try {
|
|
580
|
+
if (stopForApproval) {
|
|
581
|
+
// Resume detected unfulfilled approval — skip the model loop entirely.
|
|
582
|
+
}
|
|
583
|
+
else {
|
|
584
|
+
for (let iteration = 0; iteration < a.maxSteps(); iteration++) {
|
|
585
|
+
ctx.iteration = iteration;
|
|
586
|
+
ctx.chunkIndex = 0;
|
|
587
|
+
// Check if middleware aborted
|
|
588
|
+
if (ctx._aborted) {
|
|
589
|
+
await runOnAbort(middlewares, ctx, ctx._abortReason);
|
|
590
|
+
break;
|
|
591
|
+
}
|
|
592
|
+
// onIteration
|
|
593
|
+
if (middlewares.length > 0)
|
|
594
|
+
await runSequential(middlewares, 'onIteration', ctx);
|
|
595
|
+
let currentModel = modelString;
|
|
596
|
+
if (a.prepareStep) {
|
|
597
|
+
const prep = await a.prepareStep({ stepNumber: iteration, steps, messages });
|
|
598
|
+
if (prep.model)
|
|
599
|
+
currentModel = prep.model;
|
|
600
|
+
if (prep.messages)
|
|
601
|
+
messages.splice(0, messages.length, ...prep.messages);
|
|
602
|
+
if (prep.system)
|
|
603
|
+
messages[0] = { role: 'system', content: prep.system };
|
|
604
|
+
}
|
|
605
|
+
// onConfig — beforeModel phase
|
|
606
|
+
if (middlewares.length > 0) {
|
|
607
|
+
const configResult = runOnConfig(middlewares, ctx, buildMiddlewareConfig(messages, a), 'beforeModel');
|
|
608
|
+
if (configResult.messages)
|
|
609
|
+
messages.splice(0, messages.length, ...configResult.messages);
|
|
610
|
+
}
|
|
611
|
+
const failoverModels = [currentModel, ...a.failover().filter(m => m !== currentModel)];
|
|
612
|
+
let streamSource;
|
|
613
|
+
let lastError;
|
|
614
|
+
for (const tryModel of failoverModels) {
|
|
615
|
+
try {
|
|
616
|
+
const adapter = AiRegistry.resolve(tryModel);
|
|
617
|
+
const [, modelId] = AiRegistry.parseModelString(tryModel);
|
|
618
|
+
const opts = {
|
|
619
|
+
model: modelId,
|
|
620
|
+
messages,
|
|
621
|
+
tools: toolSchemas.length > 0 ? toolSchemas : undefined,
|
|
622
|
+
temperature: a.temperature(),
|
|
623
|
+
maxTokens: a.maxTokens(),
|
|
624
|
+
};
|
|
625
|
+
streamSource = adapter.stream(opts);
|
|
626
|
+
break;
|
|
627
|
+
}
|
|
628
|
+
catch (err) {
|
|
629
|
+
lastError = err instanceof Error ? err : new Error(String(err));
|
|
630
|
+
if (tryModel === failoverModels[failoverModels.length - 1])
|
|
631
|
+
throw lastError;
|
|
632
|
+
}
|
|
633
|
+
}
|
|
634
|
+
if (!streamSource)
|
|
635
|
+
throw lastError ?? new Error('No provider available');
|
|
636
|
+
let text = '';
|
|
637
|
+
let currentToolCalls = [];
|
|
638
|
+
let stepUsage = { promptTokens: 0, completionTokens: 0, totalTokens: 0 };
|
|
639
|
+
let finishReason = 'stop';
|
|
640
|
+
const partialToolCalls = new Map();
|
|
641
|
+
for await (const chunk of streamSource) {
|
|
642
|
+
// onChunk — middleware can transform or drop chunks
|
|
643
|
+
let processedChunk = chunk;
|
|
644
|
+
if (middlewares.length > 0) {
|
|
645
|
+
processedChunk = runOnChunk(middlewares, ctx, chunk);
|
|
646
|
+
ctx.chunkIndex++;
|
|
647
|
+
}
|
|
648
|
+
if (processedChunk)
|
|
649
|
+
yield processedChunk;
|
|
650
|
+
// Always process the original chunk for state tracking
|
|
651
|
+
if (chunk.type === 'text-delta' && chunk.text) {
|
|
652
|
+
text += chunk.text;
|
|
653
|
+
}
|
|
654
|
+
else if (chunk.type === 'tool-call-delta' && chunk.toolCall?.id) {
|
|
655
|
+
partialToolCalls.set(chunk.toolCall.id, {
|
|
656
|
+
id: chunk.toolCall.id,
|
|
657
|
+
name: chunk.toolCall.name ?? '',
|
|
658
|
+
argChunks: [],
|
|
659
|
+
});
|
|
660
|
+
}
|
|
661
|
+
else if (chunk.type === 'tool-call-delta' && chunk.text) {
|
|
662
|
+
// Accumulate argument JSON chunks to the last partial tool call
|
|
663
|
+
const last = Array.from(partialToolCalls.values()).pop();
|
|
664
|
+
if (last)
|
|
665
|
+
last.argChunks.push(chunk.text);
|
|
666
|
+
}
|
|
667
|
+
else if (chunk.type === 'tool-call' && chunk.toolCall) {
|
|
668
|
+
const tc = chunk.toolCall;
|
|
669
|
+
currentToolCalls.push(tc);
|
|
670
|
+
}
|
|
671
|
+
else if (chunk.type === 'usage' && chunk.usage) {
|
|
672
|
+
stepUsage = chunk.usage;
|
|
673
|
+
}
|
|
674
|
+
else if (chunk.type === 'finish') {
|
|
675
|
+
if (chunk.usage)
|
|
676
|
+
stepUsage = chunk.usage;
|
|
677
|
+
finishReason = chunk.finishReason ?? 'stop';
|
|
678
|
+
}
|
|
679
|
+
}
|
|
680
|
+
// Finalize partial tool calls
|
|
681
|
+
for (const [, partial] of partialToolCalls) {
|
|
682
|
+
try {
|
|
683
|
+
const args = JSON.parse(partial.argChunks.join(''));
|
|
684
|
+
currentToolCalls.push({ id: partial.id, name: partial.name, arguments: args });
|
|
685
|
+
}
|
|
686
|
+
catch {
|
|
687
|
+
currentToolCalls.push({ id: partial.id, name: partial.name, arguments: {} });
|
|
688
|
+
}
|
|
689
|
+
}
|
|
690
|
+
addUsage(totalUsage, stepUsage);
|
|
691
|
+
// onUsage
|
|
692
|
+
if (middlewares.length > 0)
|
|
693
|
+
await runOnUsage(middlewares, ctx, stepUsage);
|
|
694
|
+
const toolResults = [];
|
|
695
|
+
if (currentToolCalls.length > 0) {
|
|
696
|
+
const assistantMsg = { role: 'assistant', content: text, toolCalls: currentToolCalls };
|
|
697
|
+
messages.push(assistantMsg);
|
|
698
|
+
for (const tc of currentToolCalls) {
|
|
699
|
+
const tool = toolMap.get(tc.name);
|
|
700
|
+
if (!tool) {
|
|
701
|
+
const unknownResult = `Error: Unknown tool "${tc.name}"`;
|
|
702
|
+
toolResults.push({ toolCallId: tc.id, result: unknownResult });
|
|
703
|
+
messages.push({ role: 'tool', content: unknownResult, toolCallId: tc.id });
|
|
704
|
+
yield { type: 'tool-result', toolCall: tc, result: unknownResult };
|
|
705
|
+
continue;
|
|
706
|
+
}
|
|
707
|
+
if (!tool.execute) {
|
|
708
|
+
// Client tool — no server-side handler.
|
|
709
|
+
if (options?.toolCallStreamingMode === 'stop-on-client-tool') {
|
|
710
|
+
pendingClientToolCalls.push(tc);
|
|
711
|
+
loopFinishReason = 'client_tool_calls';
|
|
712
|
+
stopForClientTools = true;
|
|
713
|
+
yield { type: 'tool-call', toolCall: tc };
|
|
714
|
+
continue;
|
|
715
|
+
}
|
|
716
|
+
const placeholder = '[client tool — execute on client]';
|
|
717
|
+
toolResults.push({ toolCallId: tc.id, result: placeholder });
|
|
718
|
+
messages.push({ role: 'tool', content: placeholder, toolCallId: tc.id });
|
|
719
|
+
yield { type: 'tool-call', toolCall: tc };
|
|
720
|
+
yield { type: 'tool-result', toolCall: tc, result: placeholder };
|
|
721
|
+
continue;
|
|
722
|
+
}
|
|
723
|
+
// needsApproval enforcement
|
|
724
|
+
const approvalDecision = await evaluateApproval(tool, tc, options);
|
|
725
|
+
if (approvalDecision === 'rejected') {
|
|
726
|
+
const rejectionResult = { rejected: true, reason: 'User rejected this tool call' };
|
|
727
|
+
toolResults.push({ toolCallId: tc.id, result: rejectionResult });
|
|
728
|
+
messages.push({ role: 'tool', content: JSON.stringify(rejectionResult), toolCallId: tc.id });
|
|
729
|
+
yield { type: 'tool-result', toolCall: tc, result: rejectionResult };
|
|
730
|
+
continue;
|
|
731
|
+
}
|
|
732
|
+
if (approvalDecision === 'pending') {
|
|
733
|
+
pendingApprovalToolCall = { toolCall: tc, isClientTool: false };
|
|
734
|
+
loopFinishReason = 'tool_approval_required';
|
|
735
|
+
stopForApproval = true;
|
|
736
|
+
yield { type: 'tool-call', toolCall: tc };
|
|
737
|
+
break;
|
|
738
|
+
}
|
|
739
|
+
// onBeforeToolCall
|
|
740
|
+
let toolArgs = tc.arguments;
|
|
741
|
+
if (middlewares.length > 0) {
|
|
742
|
+
const beforeResult = await runOnBeforeToolCall(middlewares, ctx, tc.name, toolArgs);
|
|
743
|
+
if (beforeResult) {
|
|
744
|
+
if (beforeResult.type === 'skip') {
|
|
745
|
+
const resultStr = typeof beforeResult.result === 'string' ? beforeResult.result : JSON.stringify(beforeResult.result);
|
|
746
|
+
toolResults.push({ toolCallId: tc.id, result: beforeResult.result });
|
|
747
|
+
messages.push({ role: 'tool', content: resultStr, toolCallId: tc.id });
|
|
748
|
+
yield { type: 'tool-result', toolCall: tc, result: beforeResult.result };
|
|
749
|
+
await runOnAfterToolCall(middlewares, ctx, tc.name, toolArgs, beforeResult.result);
|
|
750
|
+
continue;
|
|
751
|
+
}
|
|
752
|
+
if (beforeResult.type === 'abort') {
|
|
753
|
+
await runOnAbort(middlewares, ctx, beforeResult.reason);
|
|
754
|
+
break;
|
|
755
|
+
}
|
|
756
|
+
if (beforeResult.type === 'transformArgs') {
|
|
757
|
+
toolArgs = beforeResult.args;
|
|
758
|
+
}
|
|
759
|
+
}
|
|
760
|
+
}
|
|
761
|
+
try {
|
|
762
|
+
// Emit the tool-call marker before execution so the UI sees
|
|
763
|
+
// tool-call → tool-update* → tool-result in order. Async-
|
|
764
|
+
// generator executes stream their yields as tool-update chunks
|
|
765
|
+
// live; plain executes yield nothing here.
|
|
766
|
+
//
|
|
767
|
+
// Pause detection: a yielded `pause_for_client_tools` control
|
|
768
|
+
// chunk halts iteration, propagates the nested calls to the
|
|
769
|
+
// parent's pending list, and SKIPS the tool_result emission
|
|
770
|
+
// — the yielding tool's own call stays orphaned in the parent
|
|
771
|
+
// message history until the caller resolves it on resume.
|
|
772
|
+
yield { type: 'tool-call', toolCall: tc };
|
|
773
|
+
const execGen = executeMaybeStreaming(tool, toolArgs, { toolCallId: tc.id });
|
|
774
|
+
let result;
|
|
775
|
+
let paused = false;
|
|
776
|
+
while (true) {
|
|
777
|
+
const step = await execGen.next();
|
|
778
|
+
if (step.done) {
|
|
779
|
+
result = step.value;
|
|
780
|
+
break;
|
|
781
|
+
}
|
|
782
|
+
if (isPauseForClientToolsChunk(step.value)) {
|
|
783
|
+
for (const pending of step.value.toolCalls) {
|
|
784
|
+
pendingClientToolCalls.push(pending);
|
|
785
|
+
}
|
|
786
|
+
loopFinishReason = 'client_tool_calls';
|
|
787
|
+
stopForClientTools = true;
|
|
788
|
+
paused = true;
|
|
789
|
+
break;
|
|
790
|
+
}
|
|
791
|
+
const updateChunk = { type: 'tool-update', toolCall: tc, update: step.value };
|
|
792
|
+
if (middlewares.length > 0) {
|
|
793
|
+
const transformed = runOnChunk(middlewares, ctx, updateChunk);
|
|
794
|
+
if (transformed)
|
|
795
|
+
yield transformed;
|
|
796
|
+
}
|
|
797
|
+
else {
|
|
798
|
+
yield updateChunk;
|
|
799
|
+
}
|
|
800
|
+
}
|
|
801
|
+
if (paused)
|
|
802
|
+
continue; // skip tool_result emission + message push for this tc
|
|
803
|
+
// The streamed `tool-result` chunk and `step.toolResults`
|
|
804
|
+
// both carry the ORIGINAL value; only the message content
|
|
805
|
+
// pushed onto `messages` (next-step model input) is narrowed
|
|
806
|
+
// by toModelOutput.
|
|
807
|
+
toolResults.push({ toolCallId: tc.id, result });
|
|
808
|
+
const resultStr = await applyToModelOutput(tool, result, middlewares.length > 0 ? (e) => runOnError(middlewares, ctx, e) : undefined);
|
|
809
|
+
messages.push({ role: 'tool', content: resultStr, toolCallId: tc.id });
|
|
810
|
+
yield { type: 'tool-result', toolCall: tc, result };
|
|
811
|
+
// onAfterToolCall
|
|
812
|
+
if (middlewares.length > 0)
|
|
813
|
+
await runOnAfterToolCall(middlewares, ctx, tc.name, toolArgs, result);
|
|
814
|
+
}
|
|
815
|
+
catch (err) {
|
|
816
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
817
|
+
const errResult = `Error: ${msg}`;
|
|
818
|
+
toolResults.push({ toolCallId: tc.id, result: errResult });
|
|
819
|
+
messages.push({ role: 'tool', content: errResult, toolCallId: tc.id });
|
|
820
|
+
yield { type: 'tool-result', toolCall: tc, result: errResult };
|
|
821
|
+
// onAfterToolCall (error case)
|
|
822
|
+
if (middlewares.length > 0)
|
|
823
|
+
await runOnAfterToolCall(middlewares, ctx, tc.name, toolArgs, errResult);
|
|
824
|
+
}
|
|
825
|
+
}
|
|
826
|
+
// onToolPhaseComplete
|
|
827
|
+
if (middlewares.length > 0)
|
|
828
|
+
await runSequential(middlewares, 'onToolPhaseComplete', ctx);
|
|
829
|
+
}
|
|
830
|
+
else {
|
|
831
|
+
messages.push({ role: 'assistant', content: text });
|
|
832
|
+
}
|
|
833
|
+
const step = {
|
|
834
|
+
message: { role: 'assistant', content: text, ...(currentToolCalls.length > 0 ? { toolCalls: currentToolCalls } : {}) },
|
|
835
|
+
toolCalls: currentToolCalls,
|
|
836
|
+
toolResults,
|
|
837
|
+
usage: stepUsage,
|
|
838
|
+
finishReason,
|
|
839
|
+
};
|
|
840
|
+
steps.push(step);
|
|
841
|
+
if (stopForClientTools || stopForApproval)
|
|
842
|
+
break;
|
|
843
|
+
const shouldStop = stopConditions.some(cond => cond({ steps, iteration, lastMessage: step.message }));
|
|
844
|
+
if (shouldStop || finishReason !== 'tool_calls')
|
|
845
|
+
break;
|
|
846
|
+
// Reset for next iteration
|
|
847
|
+
text = '';
|
|
848
|
+
currentToolCalls = [];
|
|
849
|
+
}
|
|
850
|
+
} // close `else` (skip-loop-when-resume-needs-approval)
|
|
851
|
+
}
|
|
852
|
+
catch (err) {
|
|
853
|
+
// onError
|
|
854
|
+
if (middlewares.length > 0)
|
|
855
|
+
await runOnError(middlewares, ctx, err);
|
|
856
|
+
throw err;
|
|
857
|
+
}
|
|
858
|
+
// onFinish
|
|
859
|
+
if (middlewares.length > 0)
|
|
860
|
+
await runSequential(middlewares, 'onFinish', ctx);
|
|
861
|
+
// Emit pending state to consumers via dedicated chunk types
|
|
862
|
+
if (pendingClientToolCalls.length > 0) {
|
|
863
|
+
yield { type: 'pending-client-tools', toolCalls: pendingClientToolCalls };
|
|
864
|
+
}
|
|
865
|
+
if (pendingApprovalToolCall) {
|
|
866
|
+
yield { type: 'pending-approval', toolCall: pendingApprovalToolCall.toolCall, isClientTool: pendingApprovalToolCall.isClientTool };
|
|
867
|
+
}
|
|
868
|
+
const lastStep = steps[steps.length - 1];
|
|
869
|
+
const result = {
|
|
870
|
+
text: lastStep ? getMessageText(lastStep.message.content) : '',
|
|
871
|
+
steps,
|
|
872
|
+
usage: totalUsage,
|
|
873
|
+
};
|
|
874
|
+
if (loopFinishReason)
|
|
875
|
+
result.finishReason = loopFinishReason;
|
|
876
|
+
if (pendingClientToolCalls.length > 0)
|
|
877
|
+
result.pendingClientToolCalls = pendingClientToolCalls;
|
|
878
|
+
if (pendingApprovalToolCall)
|
|
879
|
+
result.pendingApprovalToolCall = pendingApprovalToolCall;
|
|
880
|
+
if (resumedToolMessages.length > 0)
|
|
881
|
+
result.resumedToolMessages = resumedToolMessages;
|
|
882
|
+
resolveResponse(result);
|
|
883
|
+
}
|
|
884
|
+
return {
|
|
885
|
+
stream: generateStream(),
|
|
886
|
+
response: responsePromise,
|
|
887
|
+
};
|
|
888
|
+
}
|
|
889
|
+
/**
 * Coerce a stop-condition input into array form: a lone predicate is
 * wrapped in a single-element array, an existing array is returned as-is.
 */
function normalizeStopConditions(cond) {
    if (Array.isArray(cond)) {
        return cond;
    }
    return [cond];
}
|
|
892
|
+
/**
 * When continuing a chat after a stop-on-approval round-trip, the supplied
 * `messages` array ends with an `assistant` message whose `toolCalls` were
 * never fulfilled (the loop paused before executing them). Most providers
 * (Anthropic in particular) reject such conversations because every
 * `tool_use` block must be followed by a matching `tool_result`.
 *
 * This helper detects that case, executes the pending **server** tool calls
 * (honoring `approvedToolCallIds` / `rejectedToolCallIds`), appends the
 * resulting tool messages to `messages` in place, and returns them. The
 * caller can attach the returned list to `AgentResponse.resumedToolMessages`
 * so that the panels dispatcher persists them in the conversation store.
 *
 * Client tools (no `execute`) must come back from the browser with their
 * tool result already in the conversation, so the trailing assistant message
 * will not have unmatched `toolCalls` for them — they're handled outside.
 *
 * @param {object} deps - Bag with `messages` (mutated in place), `toolMap`
 *   (tool name → tool), and `options` (carries the approval id lists).
 * @returns `{ resumed, approvalStillRequired }` — the tool messages appended
 *   here, and the still-pending approval descriptor if one blocked the scan.
 */
async function resumePendingToolCalls(deps) {
    const { messages, toolMap, options } = deps;
    const last = messages[messages.length - 1];
    // Nothing to resume unless the conversation tail is an assistant message
    // with unmatched tool calls.
    if (!last || last.role !== 'assistant' || !last.toolCalls || last.toolCalls.length === 0) {
        return { resumed: [], approvalStillRequired: undefined };
    }
    const resumed = [];
    let approvalStillRequired;
    for (const tc of last.toolCalls) {
        const tool = toolMap.get(tc.name);
        if (!tool) {
            // Tool vanished between rounds (e.g. agent reconfigured) — answer
            // with an error tool message so the provider sees a matched pair.
            const err = `Error: Unknown tool "${tc.name}"`;
            const m = { role: 'tool', content: err, toolCallId: tc.id };
            messages.push(m);
            resumed.push(m);
            continue;
        }
        if (!tool.execute) {
            // Client tool whose result is missing from the supplied messages.
            // Surface an error so the model can recover instead of hanging.
            const err = `Error: client tool "${tc.name}" was not executed by the browser`;
            const m = { role: 'tool', content: err, toolCallId: tc.id };
            messages.push(m);
            resumed.push(m);
            continue;
        }
        const decision = await evaluateApproval(tool, tc, options);
        if (decision === 'rejected') {
            const rej = { rejected: true, reason: 'User rejected this tool call' };
            const m = { role: 'tool', content: JSON.stringify(rej), toolCallId: tc.id };
            messages.push(m);
            resumed.push(m);
            continue;
        }
        if (decision === 'pending') {
            // Still pending — the user has not yet approved this call. Re-emit
            // the pending state and stop processing further tools.
            approvalStillRequired = { toolCall: tc, isClientTool: false };
            break;
        }
        try {
            // Drain generator yields silently — approval-resume runs outside the
            // stream, so any preliminary updates are discarded; only the final
            // return value is captured.
            const execGen = executeMaybeStreaming(tool, tc.arguments, { toolCallId: tc.id });
            let result;
            while (true) {
                const step = await execGen.next();
                if (step.done) {
                    result = step.value;
                    break;
                }
            }
            // Approval-resume has no middleware context here, so toModelOutput
            // errors fall back silently to default stringification (R6).
            const content = await applyToModelOutput(tool, result);
            const m = { role: 'tool', content, toolCallId: tc.id };
            messages.push(m);
            resumed.push(m);
        }
        catch (err) {
            // An execute failure still produces a tool message: the pair must
            // be matched, and the model can read the error text and recover.
            const errMsg = `Error: ${err instanceof Error ? err.message : String(err)}`;
            const m = { role: 'tool', content: errMsg, toolCallId: tc.id };
            messages.push(m);
            resumed.push(m);
        }
    }
    return { resumed, approvalStillRequired };
}
|
|
978
|
+
/**
 * Structural test for the AsyncGenerator protocol. The executor may not be
 * authored as a literal `async function*` (it could be wrapped or produced
 * by a factory), so instead of relying on constructor identity we check for
 * the protocol surface: `next`, `return`, and `Symbol.asyncIterator`.
 * Sync generators fail the check (they expose `Symbol.iterator` instead).
 */
function isAsyncGenerator(value) {
    if (typeof value !== 'object' || value === null) {
        return false;
    }
    const protocol = ['next', 'return', Symbol.asyncIterator];
    return protocol.every((key) => typeof value[key] === 'function');
}
|
|
992
|
+
/**
 * Uniformly drive a tool's `execute`, whatever shape it takes: a plain
 * value, a promise, or an async generator.
 *
 * The wrapper is itself an async generator. Each `yield` is a preliminary
 * tool-update payload (only generator-style executes produce any), and the
 * wrapper's return value is the tool's final result. Streaming callers
 * forward the yields as `tool-update` chunks as they arrive; non-streaming
 * callers drain the yields and keep only the return value — so the same
 * tool definition works in both modes.
 *
 * @param tool - Tool whose `execute` handler should run.
 * @param args - Parsed tool-call arguments.
 * @param ctx - Execution context (e.g. `{ toolCallId }`).
 * @returns The final tool result, as the generator's return value.
 * @throws {Error} When the tool has no `execute` handler.
 */
async function* executeMaybeStreaming(tool, args, ctx) {
    const { execute } = tool;
    if (!execute) {
        throw new Error('Tool has no execute function');
    }
    const outcome = execute(args, ctx);
    if (!isAsyncGenerator(outcome)) {
        // Plain value or promise — a single await covers both shapes.
        return await outcome;
    }
    // Pump the inner generator: forward every yield as a preliminary
    // update and surface its return value as the final result.
    for (;;) {
        const step = await outcome.next();
        if (step.done) {
            return step.value;
        }
        yield step.value;
    }
}
|
|
1020
|
+
/**
 * Fallback serialization for the `tool` role message content when a tool
 * has no `toModelOutput` transform: strings pass through unchanged, every
 * other value is JSON-encoded.
 */
function defaultStringify(value) {
    if (typeof value === 'string') {
        return value;
    }
    return JSON.stringify(value);
}
|
|
1028
|
+
/**
 * Render a tool's structured `result` as the string the **model** will read
 * on its next step. Uses `tool.toModelOutput` when present; otherwise (or
 * when the transform throws) falls back to {@link defaultStringify}.
 *
 * Per R6 in the ai-loop-parity plan, a throwing `toModelOutput` MUST NOT
 * crash the loop: the error is routed through the optional `onError` hook
 * (so middleware keeps visibility), then the default stringification is
 * used as a safety net.
 *
 * @param tool - Tool definition, possibly carrying `toModelOutput`.
 * @param result - Raw value produced by the tool's execute.
 * @param onError - Optional async error sink (middleware onError bridge).
 * @returns The string content for the `tool` role message.
 */
async function applyToModelOutput(tool, result, onError) {
    if (!tool.toModelOutput) {
        return defaultStringify(result);
    }
    try {
        return await tool.toModelOutput(result);
    }
    catch (err) {
        // Deliberate swallow (R6): report via onError, then fall back.
        if (onError) {
            await onError(err);
        }
        return defaultStringify(result);
    }
}
|
|
1050
|
+
/**
 * Resolve `needsApproval` for a single tool call, taking the client-supplied
 * `approvedToolCallIds` / `rejectedToolCallIds` lists into account.
 *
 * @param tool - Tool whose `definition.needsApproval` gates execution; it may
 *   be a boolean flag or an (async) predicate over the call's arguments.
 * @param tc - The tool call under evaluation (`id`, `arguments`).
 * @param options - Optional run options carrying the approval id lists.
 * @returns One of:
 *   - `'allow'` — execute the tool normally (no gate, or the call is approved)
 *   - `'pending'` — gated and the call has not yet been approved
 *   - `'rejected'` — the call id appears in `rejectedToolCallIds`
 */
async function evaluateApproval(tool, tc, options) {
    const needs = tool.definition.needsApproval;
    const gated = typeof needs === 'function'
        ? await needs(tc.arguments)
        : Boolean(needs);
    if (!gated) {
        return 'allow';
    }
    // Rejection takes precedence if an id somehow appears in both lists.
    if (options?.rejectedToolCallIds?.includes(tc.id)) {
        return 'rejected';
    }
    return options?.approvedToolCallIds?.includes(tc.id) ? 'allow' : 'pending';
}
|
|
1070
|
+
//# sourceMappingURL=agent.js.map
|