@cuylabs/agent-core 0.6.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -1
- package/dist/{builder-BKkipazh.d.ts → builder-BRvqCcIk.d.ts} +2 -2
- package/dist/{resolver-DOfZ-xuk.d.ts → capability-resolver-CgRGsWVX.d.ts} +1 -1
- package/dist/{chunk-3C4VKG4P.js → chunk-3HNO5SVI.js} +273 -807
- package/dist/chunk-5K7AQVOU.js +619 -0
- package/dist/{chunk-QAQADS4X.js → chunk-BNSHUWCV.js} +1 -0
- package/dist/{chunk-O2ZCFQL6.js → chunk-CDTV2UYU.js} +86 -1
- package/dist/chunk-IEFIQENH.js +73 -0
- package/dist/chunk-N7P4PN3O.js +84 -0
- package/dist/{chunk-QWFMX226.js → chunk-QGOGIP7T.js} +148 -15
- package/dist/chunk-VNQBHPCT.js +398 -0
- package/dist/{chunk-X635CM2F.js → chunk-ZPMACVZK.js} +1 -1
- package/dist/context/index.js +1 -1
- package/dist/host/index.d.ts +45 -0
- package/dist/host/index.js +8 -0
- package/dist/{index-DZQJD_hp.d.ts → index-C33hlD6H.d.ts} +12 -7
- package/dist/{index-ipP3_ztp.d.ts → index-CfBGYrpd.d.ts} +121 -2
- package/dist/index.d.ts +107 -126
- package/dist/index.js +321 -601
- package/dist/inference/index.d.ts +59 -0
- package/dist/inference/index.js +25 -0
- package/dist/middleware/index.d.ts +7 -4
- package/dist/middleware/index.js +5 -3
- package/dist/models/index.d.ts +104 -2
- package/dist/models/index.js +40 -6
- package/dist/prompt/index.d.ts +9 -6
- package/dist/reasoning/index.d.ts +54 -8
- package/dist/reasoning/index.js +2 -3
- package/dist/{registry-CuRWWtcT.d.ts → registry-BDLIHOQB.d.ts} +1 -1
- package/dist/{runner-G1wxEgac.d.ts → runner-DSKaEz3z.d.ts} +35 -8
- package/dist/runtime/index.d.ts +41 -7
- package/dist/runtime/index.js +15 -6
- package/dist/scope/index.d.ts +10 -0
- package/dist/scope/index.js +14 -0
- package/dist/{session-manager-Uawm2Le7.d.ts → session-manager-B_CWGTsl.d.ts} +1 -1
- package/dist/skill/index.d.ts +7 -5
- package/dist/storage/index.d.ts +2 -2
- package/dist/sub-agent/index.d.ts +12 -8
- package/dist/tool/index.d.ts +7 -4
- package/dist/tool/index.js +4 -3
- package/dist/{tool-pFAnJc5Y.d.ts → tool-Db1Ue-1U.d.ts} +1 -1
- package/dist/{tool-DYp6-cC3.d.ts → tool-HUtkiVBx.d.ts} +5 -99
- package/dist/tracking/index.d.ts +3 -1
- package/dist/types-9jGQUjqW.d.ts +29 -0
- package/dist/types-CHiPh8U2.d.ts +100 -0
- package/dist/types-CqDZTh4d.d.ts +335 -0
- package/dist/types-FRpzzg_9.d.ts +355 -0
- package/package.json +19 -8
- package/dist/capabilities/index.d.ts +0 -97
- package/dist/capabilities/index.js +0 -46
- package/dist/chunk-6TDTQJ4P.js +0 -116
- package/dist/chunk-DWYX7ASF.js +0 -26
- package/dist/chunk-FG4MD5MU.js +0 -54
- package/dist/config-D2xeGEHK.d.ts +0 -52
- package/dist/identifiers-BLUxFqV_.d.ts +0 -12
- package/dist/network-D76DS5ot.d.ts +0 -5
- package/dist/types-BWo810L_.d.ts +0 -648
|
@@ -0,0 +1,619 @@
|
|
|
1
|
+
import {
|
|
2
|
+
buildReasoningOptionsSync
|
|
3
|
+
} from "./chunk-ZPMACVZK.js";
|
|
4
|
+
import {
|
|
5
|
+
executeAgentToolCall
|
|
6
|
+
} from "./chunk-IEFIQENH.js";
|
|
7
|
+
import {
|
|
8
|
+
snapshotScope
|
|
9
|
+
} from "./chunk-N7P4PN3O.js";
|
|
10
|
+
|
|
11
|
+
// src/inference/toolset.ts
|
|
12
|
+
import { tool, zodSchema } from "ai";
|
|
13
|
+
/**
 * Builds an AI-SDK tool set from the agent's tool registry.
 *
 * Each registry entry is initialized (lazily, per call) with the current
 * working directory, then registered under its id. In "auto" execution
 * mode the SDK executes the tool through the executeAgentToolCall bridge;
 * any other mode registers a schema-only tool (no `execute`), leaving
 * execution to the caller.
 *
 * @param options - tools map, cwd, session/message ids, abort signal and
 *   optional host / turnTracker / middleware passed through to execution.
 * @returns map of tool id -> AI-SDK tool definition.
 */
async function buildToolSet(options) {
  const toolSet = {};
  // Default to SDK-driven execution when no mode is specified.
  const executionMode = options.executionMode ?? "auto";
  for (const [id, info] of Object.entries(options.tools)) {
    const initialized = await info.init({ cwd: options.cwd });
    toolSet[id] = executionMode === "auto" ? tool({
      description: initialized.description,
      inputSchema: zodSchema(initialized.parameters),
      // Only the `.output` of the bridged call is surfaced to the model.
      // Conditional spreads keep optional keys absent rather than
      // present-but-undefined.
      execute: async (params) => (await executeAgentToolCall({
        toolName: id,
        tool: info,
        params,
        cwd: options.cwd,
        abort: options.abort,
        sessionID: options.sessionID,
        messageID: options.messageID,
        ...options.host ? { host: options.host } : {},
        ...options.turnTracker ? { turnTracker: options.turnTracker } : {},
        ...options.middleware ? { middleware: options.middleware } : {}
      })).output
    }) : tool({
      description: initialized.description,
      inputSchema: zodSchema(initialized.parameters)
    });
  }
  return toolSet;
}
|
|
40
|
+
|
|
41
|
+
// src/inference/stream.ts
|
|
42
|
+
import {
|
|
43
|
+
stepCountIs,
|
|
44
|
+
streamText
|
|
45
|
+
} from "ai";
|
|
46
|
+
|
|
47
|
+
// src/errors/classify.ts
|
|
48
|
+
/**
 * Returns whether an error category represents a transient failure that
 * is worth retrying.
 *
 * @param {string} category - classification produced by
 *   classifyFromStatusAndMessage (or supplied explicitly).
 * @returns {boolean} true for transient categories; false otherwise.
 */
function isRetryableCategory(category) {
  switch (category) {
    // Transient: the same request may succeed later.
    case "rate_limit":
    case "overloaded":
    case "network":
    case "timeout":
      return true;
    // Permanent: retrying cannot change the outcome.
    case "auth":
    case "invalid_request":
    case "context_overflow":
    case "content_filter":
    case "cancelled":
    case "unknown":
      return false;
    // Fix: the original switch had no default, so an unrecognized
    // category fell through and returned `undefined` (a non-boolean).
    // Treat unrecognized categories conservatively as non-retryable.
    default:
      return false;
  }
}
|
|
64
|
+
/**
 * Derives an error category from an HTTP status code and/or the error
 * message text. Status-based rules take precedence; keyword matching on
 * the lowercased message is the fallback; "unknown" when nothing matches.
 *
 * @param {number|undefined} status - HTTP status code, if available.
 * @param {string} message - raw error message.
 * @returns {string} one of the LLMError categories.
 */
function classifyFromStatusAndMessage(status, message) {
  const text = message.toLowerCase();
  const mentions = (needle) => text.includes(needle);
  if (status) {
    if (status === 429) return "rate_limit";
    if (status === 401 || status === 403) return "auth";
    if (status === 400) {
      // A 400 mentioning context/token budget is an overflow, not a
      // generic bad request.
      return mentions("context") || mentions("token") ? "context_overflow" : "invalid_request";
    }
    if (status === 503 || status === 502) return "overloaded";
    if (status >= 500) return "network";
  }
  // Keyword heuristics, checked in priority order.
  const messageRules = [
    [() => mentions("rate") && mentions("limit"), "rate_limit"],
    [() => mentions("overload") || mentions("capacity"), "overloaded"],
    [() => mentions("too_many_requests"), "rate_limit"],
    [() => mentions("unauthorized") || mentions("invalid api key"), "auth"],
    [() => mentions("context") && mentions("length"), "context_overflow"],
    [() => mentions("content") && mentions("filter"), "content_filter"],
    [() => mentions("timeout") || mentions("timed out"), "timeout"],
    [() => mentions("network") || mentions("econnrefused") || mentions("econnreset"), "network"]
  ];
  for (const [matches, category] of messageRules) {
    if (matches()) return category;
  }
  return "unknown";
}
|
|
104
|
+
/**
 * Extracts a retry delay in milliseconds from response headers.
 *
 * Precedence: `retry-after-ms` (milliseconds, returned as-is) over
 * `retry-after` (seconds, or an HTTP-date in the future). Returns
 * undefined when no usable positive delay is present.
 *
 * @param {Record<string, string>} headers - lowercased header map.
 * @returns {number|undefined} delay in ms, or undefined.
 */
function parseRetryDelay(headers) {
  const msHeader = headers["retry-after-ms"];
  if (msHeader) {
    const ms = parseFloat(msHeader);
    if (!Number.isNaN(ms) && ms > 0) {
      return ms;
    }
  }
  const retryAfterHeader = headers["retry-after"];
  if (!retryAfterHeader) {
    return void 0;
  }
  // Numeric form: delay-seconds.
  const seconds = parseFloat(retryAfterHeader);
  if (!Number.isNaN(seconds) && seconds > 0) {
    return Math.ceil(seconds * 1e3);
  }
  // Date form: absolute HTTP-date; only meaningful if in the future.
  const absoluteMs = Date.parse(retryAfterHeader);
  if (!Number.isNaN(absoluteMs)) {
    const remaining = absoluteMs - Date.now();
    if (remaining > 0) {
      return Math.ceil(remaining);
    }
  }
  return void 0;
}
|
|
128
|
+
|
|
129
|
+
// src/errors/extract.ts
|
|
130
|
+
/**
 * Pulls status, headers and (when recognizable) a category out of an
 * AI-SDK-shaped error object. Returns a partial LLMError options object;
 * absent fields simply aren't set.
 *
 * @param {Error} error - the thrown error to inspect.
 * @returns {{status?: number, headers?: object, category?: string}}
 */
function extractFromAISDKError(error) {
  const details = {};
  const source = error;
  // Prefer `status`; fall back to `statusCode`.
  if (typeof source.status === "number") {
    details.status = source.status;
  } else if (typeof source.statusCode === "number") {
    details.status = source.statusCode;
  }
  // Prefer `responseHeaders`; fall back to `headers`.
  if (source.responseHeaders && typeof source.responseHeaders === "object") {
    details.headers = source.responseHeaders;
  } else if (source.headers && typeof source.headers === "object") {
    details.headers = source.headers;
  }
  const data = source.data;
  if (data && typeof data === "object") {
    // Anthropic-style envelope: { type: "error", error: { type } }.
    if (data.type === "error" && typeof data.error === "object") {
      const innerType = data.error.type;
      if (innerType === "too_many_requests") {
        details.category = "rate_limit";
      } else if (innerType === "overloaded") {
        details.category = "overloaded";
      }
    }
    // An explicit non-retryable flag, absent other evidence, means the
    // request itself was invalid.
    if (data.isRetryable === false && !details.category) {
      details.category = "invalid_request";
    }
  }
  return details;
}
|
|
159
|
+
|
|
160
|
+
// src/errors/llm-error.ts
|
|
161
|
+
/**
 * Normalized error for LLM / provider failures.
 *
 * Wraps arbitrary thrown values with a classification (`category`),
 * retry metadata (`isRetryable`, `retryDelayMs`) and optional
 * provider/model context. Construct directly or via `LLMError.from()`.
 */
var LLMError = class _LLMError extends Error {
  // Classified failure kind, e.g. "rate_limit", "auth", "unknown".
  category;
  // HTTP status code, when one was available on the source error.
  status;
  // Response header map; used to derive `retryDelayMs`.
  headers;
  // Provider identifier, when known.
  provider;
  // Model identifier, when known.
  model;
  // Derived from `category` via isRetryableCategory().
  isRetryable;
  // Server-suggested delay parsed from `headers`, if any (ms).
  retryDelayMs;
  constructor(options) {
    super(options.message, { cause: options.cause });
    this.name = "LLMError";
    this.status = options.status;
    this.headers = options.headers;
    this.provider = options.provider;
    this.model = options.model;
    // An explicit category wins; otherwise classify from status+message.
    this.category = options.category ?? classifyFromStatusAndMessage(
      options.status,
      options.message
    );
    this.isRetryable = isRetryableCategory(this.category);
    this.retryDelayMs = this.headers ? parseRetryDelay(this.headers) : void 0;
  }
  /**
   * Coerces any thrown value into an LLMError.
   *
   * Existing LLMErrors pass through unchanged. AbortErrors (or messages
   * mentioning "abort") map to "cancelled". Other Errors get
   * status/header/category details extracted from AI-SDK error shapes.
   * Non-Error values are stringified with category "unknown".
   * `context` (e.g. provider/model) overrides extracted fields.
   */
  static from(error, context) {
    if (error instanceof _LLMError) {
      return error;
    }
    if (error instanceof Error) {
      if (error.name === "AbortError" || error.message.includes("abort")) {
        return new _LLMError({
          message: error.message,
          category: "cancelled",
          cause: error,
          ...context
        });
      }
      return new _LLMError({
        message: error.message,
        cause: error,
        ...extractFromAISDKError(error),
        ...context
      });
    }
    return new _LLMError({
      message: String(error),
      category: "unknown",
      ...context
    });
  }
  // One-line human-readable summary:
  // "[provider] message (HTTP status) retry in Ns".
  get description() {
    const parts = [this.message];
    if (this.provider) parts.unshift(`[${this.provider}]`);
    if (this.status) parts.push(`(HTTP ${this.status})`);
    if (this.isRetryable && this.retryDelayMs) {
      parts.push(`retry in ${Math.ceil(this.retryDelayMs / 1e3)}s`);
    }
    return parts.join(" ");
  }
};
|
|
219
|
+
|
|
220
|
+
// src/errors/utils.ts
|
|
221
|
+
/**
 * Returns whether an arbitrary thrown value represents a transient
 * failure, normalizing it through LLMError.from() when needed.
 *
 * @param {unknown} error - any thrown value.
 * @returns {boolean}
 */
function isRetryable(error) {
  const llmError = error instanceof LLMError ? error : LLMError.from(error);
  return llmError.isRetryable;
}
|
|
227
|
+
/**
 * Returns the server-suggested retry delay (ms) for an error, or
 * undefined when the error is not retryable or carries no delay hint.
 *
 * @param {unknown} error - any thrown value.
 * @returns {number|undefined}
 */
function getRetryDelay(error) {
  const llmError = error instanceof LLMError ? error : LLMError.from(error);
  return llmError.isRetryable ? llmError.retryDelayMs : void 0;
}
|
|
234
|
+
/**
 * Returns the classified category of an arbitrary thrown value,
 * normalizing it through LLMError.from() when needed.
 *
 * @param {unknown} error - any thrown value.
 * @returns {string}
 */
function getErrorCategory(error) {
  const llmError = error instanceof LLMError ? error : LLMError.from(error);
  return llmError.category;
}
|
|
240
|
+
|
|
241
|
+
// src/retry.ts
|
|
242
|
+
// Default retry policy used by withRetry() when the caller provides a
// partial (or no) config.
var DEFAULT_RETRY_CONFIG = {
  maxAttempts: 3,       // total attempts, including the first try
  initialDelayMs: 2e3,  // base delay before the first retry (2s)
  backoffFactor: 2,     // exponential multiplier per attempt
  maxDelayMs: 3e4,      // ceiling on a single backoff delay (30s)
  jitter: true          // randomize delay ±25% to avoid thundering herd
};
|
|
249
|
+
/**
 * Creates a fresh mutable state object for one withRetry() run.
 *
 * @returns {{attempt: number, errors: Array, canRetry: boolean,
 *   nextDelayMs: number|undefined}}
 */
function createRetryState() {
  const freshState = {
    attempt: 0,
    errors: [],
    canRetry: true,
    nextDelayMs: void 0
  };
  return freshState;
}
|
|
257
|
+
/**
 * Computes the backoff delay (ms) before the next retry attempt.
 *
 * A positive server-provided delay on the error wins outright.
 * Otherwise: exponential backoff from the config, capped at maxDelayMs,
 * with optional ±25% jitter.
 *
 * @param {number} attempt - 1-based attempt number that just failed.
 * @param {LLMError|undefined} error - the failure, if available.
 * @param {object} config - resolved retry config.
 * @returns {number} delay in whole milliseconds (>= 0).
 */
function calculateDelay(attempt, error, config) {
  const serverHint = error?.retryDelayMs;
  if (serverHint) {
    return serverHint;
  }
  const exponential = config.initialDelayMs * config.backoffFactor ** (attempt - 1);
  const capped = Math.min(exponential, config.maxDelayMs);
  if (!config.jitter) {
    return Math.round(capped);
  }
  // Symmetric jitter: uniform in [-25%, +25%] of the capped delay.
  const spread = capped * 0.25;
  const offset = (Math.random() - 0.5) * 2 * spread;
  return Math.max(0, Math.round(capped + offset));
}
|
|
270
|
+
/**
 * Waits for `ms` milliseconds, rejecting with an AbortError if the
 * signal fires first (or is already aborted).
 *
 * @param {number} ms - duration to wait.
 * @param {AbortSignal} [signal] - optional cancellation signal.
 * @returns {Promise<void>} resolves after the delay; rejects on abort.
 */
async function sleep(ms, signal) {
  return new Promise((resolve, reject) => {
    // Already-aborted signals reject immediately without scheduling.
    if (signal?.aborted) {
      reject(new DOMException("Aborted", "AbortError"));
      return;
    }
    const onAbort = () => {
      clearTimeout(timer);
      signal?.removeEventListener("abort", onAbort);
      reject(new DOMException("Aborted", "AbortError"));
    };
    const timer = setTimeout(() => {
      // Drop the listener so the controller doesn't retain us.
      signal?.removeEventListener("abort", onAbort);
      resolve();
    }, ms);
    signal?.addEventListener("abort", onAbort, { once: true });
  });
}
|
|
291
|
+
/**
 * Runs `fn`, retrying on transient failures with exponential backoff.
 *
 * Each failure is normalized to an LLMError and recorded. Retries stop
 * when attempts are exhausted, the error is non-retryable, or the signal
 * has aborted; the final throw wraps the last error (as `cause`) with an
 * attempt-count message. `config.onRetry` is notified before each sleep.
 *
 * @param fn - async work; receives the 1-based attempt number.
 * @param config - partial retry config merged over DEFAULT_RETRY_CONFIG.
 * @param signal - optional AbortSignal; aborts both the backoff sleep
 *   and further retries.
 */
async function withRetry(fn, config, signal) {
  const mergedConfig = { ...DEFAULT_RETRY_CONFIG, ...config };
  const state = createRetryState();
  while (true) {
    state.attempt++;
    try {
      return await fn(state.attempt);
    } catch (error) {
      const llmError = LLMError.from(error);
      state.errors.push(llmError);
      // Retry only while attempts remain, the error is transient, and
      // the caller hasn't cancelled.
      const shouldRetry2 = state.attempt < mergedConfig.maxAttempts && isRetryable(llmError) && !signal?.aborted;
      if (!shouldRetry2) {
        // Re-wrap so the caller sees the attempt count while keeping the
        // original classification and the last error as `cause`.
        throw new LLMError({
          message: `Failed after ${state.attempt} attempt(s): ${llmError.message}`,
          category: llmError.category,
          status: llmError.status,
          headers: llmError.headers,
          provider: llmError.provider,
          model: llmError.model,
          cause: llmError
        });
      }
      const delayMs = calculateDelay(state.attempt, llmError, mergedConfig);
      state.nextDelayMs = delayMs;
      config?.onRetry?.(state.attempt, delayMs, llmError);
      // sleep() rejects with AbortError if the signal fires mid-wait.
      await sleep(delayMs, signal);
    }
  }
}
|
|
320
|
+
/**
 * Returns a reusable handler that runs a stream-factory through
 * withRetry() with the captured config and abort signal.
 *
 * @param {object} [options] - retry config, optionally with `signal`.
 * @returns {(createStream: Function) => Promise<any>}
 */
function createRetryHandler(options) {
  const retryConfig = options ?? {};
  const abortSignal = options?.signal;
  return async (createStream) => withRetry(createStream, retryConfig, abortSignal);
}
|
|
327
|
+
/**
 * Returns whether another attempt should be made for `error`, given how
 * many attempts have already run.
 *
 * @param {unknown} error - the failure from the last attempt.
 * @param {number} attempt - attempts made so far.
 * @param {number} [maxAttempts] - cap; defaults to the global config.
 * @returns {boolean}
 */
function shouldRetry(error, attempt, maxAttempts = DEFAULT_RETRY_CONFIG.maxAttempts) {
  return attempt < maxAttempts && isRetryable(error);
}
|
|
331
|
+
|
|
332
|
+
// src/inference/types.ts
|
|
333
|
+
// Default cap on tokens a single inference call may generate (32,000).
var DEFAULT_MAX_OUTPUT_TOKENS = 32e3;
// Alias kept for export compatibility; same value as above.
var OUTPUT_TOKEN_MAX = DEFAULT_MAX_OUTPUT_TOKENS;
|
|
335
|
+
|
|
336
|
+
// src/inference/stream.ts
|
|
337
|
+
/**
 * Builds the read-only context object passed to middleware hooks for a
 * model call: session/step identity, model, tool names and a snapshot of
 * the current scope.
 *
 * @param {object} input - the stream() input.
 * @returns {object} middleware context.
 */
function buildModelCallContext(input) {
  const { sessionID, cwd, abort, model } = input;
  return {
    sessionID,
    step: input.step ?? 1,
    cwd,
    abort,
    model,
    toolNames: Object.keys(input.tools),
    mcpToolNames: Object.keys(input.mcpTools ?? {}),
    scope: snapshotScope()
  };
}
|
|
349
|
+
/**
 * Snapshots the mutable model-call parameters from a stream() input into
 * a standalone object that middleware may inspect or rewrite. Arrays are
 * shallow-copied so middleware edits don't alias the caller's arrays.
 *
 * @param {object} input - the stream() input.
 * @returns {object} model-call input snapshot (providerOptions and
 *   systemMessages start unset).
 */
function buildModelCallInput(input) {
  const {
    model,
    temperature,
    topP,
    maxOutputTokens,
    maxSteps,
    reasoningLevel,
    telemetry,
    customStreamProvider,
    toolExecutionMode
  } = input;
  return {
    model,
    system: input.system.slice(),
    messages: input.messages.slice(),
    temperature,
    topP,
    maxOutputTokens,
    maxSteps,
    reasoningLevel,
    telemetry,
    customStreamProvider,
    toolExecutionMode,
    providerOptions: void 0,
    systemMessages: void 0
  };
}
|
|
366
|
+
/**
 * Writes a (possibly middleware-rewritten) model-call snapshot back onto
 * the live input object. Arrays are shallow-copied again so the target
 * doesn't alias the snapshot; the snapshot itself is stored as
 * `activeModelCall` for later reads.
 *
 * @param {object} target - the live stream() input to mutate.
 * @param {object} modelCall - the snapshot to apply.
 */
function applyModelCallInput(target, modelCall) {
  const scalarFields = [
    "model",
    "temperature",
    "topP",
    "maxOutputTokens",
    "maxSteps",
    "reasoningLevel",
    "telemetry",
    "customStreamProvider",
    "toolExecutionMode"
  ];
  for (const field of scalarFields) {
    target[field] = modelCall[field];
  }
  target.system = modelCall.system.slice();
  target.messages = modelCall.messages.slice();
  target.activeModelCall = modelCall;
}
|
|
380
|
+
/**
 * Shallow-merges two provider-option objects; keys in `override` win.
 * When either side is absent, the other is returned unchanged.
 *
 * @param {object|undefined} base
 * @param {object|undefined} override
 * @returns {object|undefined}
 */
function mergeProviderOptions(base, override) {
  if (base && override) {
    return { ...base, ...override };
  }
  return base || override;
}
|
|
385
|
+
/**
 * Returns whether a middleware result is a "block" verdict rather than a
 * rewritten model-call input.
 *
 * @param {object} value - middleware return value.
 * @returns {boolean} true only when `value.block` is exactly true.
 */
function isBlockedModelCall(value) {
  if (!("block" in value)) {
    return false;
  }
  return value.block === true;
}
|
|
388
|
+
/**
 * Resolves the effective model-call input, running middleware's
 * runModelInput hook when middleware is configured.
 *
 * Without middleware the snapshot is stored on the input and returned
 * as-is. With middleware, a "block" verdict is returned untouched (the
 * caller raises); otherwise the rewritten snapshot is applied back onto
 * the input before being returned.
 *
 * @param {object} input - the live stream() input (mutated in place).
 * @returns {Promise<object>} the resolved snapshot or a block verdict.
 */
async function resolveModelCallInput(input) {
  const middleware = input.middleware;
  if (!middleware?.hasMiddleware) {
    const snapshot = buildModelCallInput(input);
    input.activeModelCall = snapshot;
    return snapshot;
  }
  const resolved = await middleware.runModelInput(
    buildModelCallInput(input),
    buildModelCallContext(input)
  );
  if (!isBlockedModelCall(resolved)) {
    applyModelCallInput(input, resolved);
  }
  return resolved;
}
|
|
404
|
+
/**
 * Normalizes an AI-SDK stream result (text / usage / finishReason
 * promises) and, when middleware is configured, pipes every stream chunk
 * through `runModelChunk`, which may transform or drop chunks.
 *
 * @param stream2 - the raw result from streamText()/custom provider.
 * @param input - the stream() input (supplies middleware + context).
 * @returns normalized { fullStream, text, usage, finishReason }.
 */
function wrapModelStream(stream2, input) {
  // Promise.resolve() tolerates both promise-valued and plain fields.
  const normalizedText = Promise.resolve(stream2.text);
  // Missing token counts normalize to 0.
  const normalizedUsage = Promise.resolve(stream2.usage).then((usage) => ({
    inputTokens: usage.inputTokens ?? 0,
    outputTokens: usage.outputTokens ?? 0,
    totalTokens: usage.totalTokens ?? 0
  }));
  const normalizedFinishReason = Promise.resolve(stream2.finishReason).then(
    (reason) => String(reason)
  );
  if (!input.middleware?.hasMiddleware) {
    // Fast path: no middleware, pass the raw stream through untouched.
    return {
      fullStream: stream2.fullStream,
      text: normalizedText,
      usage: normalizedUsage,
      finishReason: normalizedFinishReason
    };
  }
  return {
    // Async generator lets middleware inspect each chunk; a falsy return
    // from runModelChunk suppresses that chunk entirely.
    fullStream: (async function* () {
      const ctx = buildModelCallContext(input);
      for await (const rawChunk of stream2.fullStream) {
        const chunk = await input.middleware.runModelChunk(
          rawChunk,
          ctx
        );
        if (chunk) {
          yield chunk;
        }
      }
    })(),
    text: normalizedText,
    usage: normalizedUsage,
    finishReason: normalizedFinishReason
  };
}
|
|
440
|
+
/**
 * Invokes the caller-supplied custom stream provider, flattening the
 * system prompt segments (empty segments dropped) into one newline-joined
 * string.
 *
 * @param {object} input - stream() input with `customStreamProvider`.
 * @returns {Promise<any>} whatever the provider returns.
 */
async function createCustomStream(input) {
  const joinedSystem = input.system.filter(Boolean).join("\n");
  const { messages, abort, maxSteps } = input;
  return input.customStreamProvider({
    system: joinedSystem,
    messages,
    abortSignal: abort,
    maxSteps
  });
}
|
|
449
|
+
/**
 * Extracts provider/model identifiers from a model reference, which may
 * be an AI-SDK model object (with `provider`/`modelId`) or a plain id.
 *
 * @param {object} input - stream() input carrying `model`.
 * @returns {{provider: string|undefined, model: string}}
 */
function getModelInfo(input) {
  const { model } = input;
  const isModelObject = typeof model === "object";
  const provider = isModelObject && "provider" in model ? String(model.provider) : void 0;
  const modelId = isModelObject && "modelId" in model ? String(model.modelId) : String(model);
  return { provider, model: modelId };
}
|
|
455
|
+
/**
 * Invokes AI-SDK streamText() with the resolved call parameters, running
 * inside the middleware's OpenTelemetry context when one is available.
 *
 * Middleware-provided `systemMessages` / `providerOptions` (from the
 * active model call) override or merge with the defaults. The otel
 * import is dynamic and best-effort: if "@opentelemetry/api" cannot be
 * loaded, the call proceeds without context propagation.
 *
 * @param options - { input, allTools, system, providerOptions }.
 */
async function callStreamTextWithOtelContext(options) {
  const { input, allTools, system, providerOptions } = options;
  // Middleware-supplied structured system messages win over the joined
  // system string.
  const systemParam = input.activeModelCall?.systemMessages?.length ? input.activeModelCall.systemMessages : system;
  const mergedProviderOptions = mergeProviderOptions(
    providerOptions,
    input.activeModelCall?.providerOptions
  );
  const callStreamText = () => streamText({
    model: input.model,
    system: systemParam,
    messages: input.messages,
    tools: allTools,
    // Hard stop after maxSteps tool-use rounds (default 50).
    stopWhen: stepCountIs(input.maxSteps ?? 50),
    maxOutputTokens: input.maxOutputTokens ?? DEFAULT_MAX_OUTPUT_TOKENS,
    temperature: input.temperature,
    topP: input.topP,
    abortSignal: input.abort,
    providerOptions: mergedProviderOptions,
    experimental_telemetry: input.telemetry,
    // When an intervention queue exists, drain it before each step and
    // inject its entries as user messages.
    prepareStep: input.intervention ? async ({ messages }) => {
      const pending = input.intervention.drainImmediate();
      if (pending.length === 0) {
        return void 0;
      }
      const injected = pending.map((item) => ({
        role: "user",
        content: item.message
      }));
      for (const item of pending) {
        input.intervention.onApplied?.(item);
      }
      return { messages: [...messages, ...injected] };
    } : void 0,
    // Forward per-step tool results / usage to the caller's hook.
    onStepFinish: async (step) => {
      if (!input.onStepFinish) {
        return;
      }
      await input.onStepFinish({
        toolResults: step.toolResults?.map((toolResult) => ({
          toolName: toolResult.toolName,
          toolCallId: toolResult.toolCallId,
          output: toolResult.output
        })),
        usage: step.usage,
        finishReason: step.finishReason
      });
    }
  });
  const otelCtx = input.middleware?.getOtelContext(input.sessionID);
  if (!otelCtx) {
    return callStreamText();
  }
  try {
    // Dynamic import keeps @opentelemetry/api an optional dependency.
    const otelApi = await import("@opentelemetry/api");
    return otelApi.context.with(
      otelCtx,
      callStreamText
    );
  } catch {
    // Otel unavailable: proceed without context propagation.
    return callStreamText();
  }
}
|
|
517
|
+
/**
 * Main inference entry point: resolves middleware rewrites, builds the
 * tool set, invokes the model (or a custom stream provider) with retry,
 * and returns a normalized, middleware-wrapped stream.
 *
 * Throws an LLMError ("invalid_request") when middleware blocks the
 * call. Retry is applied only when `input.retry` is set with a non-zero
 * maxAttempts.
 *
 * @param input - full inference input (model, prompts, tools, retry,
 *   middleware, abort, etc.).
 */
async function stream(input) {
  const messageID = crypto.randomUUID();
  // May mutate `input` in place with middleware-rewritten parameters.
  const resolvedInput = await resolveModelCallInput(input);
  const modelInfo = getModelInfo(input);
  if (isBlockedModelCall(resolvedInput)) {
    throw new LLMError({
      message: resolvedInput.reason,
      category: "invalid_request",
      provider: modelInfo.provider,
      model: modelInfo.model
    });
  }
  // Join non-empty system segments into a single prompt string.
  const system = input.system.filter(Boolean).join("\n");
  if (input.customStreamProvider) {
    // Custom provider path: bypass streamText entirely.
    const runCustomStream = async () => await createCustomStream(input);
    if (!input.retry || input.retry.maxAttempts === 0) {
      return wrapModelStream(await runCustomStream(), input);
    }
    return wrapModelStream(await withRetry(
      async () => await runCustomStream(),
      input.retry,
      input.abort
    ), input);
  }
  const toolSet = await buildToolSet({
    tools: input.tools,
    cwd: input.cwd,
    sessionID: input.sessionID,
    messageID,
    abort: input.abort,
    turnTracker: input.turnTracker,
    host: input.host,
    middleware: input.middleware,
    executionMode: input.toolExecutionMode
  });
  // Native tools plus any externally-provided MCP tools.
  const allTools = {
    ...toolSet,
    ...input.mcpTools ?? {}
  };
  const providerOptions = input.reasoningLevel ? buildReasoningOptionsSync(input.model, input.reasoningLevel) : void 0;
  const createStream = async () => {
    try {
      return await callStreamTextWithOtelContext({
        input,
        allTools,
        system,
        providerOptions
      });
    } catch (error) {
      // Attach provider/model context to whatever went wrong.
      throw LLMError.from(error, modelInfo);
    }
  };
  if (!input.retry || input.retry.maxAttempts === 0) {
    return wrapModelStream(await createStream(), input);
  }
  return wrapModelStream(await withRetry(
    async () => await createStream(),
    input.retry,
    input.abort
  ), input);
}
|
|
578
|
+
/**
 * Runs stream() with retry disabled — exactly one attempt.
 *
 * @param {object} input - same shape as stream()'s input.
 */
async function streamOnce(input) {
  return stream({ ...input, retry: void 0 });
}
|
|
581
|
+
/**
 * Runs stream() limited to a single model step (maxSteps forced to 1).
 *
 * @param {object} input - same shape as stream()'s input.
 */
async function streamStep(input) {
  return stream({ ...input, maxSteps: 1 });
}
|
|
587
|
+
|
|
588
|
+
// src/inference/index.ts
|
|
589
|
+
// Public namespace bundling the inference entry points.
var Inference = {
  buildToolSet,
  stream,
  streamOnce,
  streamStep
};
// Backwards-compatible alias: `LLM` and `Inference` are the same object.
var LLM = Inference;
|
|
596
|
+
|
|
597
|
+
export {
|
|
598
|
+
buildToolSet,
|
|
599
|
+
isRetryableCategory,
|
|
600
|
+
parseRetryDelay,
|
|
601
|
+
LLMError,
|
|
602
|
+
isRetryable,
|
|
603
|
+
getRetryDelay,
|
|
604
|
+
getErrorCategory,
|
|
605
|
+
DEFAULT_RETRY_CONFIG,
|
|
606
|
+
createRetryState,
|
|
607
|
+
calculateDelay,
|
|
608
|
+
sleep,
|
|
609
|
+
withRetry,
|
|
610
|
+
createRetryHandler,
|
|
611
|
+
shouldRetry,
|
|
612
|
+
DEFAULT_MAX_OUTPUT_TOKENS,
|
|
613
|
+
OUTPUT_TOKEN_MAX,
|
|
614
|
+
stream,
|
|
615
|
+
streamOnce,
|
|
616
|
+
streamStep,
|
|
617
|
+
Inference,
|
|
618
|
+
LLM
|
|
619
|
+
};
|