ai 4.3.8 → 4.3.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +23 -1
- package/dist/index.d.ts +23 -1
- package/dist/index.js +95 -59
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +60 -24
- package/dist/index.mjs.map +1 -1
- package/mcp-stdio/create-child-process.test.ts +6 -5
- package/mcp-stdio/create-child-process.ts +2 -2
- package/mcp-stdio/dist/index.js +3 -3
- package/mcp-stdio/dist/index.js.map +1 -1
- package/mcp-stdio/dist/index.mjs +3 -3
- package/mcp-stdio/dist/index.mjs.map +1 -1
- package/mcp-stdio/mcp-stdio-transport.test.ts +1 -14
- package/mcp-stdio/mcp-stdio-transport.ts +2 -2
- package/package.json +3 -3
- package/rsc/dist/rsc-server.mjs +18 -3
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 # ai

+## 4.3.10
+
+### Patch Changes
+
+- 0432959: feat (ai): add experimental prepareStep callback to generateText
+
+## 4.3.9
+
+### Patch Changes
+
+- b69a253: fix(utils/detect-mimetype): add support for detecting id3 tags
+
 ## 4.3.8

 ### Patch Changes
package/dist/index.d.mts
CHANGED
@@ -2463,7 +2463,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
     /**
     The language model to use.
     */
@@ -2518,6 +2518,28 @@ Optional specification for parsing structured outputs from the LLM response.
     */
     experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
     /**
+    Optional function that you can use to provide different settings for a step.
+
+    @param options - The options for the step.
+    @param options.steps - The steps that have been executed so far.
+    @param options.stepNumber - The number of the step that is being executed.
+    @param options.maxSteps - The maximum number of steps.
+    @param options.model - The model that is being used.
+
+    @returns An object that contains the settings for the step.
+    If you return undefined (or for undefined settings), the settings from the outer level will be used.
+    */
+    experimental_prepareStep?: (options: {
+        steps: Array<StepResult<TOOLS>>;
+        stepNumber: number;
+        maxSteps: number;
+        model: LanguageModel;
+    }) => PromiseLike<{
+        model?: LanguageModel;
+        toolChoice?: ToolChoice<TOOLS>;
+        experimental_activeTools?: Array<keyof TOOLS>;
+    } | undefined>;
+    /**
     A function that attempts to repair a tool call that failed to parse.
     */
     experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
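The `experimental_prepareStep` callback declared above runs before each step of a multi-step `generateText` call and can swap the model, tool choice, or active tools for that step; anything it leaves undefined falls back to the outer-level settings. A minimal usage sketch (the `@ai-sdk/openai` provider and the `search` tool are illustrative assumptions, not part of this diff):

import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4o-mini'),
  maxSteps: 5,
  tools: {
    search: tool({
      description: 'Search the knowledge base.',
      parameters: z.object({ query: z.string() }),
      execute: async ({ query }) => `results for ${query}`,
    }),
  },
  // Force a tool call on the first step only; returning undefined
  // keeps the outer-level settings for all later steps.
  experimental_prepareStep: async ({ stepNumber }) => {
    if (stepNumber === 0) {
      return { toolChoice: { type: 'tool', toolName: 'search' } };
    }
    return undefined;
  },
  prompt: 'Answer using the knowledge base.',
});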
package/dist/index.d.ts
CHANGED
@@ -2463,7 +2463,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, maxSteps, experimental_generateMessageId: generateMessageId, experimental_output: output, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata, providerOptions, experimental_activeTools: activeTools, experimental_prepareStep: prepareStep, experimental_repairToolCall: repairToolCall, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
     /**
     The language model to use.
     */
@@ -2518,6 +2518,28 @@ Optional specification for parsing structured outputs from the LLM response.
     */
     experimental_output?: Output<OUTPUT, OUTPUT_PARTIAL>;
     /**
+    Optional function that you can use to provide different settings for a step.
+
+    @param options - The options for the step.
+    @param options.steps - The steps that have been executed so far.
+    @param options.stepNumber - The number of the step that is being executed.
+    @param options.maxSteps - The maximum number of steps.
+    @param options.model - The model that is being used.
+
+    @returns An object that contains the settings for the step.
+    If you return undefined (or for undefined settings), the settings from the outer level will be used.
+    */
+    experimental_prepareStep?: (options: {
+        steps: Array<StepResult<TOOLS>>;
+        stepNumber: number;
+        maxSteps: number;
+        model: LanguageModel;
+    }) => PromiseLike<{
+        model?: LanguageModel;
+        toolChoice?: ToolChoice<TOOLS>;
+        experimental_activeTools?: Array<keyof TOOLS>;
+    } | undefined>;
+    /**
     A function that attempts to repair a tool call that failed to parse.
     */
     experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
package/dist/index.js
CHANGED
@@ -63,7 +63,7 @@ __export(streams_exports, {
   cosineSimilarity: () => cosineSimilarity,
   createDataStream: () => createDataStream,
   createDataStreamResponse: () => createDataStreamResponse,
-  createIdGenerator: () => import_provider_utils15.createIdGenerator,
+  createIdGenerator: () => import_provider_utils16.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   customProvider: () => customProvider,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
@@ -79,7 +79,7 @@ __export(streams_exports, {
   extractReasoningMiddleware: () => extractReasoningMiddleware,
   formatAssistantStreamPart: () => import_ui_utils11.formatAssistantStreamPart,
   formatDataStreamPart: () => import_ui_utils11.formatDataStreamPart,
-  generateId: () => import_provider_utils15.generateId,
+  generateId: () => import_provider_utils16.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   jsonSchema: () => import_ui_utils11.jsonSchema,
@@ -100,7 +100,7 @@ __export(streams_exports, {
 module.exports = __toCommonJS(streams_exports);

 // core/index.ts
-var import_provider_utils15 = require("@ai-sdk/provider-utils");
+var import_provider_utils16 = require("@ai-sdk/provider-utils");
 var import_ui_utils11 = require("@ai-sdk/ui-utils");

 // core/data-stream/create-data-stream.ts
@@ -945,6 +945,7 @@ var DefaultGeneratedFileWithType = class extends DefaultGeneratedFile {
 };

 // core/util/detect-mimetype.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 var imageMimeTypeSignatures = [
   {
     mimeType: "image/gif",
@@ -1050,12 +1051,26 @@ var audioMimeTypeSignatures = [
     base64Prefix: "ZnR5cA"
   }
 ];
+var stripID3 = (data) => {
+  const bytes = typeof data === "string" ? (0, import_provider_utils3.convertBase64ToUint8Array)(data) : data;
+  const id3Size = (bytes[6] & 127) << 21 | (bytes[7] & 127) << 14 | (bytes[8] & 127) << 7 | bytes[9] & 127;
+  return bytes.slice(id3Size + 10);
+};
+function stripID3TagsIfPresent(data) {
+  const hasId3 = typeof data === "string" && data.startsWith("SUQz") || typeof data !== "string" && data.length > 10 && data[0] === 73 && // 'I'
+  data[1] === 68 && // 'D'
+  data[2] === 51;
+  return hasId3 ? stripID3(data) : data;
+}
 function detectMimeType({
   data,
   signatures
 }) {
+  const processedData = stripID3TagsIfPresent(data);
   for (const signature of signatures) {
-    if (typeof data === "string" ? data.startsWith(signature.base64Prefix) : data.length >= signature.bytesPrefix.length && signature.bytesPrefix.every((byte, index) => data[index] === byte)) {
+    if (typeof processedData === "string" ? processedData.startsWith(signature.base64Prefix) : processedData.length >= signature.bytesPrefix.length && signature.bytesPrefix.every(
+      (byte, index) => processedData[index] === byte
+    )) {
       return signature.mimeType;
     }
   }
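The added `stripID3` helper above decodes the ID3v2 tag header to find where the tag ends before MIME sniffing: an ID3v2 tag starts with a 10-byte header (the "ID3" magic is "SUQz" in base64, hence the two checks in `stripID3TagsIfPresent`), and bytes 6-9 store the tag size as a synchsafe integer with 7 significant bits per byte. A standalone sketch of that size computation:

// ID3v2 header layout: bytes 0-2 "ID3", 3-4 version, 5 flags,
// 6-9 synchsafe tag size (the high bit of each size byte is 0).
function id3TagLength(bytes: Uint8Array): number {
  const size =
    ((bytes[6] & 0x7f) << 21) |
    ((bytes[7] & 0x7f) << 14) |
    ((bytes[8] & 0x7f) << 7) |
    (bytes[9] & 0x7f);
  return size + 10; // tag body plus the 10-byte header itself
}

// Example: size bytes 00 00 02 01 decode to 257, so the audio data
// (and its sniffable magic bytes) starts at offset 267.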
@@ -1141,7 +1156,7 @@ var DefaultGenerateImageResult = class {

 // core/generate-object/generate-object.ts
 var import_provider12 = require("@ai-sdk/provider");
-var import_provider_utils6 = require("@ai-sdk/provider-utils");
+var import_provider_utils7 = require("@ai-sdk/provider-utils");

 // errors/no-object-generated-error.ts
 var import_provider5 = require("@ai-sdk/provider");
@@ -1223,7 +1238,7 @@ async function download({ url }) {
 }

 // core/prompt/data-content.ts
-var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var import_provider_utils4 = require("@ai-sdk/provider-utils");

 // core/prompt/invalid-data-content-error.ts
 var import_provider7 = require("@ai-sdk/provider");
@@ -1267,9 +1282,9 @@ function convertDataContentToBase64String(content) {
     return content;
   }
   if (content instanceof ArrayBuffer) {
-    return (0, import_provider_utils3.convertUint8ArrayToBase64)(new Uint8Array(content));
+    return (0, import_provider_utils4.convertUint8ArrayToBase64)(new Uint8Array(content));
   }
-  return (0, import_provider_utils3.convertUint8ArrayToBase64)(content);
+  return (0, import_provider_utils4.convertUint8ArrayToBase64)(content);
 }
 function convertDataContentToUint8Array(content) {
   if (content instanceof Uint8Array) {
@@ -1277,7 +1292,7 @@ function convertDataContentToUint8Array(content) {
   }
   if (typeof content === "string") {
     try {
-      return (0, import_provider_utils3.convertBase64ToUint8Array)(content);
+      return (0, import_provider_utils4.convertBase64ToUint8Array)(content);
     } catch (error) {
       throw new InvalidDataContentError({
         message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -1667,7 +1682,7 @@ function prepareCallSettings({

 // core/prompt/standardize-prompt.ts
 var import_provider10 = require("@ai-sdk/provider");
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
 var import_zod7 = require("zod");

 // core/prompt/attachments-to-parts.ts
@@ -2235,7 +2250,7 @@ function standardizePrompt({
       message: "messages must not be empty"
     });
   }
-  const validationResult = (0, import_provider_utils4.safeValidateTypes)({
+  const validationResult = (0, import_provider_utils5.safeValidateTypes)({
     value: messages,
     schema: import_zod7.z.array(coreMessageSchema)
   });
@@ -2296,7 +2311,7 @@ function injectJsonInstruction({

 // core/generate-object/output-strategy.ts
 var import_provider11 = require("@ai-sdk/provider");
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
 var import_ui_utils2 = require("@ai-sdk/ui-utils");

 // core/util/async-iterable-stream.ts
@@ -2353,7 +2368,7 @@ var objectOutputStrategy = (schema) => ({
     };
   },
   validateFinalResult(value) {
-    return (0, import_provider_utils5.safeValidateTypes)({ value, schema });
+    return (0, import_provider_utils6.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
     throw new import_provider11.UnsupportedFunctionalityError({
@@ -2392,7 +2407,7 @@ var arrayOutputStrategy = (schema) => {
     const resultArray = [];
     for (let i = 0; i < inputArray.length; i++) {
       const element = inputArray[i];
-      const result = (0, import_provider_utils5.safeValidateTypes)({ value: element, schema });
+      const result = (0, import_provider_utils6.safeValidateTypes)({ value: element, schema });
       if (i === inputArray.length - 1 && !isFinalDelta) {
         continue;
       }
@@ -2433,7 +2448,7 @@ var arrayOutputStrategy = (schema) => {
     }
     const inputArray = value.elements;
     for (const element of inputArray) {
-      const result = (0, import_provider_utils5.safeValidateTypes)({ value: element, schema });
+      const result = (0, import_provider_utils6.safeValidateTypes)({ value: element, schema });
       if (!result.success) {
         return result;
       }
@@ -2666,7 +2681,7 @@ function validateObjectGenerationInput({
 }

 // core/generate-object/generate-object.ts
-var originalGenerateId = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
+var originalGenerateId = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject({
   model,
   enum: enumValues,
@@ -2986,7 +3001,7 @@ async function generateObject({
     }
   }
   function processResult(result2) {
-    const parseResult = (0, import_provider_utils6.safeParseJSON)({ text: result2 });
+    const parseResult = (0, import_provider_utils7.safeParseJSON)({ text: result2 });
     if (!parseResult.success) {
       throw new NoObjectGeneratedError({
         message: "No object generated: could not parse the response.",
@@ -3088,7 +3103,7 @@ var DefaultGenerateObjectResult = class {
 };

 // core/generate-object/stream-object.ts
-var import_provider_utils7 = require("@ai-sdk/provider-utils");
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var import_ui_utils3 = require("@ai-sdk/ui-utils");

 // util/delayed-promise.ts
@@ -3233,7 +3248,7 @@ function now() {
 }

 // core/generate-object/stream-object.ts
-var originalGenerateId2 = (0, import_provider_utils7.createIdGenerator)({ prefix: "aiobj", size: 24 });
+var originalGenerateId2 = (0, import_provider_utils8.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject({
   model,
   schema: inputSchema,
@@ -3805,7 +3820,7 @@ var DefaultStreamObjectResult = class {
 };

 // core/generate-text/generate-text.ts
-var import_provider_utils9 = require("@ai-sdk/provider-utils");
+var import_provider_utils10 = require("@ai-sdk/provider-utils");

 // errors/no-output-specified-error.ts
 var import_provider13 = require("@ai-sdk/provider");
@@ -3917,7 +3932,7 @@ function removeTextAfterLastWhitespace(text2) {
 }

 // core/generate-text/parse-tool-call.ts
-var import_provider_utils8 = require("@ai-sdk/provider-utils");
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
 var import_ui_utils5 = require("@ai-sdk/ui-utils");

 // errors/invalid-tool-arguments-error.ts
@@ -4043,7 +4058,7 @@ async function doParseToolCall({
     });
   }
   const schema = (0, import_ui_utils5.asSchema)(tool2.parameters);
-  const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils8.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils8.safeParseJSON)({ text: toolCall.args, schema });
+  const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils9.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils9.safeParseJSON)({ text: toolCall.args, schema });
   if (parseResult.success === false) {
     throw new InvalidToolArgumentsError({
       toolName,
@@ -4134,11 +4149,11 @@ function toResponseMessages({
 }

 // core/generate-text/generate-text.ts
-var originalGenerateId3 = (0, import_provider_utils9.createIdGenerator)({
+var originalGenerateId3 = (0, import_provider_utils10.createIdGenerator)({
   prefix: "aitxt",
   size: 24
 });
-var originalGenerateMessageId = (0, import_provider_utils9.createIdGenerator)({
+var originalGenerateMessageId = (0, import_provider_utils10.createIdGenerator)({
   prefix: "msg",
   size: 24
 });
@@ -4160,6 +4175,7 @@ async function generateText({
   experimental_providerMetadata,
   providerOptions = experimental_providerMetadata,
   experimental_activeTools: activeTools,
+  experimental_prepareStep: prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
@@ -4202,6 +4218,9 @@ async function generateText({
         telemetry
       }),
       ...baseTelemetryAttributes,
+      // model:
+      "ai.model.provider": model.provider,
+      "ai.model.id": model.modelId,
       // specific settings that only make sense on the outer level:
       "ai.prompt": {
         input: () => JSON.stringify({ system, prompt, messages })
@@ -4211,11 +4230,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
-      const mode = {
-        type: "regular",
-        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
-      };
+      var _a18, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
       const callSettings = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
@@ -4238,16 +4253,33 @@ async function generateText({
           ...initialPrompt.messages,
           ...responseMessages
         ];
+        const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+          model,
+          steps,
+          maxSteps,
+          stepNumber: stepCount
+        }));
+        const stepToolChoice = (_a18 = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _a18 : toolChoice;
+        const stepActiveTools = (_b = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _b : activeTools;
+        const stepModel = (_c = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _c : model;
         const promptMessages = await convertToLanguageModelPrompt({
           prompt: {
             type: promptFormat,
             system: initialPrompt.system,
             messages: stepInputMessages
           },
-          modelSupportsImageUrls: model.supportsImageUrls,
-          modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
+          modelSupportsImageUrls: stepModel.supportsImageUrls,
+          modelSupportsUrl: (_d = stepModel.supportsUrl) == null ? void 0 : _d.bind(stepModel)
           // support 'this' context
         });
+        const mode = {
+          type: "regular",
+          ...prepareToolsAndToolChoice({
+            tools,
+            toolChoice: stepToolChoice,
+            activeTools: stepActiveTools
+          })
+        };
         currentModelResponse = await retry(
           () => recordSpan({
             name: "ai.generateText.doGenerate",
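The hunk above resolves the per-step settings with nullish fallbacks: each field of the awaited `prepareStep` result is used only when it is defined; otherwise the outer-level `toolChoice`, `activeTools`, and `model` apply unchanged. A minimal sketch of that resolution rule (the generic types are simplified stand-ins for the SDK's):

// A defined override wins; anything left undefined keeps the outer value.
function resolveStepSettings<M, T>(
  overrides: { model?: M; toolChoice?: T } | undefined,
  outer: { model: M; toolChoice: T },
): { model: M; toolChoice: T } {
  return {
    model: overrides?.model ?? outer.model,
    toolChoice: overrides?.toolChoice ?? outer.toolChoice,
  };
}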
@@ -4259,6 +4291,10 @@ async function generateText({
             telemetry
           }),
           ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": stepModel.provider,
+          "ai.model.id": stepModel.modelId,
+          // prompt:
           "ai.prompt.format": { input: () => promptFormat },
           "ai.prompt.messages": {
             input: () => JSON.stringify(promptMessages)
@@ -4274,8 +4310,8 @@ async function generateText({
             input: () => mode.toolChoice != null ? JSON.stringify(mode.toolChoice) : void 0
           },
           // standardized gen-ai llm span attributes:
-          "gen_ai.system": model.provider,
-          "gen_ai.request.model": model.modelId,
+          "gen_ai.system": stepModel.provider,
+          "gen_ai.request.model": stepModel.modelId,
           "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
           "gen_ai.request.max_tokens": settings.maxTokens,
           "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -4288,7 +4324,7 @@ async function generateText({
           tracer,
           fn: async (span2) => {
             var _a19, _b2, _c2, _d2, _e2, _f2;
-            const result = await model.doGenerate({
+            const result = await stepModel.doGenerate({
               mode,
               ...callSettings,
               inputFormat: promptFormat,
@@ -4301,7 +4337,7 @@ async function generateText({
             const responseData = {
               id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
               timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
+              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId
             };
             span2.setAttributes(
               selectTelemetryAttributes({
@@ -4333,7 +4369,7 @@ async function generateText({
           })
         );
         currentToolCalls = await Promise.all(
-          ((_b = currentModelResponse.toolCalls) != null ? _b : []).map(
+          ((_e = currentModelResponse.toolCalls) != null ? _e : []).map(
            (toolCall) => parseToolCall({
              toolCall,
              tools,
@@ -4368,7 +4404,7 @@ async function generateText({
             nextStepType = "tool-result";
           }
         }
-        const originalText = (_c = currentModelResponse.text) != null ? _c : "";
+        const originalText = (_f = currentModelResponse.text) != null ? _f : "";
         const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
         text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
         const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -4376,7 +4412,7 @@ async function generateText({
         currentReasoningDetails = asReasoningDetails(
           currentModelResponse.reasoning
         );
-        sources.push(...(_d = currentModelResponse.sources) != null ? _d : []);
+        sources.push(...(_g = currentModelResponse.sources) != null ? _g : []);
         if (stepType === "continue") {
           const lastMessage = responseMessages[responseMessages.length - 1];
           if (typeof lastMessage.content === "string") {
@@ -4408,18 +4444,18 @@ async function generateText({
           reasoning: asReasoningText(currentReasoningDetails),
           reasoningDetails: currentReasoningDetails,
           files: asFiles(currentModelResponse.files),
-          sources: (_e = currentModelResponse.sources) != null ? _e : [],
+          sources: (_h = currentModelResponse.sources) != null ? _h : [],
           toolCalls: currentToolCalls,
           toolResults: currentToolResults,
           finishReason: currentModelResponse.finishReason,
           usage: currentUsage,
           warnings: currentModelResponse.warnings,
           logprobs: currentModelResponse.logprobs,
-          request: (_f = currentModelResponse.request) != null ? _f : {},
+          request: (_i = currentModelResponse.request) != null ? _i : {},
           response: {
             ...currentModelResponse.response,
-            headers: (_g = currentModelResponse.rawResponse) == null ? void 0 : _g.headers,
-            body: (_h = currentModelResponse.rawResponse) == null ? void 0 : _h.body,
+            headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
+            body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
             // deep clone msgs to avoid mutating past messages in multi-step:
             messages: structuredClone(responseMessages)
           },
@@ -4471,11 +4507,11 @@ async function generateText({
       finishReason: currentModelResponse.finishReason,
       usage,
       warnings: currentModelResponse.warnings,
-      request: (_i = currentModelResponse.request) != null ? _i : {},
+      request: (_l = currentModelResponse.request) != null ? _l : {},
       response: {
         ...currentModelResponse.response,
-        headers: (_j = currentModelResponse.rawResponse) == null ? void 0 : _j.headers,
-        body: (_k = currentModelResponse.rawResponse) == null ? void 0 : _k.body,
+        headers: (_m = currentModelResponse.rawResponse) == null ? void 0 : _m.headers,
+        body: (_n = currentModelResponse.rawResponse) == null ? void 0 : _n.body,
         messages: responseMessages
       },
       logprobs: currentModelResponse.logprobs,
@@ -4604,7 +4640,7 @@ __export(output_exports, {
   object: () => object,
   text: () => text
 });
-var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 var import_ui_utils6 = require("@ai-sdk/ui-utils");

 // errors/index.ts
@@ -4701,7 +4737,7 @@ var object = ({
     }
   },
   parseOutput({ text: text2 }, context) {
-    const parseResult = (0, import_provider_utils10.safeParseJSON)({ text: text2 });
+    const parseResult = (0, import_provider_utils11.safeParseJSON)({ text: text2 });
     if (!parseResult.success) {
       throw new NoObjectGeneratedError({
         message: "No object generated: could not parse the response.",
@@ -4712,7 +4748,7 @@ var object = ({
         finishReason: context.finishReason
       });
     }
-    const validationResult = (0, import_provider_utils10.safeValidateTypes)({
+    const validationResult = (0, import_provider_utils11.safeValidateTypes)({
       value: parseResult.value,
       schema
     });
@@ -4732,7 +4768,7 @@ var object = ({
 };

 // core/generate-text/smooth-stream.ts
-var import_provider_utils11 = require("@ai-sdk/provider-utils");
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_provider21 = require("@ai-sdk/provider");
 var CHUNKING_REGEXPS = {
   word: /\S+\s+/m,
@@ -4741,7 +4777,7 @@ var CHUNKING_REGEXPS = {
 function smoothStream({
   delayInMs = 10,
   chunking = "word",
-  _internal: { delay: delay2 = import_provider_utils11.delay } = {}
+  _internal: { delay: delay2 = import_provider_utils12.delay } = {}
 } = {}) {
   let detectChunk;
   if (typeof chunking === "function") {
@@ -4802,7 +4838,7 @@ function smoothStream({

 // core/generate-text/stream-text.ts
 var import_provider22 = require("@ai-sdk/provider");
-var import_provider_utils12 = require("@ai-sdk/provider-utils");
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
 var import_ui_utils8 = require("@ai-sdk/ui-utils");

 // util/as-array.ts
@@ -5121,11 +5157,11 @@ function runToolsTransformation({
 }

 // core/generate-text/stream-text.ts
-var originalGenerateId4 = (0, import_provider_utils12.createIdGenerator)({
+var originalGenerateId4 = (0, import_provider_utils13.createIdGenerator)({
   prefix: "aitxt",
   size: 24
 });
-var originalGenerateMessageId2 = (0, import_provider_utils12.createIdGenerator)({
+var originalGenerateMessageId2 = (0, import_provider_utils13.createIdGenerator)({
   prefix: "msg",
   size: 24
 });
@@ -7150,7 +7186,7 @@ function tool(tool2) {
 }

 // core/tool/mcp/mcp-sse-transport.ts
-var import_provider_utils13 = require("@ai-sdk/provider-utils");
+var import_provider_utils14 = require("@ai-sdk/provider-utils");

 // core/tool/mcp/json-rpc-message.ts
 var import_zod9 = require("zod");
@@ -7321,7 +7357,7 @@ var SseMCPTransport = class {
         (_b = this.onerror) == null ? void 0 : _b.call(this, error);
         return reject(error);
       }
-      const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils13.createEventSourceParserStream)());
+      const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils14.createEventSourceParserStream)());
       const reader = stream.getReader();
       const processEvents = async () => {
         var _a18, _b2, _c2;
@@ -7733,7 +7769,7 @@ function cosineSimilarity(vector1, vector2, options) {
 }

 // core/util/simulate-readable-stream.ts
-var import_provider_utils14 = require("@ai-sdk/provider-utils");
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
 function simulateReadableStream({
   chunks,
   initialDelayInMs = 0,
@@ -7741,7 +7777,7 @@ function simulateReadableStream({
   _internal
 }) {
   var _a17;
-  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils14.delay;
+  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils15.delay;
   let index = 0;
   return new ReadableStream({
     async pull(controller) {
@@ -7965,11 +8001,11 @@ __export(llamaindex_adapter_exports, {
   toDataStream: () => toDataStream2,
   toDataStreamResponse: () => toDataStreamResponse2
 });
-var import_provider_utils16 = require("@ai-sdk/provider-utils");
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
 var import_ui_utils14 = require("@ai-sdk/ui-utils");
 function toDataStreamInternal2(stream, callbacks) {
   const trimStart = trimStartOfStream();
-  return (0, import_provider_utils16.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
+  return (0, import_provider_utils17.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
     new TransformStream({
       async transform(message, controller) {
         controller.enqueue(trimStart(message.delta));
|