ai 5.0.0-canary.10 → 5.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +19 -44
- package/dist/index.d.ts +19 -44
- package/dist/index.js +374 -640
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +341 -607
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -5
- package/dist/internal/index.d.ts +7 -5
- package/dist/internal/index.js +23 -15
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +23 -15
- package/dist/internal/index.mjs.map +1 -1
- package/dist/mcp-stdio/index.js +3 -3
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs +3 -3
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/dist/test/index.d.mts +3 -7
- package/dist/test/index.d.ts +3 -7
- package/dist/test/index.js +3 -7
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +3 -7
- package/dist/test/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.js
CHANGED
@@ -75,7 +75,7 @@ __export(ai_exports, {
   cosineSimilarity: () => cosineSimilarity,
   createDataStream: () => createDataStream,
   createDataStreamResponse: () => createDataStreamResponse,
-  createIdGenerator: () =>
+  createIdGenerator: () => import_provider_utils22.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   customProvider: () => customProvider,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
@@ -91,7 +91,7 @@ __export(ai_exports, {
   extractReasoningMiddleware: () => extractReasoningMiddleware,
   fillMessageParts: () => fillMessageParts,
   formatDataStreamPart: () => formatDataStreamPart,
-  generateId: () =>
+  generateId: () => import_provider_utils22.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getMessageParts: () => getMessageParts,
@@ -119,7 +119,7 @@ __export(ai_exports, {
 module.exports = __toCommonJS(ai_exports);
 
 // core/index.ts
-var
+var import_provider_utils22 = require("@ai-sdk/provider-utils");
 
 // core/util/index.ts
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
@@ -2679,7 +2679,7 @@ var DefaultGenerateImageResult = class {
 
 // core/generate-object/generate-object.ts
 var import_provider13 = require("@ai-sdk/provider");
-var
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
 
 // errors/no-object-generated-error.ts
 var import_provider5 = require("@ai-sdk/provider");
@@ -2709,6 +2709,17 @@ var NoObjectGeneratedError = class extends import_provider5.AISDKError {
 };
 _a4 = symbol4;
 
+// core/generate-text/extract-content-text.ts
+function extractContentText(content) {
+  const parts = content.filter(
+    (content2) => content2.type === "text"
+  );
+  if (parts.length === 0) {
+    return void 0;
+  }
+  return parts.map((content2) => content2.text).join("");
+}
+
 // util/download-error.ts
 var import_provider6 = require("@ai-sdk/provider");
 var name5 = "AI_DownloadError";
@@ -2903,17 +2914,16 @@ var InvalidMessageRoleError = class extends import_provider9.AISDKError {
 _a7 = symbol7;
 
 // core/prompt/convert-to-language-model-prompt.ts
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
 async function convertToLanguageModelPrompt({
   prompt,
-
-  modelSupportsUrl = () => false,
+  supportedUrls,
   downloadImplementation = download
 }) {
   const downloadedAssets = await downloadAssets(
     prompt.messages,
     downloadImplementation,
-
-    modelSupportsUrl
+    supportedUrls
   );
   return [
     ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
@@ -3032,19 +3042,29 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
     }
   }
 }
-async function downloadAssets(messages, downloadImplementation,
+async function downloadAssets(messages, downloadImplementation, supportedUrls) {
   const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
     (content) => Array.isArray(content)
   ).flat().filter(
     (part) => part.type === "image" || part.type === "file"
-  ).
-
-
-
-
-
-
-
+  ).map((part) => {
+    var _a17, _b;
+    const mediaType = (_b = (_a17 = part.mediaType) != null ? _a17 : part.mimeType) != null ? _b : part.type === "image" ? "image/*" : void 0;
+    let data = part.type === "image" ? part.image : part.data;
+    if (typeof data === "string") {
+      try {
+        data = new URL(data);
+      } catch (ignored) {
+      }
+    }
+    return { mediaType, data };
+  }).filter(
+    (part) => part.data instanceof URL && part.mediaType != null && !(0, import_provider_utils10.isUrlSupported)({
+      url: part.data.toString(),
+      mediaType: part.mediaType,
+      supportedUrls
+    })
+  ).map((part) => part.data);
   const downloadedImages = await Promise.all(
     urls.map(async (url) => ({
       url,
@@ -3198,8 +3218,7 @@ function prepareCallSettings({
   }
   return {
     maxOutputTokens,
-
-    temperature: temperature != null ? temperature : 0,
+    temperature: temperature != null ? temperature : temperature === null ? void 0 : 0,
     topP,
     topK,
     presencePenalty,
@@ -3211,7 +3230,7 @@ function prepareCallSettings({
 
 // core/prompt/standardize-prompt.ts
 var import_provider11 = require("@ai-sdk/provider");
-var
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 var import_zod7 = require("zod");
 
 // core/prompt/attachments-to-parts.ts
@@ -3779,7 +3798,7 @@ function standardizePrompt({
       message: "messages must not be empty"
     });
   }
-  const validationResult = (0,
+  const validationResult = (0, import_provider_utils11.safeValidateTypes)({
     value: messages,
     schema: import_zod7.z.array(coreMessageSchema)
   });
@@ -3818,29 +3837,9 @@ function addLanguageModelUsage(usage1, usage2) {
   };
 }
 
-// core/generate-object/inject-json-instruction.ts
-var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
-var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
-var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
-function injectJsonInstruction({
-  prompt,
-  schema,
-  schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
-  schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
-}) {
-  return [
-    prompt != null && prompt.length > 0 ? prompt : void 0,
-    prompt != null && prompt.length > 0 ? "" : void 0,
-    // add a newline if prompt is not null
-    schemaPrefix,
-    schema != null ? JSON.stringify(schema) : void 0,
-    schemaSuffix
-  ].filter((line) => line != null).join("\n");
-}
-
 // core/generate-object/output-strategy.ts
 var import_provider12 = require("@ai-sdk/provider");
-var
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 
 // core/util/async-iterable-stream.ts
 function createAsyncIterableStream(source) {
@@ -3896,7 +3895,7 @@ var objectOutputStrategy = (schema) => ({
     };
   },
   validateFinalResult(value) {
-    return (0,
+    return (0, import_provider_utils12.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
     throw new import_provider12.UnsupportedFunctionalityError({
@@ -3935,7 +3934,7 @@ var arrayOutputStrategy = (schema) => {
       const resultArray = [];
       for (let i = 0; i < inputArray.length; i++) {
         const element = inputArray[i];
-        const result = (0,
+        const result = (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
         if (i === inputArray.length - 1 && !isFinalDelta) {
           continue;
         }
@@ -3976,7 +3975,7 @@ var arrayOutputStrategy = (schema) => {
       }
       const inputArray = value.elements;
      for (const element of inputArray) {
-        const result = (0,
+        const result = (0, import_provider_utils12.safeValidateTypes)({ value: element, schema });
        if (!result.success) {
          return result;
        }
@@ -4085,7 +4084,6 @@ function getOutputStrategy({
 // core/generate-object/validate-object-generation-input.ts
 function validateObjectGenerationInput({
   output,
-  mode,
   schema,
   schemaName,
   schemaDescription,
@@ -4099,13 +4097,6 @@ function validateObjectGenerationInput({
     });
   }
   if (output === "no-schema") {
-    if (mode === "auto" || mode === "tool") {
-      throw new InvalidArgumentError({
-        parameter: "mode",
-        value: mode,
-        message: 'Mode must be "json" for no-schema output.'
-      });
-    }
     if (schema != null) {
       throw new InvalidArgumentError({
         parameter: "schema",
@@ -4208,19 +4199,8 @@ function validateObjectGenerationInput({
   }
 }
 
-// core/generate-text/extract-content-text.ts
-function extractContentText(content) {
-  const parts = content.filter(
-    (content2) => content2.type === "text"
-  );
-  if (parts.length === 0) {
-    return void 0;
-  }
-  return parts.map((content2) => content2.text).join("");
-}
-
 // core/generate-object/generate-object.ts
-var originalGenerateId = (0,
+var originalGenerateId = (0, import_provider_utils13.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject({
   model,
   enum: enumValues,
@@ -4228,7 +4208,6 @@ async function generateObject({
   schema: inputSchema,
   schemaName,
   schemaDescription,
-  mode,
   output = "object",
   system,
   prompt,
@@ -4247,7 +4226,6 @@ async function generateObject({
 }) {
   validateObjectGenerationInput({
     output,
-    mode,
     schema: inputSchema,
     schemaName,
     schemaDescription,
@@ -4259,14 +4237,12 @@ async function generateObject({
     schema: inputSchema,
     enumValues
   });
-  if (outputStrategy.type === "no-schema" && mode === void 0) {
-    mode = "json";
-  }
+  const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
-    settings: { ...
+    settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
   return recordSpan({
@@ -4286,267 +4262,122 @@ async function generateObject({
         "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
         "ai.schema.name": schemaName,
         "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-        "ai.settings.mode": mode
+        "ai.settings.output": outputStrategy.type
       }
     }),
     tracer,
     fn: async (span) => {
-      var _a17
-      if (mode === "auto" || mode == null) {
-        mode = model.defaultObjectGenerationMode;
-      }
+      var _a17;
      let result;
      let finishReason;
      let usage;
      let warnings;
      let response;
      let request;
-      let logprobs;
      let resultProviderMetadata;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      // support 'this' context
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": {
-                input: () => standardizedPrompt.type
-              },
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              "ai.settings.mode": mode,
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-              "gen_ai.request.max_tokens": settings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": settings.presencePenalty,
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
-            }
+      const standardizedPrompt = standardizePrompt({
+        prompt: { system, prompt, messages },
+        tools: void 0
+      });
+      const promptMessages = await convertToLanguageModelPrompt({
+        prompt: standardizedPrompt,
+        supportedUrls: await model.getSupportedUrls()
+      });
+      const generateResult = await retry(
+        () => recordSpan({
+          name: "ai.generateObject.doGenerate",
+          attributes: selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              ...assembleOperationName({
+                operationId: "ai.generateObject.doGenerate",
+                telemetry
           }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      );
-      return { ...result2, objectText: text2, responseData };
-      }
-      })
-      );
-      result = generateResult.objectText;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      logprobs = generateResult.logprobs;
-      resultProviderMetadata = generateResult.providerMetadata;
-      request = (_b = generateResult.request) != null ? _b : {};
-      response = generateResult.responseData;
-      break;
-      }
-      case "tool": {
-      const standardizedPrompt = standardizePrompt({
-        prompt: { system, prompt, messages },
-        tools: void 0
-      });
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: standardizedPrompt,
-        modelSupportsImageUrls: model.supportsImageUrls,
-        modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
-        // support 'this' context,
-      });
-      const inputFormat = standardizedPrompt.type;
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
+            ...baseTelemetryAttributes,
+            "ai.prompt.format": {
+              input: () => standardizedPrompt.type
+            },
+            "ai.prompt.messages": {
+              input: () => JSON.stringify(promptMessages)
+            },
+            // standardized gen-ai llm span attributes:
+            "gen_ai.system": model.provider,
+            "gen_ai.request.model": model.modelId,
+            "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+            "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+            "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+            "gen_ai.request.temperature": callSettings.temperature,
+            "gen_ai.request.top_k": callSettings.topK,
+            "gen_ai.request.top_p": callSettings.topP
+          }
+        }),
+        tracer,
+        fn: async (span2) => {
+          var _a18, _b, _c, _d, _e, _f, _g, _h;
+          const result2 = await model.doGenerate({
+            responseFormat: {
+              type: "json",
+              schema: outputStrategy.jsonSchema,
+              name: schemaName,
+              description: schemaDescription
+            },
+            ...prepareCallSettings(settings),
+            inputFormat: standardizedPrompt.type,
+            prompt: promptMessages,
+            providerOptions,
+            abortSignal,
+            headers
+          });
+          const responseData = {
+            id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+            timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+            modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+            headers: (_g = result2.response) == null ? void 0 : _g.headers,
+            body: (_h = result2.response) == null ? void 0 : _h.body
+          };
+          const text2 = extractContentText(result2.content);
+          if (text2 === void 0) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: the model did not return a response.",
+              response: responseData,
+              usage: calculateLanguageModelUsage2(result2.usage),
+              finishReason: result2.finishReason
+            });
+          }
+          span2.setAttributes(
+            selectTelemetryAttributes({
             telemetry,
             attributes: {
-
-
-
-
-
-
-
-
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              "ai.settings.mode": mode,
+              "ai.response.finishReason": result2.finishReason,
+              "ai.response.object": { output: () => text2 },
+              "ai.response.id": responseData.id,
+              "ai.response.model": responseData.modelId,
+              "ai.response.timestamp": responseData.timestamp.toISOString(),
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": result2.usage.inputTokens,
+              "ai.usage.completionTokens": result2.usage.outputTokens,
               // standardized gen-ai llm span attributes:
-              "gen_ai.
-              "gen_ai.
-              "gen_ai.
-              "gen_ai.
-              "gen_ai.
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
+              "gen_ai.response.finish_reasons": [result2.finishReason],
+              "gen_ai.response.id": responseData.id,
+              "gen_ai.response.model": responseData.modelId,
+              "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+              "gen_ai.usage.output_tokens": result2.usage.outputTokens
             }
-          })
-
-
-
-
-
-
-
-
-
-
-
-
-          toolChoice: { type: "required" },
-          ...prepareCallSettings(settings),
-          inputFormat,
-          prompt: promptMessages,
-          providerOptions,
-          abortSignal,
-          headers
-          });
-          const firstToolCall = result2.content.find(
-            (content) => content.type === "tool-call"
-          );
-          const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
-          const responseData = {
-            id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
-            timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-            modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-            headers: (_g = result2.response) == null ? void 0 : _g.headers,
-            body: (_h = result2.response) == null ? void 0 : _h.body
-          };
-          if (objectText === void 0) {
-            throw new NoObjectGeneratedError({
-              message: "No object generated: the tool was not called.",
-              response: responseData,
-              usage: calculateLanguageModelUsage2(result2.usage),
-              finishReason: result2.finishReason
-            });
-          }
-          span2.setAttributes(
-            selectTelemetryAttributes({
-              telemetry,
-              attributes: {
-                "ai.response.finishReason": result2.finishReason,
-                "ai.response.object": { output: () => objectText },
-                "ai.response.id": responseData.id,
-                "ai.response.model": responseData.modelId,
-                "ai.response.timestamp": responseData.timestamp.toISOString(),
-                // TODO rename telemetry attributes to inputTokens and outputTokens
-                "ai.usage.promptTokens": result2.usage.inputTokens,
-                "ai.usage.completionTokens": result2.usage.outputTokens,
-                // standardized gen-ai llm span attributes:
-                "gen_ai.response.finish_reasons": [result2.finishReason],
-                "gen_ai.response.id": responseData.id,
-                "gen_ai.response.model": responseData.modelId,
-                "gen_ai.usage.input_tokens": result2.usage.inputTokens,
-                "gen_ai.usage.output_tokens": result2.usage.outputTokens
-              }
-            })
-          );
-          return { ...result2, objectText, responseData };
-        }
-      })
-      );
-      result = generateResult.objectText;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      logprobs = generateResult.logprobs;
-      resultProviderMetadata = generateResult.providerMetadata;
-      request = (_d = generateResult.request) != null ? _d : {};
-      response = generateResult.responseData;
-      break;
-      }
-      case void 0: {
-        throw new Error(
-          "Model does not have a default object generation mode."
-        );
-      }
-      default: {
-        const _exhaustiveCheck = mode;
-        throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
-      }
-      }
+            })
+          );
+          return { ...result2, objectText: text2, responseData };
+        }
+      })
+      );
+      result = generateResult.objectText;
+      finishReason = generateResult.finishReason;
+      usage = generateResult.usage;
+      warnings = generateResult.warnings;
+      resultProviderMetadata = generateResult.providerMetadata;
+      request = (_a17 = generateResult.request) != null ? _a17 : {};
+      response = generateResult.responseData;
      function processResult(result2) {
-        const parseResult = (0,
+        const parseResult = (0, import_provider_utils13.safeParseJSON)({ text: result2 });
        if (!parseResult.success) {
          throw new NoObjectGeneratedError({
            message: "No object generated: could not parse the response.",
@@ -4615,7 +4446,6 @@ async function generateObject({
     warnings,
     request,
     response,
-    logprobs,
     providerMetadata: resultProviderMetadata
   });
 }
@@ -4630,7 +4460,6 @@ var DefaultGenerateObjectResult = class {
     this.providerMetadata = options.providerMetadata;
     this.response = options.response;
     this.request = options.request;
-    this.logprobs = options.logprobs;
   }
   toJsonResponse(init) {
     var _a17;
@@ -4644,7 +4473,7 @@ var DefaultGenerateObjectResult = class {
 };
 
 // core/generate-object/stream-object.ts
-var
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
 
 // util/delayed-promise.ts
 var DelayedPromise = class {
@@ -4788,13 +4617,12 @@ function now() {
 }
 
 // core/generate-object/stream-object.ts
-var originalGenerateId2 = (0,
+var originalGenerateId2 = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject({
   model,
   schema: inputSchema,
   schemaName,
   schemaDescription,
-  mode,
   output = "object",
   system,
   prompt,
@@ -4815,15 +4643,11 @@ function streamObject({
 }) {
   validateObjectGenerationInput({
     output,
-    mode,
     schema: inputSchema,
     schemaName,
     schemaDescription
   });
   const outputStrategy = getOutputStrategy({ output, schema: inputSchema });
-  if (outputStrategy.type === "no-schema" && mode === void 0) {
-    mode = "json";
-  }
   return new DefaultStreamObjectResult({
     model,
     telemetry,
@@ -4838,7 +4662,6 @@ function streamObject({
     schemaName,
     schemaDescription,
     providerOptions,
-    mode,
     onError,
     onFinish,
     generateId: generateId3,
@@ -4861,7 +4684,6 @@ var DefaultStreamObjectResult = class {
     schemaName,
     schemaDescription,
     providerOptions,
-    mode,
     onError,
     onFinish,
     generateId: generateId3,
@@ -4877,11 +4699,12 @@ var DefaultStreamObjectResult = class {
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
+    const callSettings = prepareCallSettings(settings);
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
       model,
      telemetry,
      headers,
-      settings: { ...
+      settings: { ...callSettings, maxRetries }
    });
    const tracer = getTracer(telemetry);
    const self = this;
@@ -4912,120 +4735,47 @@ var DefaultStreamObjectResult = class {
         "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
         "ai.schema.name": schemaName,
         "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-        "ai.settings.mode": mode
+        "ai.settings.output": outputStrategy.type
       }
     }),
     tracer,
     endWhenDone: false,
     fn: async (rootSpan) => {
-
-
-
-      }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        }),
-        providerOptions,
-        abortSignal,
-        headers
-      };
-      transformer = {
-        transform: (chunk, controller) => {
-          switch (chunk.type) {
-            case "text":
-              controller.enqueue(chunk.text);
-              break;
-            case "response-metadata":
-            case "finish":
-            case "error":
-              controller.enqueue(chunk);
-              break;
-          }
-        }
-      };
-      break;
-      }
-      case "tool": {
-        const standardizedPrompt = standardizePrompt({
-          prompt: { system, prompt, messages },
-          tools: void 0
-        });
-        callOptions = {
-          tools: [
-            {
-              type: "function",
-              name: schemaName != null ? schemaName : "json",
-              description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-              parameters: outputStrategy.jsonSchema
-            }
-          ],
-          toolChoice: { type: "required" },
-          ...prepareCallSettings(settings),
-          inputFormat: standardizedPrompt.type,
-          prompt: await convertToLanguageModelPrompt({
-            prompt: standardizedPrompt,
-            modelSupportsImageUrls: model.supportsImageUrls,
-            modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
-            // support 'this' context,
-          }),
-          providerOptions,
-          abortSignal,
-          headers
-        };
-        transformer = {
-          transform(chunk, controller) {
-            switch (chunk.type) {
-              case "tool-call-delta":
-                controller.enqueue(chunk.argsTextDelta);
-                break;
-              case "response-metadata":
-              case "finish":
-              case "error":
-                controller.enqueue(chunk);
-                break;
-            }
-          }
-        };
-        break;
-      }
-      case void 0: {
-        throw new Error(
-          "Model does not have a default object generation mode."
-        );
-      }
-      default: {
-        const _exhaustiveCheck = mode;
-        throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+      const standardizedPrompt = standardizePrompt({
+        prompt: { system, prompt, messages },
+        tools: void 0
+      });
+      const callOptions = {
+        responseFormat: {
+          type: "json",
+          schema: outputStrategy.jsonSchema,
+          name: schemaName,
+          description: schemaDescription
+        },
+        ...prepareCallSettings(settings),
+        inputFormat: standardizedPrompt.type,
+        prompt: await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.getSupportedUrls()
+        }),
+        providerOptions,
+        abortSignal,
+        headers
+      };
+      const transformer = {
+        transform: (chunk, controller) => {
+          switch (chunk.type) {
+            case "text":
+              controller.enqueue(chunk.text);
+              break;
+            case "response-metadata":
+            case "finish":
+            case "error":
+              controller.enqueue(chunk);
+              break;
+          }
         }
-      }
+      };
       const {
         result: { stream, response, request },
         doStreamSpan,
@@ -5047,16 +4797,15 @@ var DefaultStreamObjectResult = class {
           "ai.prompt.messages": {
             input: () => JSON.stringify(callOptions.prompt)
           },
-          "ai.settings.mode": mode,
           // standardized gen-ai llm span attributes:
           "gen_ai.system": model.provider,
           "gen_ai.request.model": model.modelId,
-          "gen_ai.request.frequency_penalty":
-          "gen_ai.request.max_tokens":
-          "gen_ai.request.presence_penalty":
-          "gen_ai.request.temperature":
-          "gen_ai.request.top_k":
-          "gen_ai.request.top_p":
+          "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+          "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+          "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+          "gen_ai.request.temperature": callSettings.temperature,
+          "gen_ai.request.top_k": callSettings.topK,
+          "gen_ai.request.top_p": callSettings.topP
         }
       }),
       tracer,
@@ -5089,7 +4838,7 @@ var DefaultStreamObjectResult = class {
     const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
       new TransformStream({
         async transform(chunk, controller) {
-          var
+          var _a17, _b, _c;
           if (typeof chunk === "object" && chunk.type === "stream-start") {
             warnings = chunk.warnings;
             return;
@@ -5139,8 +4888,8 @@ var DefaultStreamObjectResult = class {
           switch (chunk.type) {
             case "response-metadata": {
               fullResponse = {
-                id: (
-                timestamp: (
+                id: (_a17 = chunk.id) != null ? _a17 : fullResponse.id,
+                timestamp: (_b = chunk.timestamp) != null ? _b : fullResponse.timestamp,
                 modelId: (_c = chunk.modelId) != null ? _c : fullResponse.modelId
               };
               break;
@@ -5364,7 +5113,7 @@ var DefaultStreamObjectResult = class {
 };
 
 // core/generate-text/generate-text.ts
-var
+var import_provider_utils16 = require("@ai-sdk/provider-utils");
 
 // errors/no-output-specified-error.ts
 var import_provider14 = require("@ai-sdk/provider");
@@ -5473,7 +5222,7 @@ function removeTextAfterLastWhitespace(text2) {
 }
 
 // core/generate-text/parse-tool-call.ts
-var
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
 
 // errors/invalid-tool-arguments-error.ts
 var import_provider16 = require("@ai-sdk/provider");
@@ -5601,7 +5350,7 @@ async function doParseToolCall({
     });
   }
   const schema = asSchema(tool2.parameters);
-  const parseResult = toolCall.args.trim() === "" ? (0,
+  const parseResult = toolCall.args.trim() === "" ? (0, import_provider_utils15.safeValidateTypes)({ value: {}, schema }) : (0, import_provider_utils15.safeParseJSON)({ text: toolCall.args, schema });
   if (parseResult.success === false) {
     throw new InvalidToolArgumentsError({
       toolName,
@@ -5692,11 +5441,11 @@ function toResponseMessages({
 }
 
 // core/generate-text/generate-text.ts
-var originalGenerateId3 = (0,
+var originalGenerateId3 = (0, import_provider_utils16.createIdGenerator)({
   prefix: "aitxt",
   size: 24
 });
-var originalGenerateMessageId = (0,
+var originalGenerateMessageId = (0, import_provider_utils16.createIdGenerator)({
   prefix: "msg",
   size: 24
 });
@@ -5725,7 +5474,6 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
-  var _a17;
   if (maxSteps < 1) {
     throw new InvalidArgumentError({
       parameter: "maxSteps",
@@ -5734,18 +5482,15 @@ async function generateText({
     });
   }
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
+  const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
-    settings: { ...
+    settings: { ...callSettings, maxRetries }
   });
   const initialPrompt = standardizePrompt({
-    prompt: {
-      system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
-      prompt,
-      messages
-    },
+    prompt: { system, prompt, messages },
     tools
   });
   const tracer = getTracer(telemetry);
@@ -5768,11 +5513,10 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var
+      var _a17, _b, _c;
       const toolsAndToolChoice = {
         ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
       };
-      const callSettings = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
       let currentToolResults = [];
@@ -5800,99 +5544,100 @@ async function generateText({
           system: initialPrompt.system,
           messages: stepInputMessages
         },
-
-        modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
-        // support 'this' context
+        supportedUrls: await model.getSupportedUrls()
      });
      currentModelResponse = await retry(
-        () =>
-
-
-
-          attributes: {
-
-
-
-
-
-
-
-            input: () =>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        () => {
+          var _a18;
+          return recordSpan({
+            name: "ai.generateText.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateText.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": { input: () => promptFormat },
+                "ai.prompt.messages": {
+                  input: () => JSON.stringify(promptMessages)
+                },
+                "ai.prompt.tools": {
+                  // convert the language model level tools:
+                  input: () => {
+                    var _a19;
+                    return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
+                  }
+                },
+                "ai.prompt.toolChoice": {
+                  input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": settings.presencePenalty,
+                "gen_ai.request.stop_sequences": settings.stopSequences,
+                "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                "gen_ai.request.top_k": settings.topK,
+                "gen_ai.request.top_p": settings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a19, _b2, _c2, _d, _e, _f, _g, _h;
+              const result = await model.doGenerate({
+                ...callSettings,
+                ...toolsAndToolChoice,
+                inputFormat: promptFormat,
+                responseFormat: output == null ? void 0 : output.responseFormat,
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
+              });
+              const responseData = {
+                id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result.response) == null ? void 0 : _g.headers,
+                body: (_h = result.response) == null ? void 0 : _h.body
+              };
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result.finishReason,
+                    "ai.response.text": {
+                      output: () => extractContentText(result.content)
+                    },
+                    "ai.response.toolCalls": {
+                      output: () => {
+                        const toolCalls = asToolCalls(result.content);
+                        return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                      }
+                    },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result.usage.inputTokens,
+                    "ai.usage.completionTokens": result.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result, response: responseData };
            }
-          })
-
-          fn: async (span2) => {
-            var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
-            const result = await model.doGenerate({
-              ...callSettings,
-              ...toolsAndToolChoice,
-              inputFormat: promptFormat,
-              responseFormat: output == null ? void 0 : output.responseFormat({ model }),
-              prompt: promptMessages,
-              providerOptions,
-              abortSignal,
-              headers
-            });
-            const responseData = {
-              id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-              modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-              headers: (_g = result.response) == null ? void 0 : _g.headers,
-              body: (_h = result.response) == null ? void 0 : _h.body
-            };
-            span2.setAttributes(
-              selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  "ai.response.finishReason": result.finishReason,
-                  "ai.response.text": {
-                    output: () => extractContentText(result.content)
-                  },
-                  "ai.response.toolCalls": {
-                    output: () => {
-                      const toolCalls = asToolCalls(result.content);
-                      return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                    }
-                  },
-                  "ai.response.id": responseData.id,
-                  "ai.response.model": responseData.modelId,
-                  "ai.response.timestamp": responseData.timestamp.toISOString(),
-                  // TODO rename telemetry attributes to inputTokens and outputTokens
-                  "ai.usage.promptTokens": result.usage.inputTokens,
-                  "ai.usage.completionTokens": result.usage.outputTokens,
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.response.finish_reasons": [result.finishReason],
-                  "gen_ai.response.id": responseData.id,
-                  "gen_ai.response.model": responseData.modelId,
-                  "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                  "gen_ai.usage.output_tokens": result.usage.outputTokens
-                }
-              })
-            );
-            return { ...result, response: responseData };
-          }
-        })
+          });
+        }
       );
       currentToolCalls = await Promise.all(
         currentModelResponse.content.filter(
@@ -5932,7 +5677,7 @@ async function generateText({
           nextStepType = "tool-result";
         }
       }
-      const originalText = (
+      const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
       const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
       text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
       const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5983,8 +5728,7 @@ async function generateText({
         finishReason: currentModelResponse.finishReason,
         usage: currentUsage,
         warnings: currentModelResponse.warnings,
-
-        request: (_c = currentModelResponse.request) != null ? _c : {},
+        request: (_b = currentModelResponse.request) != null ? _b : {},
         response: {
           ...currentModelResponse.response,
           // deep clone msgs to avoid mutating past messages in multi-step:
@@ -6041,12 +5785,11 @@ async function generateText({
         finishReason: currentModelResponse.finishReason,
         usage,
         warnings: currentModelResponse.warnings,
-        request: (
+        request: (_c = currentModelResponse.request) != null ? _c : {},
         response: {
           ...currentModelResponse.response,
           messages: responseMessages
         },
-        logprobs: currentModelResponse.logprobs,
         steps,
         providerMetadata: currentModelResponse.providerMetadata
       });
@@ -6143,7 +5886,6 @@ var DefaultGenerateTextResult = class {
     this.response = options.response;
     this.steps = options.steps;
     this.providerMetadata = options.providerMetadata;
-    this.logprobs = options.logprobs;
     this.outputResolver = options.outputResolver;
     this.sources = options.sources;
   }
@@ -6203,7 +5945,7 @@ __export(output_exports, {
   object: () => object,
   text: () => text
 });
-var
+var import_provider_utils17 = require("@ai-sdk/provider-utils");
 
 // errors/index.ts
 var import_provider21 = require("@ai-sdk/provider");
@@ -6253,10 +5995,7 @@ _a15 = symbol15;
 // core/generate-text/output.ts
 var text = () => ({
   type: "text",
-  responseFormat:
-  injectIntoSystemPrompt({ system }) {
-    return system;
-  },
+  responseFormat: { type: "text" },
   parsePartial({ text: text2 }) {
     return { partial: text2 };
   },
@@ -6270,15 +6009,9 @@ var object = ({
   const schema = asSchema(inputSchema);
   return {
     type: "object",
-    responseFormat:
+    responseFormat: {
       type: "json",
-      schema:
-    }),
-    injectIntoSystemPrompt({ system, model }) {
-      return model.supportsStructuredOutputs ? system : injectJsonInstruction({
-        prompt: system,
-        schema: schema.jsonSchema
-      });
+      schema: schema.jsonSchema
     },
     parsePartial({ text: text2 }) {
       const result = parsePartialJson(text2);
@@ -6299,7 +6032,7 @@ var object = ({
|
|
6299
6032
|
}
|
6300
6033
|
},
|
6301
6034
|
parseOutput({ text: text2 }, context) {
|
6302
|
-
const parseResult = (0,
|
6035
|
+
const parseResult = (0, import_provider_utils17.safeParseJSON)({ text: text2 });
|
6303
6036
|
if (!parseResult.success) {
|
6304
6037
|
throw new NoObjectGeneratedError({
|
6305
6038
|
message: "No object generated: could not parse the response.",
|
@@ -6310,7 +6043,7 @@ var object = ({
|
|
6310
6043
|
finishReason: context.finishReason
|
6311
6044
|
});
|
6312
6045
|
}
|
6313
|
-
const validationResult = (0,
|
6046
|
+
const validationResult = (0, import_provider_utils17.safeValidateTypes)({
|
6314
6047
|
value: parseResult.value,
|
6315
6048
|
schema
|
6316
6049
|
});
|
@@ -6330,7 +6063,7 @@ var object = ({
|
|
6330
6063
|
};
|
6331
6064
|
|
6332
6065
|
// core/generate-text/smooth-stream.ts
|
6333
|
-
var
|
6066
|
+
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
6334
6067
|
var import_provider22 = require("@ai-sdk/provider");
|
6335
6068
|
var CHUNKING_REGEXPS = {
|
6336
6069
|
word: /\S+\s+/m,
|
@@ -6339,7 +6072,7 @@ var CHUNKING_REGEXPS = {
|
|
6339
6072
|
function smoothStream({
|
6340
6073
|
delayInMs = 10,
|
6341
6074
|
chunking = "word",
|
6342
|
-
_internal: { delay: delay2 =
|
6075
|
+
_internal: { delay: delay2 = import_provider_utils18.delay } = {}
|
6343
6076
|
} = {}) {
|
6344
6077
|
let detectChunk;
|
6345
6078
|
if (typeof chunking === "function") {
|
@@ -6400,7 +6133,7 @@ function smoothStream({
|
|
6400
6133
|
|
6401
6134
|
// core/generate-text/stream-text.ts
|
6402
6135
|
var import_provider23 = require("@ai-sdk/provider");
|
6403
|
-
var
|
6136
|
+
var import_provider_utils19 = require("@ai-sdk/provider-utils");
|
6404
6137
|
|
6405
6138
|
// util/as-array.ts
|
6406
6139
|
function asArray(value) {
|
@@ -6672,7 +6405,6 @@ function runToolsTransformation({
|
|
6672
6405
|
finishChunk = {
|
6673
6406
|
type: "finish",
|
6674
6407
|
finishReason: chunk.finishReason,
|
6675
|
-
logprobs: chunk.logprobs,
|
6676
6408
|
usage: calculateLanguageModelUsage2(chunk.usage),
|
6677
6409
|
providerMetadata: chunk.providerMetadata
|
6678
6410
|
};
|
@@ -6717,11 +6449,11 @@ function runToolsTransformation({
|
|
6717
6449
|
}
|
6718
6450
|
|
6719
6451
|
// core/generate-text/stream-text.ts
|
6720
|
-
var originalGenerateId4 = (0,
|
6452
|
+
var originalGenerateId4 = (0, import_provider_utils19.createIdGenerator)({
|
6721
6453
|
prefix: "aitxt",
|
6722
6454
|
size: 24
|
6723
6455
|
});
|
6724
|
-
var originalGenerateMessageId2 = (0,
|
6456
|
+
var originalGenerateMessageId2 = (0, import_provider_utils19.createIdGenerator)({
|
6725
6457
|
prefix: "msg",
|
6726
6458
|
size: 24
|
6727
6459
|
});
|
@@ -6879,7 +6611,6 @@ var DefaultStreamTextResult = class {
|
|
6879
6611
|
this.requestPromise = new DelayedPromise();
|
6880
6612
|
this.responsePromise = new DelayedPromise();
|
6881
6613
|
this.stepsPromise = new DelayedPromise();
|
6882
|
-
var _a17;
|
6883
6614
|
if (maxSteps < 1) {
|
6884
6615
|
throw new InvalidArgumentError({
|
6885
6616
|
parameter: "maxSteps",
|
@@ -6995,7 +6726,6 @@ var DefaultStreamTextResult = class {
|
|
6995
6726
|
finishReason: part.finishReason,
|
6996
6727
|
usage: part.usage,
|
6997
6728
|
warnings: part.warnings,
|
6998
|
-
logprobs: part.logprobs,
|
6999
6729
|
request: part.request,
|
7000
6730
|
response: {
|
7001
6731
|
...part.response,
|
@@ -7031,7 +6761,7 @@ var DefaultStreamTextResult = class {
|
|
7031
6761
|
}
|
7032
6762
|
},
|
7033
6763
|
async flush(controller) {
|
7034
|
-
var
|
6764
|
+
var _a17;
|
7035
6765
|
try {
|
7036
6766
|
if (recordedSteps.length === 0) {
|
7037
6767
|
return;
|
@@ -7059,7 +6789,6 @@ var DefaultStreamTextResult = class {
|
|
7059
6789
|
self.stepsPromise.resolve(recordedSteps);
|
7060
6790
|
await (onFinish == null ? void 0 : onFinish({
|
7061
6791
|
finishReason,
|
7062
|
-
logprobs: void 0,
|
7063
6792
|
usage,
|
7064
6793
|
text: recordedFullText,
|
7065
6794
|
reasoningText: lastStep.reasoningText,
|
@@ -7068,7 +6797,7 @@ var DefaultStreamTextResult = class {
|
|
7068
6797
|
sources: lastStep.sources,
|
7069
6798
|
toolCalls: lastStep.toolCalls,
|
7070
6799
|
toolResults: lastStep.toolResults,
|
7071
|
-
request: (
|
6800
|
+
request: (_a17 = lastStep.request) != null ? _a17 : {},
|
7072
6801
|
response: lastStep.response,
|
7073
6802
|
warnings: lastStep.warnings,
|
7074
6803
|
providerMetadata: lastStep.providerMetadata,
|
@@ -7082,8 +6811,8 @@ var DefaultStreamTextResult = class {
|
|
7082
6811
|
"ai.response.text": { output: () => recordedFullText },
|
7083
6812
|
"ai.response.toolCalls": {
|
7084
6813
|
output: () => {
|
7085
|
-
var
|
7086
|
-
return ((
|
6814
|
+
var _a18;
|
6815
|
+
return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
|
7087
6816
|
}
|
7088
6817
|
},
|
7089
6818
|
"ai.usage.promptTokens": usage.promptTokens,
|
@@ -7117,18 +6846,15 @@ var DefaultStreamTextResult = class {
|
|
7117
6846
|
maxRetries: maxRetriesArg
|
7118
6847
|
});
|
7119
6848
|
const tracer = getTracer(telemetry);
|
6849
|
+
const callSettings = prepareCallSettings(settings);
|
7120
6850
|
const baseTelemetryAttributes = getBaseTelemetryAttributes({
|
7121
6851
|
model,
|
7122
6852
|
telemetry,
|
7123
6853
|
headers,
|
7124
|
-
settings: { ...
|
6854
|
+
settings: { ...callSettings, maxRetries }
|
7125
6855
|
});
|
7126
6856
|
const initialPrompt = standardizePrompt({
|
7127
|
-
prompt: {
|
7128
|
-
system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
|
7129
|
-
prompt,
|
7130
|
-
messages
|
7131
|
-
},
|
6857
|
+
prompt: { system, prompt, messages },
|
7132
6858
|
tools
|
7133
6859
|
});
|
7134
6860
|
const self = this;
|
@@ -7159,7 +6885,6 @@ var DefaultStreamTextResult = class {
|
|
7159
6885
|
hasLeadingWhitespace,
|
7160
6886
|
messageId
|
7161
6887
|
}) {
|
7162
|
-
var _a18;
|
7163
6888
|
const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
|
7164
6889
|
const stepInputMessages = [
|
7165
6890
|
...initialPrompt.messages,
|
@@ -7171,9 +6896,7 @@ var DefaultStreamTextResult = class {
|
|
7171
6896
|
system: initialPrompt.system,
|
7172
6897
|
messages: stepInputMessages
|
7173
6898
|
},
|
7174
|
-
|
7175
|
-
modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
|
7176
|
-
// support 'this' context
|
6899
|
+
supportedUrls: await model.getSupportedUrls()
|
7177
6900
|
});
|
7178
6901
|
const toolsAndToolChoice = {
|
7179
6902
|
...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
|
@@ -7202,8 +6925,8 @@ var DefaultStreamTextResult = class {
|
|
7202
6925
|
"ai.prompt.tools": {
|
7203
6926
|
// convert the language model level tools:
|
7204
6927
|
input: () => {
|
7205
|
-
var
|
7206
|
-
return (
|
6928
|
+
var _a17;
|
6929
|
+
return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
|
7207
6930
|
(tool2) => JSON.stringify(tool2)
|
7208
6931
|
);
|
7209
6932
|
}
|
@@ -7214,32 +6937,34 @@ var DefaultStreamTextResult = class {
   // standardized gen-ai llm span attributes:
   "gen_ai.system": model.provider,
   "gen_ai.request.model": model.modelId,
-  "gen_ai.request.frequency_penalty":
-  "gen_ai.request.max_tokens":
-  "gen_ai.request.presence_penalty":
-  "gen_ai.request.stop_sequences":
-  "gen_ai.request.temperature":
-  "gen_ai.request.top_k":
-  "gen_ai.request.top_p":
+  "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+  "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+  "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+  "gen_ai.request.stop_sequences": callSettings.stopSequences,
+  "gen_ai.request.temperature": callSettings.temperature,
+  "gen_ai.request.top_k": callSettings.topK,
+  "gen_ai.request.top_p": callSettings.topP
   }
   }),
   tracer,
   endWhenDone: false,
-  fn: async (doStreamSpan2) =>
-
-
-
-
-
-
-
-
-
-
-
-
-
+  fn: async (doStreamSpan2) => {
+  return {
+  startTimestampMs: now2(),
+  // get before the call
+  doStreamSpan: doStreamSpan2,
+  result: await model.doStream({
+  ...callSettings,
+  ...toolsAndToolChoice,
+  inputFormat: promptFormat,
+  responseFormat: output == null ? void 0 : output.responseFormat,
+  prompt: promptMessages,
+  providerOptions,
+  abortSignal,
+  headers
+  })
+  };
+  }
   })
   );
   const transformedStream = runToolsTransformation({
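With `prepareCallSettings(settings)` hoisted to the top of the hunk at -7117 above, one normalized `callSettings` object now feeds both the `gen_ai.request.*` telemetry attributes and the spread into `model.doStream(...)`. A minimal sketch of that flow; `prepareCallSettingsStandIn` and `doStreamStandIn` are stand-ins for the SDK internals, not real exports:

// Sketch only: settings normalized once, then reused in two places.
interface CallSettingsSketch {
  temperature?: number;
  maxOutputTokens?: number;
  topP?: number;
}

function prepareCallSettingsStandIn(settings: CallSettingsSketch): CallSettingsSketch {
  return { ...settings }; // the real helper also validates and strips non-call keys
}

async function doStreamStandIn(options: object): Promise<object> {
  return options; // stub; the real model.doStream returns a stream handle
}

const callSettings = prepareCallSettingsStandIn({ temperature: 0.2, maxOutputTokens: 1024 });

// 1) telemetry attributes read from the normalized settings:
const attributes = { "gen_ai.request.temperature": callSettings.temperature };

// 2) the same object is spread into every per-step provider call:
const result = await doStreamStandIn({ ...callSettings, prompt: [] });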
@@ -7270,7 +6995,6 @@ var DefaultStreamTextResult = class {
   let stepFirstChunk = true;
   let stepText = "";
   let fullStepText = stepType2 === "continue" ? previousStepText : "";
-  let stepLogProbs;
   let stepResponse = {
   id: generateId3(),
   timestamp: currentDate(),
@@ -7294,7 +7018,7 @@ var DefaultStreamTextResult = class {
   transformedStream.pipeThrough(
   new TransformStream({
   async transform(chunk, controller) {
-  var
+  var _a17, _b, _c;
   if (chunk.type === "stream-start") {
   warnings = chunk.warnings;
   return;
@@ -7385,7 +7109,7 @@ var DefaultStreamTextResult = class {
   }
   case "response-metadata": {
   stepResponse = {
-  id: (
+  id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
   timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
   modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
   };
@@ -7395,7 +7119,6 @@ var DefaultStreamTextResult = class {
   stepUsage = chunk.usage;
   stepFinishReason = chunk.finishReason;
   stepProviderMetadata = chunk.providerMetadata;
-  stepLogProbs = chunk.logprobs;
   const msToFinish = now2() - startTimestampMs;
   doStreamSpan.addEvent("ai.stream.finish");
   doStreamSpan.setAttributes({
@@ -7483,7 +7206,6 @@ var DefaultStreamTextResult = class {
   finishReason: stepFinishReason,
   usage: stepUsage,
   providerMetadata: stepProviderMetadata,
-  logprobs: stepLogProbs,
   request: stepRequest,
   response: {
   ...stepResponse,
@@ -7500,7 +7222,6 @@ var DefaultStreamTextResult = class {
   finishReason: stepFinishReason,
   usage: combinedUsage,
   providerMetadata: stepProviderMetadata,
-  logprobs: stepLogProbs,
   response: {
   ...stepResponse,
   headers: response == null ? void 0 : response.headers
@@ -8121,17 +7842,32 @@ function defaultSettingsMiddleware({
   return {
   middlewareVersion: "v2",
   transformParams: async ({ params }) => {
-  var _a17;
+  var _a17, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
   return {
   ...settings,
   ...params,
+  // map all values that are null to undefined
+  maxOutputTokens: settings.maxOutputTokens !== null ? (_a17 = params.maxOutputTokens) != null ? _a17 : settings.maxOutputTokens : void 0,
+  temperature: settings.temperature !== null ? (
+  // temperature: special case 0 or null
+  params.temperature === 0 || params.temperature == null ? (_b = settings.temperature) != null ? _b : params.temperature : params.temperature
+  ) : void 0,
+  stopSequences: settings.stopSequences !== null ? (_c = params.stopSequences) != null ? _c : settings.stopSequences : void 0,
+  topP: settings.topP !== null ? (_d = params.topP) != null ? _d : settings.topP : void 0,
+  topK: settings.topK !== null ? (_e = params.topK) != null ? _e : settings.topK : void 0,
+  presencePenalty: settings.presencePenalty !== null ? (_f = params.presencePenalty) != null ? _f : settings.presencePenalty : void 0,
+  frequencyPenalty: settings.frequencyPenalty !== null ? (_g = params.frequencyPenalty) != null ? _g : settings.frequencyPenalty : void 0,
+  responseFormat: settings.responseFormat !== null ? (_h = params.responseFormat) != null ? _h : settings.responseFormat : void 0,
+  seed: settings.seed !== null ? (_i = params.seed) != null ? _i : settings.seed : void 0,
+  tools: settings.tools !== null ? (_j = params.tools) != null ? _j : settings.tools : void 0,
+  toolChoice: settings.toolChoice !== null ? (_k = params.toolChoice) != null ? _k : settings.toolChoice : void 0,
+  // headers: deep merge
+  headers: mergeObjects(settings.headers, params.headers),
+  // provider options: deep merge
   providerOptions: mergeObjects(
   settings.providerOptions,
   params.providerOptions
-  )
-  // special case for temperature 0
-  // TODO remove when temperature defaults to undefined
-  temperature: params.temperature === 0 || params.temperature == null ? (_a17 = settings.temperature) != null ? _a17 : 0 : params.temperature
+  )
   };
   }
   };
@@ -8287,7 +8023,6 @@ function simulateStreamingMiddleware() {
   type: "finish",
   finishReason: result.finishReason,
   usage: result.usage,
-  logprobs: result.logprobs,
   providerMetadata: result.providerMetadata
   });
   controller.close();
@@ -8319,7 +8054,6 @@ var doWrap = ({
   modelId,
   providerId
   }) => {
-  var _a17;
   async function doTransform({
   params,
   type
@@ -8330,10 +8064,10 @@ var doWrap = ({
   specificationVersion: "v2",
   provider: providerId != null ? providerId : model.provider,
   modelId: modelId != null ? modelId : model.modelId,
-
-
-
-
+  // TODO middleware should be able to modify the supported urls
+  async getSupportedUrls() {
+  return model.getSupportedUrls();
+  },
   async doGenerate(params) {
   const transformedParams = await doTransform({ params, type: "generate" });
   const doGenerate = async () => model.doGenerate(transformedParams);
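In `doWrap` above, the wrapped model exposes `getSupportedUrls` by pure delegation; per the TODO in the hunk, middleware cannot yet rewrite the URL map. A stand-alone sketch of that delegation pattern (all names here are illustrative, not SDK exports):

// Sketch only: forwards the supported-URL lookup to the inner model.
interface HasSupportedUrls {
  getSupportedUrls(): Promise<Record<string, RegExp[]>>;
}

function wrapModelSketch<M extends HasSupportedUrls>(inner: M) {
  return {
    async getSupportedUrls() {
      return inner.getSupportedUrls(); // pure delegation, no modification
    },
  };
}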
@@ -8695,7 +8429,7 @@ function tool(tool2) {
   }

   // core/tool/mcp/mcp-sse-transport.ts
-  var
+  var import_provider_utils20 = require("@ai-sdk/provider-utils");

   // core/tool/mcp/json-rpc-message.ts
   var import_zod9 = require("zod");
@@ -8866,7 +8600,7 @@ var SseMCPTransport = class {
   (_b = this.onerror) == null ? void 0 : _b.call(this, error);
   return reject(error);
   }
-  const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0,
+  const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils20.createEventSourceParserStream)());
   const reader = stream.getReader();
   const processEvents = async () => {
   var _a18, _b2, _c2;
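The SSE transport above now builds its parser from `createEventSourceParserStream` in `@ai-sdk/provider-utils`. A read-loop sketch, assuming the parser emits objects with `event` and `data` fields parsed from a `text/event-stream` body:

import { createEventSourceParserStream } from "@ai-sdk/provider-utils";

async function readSse(response: Response): Promise<void> {
  const stream = response
    .body!.pipeThrough(new TextDecoderStream())
    .pipeThrough(createEventSourceParserStream());
  const reader = stream.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // the MCP transport dispatches on the event name ("endpoint", "message", ...)
    console.log(value.event, value.data);
  }
}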
@@ -9271,7 +9005,7 @@ function cosineSimilarity(vector1, vector2) {
   }

   // core/util/simulate-readable-stream.ts
-  var
+  var import_provider_utils21 = require("@ai-sdk/provider-utils");
   function simulateReadableStream({
   chunks,
   initialDelayInMs = 0,
@@ -9279,7 +9013,7 @@ function simulateReadableStream({
   _internal
   }) {
   var _a17;
-  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 :
+  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils21.delay;
   let index = 0;
   return new ReadableStream({
   async pull(controller) {
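`simulateReadableStream` above now defaults its internal `delay` to the one exported by `@ai-sdk/provider-utils`. A usage sketch: `initialDelayInMs` is visible in the hunk, while `chunkDelayInMs` as the name of the between-chunk delay is assumed from the package's documented signature:

import { simulateReadableStream } from "ai";

const stream = simulateReadableStream({
  chunks: ["one", "two", "three"],
  initialDelayInMs: 100, // wait before the first chunk
  chunkDelayInMs: 50, // wait between subsequent chunks (assumed name)
});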
@@ -9406,10 +9140,10 @@ __export(llamaindex_adapter_exports, {
   toDataStream: () => toDataStream2,
   toDataStreamResponse: () => toDataStreamResponse2
   });
-  var
+  var import_provider_utils23 = require("@ai-sdk/provider-utils");
   function toDataStreamInternal2(stream, callbacks) {
   const trimStart = trimStartOfStream();
-  return (0,
+  return (0, import_provider_utils23.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
   new TransformStream({
   async transform(message, controller) {
   controller.enqueue(trimStart(message.delta));
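The LlamaIndex adapter above now pulls `convertAsyncIteratorToReadableStream` from `@ai-sdk/provider-utils` to turn the engine's async iterator into a web `ReadableStream` before forwarding each `message.delta`. A self-contained sketch of the same plumbing; the `{ delta }` message shape follows the code above:

import { convertAsyncIteratorToReadableStream } from "@ai-sdk/provider-utils";

async function* exampleDeltas() {
  yield { delta: "Hello, " };
  yield { delta: "world" };
}

const readable = convertAsyncIteratorToReadableStream(exampleDeltas()).pipeThrough(
  new TransformStream<{ delta: string }, string>({
    transform(message, controller) {
      controller.enqueue(message.delta); // forward just the text delta
    },
  })
);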