ai 5.0.0-canary.10 → 5.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +19 -44
- package/dist/index.d.ts +19 -44
- package/dist/index.js +374 -640
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +341 -607
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -5
- package/dist/internal/index.d.ts +7 -5
- package/dist/internal/index.js +23 -15
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +23 -15
- package/dist/internal/index.mjs.map +1 -1
- package/dist/mcp-stdio/index.js +3 -3
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs +3 -3
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/dist/test/index.d.mts +3 -7
- package/dist/test/index.d.ts +3 -7
- package/dist/test/index.js +3 -7
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +3 -7
- package/dist/test/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -2601,6 +2601,17 @@ var NoObjectGeneratedError = class extends AISDKError4 {
 };
 _a4 = symbol4;

+// core/generate-text/extract-content-text.ts
+function extractContentText(content) {
+  const parts = content.filter(
+    (content2) => content2.type === "text"
+  );
+  if (parts.length === 0) {
+    return void 0;
+  }
+  return parts.map((content2) => content2.text).join("");
+}
+
 // util/download-error.ts
 import { AISDKError as AISDKError5 } from "@ai-sdk/provider";
 var name5 = "AI_DownloadError";
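The new helper concatenates the `text` parts of a model's content array and yields `undefined` when none exist. A minimal sketch of its behavior (the content values are hypothetical):

```js
const content = [
  { type: "text", text: "Hello, " },
  { type: "tool-call", toolCallId: "call_1", toolName: "search", args: "{}" },
  { type: "text", text: "world." }
];

extractContentText(content); // => "Hello, world."
extractContentText([]);      // => undefined (not ""), which callers use to detect a missing text response
```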
@@ -2798,17 +2809,16 @@ var InvalidMessageRoleError = class extends AISDKError8 {
 _a7 = symbol7;

 // core/prompt/convert-to-language-model-prompt.ts
+import { isUrlSupported } from "@ai-sdk/provider-utils";
 async function convertToLanguageModelPrompt({
   prompt,
-  modelSupportsUrl = () => false,
+  supportedUrls,
   downloadImplementation = download
 }) {
   const downloadedAssets = await downloadAssets(
     prompt.messages,
     downloadImplementation,
-    modelSupportsUrl
+    supportedUrls
   );
   return [
     ...prompt.system != null ? [{ role: "system", content: prompt.system }] : [],
@@ -2927,19 +2937,29 @@ function convertToLanguageModelMessage(message, downloadedAssets) {
     }
   }
 }
-async function downloadAssets(messages, downloadImplementation, modelSupportsUrl) {
+async function downloadAssets(messages, downloadImplementation, supportedUrls) {
   const urls = messages.filter((message) => message.role === "user").map((message) => message.content).filter(
     (content) => Array.isArray(content)
   ).flat().filter(
     (part) => part.type === "image" || part.type === "file"
-  ).
+  ).map((part) => {
+    var _a17, _b;
+    const mediaType = (_b = (_a17 = part.mediaType) != null ? _a17 : part.mimeType) != null ? _b : part.type === "image" ? "image/*" : void 0;
+    let data = part.type === "image" ? part.image : part.data;
+    if (typeof data === "string") {
+      try {
+        data = new URL(data);
+      } catch (ignored) {
+      }
+    }
+    return { mediaType, data };
+  }).filter(
+    (part) => part.data instanceof URL && part.mediaType != null && !isUrlSupported({
+      url: part.data.toString(),
+      mediaType: part.mediaType,
+      supportedUrls
+    })
+  ).map((part) => part.data);
   const downloadedImages = await Promise.all(
     urls.map(async (url) => ({
       url,
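`downloadAssets` now downloads only URLs the model does not support natively. A sketch of the `isUrlSupported` check it relies on, assuming `supportedUrls` maps media-type patterns to URL regexps (the values below are hypothetical):

```js
import { isUrlSupported } from "@ai-sdk/provider-utils";

const supportedUrls = { "image/*": [/^https:\/\/example\.com\//] };

isUrlSupported({
  url: "https://example.com/cat.png",
  mediaType: "image/*",
  supportedUrls
}); // => true: the URL is passed through to the provider untouched

isUrlSupported({
  url: "https://other.host/cat.png",
  mediaType: "image/*",
  supportedUrls
}); // => false: downloadAssets fetches this URL before the model call
```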
@@ -3093,8 +3113,7 @@ function prepareCallSettings({
   }
   return {
     maxOutputTokens,
-    temperature: temperature != null ? temperature : 0,
+    temperature: temperature != null ? temperature : temperature === null ? void 0 : 0,
     topP,
     topK,
     presencePenalty,
@@ -3713,26 +3732,6 @@ function addLanguageModelUsage(usage1, usage2) {
   };
 }

-// core/generate-object/inject-json-instruction.ts
-var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
-var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
-var DEFAULT_GENERIC_SUFFIX = "You MUST answer with JSON.";
-function injectJsonInstruction({
-  prompt,
-  schema,
-  schemaPrefix = schema != null ? DEFAULT_SCHEMA_PREFIX : void 0,
-  schemaSuffix = schema != null ? DEFAULT_SCHEMA_SUFFIX : DEFAULT_GENERIC_SUFFIX
-}) {
-  return [
-    prompt != null && prompt.length > 0 ? prompt : void 0,
-    prompt != null && prompt.length > 0 ? "" : void 0,
-    // add a newline if prompt is not null
-    schemaPrefix,
-    schema != null ? JSON.stringify(schema) : void 0,
-    schemaSuffix
-  ].filter((line) => line != null).join("\n");
-}
-
 // core/generate-object/output-strategy.ts
 import {
   isJSONArray,
@@ -3985,7 +3984,6 @@ function getOutputStrategy({
 // core/generate-object/validate-object-generation-input.ts
 function validateObjectGenerationInput({
   output,
-  mode,
   schema,
   schemaName,
   schemaDescription,
@@ -3999,13 +3997,6 @@ function validateObjectGenerationInput({
     });
   }
   if (output === "no-schema") {
-    if (mode === "auto" || mode === "tool") {
-      throw new InvalidArgumentError({
-        parameter: "mode",
-        value: mode,
-        message: 'Mode must be "json" for no-schema output.'
-      });
-    }
     if (schema != null) {
       throw new InvalidArgumentError({
         parameter: "schema",
@@ -4108,17 +4099,6 @@ function validateObjectGenerationInput({
   }
 }

-// core/generate-text/extract-content-text.ts
-function extractContentText(content) {
-  const parts = content.filter(
-    (content2) => content2.type === "text"
-  );
-  if (parts.length === 0) {
-    return void 0;
-  }
-  return parts.map((content2) => content2.text).join("");
-}
-
 // core/generate-object/generate-object.ts
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
 async function generateObject({
@@ -4128,7 +4108,6 @@ async function generateObject({
   schema: inputSchema,
   schemaName,
   schemaDescription,
-  mode,
   output = "object",
   system,
   prompt,
@@ -4147,7 +4126,6 @@ async function generateObject({
 }) {
   validateObjectGenerationInput({
     output,
-    mode,
     schema: inputSchema,
     schemaName,
     schemaDescription,
@@ -4159,14 +4137,12 @@ async function generateObject({
     schema: inputSchema,
     enumValues
   });
-  if (outputStrategy.type === "no-schema" && mode === void 0) {
-    mode = "json";
-  }
+  const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
-    settings: { ...settings, maxRetries }
+    settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
   return recordSpan({
@@ -4186,265 +4162,120 @@ async function generateObject({
       "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
       "ai.schema.name": schemaName,
       "ai.schema.description": schemaDescription,
-      "ai.settings.output": outputStrategy.type,
-      "ai.settings.mode": mode
+      "ai.settings.output": outputStrategy.type
     }
   }),
   tracer,
   fn: async (span) => {
-    var _a17, _b, _c, _d;
-    if (mode === "auto" || mode == null) {
-      mode = model.defaultObjectGenerationMode;
-    }
+    var _a17;
     let result;
     let finishReason;
     let usage;
     let warnings;
     let response;
     let request;
-    let logprobs;
     let resultProviderMetadata;
-        // support 'this' context
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.format": {
-                input: () => standardizedPrompt.type
-              },
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              "ai.settings.mode": mode,
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-              "gen_ai.request.max_tokens": settings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": settings.presencePenalty,
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
-            }
+    const standardizedPrompt = standardizePrompt({
+      prompt: { system, prompt, messages },
+      tools: void 0
+    });
+    const promptMessages = await convertToLanguageModelPrompt({
+      prompt: standardizedPrompt,
+      supportedUrls: await model.getSupportedUrls()
+    });
+    const generateResult = await retry(
+      () => recordSpan({
+        name: "ai.generateObject.doGenerate",
+        attributes: selectTelemetryAttributes({
+          telemetry,
+          attributes: {
+            ...assembleOperationName({
+              operationId: "ai.generateObject.doGenerate",
+              telemetry
             }),
-        return { ...result2, objectText: text2, responseData };
-      }
-    })
-  );
-  result = generateResult.objectText;
-  finishReason = generateResult.finishReason;
-  usage = generateResult.usage;
-  warnings = generateResult.warnings;
-  logprobs = generateResult.logprobs;
-  resultProviderMetadata = generateResult.providerMetadata;
-  request = (_b = generateResult.request) != null ? _b : {};
-  response = generateResult.responseData;
-  break;
-}
-case "tool": {
-  const standardizedPrompt = standardizePrompt({
-    prompt: { system, prompt, messages },
-    tools: void 0
-  });
-  const promptMessages = await convertToLanguageModelPrompt({
-    prompt: standardizedPrompt,
-    modelSupportsImageUrls: model.supportsImageUrls,
-    modelSupportsUrl: (_c = model.supportsUrl) == null ? void 0 : _c.bind(model)
-    // support 'this' context,
-  });
-  const inputFormat = standardizedPrompt.type;
-  const generateResult = await retry(
-    () => recordSpan({
-      name: "ai.generateObject.doGenerate",
-      attributes: selectTelemetryAttributes({
+            ...baseTelemetryAttributes,
+            "ai.prompt.format": {
+              input: () => standardizedPrompt.type
+            },
+            "ai.prompt.messages": {
+              input: () => JSON.stringify(promptMessages)
+            },
+            // standardized gen-ai llm span attributes:
+            "gen_ai.system": model.provider,
+            "gen_ai.request.model": model.modelId,
+            "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+            "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+            "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+            "gen_ai.request.temperature": callSettings.temperature,
+            "gen_ai.request.top_k": callSettings.topK,
+            "gen_ai.request.top_p": callSettings.topP
+          }
+        }),
+        tracer,
+        fn: async (span2) => {
+          var _a18, _b, _c, _d, _e, _f, _g, _h;
+          const result2 = await model.doGenerate({
+            responseFormat: {
+              type: "json",
+              schema: outputStrategy.jsonSchema,
+              name: schemaName,
+              description: schemaDescription
+            },
+            ...prepareCallSettings(settings),
+            inputFormat: standardizedPrompt.type,
+            prompt: promptMessages,
+            providerOptions,
+            abortSignal,
+            headers
+          });
+          const responseData = {
+            id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+            timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+            modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+            headers: (_g = result2.response) == null ? void 0 : _g.headers,
+            body: (_h = result2.response) == null ? void 0 : _h.body
+          };
+          const text2 = extractContentText(result2.content);
+          if (text2 === void 0) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: the model did not return a response.",
+              response: responseData,
+              usage: calculateLanguageModelUsage2(result2.usage),
+              finishReason: result2.finishReason
+            });
+          }
+          span2.setAttributes(
+            selectTelemetryAttributes({
               telemetry,
               attributes: {
-                "ai.prompt.messages": {
-                  input: () => JSON.stringify(promptMessages)
-                },
-                "ai.settings.mode": mode,
+                "ai.response.finishReason": result2.finishReason,
+                "ai.response.object": { output: () => text2 },
+                "ai.response.id": responseData.id,
+                "ai.response.model": responseData.modelId,
+                "ai.response.timestamp": responseData.timestamp.toISOString(),
+                // TODO rename telemetry attributes to inputTokens and outputTokens
+                "ai.usage.promptTokens": result2.usage.inputTokens,
+                "ai.usage.completionTokens": result2.usage.outputTokens,
                 // standardized gen-ai llm span attributes:
-                "gen_ai.system": model.provider,
-                "gen_ai.request.model": model.modelId,
-                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                "gen_ai.request.presence_penalty": settings.presencePenalty,
-                "gen_ai.request.temperature": settings.temperature,
-                "gen_ai.request.top_k": settings.topK,
-                "gen_ai.request.top_p": settings.topP
-              }
-            }),
-            tracer,
-            fn: async (span2) => {
-              var _a18, _b2, _c2, _d2, _e, _f, _g, _h;
-              const result2 = await model.doGenerate({
-                tools: [
-                  {
-                    type: "function",
-                    name: schemaName != null ? schemaName : "json",
-                    description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-                    parameters: outputStrategy.jsonSchema
-                  }
-                ],
-                toolChoice: { type: "required" },
-                ...prepareCallSettings(settings),
-                inputFormat,
-                prompt: promptMessages,
-                providerOptions,
-                abortSignal,
-                headers
-              });
-              const firstToolCall = result2.content.find(
-                (content) => content.type === "tool-call"
-              );
-              const objectText = firstToolCall == null ? void 0 : firstToolCall.args;
-              const responseData = {
-                id: (_b2 = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b2 : generateId3(),
-                timestamp: (_d2 = (_c2 = result2.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-                headers: (_g = result2.response) == null ? void 0 : _g.headers,
-                body: (_h = result2.response) == null ? void 0 : _h.body
-              };
-              if (objectText === void 0) {
-                throw new NoObjectGeneratedError({
-                  message: "No object generated: the tool was not called.",
-                  response: responseData,
-                  usage: calculateLanguageModelUsage2(result2.usage),
-                  finishReason: result2.finishReason
-                });
+                "gen_ai.response.finish_reasons": [result2.finishReason],
+                "gen_ai.response.id": responseData.id,
+                "gen_ai.response.model": responseData.modelId,
+                "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                "gen_ai.usage.output_tokens": result2.usage.outputTokens
               }
-                "gen_ai.response.finish_reasons": [result2.finishReason],
-                "gen_ai.response.id": responseData.id,
-                "gen_ai.response.model": responseData.modelId,
-                "gen_ai.usage.input_tokens": result2.usage.inputTokens,
-                "gen_ai.usage.output_tokens": result2.usage.outputTokens
-              }
-            })
-          );
-          return { ...result2, objectText, responseData };
-        }
-      })
-    );
-    result = generateResult.objectText;
-    finishReason = generateResult.finishReason;
-    usage = generateResult.usage;
-    warnings = generateResult.warnings;
-    logprobs = generateResult.logprobs;
-    resultProviderMetadata = generateResult.providerMetadata;
-    request = (_d = generateResult.request) != null ? _d : {};
-    response = generateResult.responseData;
-    break;
-  }
-  case void 0: {
-    throw new Error(
-      "Model does not have a default object generation mode."
-    );
-  }
-  default: {
-    const _exhaustiveCheck = mode;
-    throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
-  }
-}
+            })
+          );
+          return { ...result2, objectText: text2, responseData };
+        }
+      })
+    );
+    result = generateResult.objectText;
+    finishReason = generateResult.finishReason;
+    usage = generateResult.usage;
+    warnings = generateResult.warnings;
+    resultProviderMetadata = generateResult.providerMetadata;
+    request = (_a17 = generateResult.request) != null ? _a17 : {};
+    response = generateResult.responseData;
     function processResult(result2) {
       const parseResult = safeParseJSON2({ text: result2 });
       if (!parseResult.success) {
@@ -4515,7 +4346,6 @@ async function generateObject({
       warnings,
       request,
       response,
-      logprobs,
       providerMetadata: resultProviderMetadata
     });
   }
@@ -4530,7 +4360,6 @@ var DefaultGenerateObjectResult = class {
     this.providerMetadata = options.providerMetadata;
     this.response = options.response;
     this.request = options.request;
-    this.logprobs = options.logprobs;
   }
   toJsonResponse(init) {
     var _a17;
@@ -4694,7 +4523,6 @@ function streamObject({
   schema: inputSchema,
   schemaName,
   schemaDescription,
-  mode,
   output = "object",
   system,
   prompt,
@@ -4715,15 +4543,11 @@ function streamObject({
 }) {
   validateObjectGenerationInput({
     output,
-    mode,
     schema: inputSchema,
     schemaName,
     schemaDescription
   });
   const outputStrategy = getOutputStrategy({ output, schema: inputSchema });
-  if (outputStrategy.type === "no-schema" && mode === void 0) {
-    mode = "json";
-  }
   return new DefaultStreamObjectResult({
     model,
     telemetry,
@@ -4738,7 +4562,6 @@ function streamObject({
     schemaName,
     schemaDescription,
     providerOptions,
-    mode,
     onError,
     onFinish,
     generateId: generateId3,
@@ -4761,7 +4584,6 @@ var DefaultStreamObjectResult = class {
     schemaName,
     schemaDescription,
     providerOptions,
-    mode,
     onError,
     onFinish,
     generateId: generateId3,
@@ -4777,11 +4599,12 @@ var DefaultStreamObjectResult = class {
     const { maxRetries, retry } = prepareRetries({
       maxRetries: maxRetriesArg
     });
+    const callSettings = prepareCallSettings(settings);
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
       model,
       telemetry,
      headers,
-      settings: { ...settings, maxRetries }
+      settings: { ...callSettings, maxRetries }
     });
     const tracer = getTracer(telemetry);
     const self = this;
@@ -4812,120 +4635,47 @@ var DefaultStreamObjectResult = class {
           "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
           "ai.schema.name": schemaName,
           "ai.schema.description": schemaDescription,
-          "ai.settings.output": outputStrategy.type,
-          "ai.settings.mode": mode
+          "ai.settings.output": outputStrategy.type
         }
       }),
       tracer,
       endWhenDone: false,
       fn: async (rootSpan) => {
-            }),
-            providerOptions,
-            abortSignal,
-            headers
-          };
-          transformer = {
-            transform: (chunk, controller) => {
-              switch (chunk.type) {
-                case "text":
-                  controller.enqueue(chunk.text);
-                  break;
-                case "response-metadata":
-                case "finish":
-                case "error":
-                  controller.enqueue(chunk);
-                  break;
-              }
-            }
-          };
-          break;
-        }
-        case "tool": {
-          const standardizedPrompt = standardizePrompt({
-            prompt: { system, prompt, messages },
-            tools: void 0
-          });
-          callOptions = {
-            tools: [
-              {
-                type: "function",
-                name: schemaName != null ? schemaName : "json",
-                description: schemaDescription != null ? schemaDescription : "Respond with a JSON object.",
-                parameters: outputStrategy.jsonSchema
-              }
-            ],
-            toolChoice: { type: "required" },
-            ...prepareCallSettings(settings),
-            inputFormat: standardizedPrompt.type,
-            prompt: await convertToLanguageModelPrompt({
-              prompt: standardizedPrompt,
-              modelSupportsImageUrls: model.supportsImageUrls,
-              modelSupportsUrl: (_b = model.supportsUrl) == null ? void 0 : _b.bind(model)
-              // support 'this' context,
-            }),
-            providerOptions,
-            abortSignal,
-            headers
-          };
-          transformer = {
-            transform(chunk, controller) {
-              switch (chunk.type) {
-                case "tool-call-delta":
-                  controller.enqueue(chunk.argsTextDelta);
-                  break;
-                case "response-metadata":
-                case "finish":
-                case "error":
-                  controller.enqueue(chunk);
-                  break;
-              }
-            }
-          };
-          break;
-        }
-        case void 0: {
-          throw new Error(
-            "Model does not have a default object generation mode."
-          );
-        }
-        default: {
-          const _exhaustiveCheck = mode;
-          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+        const standardizedPrompt = standardizePrompt({
+          prompt: { system, prompt, messages },
+          tools: void 0
+        });
+        const callOptions = {
+          responseFormat: {
+            type: "json",
+            schema: outputStrategy.jsonSchema,
+            name: schemaName,
+            description: schemaDescription
+          },
+          ...prepareCallSettings(settings),
+          inputFormat: standardizedPrompt.type,
+          prompt: await convertToLanguageModelPrompt({
+            prompt: standardizedPrompt,
+            supportedUrls: await model.getSupportedUrls()
+          }),
+          providerOptions,
+          abortSignal,
+          headers
+        };
+        const transformer = {
+          transform: (chunk, controller) => {
+            switch (chunk.type) {
+              case "text":
+                controller.enqueue(chunk.text);
+                break;
+              case "response-metadata":
+              case "finish":
+              case "error":
+                controller.enqueue(chunk);
+                break;
+            }
           }
-        }
+        };
         const {
           result: { stream, response, request },
           doStreamSpan,
@@ -4947,16 +4697,15 @@ var DefaultStreamObjectResult = class {
               "ai.prompt.messages": {
                 input: () => JSON.stringify(callOptions.prompt)
               },
-              "ai.settings.mode": mode,
               // standardized gen-ai llm span attributes:
               "gen_ai.system": model.provider,
               "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-              "gen_ai.request.max_tokens": settings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": settings.presencePenalty,
-              "gen_ai.request.temperature": settings.temperature,
-              "gen_ai.request.top_k": settings.topK,
-              "gen_ai.request.top_p": settings.topP
+              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+              "gen_ai.request.temperature": callSettings.temperature,
+              "gen_ai.request.top_k": callSettings.topK,
+              "gen_ai.request.top_p": callSettings.topP
             }
           }),
           tracer,
@@ -4989,7 +4738,7 @@ var DefaultStreamObjectResult = class {
       const transformedStream = stream.pipeThrough(new TransformStream(transformer)).pipeThrough(
         new TransformStream({
           async transform(chunk, controller) {
+            var _a17, _b, _c;
             if (typeof chunk === "object" && chunk.type === "stream-start") {
               warnings = chunk.warnings;
               return;
@@ -5039,8 +4788,8 @@ var DefaultStreamObjectResult = class {
             switch (chunk.type) {
               case "response-metadata": {
                 fullResponse = {
+                  id: (_a17 = chunk.id) != null ? _a17 : fullResponse.id,
+                  timestamp: (_b = chunk.timestamp) != null ? _b : fullResponse.timestamp,
                   modelId: (_c = chunk.modelId) != null ? _c : fullResponse.modelId
                 };
                 break;
@@ -5625,7 +5374,6 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
-  var _a17;
   if (maxSteps < 1) {
     throw new InvalidArgumentError({
       parameter: "maxSteps",
@@ -5634,18 +5382,15 @@ async function generateText({
     });
   }
   const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
+  const callSettings = prepareCallSettings(settings);
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
     headers,
-    settings: { ...settings, maxRetries }
+    settings: { ...callSettings, maxRetries }
   });
   const initialPrompt = standardizePrompt({
-    prompt: {
-      system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
-      prompt,
-      messages
-    },
+    prompt: { system, prompt, messages },
     tools
   });
   const tracer = getTracer(telemetry);
@@ -5668,11 +5413,10 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
+      var _a17, _b, _c;
       const toolsAndToolChoice = {
         ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
       };
-      const callSettings = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
       let currentToolResults = [];
@@ -5700,99 +5444,100 @@ async function generateText({
             system: initialPrompt.system,
             messages: stepInputMessages
           },
-          modelSupportsImageUrls: model.supportsImageUrls,
-          modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
-          // support 'this' context
+          supportedUrls: await model.getSupportedUrls()
         });
         currentModelResponse = await retry(
-          () => recordSpan({
-            fn: async (span2) => {
-              var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
-              const result = await model.doGenerate({
-                ...callSettings,
-                ...toolsAndToolChoice,
-                inputFormat: promptFormat,
-                responseFormat: output == null ? void 0 : output.responseFormat({ model }),
-                prompt: promptMessages,
-                providerOptions,
-                abortSignal,
-                headers
-              });
-              const responseData = {
-                id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-                timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
-                headers: (_g = result.response) == null ? void 0 : _g.headers,
-                body: (_h = result.response) == null ? void 0 : _h.body
-              };
-              span2.setAttributes(
-                selectTelemetryAttributes({
-                  telemetry,
-                  attributes: {
-                    "ai.response.finishReason": result.finishReason,
-                    "ai.response.text": {
-                      output: () => extractContentText(result.content)
-                    },
-                    "ai.response.toolCalls": {
-                      output: () => {
-                        const toolCalls = asToolCalls(result.content);
-                        return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                      }
-                    },
-                    "ai.response.id": responseData.id,
-                    "ai.response.model": responseData.modelId,
-                    "ai.response.timestamp": responseData.timestamp.toISOString(),
-                    // TODO rename telemetry attributes to inputTokens and outputTokens
-                    "ai.usage.promptTokens": result.usage.inputTokens,
-                    "ai.usage.completionTokens": result.usage.outputTokens,
-                    // standardized gen-ai llm span attributes:
-                    "gen_ai.response.finish_reasons": [result.finishReason],
-                    "gen_ai.response.id": responseData.id,
-                    "gen_ai.response.model": responseData.modelId,
-                    "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                    "gen_ai.usage.output_tokens": result.usage.outputTokens
-                  }
-                })
-              );
-              return { ...result, response: responseData };
-            }
-          })
+          () => {
+            var _a18;
+            return recordSpan({
+              name: "ai.generateText.doGenerate",
+              attributes: selectTelemetryAttributes({
+                telemetry,
+                attributes: {
+                  ...assembleOperationName({
+                    operationId: "ai.generateText.doGenerate",
+                    telemetry
+                  }),
+                  ...baseTelemetryAttributes,
+                  "ai.prompt.format": { input: () => promptFormat },
+                  "ai.prompt.messages": {
+                    input: () => JSON.stringify(promptMessages)
+                  },
+                  "ai.prompt.tools": {
+                    // convert the language model level tools:
+                    input: () => {
+                      var _a19;
+                      return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
+                    }
+                  },
+                  "ai.prompt.toolChoice": {
+                    input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+                  },
+                  // standardized gen-ai llm span attributes:
+                  "gen_ai.system": model.provider,
+                  "gen_ai.request.model": model.modelId,
+                  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                  "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                  "gen_ai.request.presence_penalty": settings.presencePenalty,
+                  "gen_ai.request.stop_sequences": settings.stopSequences,
+                  "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                  "gen_ai.request.top_k": settings.topK,
+                  "gen_ai.request.top_p": settings.topP
+                }
+              }),
+              tracer,
+              fn: async (span2) => {
+                var _a19, _b2, _c2, _d, _e, _f, _g, _h;
+                const result = await model.doGenerate({
+                  ...callSettings,
+                  ...toolsAndToolChoice,
+                  inputFormat: promptFormat,
+                  responseFormat: output == null ? void 0 : output.responseFormat,
+                  prompt: promptMessages,
+                  providerOptions,
+                  abortSignal,
+                  headers
+                });
+                const responseData = {
+                  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                  timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
+                  modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                  headers: (_g = result.response) == null ? void 0 : _g.headers,
+                  body: (_h = result.response) == null ? void 0 : _h.body
+                };
+                span2.setAttributes(
+                  selectTelemetryAttributes({
+                    telemetry,
+                    attributes: {
+                      "ai.response.finishReason": result.finishReason,
+                      "ai.response.text": {
+                        output: () => extractContentText(result.content)
+                      },
+                      "ai.response.toolCalls": {
+                        output: () => {
+                          const toolCalls = asToolCalls(result.content);
+                          return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                        }
+                      },
+                      "ai.response.id": responseData.id,
+                      "ai.response.model": responseData.modelId,
+                      "ai.response.timestamp": responseData.timestamp.toISOString(),
+                      // TODO rename telemetry attributes to inputTokens and outputTokens
+                      "ai.usage.promptTokens": result.usage.inputTokens,
+                      "ai.usage.completionTokens": result.usage.outputTokens,
+                      // standardized gen-ai llm span attributes:
+                      "gen_ai.response.finish_reasons": [result.finishReason],
+                      "gen_ai.response.id": responseData.id,
+                      "gen_ai.response.model": responseData.modelId,
+                      "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                      "gen_ai.usage.output_tokens": result.usage.outputTokens
+                    }
+                  })
+                );
+                return { ...result, response: responseData };
+              }
+            });
+          }
         );
         currentToolCalls = await Promise.all(
           currentModelResponse.content.filter(
@@ -5832,7 +5577,7 @@ async function generateText({
           nextStepType = "tool-result";
         }
       }
+      const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
       const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
       text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
       const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5883,8 +5628,7 @@ async function generateText({
             finishReason: currentModelResponse.finishReason,
             usage: currentUsage,
             warnings: currentModelResponse.warnings,
-            logprobs: currentModelResponse.logprobs,
-            request: (_c = currentModelResponse.request) != null ? _c : {},
+            request: (_b = currentModelResponse.request) != null ? _b : {},
             response: {
               ...currentModelResponse.response,
               // deep clone msgs to avoid mutating past messages in multi-step:
|
|
5941
5685
|
finishReason: currentModelResponse.finishReason,
|
5942
5686
|
usage,
|
5943
5687
|
warnings: currentModelResponse.warnings,
|
5944
|
-
request: (
|
5688
|
+
request: (_c = currentModelResponse.request) != null ? _c : {},
|
5945
5689
|
response: {
|
5946
5690
|
...currentModelResponse.response,
|
5947
5691
|
messages: responseMessages
|
5948
5692
|
},
|
5949
|
-
logprobs: currentModelResponse.logprobs,
|
5950
5693
|
steps,
|
5951
5694
|
providerMetadata: currentModelResponse.providerMetadata
|
5952
5695
|
});
|
@@ -6043,7 +5786,6 @@ var DefaultGenerateTextResult = class {
     this.response = options.response;
     this.steps = options.steps;
     this.providerMetadata = options.providerMetadata;
-    this.logprobs = options.logprobs;
     this.outputResolver = options.outputResolver;
     this.sources = options.sources;
   }
@@ -6165,10 +5907,7 @@ _a15 = symbol15;
 // core/generate-text/output.ts
 var text = () => ({
   type: "text",
-  responseFormat: () => ({ type: "text" }),
-  injectIntoSystemPrompt({ system }) {
-    return system;
-  },
+  responseFormat: { type: "text" },
   parsePartial({ text: text2 }) {
     return { partial: text2 };
   },
@@ -6182,15 +5921,9 @@ var object = ({
   const schema = asSchema(inputSchema);
   return {
     type: "object",
-    responseFormat: ({ model }) => ({
+    responseFormat: {
       type: "json",
-      schema: model.supportsStructuredOutputs ? schema.jsonSchema : void 0
-    }),
-    injectIntoSystemPrompt({ system, model }) {
-      return model.supportsStructuredOutputs ? system : injectJsonInstruction({
-        prompt: system,
-        schema: schema.jsonSchema
-      });
+      schema: schema.jsonSchema
     },
     parsePartial({ text: text2 }) {
       const result = parsePartialJson(text2);
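Both output helpers now expose `responseFormat` as a plain object rather than a `({ model })` function, and the system-prompt injection step is gone entirely. A sketch of the new shape, using the file-local `object` helper from this hunk (zod import assumed):

```js
import { z } from "zod";

const out = object({ schema: z.object({ title: z.string() }) });
out.responseFormat; // => { type: "json", schema: { /* the JSON schema */ } }
// Previously this was responseFormat({ model }) plus injectIntoSystemPrompt({ system, model }),
// which fell back to injectJsonInstruction for models without structured output support.
```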
@@ -6586,7 +6319,6 @@ function runToolsTransformation({
         finishChunk = {
           type: "finish",
           finishReason: chunk.finishReason,
-          logprobs: chunk.logprobs,
           usage: calculateLanguageModelUsage2(chunk.usage),
           providerMetadata: chunk.providerMetadata
         };
@@ -6793,7 +6525,6 @@ var DefaultStreamTextResult = class {
     this.requestPromise = new DelayedPromise();
     this.responsePromise = new DelayedPromise();
     this.stepsPromise = new DelayedPromise();
-    var _a17;
     if (maxSteps < 1) {
       throw new InvalidArgumentError({
         parameter: "maxSteps",
@@ -6909,7 +6640,6 @@ var DefaultStreamTextResult = class {
             finishReason: part.finishReason,
             usage: part.usage,
             warnings: part.warnings,
-            logprobs: part.logprobs,
             request: part.request,
             response: {
               ...part.response,
@@ -6945,7 +6675,7 @@ var DefaultStreamTextResult = class {
           }
         },
         async flush(controller) {
+          var _a17;
           try {
             if (recordedSteps.length === 0) {
               return;
@@ -6973,7 +6703,6 @@ var DefaultStreamTextResult = class {
           self.stepsPromise.resolve(recordedSteps);
           await (onFinish == null ? void 0 : onFinish({
             finishReason,
-            logprobs: void 0,
             usage,
             text: recordedFullText,
             reasoningText: lastStep.reasoningText,
@@ -6982,7 +6711,7 @@ var DefaultStreamTextResult = class {
             sources: lastStep.sources,
             toolCalls: lastStep.toolCalls,
             toolResults: lastStep.toolResults,
+            request: (_a17 = lastStep.request) != null ? _a17 : {},
             response: lastStep.response,
             warnings: lastStep.warnings,
             providerMetadata: lastStep.providerMetadata,
@@ -6996,8 +6725,8 @@ var DefaultStreamTextResult = class {
               "ai.response.text": { output: () => recordedFullText },
               "ai.response.toolCalls": {
                 output: () => {
+                  var _a18;
+                  return ((_a18 = lastStep.toolCalls) == null ? void 0 : _a18.length) ? JSON.stringify(lastStep.toolCalls) : void 0;
                 }
               },
               "ai.usage.promptTokens": usage.promptTokens,
@@ -7031,18 +6760,15 @@ var DefaultStreamTextResult = class {
       maxRetries: maxRetriesArg
     });
     const tracer = getTracer(telemetry);
+    const callSettings = prepareCallSettings(settings);
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
       model,
       telemetry,
       headers,
-      settings: { ...settings, maxRetries }
+      settings: { ...callSettings, maxRetries }
     });
     const initialPrompt = standardizePrompt({
-      prompt: {
-        system: (_a17 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a17 : system,
-        prompt,
-        messages
-      },
+      prompt: { system, prompt, messages },
       tools
     });
     const self = this;
@@ -7073,7 +6799,6 @@ var DefaultStreamTextResult = class {
         hasLeadingWhitespace,
         messageId
       }) {
-        var _a18;
         const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
         const stepInputMessages = [
           ...initialPrompt.messages,
@@ -7085,9 +6810,7 @@ var DefaultStreamTextResult = class {
             system: initialPrompt.system,
             messages: stepInputMessages
           },
-          modelSupportsImageUrls: model.supportsImageUrls,
-          modelSupportsUrl: (_a18 = model.supportsUrl) == null ? void 0 : _a18.bind(model)
-          // support 'this' context
+          supportedUrls: await model.getSupportedUrls()
         });
         const toolsAndToolChoice = {
           ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -7116,8 +6839,8 @@ var DefaultStreamTextResult = class {
                 "ai.prompt.tools": {
                   // convert the language model level tools:
                   input: () => {
+                    var _a17;
+                    return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
                       (tool2) => JSON.stringify(tool2)
                     );
                   }
@@ -7128,32 +6851,34 @@ var DefaultStreamTextResult = class {
                 // standardized gen-ai llm span attributes:
                 "gen_ai.system": model.provider,
                 "gen_ai.request.model": model.modelId,
-                "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                "gen_ai.request.presence_penalty": settings.presencePenalty,
-                "gen_ai.request.stop_sequences": settings.stopSequences,
-                "gen_ai.request.temperature": settings.temperature,
-                "gen_ai.request.top_k": settings.topK,
-                "gen_ai.request.top_p": settings.topP
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.stop_sequences": callSettings.stopSequences,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
              }
            }),
            tracer,
            endWhenDone: false,
+            fn: async (doStreamSpan2) => {
+              return {
+                startTimestampMs: now2(),
+                // get before the call
+                doStreamSpan: doStreamSpan2,
+                result: await model.doStream({
+                  ...callSettings,
+                  ...toolsAndToolChoice,
+                  inputFormat: promptFormat,
+                  responseFormat: output == null ? void 0 : output.responseFormat,
+                  prompt: promptMessages,
+                  providerOptions,
+                  abortSignal,
+                  headers
+                })
+              };
+            }
           })
         );
         const transformedStream = runToolsTransformation({
@@ -7184,7 +6909,6 @@ var DefaultStreamTextResult = class {
         let stepFirstChunk = true;
         let stepText = "";
         let fullStepText = stepType2 === "continue" ? previousStepText : "";
-        let stepLogProbs;
         let stepResponse = {
           id: generateId3(),
           timestamp: currentDate(),
|
|
7208
6932
|
transformedStream.pipeThrough(
|
7209
6933
|
new TransformStream({
|
7210
6934
|
async transform(chunk, controller) {
|
7211
|
-
var
|
6935
|
+
var _a17, _b, _c;
|
7212
6936
|
if (chunk.type === "stream-start") {
|
7213
6937
|
warnings = chunk.warnings;
|
7214
6938
|
return;
|
@@ -7299,7 +7023,7 @@ var DefaultStreamTextResult = class {
|
|
7299
7023
|
}
|
7300
7024
|
case "response-metadata": {
|
7301
7025
|
stepResponse = {
|
7302
|
-
id: (
|
7026
|
+
id: (_a17 = chunk.id) != null ? _a17 : stepResponse.id,
|
7303
7027
|
timestamp: (_b = chunk.timestamp) != null ? _b : stepResponse.timestamp,
|
7304
7028
|
modelId: (_c = chunk.modelId) != null ? _c : stepResponse.modelId
|
7305
7029
|
};
|
@@ -7309,7 +7033,6 @@ var DefaultStreamTextResult = class {
                   stepUsage = chunk.usage;
                   stepFinishReason = chunk.finishReason;
                   stepProviderMetadata = chunk.providerMetadata;
-                  stepLogProbs = chunk.logprobs;
                   const msToFinish = now2() - startTimestampMs;
                   doStreamSpan.addEvent("ai.stream.finish");
                   doStreamSpan.setAttributes({
@@ -7397,7 +7120,6 @@ var DefaultStreamTextResult = class {
                     finishReason: stepFinishReason,
                     usage: stepUsage,
                     providerMetadata: stepProviderMetadata,
-                    logprobs: stepLogProbs,
                     request: stepRequest,
                     response: {
                       ...stepResponse,
@@ -7414,7 +7136,6 @@ var DefaultStreamTextResult = class {
                     finishReason: stepFinishReason,
                     usage: combinedUsage,
                     providerMetadata: stepProviderMetadata,
-                    logprobs: stepLogProbs,
                     response: {
                       ...stepResponse,
                       headers: response == null ? void 0 : response.headers
@@ -8035,17 +7756,32 @@ function defaultSettingsMiddleware({
   return {
     middlewareVersion: "v2",
     transformParams: async ({ params }) => {
-      var _a17;
+      var _a17, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
       return {
         ...settings,
         ...params,
+        // map all values that are null to undefined
+        maxOutputTokens: settings.maxOutputTokens !== null ? (_a17 = params.maxOutputTokens) != null ? _a17 : settings.maxOutputTokens : void 0,
+        temperature: settings.temperature !== null ? (
+          // temperature: special case 0 or null
+          params.temperature === 0 || params.temperature == null ? (_b = settings.temperature) != null ? _b : params.temperature : params.temperature
+        ) : void 0,
+        stopSequences: settings.stopSequences !== null ? (_c = params.stopSequences) != null ? _c : settings.stopSequences : void 0,
+        topP: settings.topP !== null ? (_d = params.topP) != null ? _d : settings.topP : void 0,
+        topK: settings.topK !== null ? (_e = params.topK) != null ? _e : settings.topK : void 0,
+        presencePenalty: settings.presencePenalty !== null ? (_f = params.presencePenalty) != null ? _f : settings.presencePenalty : void 0,
+        frequencyPenalty: settings.frequencyPenalty !== null ? (_g = params.frequencyPenalty) != null ? _g : settings.frequencyPenalty : void 0,
+        responseFormat: settings.responseFormat !== null ? (_h = params.responseFormat) != null ? _h : settings.responseFormat : void 0,
+        seed: settings.seed !== null ? (_i = params.seed) != null ? _i : settings.seed : void 0,
+        tools: settings.tools !== null ? (_j = params.tools) != null ? _j : settings.tools : void 0,
+        toolChoice: settings.toolChoice !== null ? (_k = params.toolChoice) != null ? _k : settings.toolChoice : void 0,
+        // headers: deep merge
+        headers: mergeObjects(settings.headers, params.headers),
+        // provider options: deep merge
         providerOptions: mergeObjects(
           settings.providerOptions,
           params.providerOptions
-        ),
-        // special case for temperature 0
-        // TODO remove when temperature defaults to undefined
-        temperature: params.temperature === 0 || params.temperature == null ? (_a17 = settings.temperature) != null ? _a17 : 0 : params.temperature
+        )
       };
     }
   };
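With the new mapping, an explicit `null` in the middleware settings disables a default outright instead of being coerced. A usage sketch, assuming the public `wrapLanguageModel` wrapper that `doWrap` below implements (the base model is hypothetical):

```js
import { defaultSettingsMiddleware, wrapLanguageModel } from "ai";

const model = wrapLanguageModel({
  model: baseModel, // hypothetical LanguageModelV2 instance
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.3, // applied when the call passes 0 or omits temperature
      topP: null        // explicit null: topP resolves to undefined, never a default
    }
  })
});
```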
@@ -8201,7 +7937,6 @@ function simulateStreamingMiddleware() {
           type: "finish",
           finishReason: result.finishReason,
           usage: result.usage,
-          logprobs: result.logprobs,
           providerMetadata: result.providerMetadata
         });
         controller.close();
@@ -8233,7 +7968,6 @@ var doWrap = ({
   modelId,
   providerId
 }) => {
-  var _a17;
   async function doTransform({
     params,
     type
@@ -8244,10 +7978,10 @@ var doWrap = ({
       specificationVersion: "v2",
       provider: providerId != null ? providerId : model.provider,
       modelId: modelId != null ? modelId : model.modelId,
+      // TODO middleware should be able to modify the supported urls
+      async getSupportedUrls() {
+        return model.getSupportedUrls();
+      },
       async doGenerate(params) {
         const transformedParams = await doTransform({ params, type: "generate" });
         const doGenerate = async () => model.doGenerate(transformedParams);
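The wrapper now forwards `getSupportedUrls` instead of the removed capability flags, so a hand-written v2 model needs to implement it; sketched here with hypothetical values:

```js
const customModel = {
  specificationVersion: "v2",
  provider: "example-provider",
  modelId: "example-model",
  // media-type pattern -> URL regexps the provider accepts without download
  async getSupportedUrls() {
    return { "image/*": [/^https:\/\/example\.com\//] };
  },
  async doGenerate(options) { /* ... */ },
  async doStream(options) { /* ... */ }
};
```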
|