ai 5.0.0-alpha.7 → 5.0.0-alpha.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +29 -0
- package/dist/index.d.mts +26 -22
- package/dist/index.d.ts +26 -22
- package/dist/index.js +510 -475
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +488 -449
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.js
CHANGED
@@ -26,6 +26,7 @@ __export(src_exports, {
   DefaultChatTransport: () => DefaultChatTransport,
   DownloadError: () => DownloadError,
   EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER: () => GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
@@ -1476,14 +1477,14 @@ var ChatStore = class {
     return this.chats.size;
   }
   getStatus(id) {
-    return this.
+    return this.getChat(id).status;
   }
   setStatus({
     id,
     status,
     error
   }) {
-    const state = this.
+    const state = this.getChat(id);
     if (state.status === status)
       return;
     state.setStatus(status);
@@ -1491,13 +1492,13 @@ var ChatStore = class {
     this.emit({ type: "chat-status-changed", chatId: id, error });
   }
   getError(id) {
-    return this.
+    return this.getChat(id).error;
   }
   getMessages(id) {
-    return this.
+    return this.getChat(id).messages;
   }
   getLastMessage(id) {
-    const chat = this.
+    const chat = this.getChat(id);
     return chat.messages[chat.messages.length - 1];
   }
   subscribe(subscriber) {
@@ -1508,11 +1509,11 @@ var ChatStore = class {
     id,
     messages
   }) {
-    this.
+    this.getChat(id).setMessages(messages);
     this.emit({ type: "chat-messages-changed", chatId: id });
   }
   removeAssistantResponse(id) {
-    const chat = this.
+    const chat = this.getChat(id);
     const lastMessage = chat.messages[chat.messages.length - 1];
     if (lastMessage == null) {
       throw new Error("Cannot remove assistant response from empty chat");
@@ -1533,8 +1534,8 @@ var ChatStore = class {
     onFinish
   }) {
     var _a17;
-    const
-
+    const chat = this.getChat(chatId);
+    chat.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
     this.emit({
       type: "chat-messages-changed",
       chatId
@@ -1557,7 +1558,7 @@ var ChatStore = class {
     onToolCall,
     onFinish
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     if (chat.messages[chat.messages.length - 1].role === "assistant") {
       chat.popMessage();
       this.emit({
@@ -1601,7 +1602,7 @@ var ChatStore = class {
     toolCallId,
     result
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     chat.jobExecutor.run(async () => {
       updateToolCallResult({
         messages: chat.messages,
@@ -1626,7 +1627,7 @@ var ChatStore = class {
   }
   async stopStream({ chatId }) {
     var _a17;
-    const chat = this.
+    const chat = this.getChat(chatId);
     if (chat.status !== "streaming" && chat.status !== "submitted")
       return;
     if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
@@ -1639,7 +1640,7 @@ var ChatStore = class {
       subscriber.onChatChanged(event);
     }
   }
-
+  getChat(id) {
     if (!this.hasChat(id)) {
       this.addChat(id, []);
     }
@@ -1654,7 +1655,7 @@ var ChatStore = class {
     onToolCall,
     onFinish
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     this.setStatus({ id: chatId, status: "submitted", error: void 0 });
     const messageCount = chat.messages.length;
     const lastMessage = chat.messages[chat.messages.length - 1];
@@ -3368,7 +3369,7 @@ async function invokeModelMaxImagesPerCall(model) {
 }

 // core/generate-object/generate-object.ts
-var
+var import_provider22 = require("@ai-sdk/provider");
 var import_provider_utils15 = require("@ai-sdk/provider-utils");

 // core/generate-text/extract-content-text.ts
@@ -3807,6 +3808,19 @@ function prepareCallSettings({
   };
 }

+// core/prompt/resolve-language-model.ts
+var import_gateway = require("@ai-sdk/gateway");
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : import_gateway.gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 var import_provider19 = require("@ai-sdk/provider");
 var import_provider_utils13 = require("@ai-sdk/provider-utils");
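
The resolve-language-model hunk above is the core behavioral change of this release: a string model id now resolves against an overridable global default provider, falling back to the Vercel AI Gateway only when none is registered. A minimal consumer-side sketch using the newly exported GLOBAL_DEFAULT_PROVIDER symbol (the @ai-sdk/openai provider and the model id are illustrative assumptions, not taken from this diff):

import { generateText, GLOBAL_DEFAULT_PROVIDER } from "ai";
import { openai } from "@ai-sdk/openai";

// string model ids resolve through globalThis[GLOBAL_DEFAULT_PROVIDER],
// with the gateway as the fallback when nothing is registered
globalThis[GLOBAL_DEFAULT_PROVIDER] = openai;

const { text } = await generateText({
  model: "gpt-4o", // resolveLanguageModel -> openai.languageModel("gpt-4o")
  prompt: "Hello!",
});
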
@@ -3998,6 +4012,20 @@ async function standardizePrompt(prompt) {
   };
 }

+// core/prompt/wrap-gateway-error.ts
+var import_gateway2 = require("@ai-sdk/gateway");
+var import_provider20 = require("@ai-sdk/provider");
+function wrapGatewayError(error) {
+  if (import_gateway2.GatewayAuthenticationError.isInstance(error) || import_gateway2.GatewayModelNotFoundError.isInstance(error)) {
+    return new import_provider20.AISDKError({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
 // core/telemetry/stringify-for-telemetry.ts
 function stringifyForTelemetry(prompt) {
   return JSON.stringify(
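
wrapGatewayError re-wraps gateway authentication and model-not-found failures in a generic AISDKError named "GatewayError", keeping the original error as cause. A sketch of what catching it could look like (the model id is a placeholder):

import { generateText } from "ai";

try {
  await generateText({ model: "some-model-id", prompt: "Hi" });
} catch (error) {
  if (error.name === "GatewayError") {
    console.log(error.message); // "Vercel AI Gateway access failed. ..."
    console.log(error.cause); // the original GatewayAuthenticationError or GatewayModelNotFoundError
  }
}
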
function stringifyForTelemetry(prompt) {
|
4003
4031
|
return JSON.stringify(
|
@@ -4014,7 +4042,7 @@ function stringifyForTelemetry(prompt) {
|
|
4014
4042
|
}
|
4015
4043
|
|
4016
4044
|
// core/generate-object/output-strategy.ts
|
4017
|
-
var
|
4045
|
+
var import_provider21 = require("@ai-sdk/provider");
|
4018
4046
|
var import_provider_utils14 = require("@ai-sdk/provider-utils");
|
4019
4047
|
|
4020
4048
|
// src/util/async-iterable-stream.ts
|
@@ -4052,7 +4080,7 @@ var noSchemaOutputStrategy = {
     } : { success: true, value };
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
       functionality: "element streams in no-schema mode"
     });
   }
@@ -4074,7 +4102,7 @@ var objectOutputStrategy = (schema) => ({
     return (0, import_provider_utils14.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
       functionality: "element streams in object mode"
     });
   }
@@ -4102,10 +4130,10 @@ var arrayOutputStrategy = (schema) => {
       isFinalDelta
     }) {
       var _a17;
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
         return {
           success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
             value,
             cause: "value must be an object that contains an array of elements"
           })
@@ -4145,10 +4173,10 @@ var arrayOutputStrategy = (schema) => {
       };
     },
     async validateFinalResult(value) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: "value must be an object that contains an array of elements"
          })
@@ -4211,10 +4239,10 @@ var enumOutputStrategy = (enumValues) => {
       additionalProperties: false
     },
     async validateFinalResult(value) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: 'value must be an object that contains a string in the "result" property.'
          })
@@ -4223,17 +4251,17 @@ var enumOutputStrategy = (enumValues) => {
       const result = value.result;
       return enumValues.includes(result) ? { success: true, value: result } : {
         success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
           value,
           cause: "value must be a string in the enum"
         })
       };
     },
     async validatePartialResult({ value, textDelta }) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: 'value must be an object that contains a string in the "result" property.'
          })
@@ -4246,7 +4274,7 @@ var enumOutputStrategy = (enumValues) => {
       if (value.result.length === 0 || possibleEnumValues.length === 0) {
         return {
           success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
             value,
             cause: "value must be a string in the enum"
           })
@@ -4261,7 +4289,7 @@ var enumOutputStrategy = (enumValues) => {
       };
     },
     createElementStream() {
-      throw new
+      throw new import_provider21.UnsupportedFunctionalityError({
         functionality: "element streams in enum mode"
       });
     }
@@ -4406,12 +4434,6 @@ function validateObjectGenerationInput({
   }
 }

-// core/prompt/resolve-language-model.ts
-var import_gateway = require("@ai-sdk/gateway");
-function resolveLanguageModel(model) {
-  return typeof model === "string" ? import_gateway.gateway.languageModel(model) : model;
-}
-
 // core/generate-object/generate-object.ts
 var originalGenerateId = (0, import_provider_utils15.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
@@ -4461,208 +4483,212 @@ async function generateObject(options) {
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-        ...baseTelemetryAttributes,
-        // specific settings that only make sense on the outer level:
-        "ai.prompt": {
-          input: () => JSON.stringify({ system, prompt, messages })
-        },
-        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-        "ai.schema.name": schemaName,
-        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17;
-      let result;
-      let finishReason;
-      let usage;
-      let warnings;
-      let response;
-      let request;
-      let resultProviderMetadata;
-      const standardizedPrompt = await standardizePrompt({
-        system,
-        prompt,
-        messages
-      });
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: standardizedPrompt,
-        supportedUrls: await model.supportedUrls
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.messages": {
-                input: () => stringifyForTelemetry(promptMessages)
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
-              "gen_ai.request.temperature": callSettings.temperature,
-              "gen_ai.request.top_k": callSettings.topK,
-              "gen_ai.request.top_p": callSettings.topP
-            }
+  try {
+    return await recordSpan({
+      name: "ai.generateObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateObject",
+            telemetry
           }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17;
+        let result;
+        let finishReason;
+        let usage;
+        let warnings;
+        let response;
+        let request;
+        let resultProviderMetadata;
+        const standardizedPrompt = await standardizePrompt({
+          system,
+          prompt,
+          messages
+        });
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.supportedUrls
+        });
+        const generateResult = await retry(
+          () => recordSpan({
+            name: "ai.generateObject.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateObject.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b, _c, _d, _e, _f, _g, _h;
+              const result2 = await model.doGenerate({
+                responseFormat: {
+                  type: "json",
+                  schema: outputStrategy.jsonSchema,
+                  name: schemaName,
+                  description: schemaDescription
+                },
+                ...prepareCallSettings(settings),
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
               });
+              const responseData = {
+                id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result2.response) == null ? void 0 : _g.headers,
+                body: (_h = result2.response) == null ? void 0 : _h.body
+              };
+              const text2 = extractContentText(result2.content);
+              if (text2 === void 0) {
+                throw new NoObjectGeneratedError({
+                  message: "No object generated: the model did not return a response.",
+                  response: responseData,
+                  usage: result2.usage,
+                  finishReason: result2.finishReason
+                });
+              }
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result2.finishReason,
+                    "ai.response.object": { output: () => text2 },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result2.usage.inputTokens,
+                    "ai.usage.completionTokens": result2.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result2.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result2, objectText: text2, responseData };
            }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            );
-            return { ...result2, objectText: text2, responseData };
+          })
+        );
+        result = generateResult.objectText;
+        finishReason = generateResult.finishReason;
+        usage = generateResult.usage;
+        warnings = generateResult.warnings;
+        resultProviderMetadata = generateResult.providerMetadata;
+        request = (_a17 = generateResult.request) != null ? _a17 : {};
+        response = generateResult.responseData;
+        async function processResult(result2) {
+          const parseResult = await (0, import_provider_utils15.safeParseJSON)({ text: result2 });
+          if (!parseResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: could not parse the response.",
+              cause: parseResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
          }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            usage,
-            finishReason
-          });
-        }
-        const validationResult = await outputStrategy.validateFinalResult(
-          parseResult.value,
-          {
-            text: result2,
-            response,
-            usage
+          const validationResult = await outputStrategy.validateFinalResult(
+            parseResult.value,
+            {
+              text: result2,
+              response,
+              usage
+            }
+          );
+          if (!validationResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: response did not match schema.",
+              cause: validationResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
          }
-
-        if (!validationResult.success) {
-          throw new NoObjectGeneratedError({
-            message: "No object generated: response did not match schema.",
-            cause: validationResult.error,
-            text: result2,
-            response,
-            usage,
-            finishReason
-          });
+          return validationResult.value;
        }
-
-
-
-
-
-
-
-
-
-
-
-
+        let object2;
+        try {
+          object2 = await processResult(result);
+        } catch (error) {
+          if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider22.JSONParseError.isInstance(error.cause) || import_provider22.TypeValidationError.isInstance(error.cause))) {
+            const repairedText = await repairText({
+              text: result,
+              error: error.cause
+            });
+            if (repairedText === null) {
+              throw error;
+            }
+            object2 = await processResult(repairedText);
+          } else {
            throw error;
          }
-          object2 = await processResult(repairedText);
-        } else {
-          throw error;
        }
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": finishReason,
+              "ai.response.object": {
+                output: () => JSON.stringify(object2)
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": usage.inputTokens,
+              "ai.usage.completionTokens": usage.outputTokens
+            }
+          })
+        );
+        return new DefaultGenerateObjectResult({
+          object: object2,
+          finishReason,
+          usage,
+          warnings,
+          request,
+          response,
+          providerMetadata: resultProviderMetadata
+        });
      }
-
-
-
-
-          "ai.response.finishReason": finishReason,
-          "ai.response.object": {
-            output: () => JSON.stringify(object2)
-          },
-          // TODO rename telemetry attributes to inputTokens and outputTokens
-          "ai.usage.promptTokens": usage.inputTokens,
-          "ai.usage.completionTokens": usage.outputTokens
-        }
-      })
-    );
-    return new DefaultGenerateObjectResult({
-      object: object2,
-      finishReason,
-      usage,
-      warnings,
-      request,
-      response,
-      providerMetadata: resultProviderMetadata
-    });
-  }
-});
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 var DefaultGenerateObjectResult = class {
   constructor(options) {
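
Besides the new try/catch around the outer span, the restructured generateObject keeps the repairText hook: when parsing or schema validation fails with a JSONParseError or TypeValidationError cause, the hook may return repaired text (or null to give up) before processResult runs again. A sketch, assuming the hook is exposed publicly as experimental_repairText (the option name and model id are assumptions, not confirmed by this diff):

import { generateObject } from "ai";
import { z } from "zod";

const { object } = await generateObject({
  model: "gpt-4o",
  schema: z.object({ name: z.string() }),
  prompt: "Generate an example user.",
  experimental_repairText: async ({ text, error }) => {
    // e.g. strip a trailing markdown fence; returning null rethrows the original error
    return text.endsWith("```") ? text.slice(0, -3) : null;
  },
});
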
@@ -4843,7 +4869,9 @@ function streamObject(options) {
     headers,
     experimental_telemetry: telemetry,
     providerOptions,
-    onError
+    onError = ({ error }) => {
+      console.error(error);
+    },
     onFinish,
     _internal: {
       generateId: generateId3 = originalGenerateId2,
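
streamObject (and streamText further down) now defaults onError to logging via console.error instead of silently dropping stream errors; pass a handler to override it. A sketch (schema, prompt, and logToMonitoring are illustrative placeholders):

import { streamObject } from "ai";
import { z } from "zod";

const result = streamObject({
  model: "gpt-4o",
  schema: z.object({ items: z.array(z.string()) }),
  prompt: "List three fruits.",
  // replaces the new console.error default; errors also pass through wrapGatewayError
  onError: ({ error }) => logToMonitoring(error), // logToMonitoring: your own sink
});
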
@@ -4936,7 +4964,7 @@ var DefaultStreamObjectResult = class {
       transform(chunk, controller) {
         controller.enqueue(chunk);
         if (chunk.type === "error") {
-          onError
+          onError({ error: wrapGatewayError(chunk.error) });
         }
       }
     });
@@ -5336,8 +5364,8 @@ var DefaultStreamObjectResult = class {
 };

 // src/error/no-speech-generated-error.ts
-var
-var NoSpeechGeneratedError = class extends
+var import_provider23 = require("@ai-sdk/provider");
+var NoSpeechGeneratedError = class extends import_provider23.AISDKError {
   constructor(options) {
     super({
       name: "AI_NoSpeechGeneratedError",
@@ -5773,239 +5801,243 @@ async function generateText({
     messages
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17, _b, _c, _d, _e;
-      const callSettings2 = prepareCallSettings(settings);
-      let currentModelResponse;
-      let currentToolCalls = [];
-      let currentToolResults = [];
-      const responseMessages = [];
-      const steps = [];
-      do {
-        const stepInputMessages = [
-          ...initialPrompt.messages,
-          ...responseMessages
-        ];
-        const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
-          model,
-          steps,
-          stepNumber: steps.length
-        }));
-        const promptMessages = await convertToLanguageModelPrompt({
-          prompt: {
-            system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
-            messages: stepInputMessages
-          },
-          supportedUrls: await model.supportedUrls
-        });
-        const stepModel = resolveLanguageModel(
-          (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
-        );
-        const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
-          tools,
-          toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
-          activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
-        });
-        currentModelResponse = await retry(
-          () => {
-            var _a18;
-            return recordSpan({
-              name: "ai.generateText.doGenerate",
-              attributes: selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  ...assembleOperationName({
-                    operationId: "ai.generateText.doGenerate",
-                    telemetry
-                  }),
-                  ...baseTelemetryAttributes,
-                  // model:
-                  "ai.model.provider": stepModel.provider,
-                  "ai.model.id": stepModel.modelId,
-                  // prompt:
-                  "ai.prompt.messages": {
-                    input: () => stringifyForTelemetry(promptMessages)
-                  },
-                  "ai.prompt.tools": {
-                    // convert the language model level tools:
-                    input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
-                  },
-                  "ai.prompt.toolChoice": {
-                    input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
-                  },
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.system": stepModel.provider,
-                  "gen_ai.request.model": stepModel.modelId,
-                  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                  "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                  "gen_ai.request.presence_penalty": settings.presencePenalty,
-                  "gen_ai.request.stop_sequences": settings.stopSequences,
-                  "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
-                  "gen_ai.request.top_k": settings.topK,
-                  "gen_ai.request.top_p": settings.topP
-                }
-              }),
-              tracer,
-              fn: async (span2) => {
-                var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
-                const result = await stepModel.doGenerate({
-                  ...callSettings2,
-                  tools: stepTools,
-                  toolChoice: stepToolChoice,
-                  responseFormat: output == null ? void 0 : output.responseFormat,
-                  prompt: promptMessages,
-                  providerOptions,
-                  abortSignal,
-                  headers
-                });
-                const responseData = {
-                  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-                  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                  modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
-                  headers: (_g = result.response) == null ? void 0 : _g.headers,
-                  body: (_h = result.response) == null ? void 0 : _h.body
-                };
-                span2.setAttributes(
-                  selectTelemetryAttributes({
-                    telemetry,
-                    attributes: {
-                      "ai.response.finishReason": result.finishReason,
-                      "ai.response.text": {
-                        output: () => extractContentText(result.content)
-                      },
-                      "ai.response.toolCalls": {
-                        output: () => {
-                          const toolCalls = asToolCalls(result.content);
-                          return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                        }
-                      },
-                      "ai.response.id": responseData.id,
-                      "ai.response.model": responseData.modelId,
-                      "ai.response.timestamp": responseData.timestamp.toISOString(),
-                      // TODO rename telemetry attributes to inputTokens and outputTokens
-                      "ai.usage.promptTokens": result.usage.inputTokens,
-                      "ai.usage.completionTokens": result.usage.outputTokens,
-                      // standardized gen-ai llm span attributes:
-                      "gen_ai.response.finish_reasons": [result.finishReason],
-                      "gen_ai.response.id": responseData.id,
-                      "gen_ai.response.model": responseData.modelId,
-                      "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                      "gen_ai.usage.output_tokens": result.usage.outputTokens
-                    }
-                  })
-                );
-                return { ...result, response: responseData };
-              }
-            });
+  try {
+    return await recordSpan({
+      name: "ai.generateText",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateText",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": model.provider,
+          "ai.model.id": model.modelId,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
          }
-
-
-
-
-
-
-
-
-
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17, _b, _c, _d, _e;
+        const callSettings2 = prepareCallSettings(settings);
+        let currentModelResponse;
+        let currentToolCalls = [];
+        let currentToolResults = [];
+        const responseMessages = [];
+        const steps = [];
+        do {
+          const stepInputMessages = [
+            ...initialPrompt.messages,
+            ...responseMessages
+          ];
+          const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+            model,
+            steps,
+            stepNumber: steps.length
+          }));
+          const promptMessages = await convertToLanguageModelPrompt({
+            prompt: {
+              system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
              messages: stepInputMessages
+            },
+            supportedUrls: await model.supportedUrls
+          });
+          const stepModel = resolveLanguageModel(
+            (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+          );
+          const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+            tools,
+            toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+            activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+          });
+          currentModelResponse = await retry(
+            () => {
+              var _a18;
+              return recordSpan({
+                name: "ai.generateText.doGenerate",
+                attributes: selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    ...assembleOperationName({
+                      operationId: "ai.generateText.doGenerate",
+                      telemetry
+                    }),
+                    ...baseTelemetryAttributes,
+                    // model:
+                    "ai.model.provider": stepModel.provider,
+                    "ai.model.id": stepModel.modelId,
+                    // prompt:
+                    "ai.prompt.messages": {
+                      input: () => stringifyForTelemetry(promptMessages)
+                    },
+                    "ai.prompt.tools": {
+                      // convert the language model level tools:
+                      input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+                    },
+                    "ai.prompt.toolChoice": {
+                      input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+                    },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.system": stepModel.provider,
+                    "gen_ai.request.model": stepModel.modelId,
+                    "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                    "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                    "gen_ai.request.presence_penalty": settings.presencePenalty,
+                    "gen_ai.request.stop_sequences": settings.stopSequences,
+                    "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                    "gen_ai.request.top_k": settings.topK,
+                    "gen_ai.request.top_p": settings.topP
+                  }
+                }),
+                tracer,
+                fn: async (span2) => {
+                  var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
+                  const result = await stepModel.doGenerate({
+                    ...callSettings2,
+                    tools: stepTools,
+                    toolChoice: stepToolChoice,
+                    responseFormat: output == null ? void 0 : output.responseFormat,
+                    prompt: promptMessages,
+                    providerOptions,
+                    abortSignal,
+                    headers
+                  });
+                  const responseData = {
+                    id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                    timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+                    modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
+                    headers: (_g = result.response) == null ? void 0 : _g.headers,
+                    body: (_h = result.response) == null ? void 0 : _h.body
+                  };
+                  span2.setAttributes(
+                    selectTelemetryAttributes({
+                      telemetry,
+                      attributes: {
+                        "ai.response.finishReason": result.finishReason,
+                        "ai.response.text": {
+                          output: () => extractContentText(result.content)
+                        },
+                        "ai.response.toolCalls": {
+                          output: () => {
+                            const toolCalls = asToolCalls(result.content);
+                            return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                          }
+                        },
+                        "ai.response.id": responseData.id,
+                        "ai.response.model": responseData.modelId,
+                        "ai.response.timestamp": responseData.timestamp.toISOString(),
+                        // TODO rename telemetry attributes to inputTokens and outputTokens
+                        "ai.usage.promptTokens": result.usage.inputTokens,
+                        "ai.usage.completionTokens": result.usage.outputTokens,
+                        // standardized gen-ai llm span attributes:
+                        "gen_ai.response.finish_reasons": [result.finishReason],
+                        "gen_ai.response.id": responseData.id,
+                        "gen_ai.response.model": responseData.modelId,
+                        "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                        "gen_ai.usage.output_tokens": result.usage.outputTokens
+                      }
+                    })
+                  );
+                  return { ...result, response: responseData };
+                }
+              });
+            }
+          );
+          currentToolCalls = await Promise.all(
+            currentModelResponse.content.filter(
+              (part) => part.type === "tool-call"
+            ).map(
+              (toolCall) => parseToolCall({
+                toolCall,
+                tools,
+                repairToolCall,
+                system,
+                messages: stepInputMessages
+              })
+            )
+          );
+          currentToolResults = tools == null ? [] : await executeTools({
+            toolCalls: currentToolCalls,
+            tools,
+            tracer,
+            telemetry,
+            messages: stepInputMessages,
+            abortSignal
+          });
+          const stepContent = asContent({
+            content: currentModelResponse.content,
+            toolCalls: currentToolCalls,
+            toolResults: currentToolResults
+          });
+          responseMessages.push(
+            ...toResponseMessages({
+              content: stepContent,
+              tools: tools != null ? tools : {}
            })
-        )
-
-        currentToolResults = tools == null ? [] : await executeTools({
-          toolCalls: currentToolCalls,
-          tools,
-          tracer,
-          telemetry,
-          messages: stepInputMessages,
-          abortSignal
-        });
-        const stepContent = asContent({
-          content: currentModelResponse.content,
-          toolCalls: currentToolCalls,
-          toolResults: currentToolResults
-        });
-        responseMessages.push(
-          ...toResponseMessages({
+          );
+          const currentStepResult = new DefaultStepResult({
            content: stepContent,
-
+            finishReason: currentModelResponse.finishReason,
+            usage: currentModelResponse.usage,
+            warnings: currentModelResponse.warnings,
+            providerMetadata: currentModelResponse.providerMetadata,
+            request: (_e = currentModelResponse.request) != null ? _e : {},
+            response: {
+              ...currentModelResponse.response,
+              // deep clone msgs to avoid mutating past messages in multi-step:
+              messages: structuredClone(responseMessages)
+            }
+          });
+          steps.push(currentStepResult);
+          await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+        } while (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+          !await isStopConditionMet({ stopConditions, steps })
+        );
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": currentModelResponse.finishReason,
+              "ai.response.text": {
+                output: () => extractContentText(currentModelResponse.content)
+              },
+              "ai.response.toolCalls": {
+                output: () => {
+                  const toolCalls = asToolCalls(currentModelResponse.content);
+                  return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                }
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+              "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+            }
          })
        );
-      const
-
-
-
-
-
-
-
-
-
-
-      }
+        const lastStep = steps[steps.length - 1];
+        return new DefaultGenerateTextResult({
+          steps,
+          resolvedOutput: await (output == null ? void 0 : output.parseOutput(
+            { text: lastStep.text },
+            {
+              response: lastStep.response,
+              usage: lastStep.usage,
+              finishReason: lastStep.finishReason
+            }
+          ))
        });
-
-
-
-
-
-        currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
-        !await isStopConditionMet({ stopConditions, steps })
-      );
-      span.setAttributes(
-        selectTelemetryAttributes({
-          telemetry,
-          attributes: {
-            "ai.response.finishReason": currentModelResponse.finishReason,
-            "ai.response.text": {
-              output: () => extractContentText(currentModelResponse.content)
-            },
-            "ai.response.toolCalls": {
-              output: () => {
-                const toolCalls = asToolCalls(currentModelResponse.content);
-                return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-              }
-            },
-            // TODO rename telemetry attributes to inputTokens and outputTokens
-            "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
-            "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
-          }
-        })
-      );
-      const lastStep = steps[steps.length - 1];
-      return new DefaultGenerateTextResult({
-        steps,
-        resolvedOutput: await (output == null ? void 0 : output.parseOutput(
-          { text: lastStep.text },
-          {
-            response: lastStep.response,
-            usage: lastStep.usage,
-            finishReason: lastStep.finishReason
-          }
-        ))
-      });
-    }
-  });
+      }
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 async function executeTools({
   toolCalls,
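
Note how the rewritten loop resolves the model per step: prepareStep receives { model, steps, stepNumber }, and whatever model it returns is passed through resolveLanguageModel, so a step can switch to a plain string id. A sketch, assuming the hook is exposed under the internal name prepareStep used here (option name, provider, and model ids are assumptions):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("gpt-4o-mini"),
  tools: { /* tool definitions */ },
  prompt: "Research, then summarize.",
  prepareStep: ({ steps, stepNumber }) => {
    // later steps switch to a string id, resolved via resolveLanguageModel
    if (stepNumber > 0) {
      return { model: "gpt-4o" };
    }
  },
});
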
@@ -6246,7 +6278,7 @@ var object = ({

 // core/generate-text/smooth-stream.ts
 var import_provider_utils21 = require("@ai-sdk/provider-utils");
-var
+var import_provider24 = require("@ai-sdk/provider");
 var CHUNKING_REGEXPS = {
   word: /\S+\s+/m,
   line: /\n+/m
@@ -6276,7 +6308,7 @@ function smoothStream({
     } else {
       const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
       if (chunkingRegex == null) {
-        throw new
+        throw new import_provider24.InvalidArgumentError({
           argument: "chunking",
           message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
         });
@@ -6538,7 +6570,9 @@ function streamText({
     experimental_repairToolCall: repairToolCall,
     experimental_transform: transform,
     onChunk,
-    onError
+    onError = ({ error }) => {
+      console.error(error);
+    },
     onFinish,
     onStepFinish,
     _internal: {
@@ -6677,7 +6711,7 @@ var DefaultStreamTextResult = class {
           await (onChunk == null ? void 0 : onChunk({ chunk: part }));
         }
         if (part.type === "error") {
-          await
+          await onError({ error: wrapGatewayError(part.error) });
         }
         if (part.type === "text") {
           const latestContent = recordedContent[recordedContent.length - 1];
@@ -7773,7 +7807,7 @@ var doWrap = ({
 };

 // core/registry/custom-provider.ts
-var
+var import_provider25 = require("@ai-sdk/provider");
 function customProvider({
   languageModels,
   textEmbeddingModels,
@@ -7788,7 +7822,7 @@ function customProvider({
       if (fallbackProvider) {
         return fallbackProvider.languageModel(modelId);
       }
-      throw new
+      throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
     },
     textEmbeddingModel(modelId) {
       if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -7797,7 +7831,7 @@ function customProvider({
       if (fallbackProvider) {
         return fallbackProvider.textEmbeddingModel(modelId);
       }
-      throw new
+      throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
     },
     imageModel(modelId) {
       if (imageModels != null && modelId in imageModels) {
@@ -7806,19 +7840,19 @@ function customProvider({
       if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
         return fallbackProvider.imageModel(modelId);
       }
-      throw new
+      throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
     }
   };
 }
 var experimental_customProvider = customProvider;

 // core/registry/no-such-provider-error.ts
-var
+var import_provider26 = require("@ai-sdk/provider");
 var name16 = "AI_NoSuchProviderError";
 var marker16 = `vercel.ai.error.${name16}`;
 var symbol16 = Symbol.for(marker16);
 var _a16;
-var NoSuchProviderError = class extends
+var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
   constructor({
     modelId,
     modelType,
@@ -7832,13 +7866,13 @@ var NoSuchProviderError = class extends import_provider25.NoSuchModelError {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return
+    return import_provider26.AISDKError.hasMarker(error, marker16);
   }
 };
 _a16 = symbol16;

 // core/registry/provider-registry.ts
-var
+var import_provider27 = require("@ai-sdk/provider");
 function createProviderRegistry(providers, {
   separator = ":"
 } = {}) {
@@ -7877,7 +7911,7 @@ var DefaultProviderRegistry = class {
   splitId(id, modelType) {
     const index = id.indexOf(this.separator);
     if (index === -1) {
-      throw new
+      throw new import_provider27.NoSuchModelError({
         modelId: id,
         modelType,
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -7890,7 +7924,7 @@ var DefaultProviderRegistry = class {
     const [providerId, modelId] = this.splitId(id, "languageModel");
     const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
@@ -7900,7 +7934,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider27.NoSuchModelError({
         modelId: id,
         modelType: "textEmbeddingModel"
       });
@@ -7913,7 +7947,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
     }
     return model;
   }
@@ -8487,8 +8521,8 @@ var MCPClient = class {
 };

 // src/error/no-transcript-generated-error.ts
-var
-var NoTranscriptGeneratedError = class extends
+var import_provider28 = require("@ai-sdk/provider");
+var NoTranscriptGeneratedError = class extends import_provider28.AISDKError {
   constructor(options) {
     super({
       name: "AI_NoTranscriptGeneratedError",
@@ -8557,6 +8591,7 @@ var DefaultTranscriptionResult = class {
   DefaultChatTransport,
   DownloadError,
   EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError,
   InvalidDataContentError,
   InvalidMessageRoleError,