ai 5.0.0-alpha.6 → 5.0.0-alpha.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +44 -0
- package/dist/index.d.mts +282 -437
- package/dist/index.d.ts +282 -437
- package/dist/index.js +957 -1047
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +935 -1015
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -6
package/dist/index.js
CHANGED
@@ -26,6 +26,7 @@ __export(src_exports, {
   DefaultChatTransport: () => DefaultChatTransport,
   DownloadError: () => DownloadError,
   EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER: () => GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
@@ -53,10 +54,8 @@ __export(src_exports, {
   ToolExecutionError: () => ToolExecutionError,
   TypeValidationError: () => import_provider16.TypeValidationError,
   UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
-  appendClientMessage: () => appendClientMessage,
   asSchema: () => import_provider_utils26.asSchema,
   assistantModelMessageSchema: () => assistantModelMessageSchema,
-  callChatApi: () => callChatApi,
   callCompletionApi: () => callCompletionApi,
   convertFileListToFileUIParts: () => convertFileListToFileUIParts,
   convertToCoreMessages: () => convertToCoreMessages,
@@ -83,7 +82,6 @@ __export(src_exports, {
   experimental_generateImage: () => generateImage,
   experimental_generateSpeech: () => generateSpeech,
   experimental_transcribe: () => transcribe,
-  extractMaxToolInvocationStep: () => extractMaxToolInvocationStep,
   extractReasoningMiddleware: () => extractReasoningMiddleware,
   generateId: () => import_provider_utils26.generateId,
   generateObject: () => generateObject,
@@ -91,14 +89,12 @@ __export(src_exports, {
   getTextFromDataUrl: () => getTextFromDataUrl,
   getToolInvocations: () => getToolInvocations,
   hasToolCall: () => hasToolCall,
-  isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData: () => isDeepEqualData,
   jsonSchema: () => import_provider_utils26.jsonSchema,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
   pipeTextStreamToResponse: () => pipeTextStreamToResponse,
   pipeUIMessageStreamToResponse: () => pipeUIMessageStreamToResponse,
-  shouldResubmitMessages: () => shouldResubmitMessages,
   simulateReadableStream: () => simulateReadableStream,
   simulateStreamingMiddleware: () => simulateStreamingMiddleware,
   smoothStream: () => smoothStream,
@@ -108,7 +104,6 @@ __export(src_exports, {
   systemModelMessageSchema: () => systemModelMessageSchema,
   tool: () => tool,
   toolModelMessageSchema: () => toolModelMessageSchema,
-  updateToolCallResult: () => updateToolCallResult,
   userModelMessageSchema: () => userModelMessageSchema,
   wrapLanguageModel: () => wrapLanguageModel
 });
@@ -540,19 +535,8 @@ function pipeTextStreamToResponse({
   });
 }
 
-// src/ui/
-
-  messages,
-  message
-}) {
-  return [
-    ...messages.length > 0 && messages[messages.length - 1].id === message.id ? messages.slice(0, -1) : messages,
-    message
-  ];
-}
-
-// src/ui/call-chat-api.ts
-var import_provider_utils3 = require("@ai-sdk/provider-utils");
+// src/ui/call-completion-api.ts
+var import_provider_utils = require("@ai-sdk/provider-utils");
 
 // src/ui-message-stream/ui-message-stream-parts.ts
 var import_zod = require("zod");
@@ -658,8 +642,138 @@ async function consumeStream({
   }
 }
 
+// src/ui/process-text-stream.ts
+async function processTextStream({
+  stream,
+  onTextPart
+}) {
+  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) {
+      break;
+    }
+    await onTextPart(value);
+  }
+}
+
+// src/ui/call-completion-api.ts
+var getOriginalFetch = () => fetch;
+async function callCompletionApi({
+  api,
+  prompt,
+  credentials,
+  headers,
+  body,
+  streamProtocol = "data",
+  setCompletion,
+  setLoading,
+  setError,
+  setAbortController,
+  onFinish,
+  onError,
+  fetch: fetch2 = getOriginalFetch()
+}) {
+  var _a17;
+  try {
+    setLoading(true);
+    setError(void 0);
+    const abortController = new AbortController();
+    setAbortController(abortController);
+    setCompletion("");
+    const response = await fetch2(api, {
+      method: "POST",
+      body: JSON.stringify({
+        prompt,
+        ...body
+      }),
+      credentials,
+      headers: {
+        "Content-Type": "application/json",
+        ...headers
+      },
+      signal: abortController.signal
+    }).catch((err) => {
+      throw err;
+    });
+    if (!response.ok) {
+      throw new Error(
+        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+      );
+    }
+    if (!response.body) {
+      throw new Error("The response body is empty.");
+    }
+    let result = "";
+    switch (streamProtocol) {
+      case "text": {
+        await processTextStream({
+          stream: response.body,
+          onTextPart: (chunk) => {
+            result += chunk;
+            setCompletion(result);
+          }
+        });
+        break;
+      }
+      case "data": {
+        await consumeStream({
+          stream: (0, import_provider_utils.parseJsonEventStream)({
+            stream: response.body,
+            schema: uiMessageStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const streamPart = part.value;
+                if (streamPart.type === "text") {
+                  result += streamPart.text;
+                  setCompletion(result);
+                } else if (streamPart.type === "error") {
+                  throw new Error(streamPart.errorText);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
+          }
+        });
+        break;
+      }
+      default: {
+        const exhaustiveCheck = streamProtocol;
+        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+      }
+    }
+    if (onFinish) {
+      onFinish(prompt, result);
+    }
+    setAbortController(null);
+    return result;
+  } catch (err) {
+    if (err.name === "AbortError") {
+      setAbortController(null);
+      return null;
+    }
+    if (err instanceof Error) {
+      if (onError) {
+        onError(err);
+      }
+    }
+    setError(err);
+  } finally {
+    setLoading(false);
+  }
+}
+
+// src/ui/chat-store.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+
 // src/ui/process-ui-message-stream.ts
-var
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 
 // src/util/merge-objects.ts
 function mergeObjects(base, overrides) {
@@ -695,7 +809,7 @@ function mergeObjects(base, overrides) {
 }
 
 // src/util/parse-partial-json.ts
-var
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
 
 // src/util/fix-json.ts
 function fixJson(input) {
@@ -1020,25 +1134,17 @@ async function parsePartialJson(jsonText) {
   if (jsonText === void 0) {
     return { value: void 0, state: "undefined-input" };
   }
-  let result = await (0,
+  let result = await (0, import_provider_utils2.safeParseJSON)({ text: jsonText });
   if (result.success) {
     return { value: result.value, state: "successful-parse" };
   }
-  result = await (0,
+  result = await (0, import_provider_utils2.safeParseJSON)({ text: fixJson(jsonText) });
   if (result.success) {
     return { value: result.value, state: "repaired-parse" };
   }
   return { value: void 0, state: "failed-parse" };
 }
 
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
 // src/ui/get-tool-invocations.ts
 function getToolInvocations(message) {
   return message.parts.filter(
@@ -1051,9 +1157,7 @@ function createStreamingUIMessageState({
   lastMessage,
   newMessageId = ""
 } = {}) {
-  var _a17;
   const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-  const step = isContinuation ? 1 + ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) : 0;
   const message = isContinuation ? lastMessage : {
     id: newMessageId,
     metadata: {},
@@ -1064,8 +1168,7 @@ function createStreamingUIMessageState({
     message,
     activeTextPart: void 0,
     activeReasoningPart: void 0,
-    partialToolCalls: {},
-    step
+    partialToolCalls: {}
   };
 }
 function processUIMessageStream({
@@ -1096,7 +1199,7 @@ function processUIMessageStream({
         if (metadata != null) {
           const mergedMetadata = state.message.metadata != null ? mergeObjects(state.message.metadata, metadata) : metadata;
           if (messageMetadataSchema != null) {
-            await (0,
+            await (0, import_provider_utils3.validateTypes)({
              value: mergedMetadata,
              schema: messageMetadataSchema
            });
@@ -1163,13 +1266,11 @@ function processUIMessageStream({
           const toolInvocations = getToolInvocations(state.message);
           state.partialToolCalls[part.toolCallId] = {
             text: "",
-            step: state.step,
             toolName: part.toolName,
             index: toolInvocations.length
           };
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: void 0
@@ -1185,7 +1286,6 @@ function processUIMessageStream({
           );
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: partialToolCall.step,
             toolCallId: part.toolCallId,
             toolName: partialToolCall.toolName,
             args: partialArgs
@@ -1196,7 +1296,6 @@ function processUIMessageStream({
         case "tool-call": {
           updateToolInvocationPart(part.toolCallId, {
             state: "call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: part.args
@@ -1209,7 +1308,6 @@ function processUIMessageStream({
           if (result != null) {
             updateToolInvocationPart(part.toolCallId, {
               state: "result",
-              step: state.step,
               toolCallId: part.toolCallId,
               toolName: part.toolName,
               args: part.args,
@@ -1248,7 +1346,6 @@ function processUIMessageStream({
           break;
         }
         case "finish-step": {
-          state.step += 1;
           state.activeTextPart = void 0;
           state.activeReasoningPart = void 0;
           await updateMessageMetadata(part.metadata);
@@ -1311,406 +1408,64 @@ function isObject(value) {
   return typeof value === "object" && value !== null;
 }
 
-// src/ui/
-function
-
+// src/ui/should-resubmit-messages.ts
+function shouldResubmitMessages({
+  originalMaxToolInvocationStep,
+  originalMessageCount,
+  maxSteps,
+  messages
 }) {
-
-
-
-
-
-
-
-
-
-
-
-      controller.enqueue({ type: "finish" });
-    }
-  })
+  const lastMessage = messages[messages.length - 1];
+  const lastMessageStepStartCount = lastMessage.parts.filter(
+    (part) => part.type === "step-start"
+  ).length;
+  return (
+    // check if the feature is enabled:
+    maxSteps > 1 && // ensure there is a last message:
+    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+    (messages.length > originalMessageCount || lastMessageStepStartCount !== originalMaxToolInvocationStep) && // check that next step is possible:
+    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+    lastMessageStepStartCount < maxSteps
   );
 }
+function isAssistantMessageWithCompletedToolCalls(message) {
+  if (message.role !== "assistant") {
+    return false;
+  }
+  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+    return part.type === "step-start" ? index : lastIndex;
+  }, -1);
+  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+}
 
-// src/ui/
-var
-
-
-
-
-
-
-
-}) {
-
-
-
-
-
-
-    },
-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
-    credentials
-  }) : await fetch2(api, {
-    method: "POST",
-    body: JSON.stringify(body),
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
-    credentials
-  });
-  if (!response.ok) {
-    throw new Error(
-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+// src/ui/chat-store.ts
+var ChatStore = class {
+  constructor({
+    chats = {},
+    generateId: generateId3,
+    transport,
+    maxSteps = 1,
+    messageMetadataSchema,
+    dataPartSchemas,
+    createChat
+  }) {
+    this.createChat = createChat;
+    this.chats = new Map(
+      Object.entries(chats).map(([id, chat]) => [
+        id,
+        this.createChat({ messages: chat.messages })
+      ])
    );
+    this.maxSteps = maxSteps;
+    this.transport = transport;
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.generateId = generateId3 != null ? generateId3 : import_provider_utils4.generateId;
+    this.messageMetadataSchema = messageMetadataSchema;
+    this.dataPartSchemas = dataPartSchemas;
  }
-
-
-  }
-  return (0, import_provider_utils3.parseJsonEventStream)({
-    stream: response.body,
-    schema: uiMessageStreamPartSchema
-  }).pipeThrough(
-    new TransformStream({
-      async transform(part, controller) {
-        if (!part.success) {
-          throw part.error;
-        }
-        controller.enqueue(part.value);
-      }
-    })
-  );
-}
-async function fetchTextStream({
-  api,
-  body,
-  credentials,
-  headers,
-  abortController,
-  fetch: fetch2 = getOriginalFetch(),
-  requestType = "generate"
-}) {
-  var _a17, _b, _c;
-  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
-    method: "GET",
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
-    credentials
-  }) : await fetch2(api, {
-    method: "POST",
-    body: JSON.stringify(body),
-    headers: {
-      "Content-Type": "application/json",
-      ...headers
-    },
-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
-    credentials
-  });
-  if (!response.ok) {
-    throw new Error(
-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
-    );
-  }
-  if (!response.body) {
-    throw new Error("The response body is empty.");
-  }
-  return transformTextToUiMessageStream({
-    stream: response.body.pipeThrough(new TextDecoderStream())
-  });
-}
-async function consumeUIMessageStream({
-  stream,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  lastMessage,
-  messageMetadataSchema
-}) {
-  const state = createStreamingUIMessageState({
-    lastMessage: lastMessage ? structuredClone(lastMessage) : void 0,
-    newMessageId: generateId3()
-  });
-  const runUpdateMessageJob = async (job) => {
-    await job({
-      state,
-      write: () => {
-        onUpdate({ message: state.message });
-      }
-    });
-  };
-  await consumeStream({
-    stream: processUIMessageStream({
-      stream,
-      onToolCall,
-      messageMetadataSchema,
-      runUpdateMessageJob
-    }),
-    onError: (error) => {
-      throw error;
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message: state.message });
-}
-async function callChatApi({
-  api,
-  body,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  abortController,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  fetch: fetch2 = getOriginalFetch(),
-  lastMessage,
-  requestType = "generate",
-  messageMetadataSchema
-}) {
-  const stream = streamProtocol === "text" ? await fetchTextStream({
-    api,
-    body,
-    credentials,
-    headers,
-    abortController,
-    fetch: fetch2,
-    requestType
-  }) : await fetchUIMessageStream({
-    api,
-    body,
-    credentials,
-    headers,
-    abortController,
-    fetch: fetch2,
-    requestType
-  });
-  await consumeUIMessageStream({
-    stream,
-    onUpdate,
-    onFinish,
-    onToolCall,
-    generateId: generateId3,
-    lastMessage,
-    messageMetadataSchema
-  });
-}
-
-// src/ui/call-completion-api.ts
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-
-// src/ui/process-text-stream.ts
-async function processTextStream({
-  stream,
-  onTextPart
-}) {
-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) {
-      break;
-    }
-    await onTextPart(value);
-  }
-}
-
-// src/ui/call-completion-api.ts
-var getOriginalFetch2 = () => fetch;
-async function callCompletionApi({
-  api,
-  prompt,
-  credentials,
-  headers,
-  body,
-  streamProtocol = "data",
-  setCompletion,
-  setLoading,
-  setError,
-  setAbortController,
-  onFinish,
-  onError,
-  fetch: fetch2 = getOriginalFetch2()
-}) {
-  var _a17;
-  try {
-    setLoading(true);
-    setError(void 0);
-    const abortController = new AbortController();
-    setAbortController(abortController);
-    setCompletion("");
-    const response = await fetch2(api, {
-      method: "POST",
-      body: JSON.stringify({
-        prompt,
-        ...body
-      }),
-      credentials,
-      headers: {
-        "Content-Type": "application/json",
-        ...headers
-      },
-      signal: abortController.signal
-    }).catch((err) => {
-      throw err;
-    });
-    if (!response.ok) {
-      throw new Error(
-        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
-      );
-    }
-    if (!response.body) {
-      throw new Error("The response body is empty.");
-    }
-    let result = "";
-    switch (streamProtocol) {
-      case "text": {
-        await processTextStream({
-          stream: response.body,
-          onTextPart: (chunk) => {
-            result += chunk;
-            setCompletion(result);
-          }
-        });
-        break;
-      }
-      case "data": {
-        await consumeStream({
-          stream: (0, import_provider_utils4.parseJsonEventStream)({
-            stream: response.body,
-            schema: uiMessageStreamPartSchema
-          }).pipeThrough(
-            new TransformStream({
-              async transform(part) {
-                if (!part.success) {
-                  throw part.error;
-                }
-                const streamPart = part.value;
-                if (streamPart.type === "text") {
-                  result += streamPart.text;
-                  setCompletion(result);
-                } else if (streamPart.type === "error") {
-                  throw new Error(streamPart.errorText);
-                }
-              }
-            })
-          ),
-          onError: (error) => {
-            throw error;
-          }
-        });
-        break;
-      }
-      default: {
-        const exhaustiveCheck = streamProtocol;
-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
-      }
-    }
-    if (onFinish) {
-      onFinish(prompt, result);
-    }
-    setAbortController(null);
-    return result;
-  } catch (err) {
-    if (err.name === "AbortError") {
-      setAbortController(null);
-      return null;
-    }
-    if (err instanceof Error) {
-      if (onError) {
-        onError(err);
-      }
-    }
-    setError(err);
-  } finally {
-    setLoading(false);
-  }
-}
-
-// src/ui/chat-store.ts
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
-
-// src/ui/should-resubmit-messages.ts
-function shouldResubmitMessages({
-  originalMaxToolInvocationStep,
-  originalMessageCount,
-  maxSteps,
-  messages
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  return (
-    // check if the feature is enabled:
-    maxSteps > 1 && // ensure there is a last message:
-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
-  );
-}
-function isAssistantMessageWithCompletedToolCalls(message) {
-  if (message.role !== "assistant") {
-    return false;
-  }
-  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
-    return part.type === "step-start" ? index : lastIndex;
-  }, -1);
-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
-}
-
-// src/ui/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
-  }
-  invocationPart.toolInvocation = {
-    ...invocationPart.toolInvocation,
-    state: "result",
-    result
-  };
-}
-
-// src/ui/chat-store.ts
-var ChatStore = class {
-  constructor({
-    chats = {},
-    generateId: generateId3,
-    transport,
-    maxSteps = 1,
-    messageMetadataSchema,
-    dataPartSchemas,
-    createChat
-  }) {
-    this.createChat = createChat;
-    this.chats = new Map(
-      Object.entries(chats).map(([id, chat]) => [
-        id,
-        this.createChat({ messages: chat.messages })
-      ])
-    );
-    this.maxSteps = maxSteps;
-    this.transport = transport;
-    this.subscribers = /* @__PURE__ */ new Set();
-    this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
-    this.messageMetadataSchema = messageMetadataSchema;
-    this.dataPartSchemas = dataPartSchemas;
-  }
-  hasChat(id) {
-    return this.chats.has(id);
+-
+-
+-  }
+-  return (0, import_provider_utils3.parseJsonEventStream)({
+-    stream: response.body,
+-    schema: uiMessageStreamPartSchema
+-  }).pipeThrough(
+-    new TransformStream({
+-      async transform(part, controller) {
+-        if (!part.success) {
+-          throw part.error;
+-        }
+-        controller.enqueue(part.value);
+-      }
+-    })
+-  );
+-}
+-async function fetchTextStream({
+-  api,
+-  body,
+-  credentials,
+-  headers,
+-  abortController,
+-  fetch: fetch2 = getOriginalFetch(),
+-  requestType = "generate"
+-}) {
+-  var _a17, _b, _c;
+-  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+-    method: "GET",
+-    headers: {
+-      "Content-Type": "application/json",
+-      ...headers
+-    },
+-    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+-    credentials
+-  }) : await fetch2(api, {
+-    method: "POST",
+-    body: JSON.stringify(body),
+-    headers: {
+-      "Content-Type": "application/json",
+-      ...headers
+-    },
+-    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+-    credentials
+-  });
+-  if (!response.ok) {
+-    throw new Error(
+-      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+-    );
+-  }
+-  if (!response.body) {
+-    throw new Error("The response body is empty.");
+-  }
+-  return transformTextToUiMessageStream({
+-    stream: response.body.pipeThrough(new TextDecoderStream())
+-  });
+-}
+-async function consumeUIMessageStream({
+-  stream,
+-  onUpdate,
+-  onFinish,
+-  onToolCall,
+-  generateId: generateId3,
+-  lastMessage,
+-  messageMetadataSchema
+-}) {
+-  const state = createStreamingUIMessageState({
+-    lastMessage: lastMessage ? structuredClone(lastMessage) : void 0,
+-    newMessageId: generateId3()
+-  });
+-  const runUpdateMessageJob = async (job) => {
+-    await job({
+-      state,
+-      write: () => {
+-        onUpdate({ message: state.message });
+-      }
+-    });
+-  };
+-  await consumeStream({
+-    stream: processUIMessageStream({
+-      stream,
+-      onToolCall,
+-      messageMetadataSchema,
+-      runUpdateMessageJob
+-    }),
+-    onError: (error) => {
+-      throw error;
+-    }
+-  });
+-  onFinish == null ? void 0 : onFinish({ message: state.message });
+-}
+-async function callChatApi({
+-  api,
+-  body,
+-  streamProtocol = "ui-message",
+-  credentials,
+-  headers,
+-  abortController,
+-  onUpdate,
+-  onFinish,
+-  onToolCall,
+-  generateId: generateId3,
+-  fetch: fetch2 = getOriginalFetch(),
+-  lastMessage,
+-  requestType = "generate",
+-  messageMetadataSchema
+-}) {
+-  const stream = streamProtocol === "text" ? await fetchTextStream({
+-    api,
+-    body,
+-    credentials,
+-    headers,
+-    abortController,
+-    fetch: fetch2,
+-    requestType
+-  }) : await fetchUIMessageStream({
+-    api,
+-    body,
+-    credentials,
+-    headers,
+-    abortController,
+-    fetch: fetch2,
+-    requestType
+-  });
+-  await consumeUIMessageStream({
+-    stream,
+-    onUpdate,
+-    onFinish,
+-    onToolCall,
+-    generateId: generateId3,
+-    lastMessage,
+-    messageMetadataSchema
+-  });
+-}
+-
+-// src/ui/call-completion-api.ts
+-var import_provider_utils4 = require("@ai-sdk/provider-utils");
+-
+-// src/ui/process-text-stream.ts
+-async function processTextStream({
+-  stream,
+-  onTextPart
+-}) {
+-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+-  while (true) {
+-    const { done, value } = await reader.read();
+-    if (done) {
+-      break;
+-    }
+-    await onTextPart(value);
+-  }
+-}
+-
+-// src/ui/call-completion-api.ts
+-var getOriginalFetch2 = () => fetch;
+-async function callCompletionApi({
+-  api,
+-  prompt,
+-  credentials,
+-  headers,
+-  body,
+-  streamProtocol = "data",
+-  setCompletion,
+-  setLoading,
+-  setError,
+-  setAbortController,
+-  onFinish,
+-  onError,
+-  fetch: fetch2 = getOriginalFetch2()
+-}) {
+-  var _a17;
+-  try {
+-    setLoading(true);
+-    setError(void 0);
+-    const abortController = new AbortController();
+-    setAbortController(abortController);
+-    setCompletion("");
+-    const response = await fetch2(api, {
+-      method: "POST",
+-      body: JSON.stringify({
+-        prompt,
+-        ...body
+-      }),
+-      credentials,
+-      headers: {
+-        "Content-Type": "application/json",
+-        ...headers
+-      },
+-      signal: abortController.signal
+-    }).catch((err) => {
+-      throw err;
+-    });
+-    if (!response.ok) {
+-      throw new Error(
+-        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+-      );
+-    }
+-    if (!response.body) {
+-      throw new Error("The response body is empty.");
+-    }
+-    let result = "";
+-    switch (streamProtocol) {
+-      case "text": {
+-        await processTextStream({
+-          stream: response.body,
+-          onTextPart: (chunk) => {
+-            result += chunk;
+-            setCompletion(result);
+-          }
+-        });
+-        break;
+-      }
+-      case "data": {
+-        await consumeStream({
+-          stream: (0, import_provider_utils4.parseJsonEventStream)({
+-            stream: response.body,
+-            schema: uiMessageStreamPartSchema
+-          }).pipeThrough(
+-            new TransformStream({
+-              async transform(part) {
+-                if (!part.success) {
+-                  throw part.error;
+-                }
+-                const streamPart = part.value;
+-                if (streamPart.type === "text") {
+-                  result += streamPart.text;
+-                  setCompletion(result);
+-                } else if (streamPart.type === "error") {
+-                  throw new Error(streamPart.errorText);
+-                }
+-              }
+-            })
+-          ),
+-          onError: (error) => {
+-            throw error;
+-          }
+-        });
+-        break;
+-      }
+-      default: {
+-        const exhaustiveCheck = streamProtocol;
+-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+-      }
+-    }
+-    if (onFinish) {
+-      onFinish(prompt, result);
+-    }
+-    setAbortController(null);
+-    return result;
+-  } catch (err) {
+-    if (err.name === "AbortError") {
+-      setAbortController(null);
+-      return null;
+-    }
+-    if (err instanceof Error) {
+-      if (onError) {
+-        onError(err);
+-      }
+-    }
+-    setError(err);
+-  } finally {
+-    setLoading(false);
+-  }
+-}
+-
+-// src/ui/chat-store.ts
+-var import_provider_utils5 = require("@ai-sdk/provider-utils");
+-
+-// src/ui/should-resubmit-messages.ts
+-function shouldResubmitMessages({
+-  originalMaxToolInvocationStep,
+-  originalMessageCount,
+-  maxSteps,
+-  messages
+-}) {
+-  var _a17;
+-  const lastMessage = messages[messages.length - 1];
+-  return (
+-    // check if the feature is enabled:
+-    maxSteps > 1 && // ensure there is a last message:
+-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
+-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps
+-  );
+-}
+-function isAssistantMessageWithCompletedToolCalls(message) {
+-  if (message.role !== "assistant") {
+-    return false;
+-  }
+-  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+-    return part.type === "step-start" ? index : lastIndex;
+-  }, -1);
+-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+-}
+-
+-// src/ui/update-tool-call-result.ts
+-function updateToolCallResult({
+-  messages,
+-  toolCallId,
+-  toolResult: result
+-}) {
+-  const lastMessage = messages[messages.length - 1];
+-  const invocationPart = lastMessage.parts.find(
+-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
+-  );
+-  if (invocationPart == null) {
+-    return;
+-  }
+-  invocationPart.toolInvocation = {
+-    ...invocationPart.toolInvocation,
+-    state: "result",
+-    result
+-  };
+-}
+-
+-// src/ui/chat-store.ts
+-var ChatStore = class {
+-  constructor({
+-    chats = {},
+-    generateId: generateId3,
+-    transport,
+-    maxSteps = 1,
+-    messageMetadataSchema,
+-    dataPartSchemas,
+-    createChat
+-  }) {
+-    this.createChat = createChat;
+-    this.chats = new Map(
+-      Object.entries(chats).map(([id, chat]) => [
+-        id,
+-        this.createChat({ messages: chat.messages })
+-      ])
+-    );
+-    this.maxSteps = maxSteps;
+-    this.transport = transport;
+-    this.subscribers = /* @__PURE__ */ new Set();
+-    this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
+-    this.messageMetadataSchema = messageMetadataSchema;
+-    this.dataPartSchemas = dataPartSchemas;
+-  }
+-  hasChat(id) {
+-    return this.chats.has(id);
++  hasChat(id) {
++    return this.chats.has(id);
   }
   addChat(id, messages) {
     this.chats.set(id, this.createChat({ messages }));
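Note on the hunk above: step tracking moved off the tool invocations. Instead of a numeric `step` counter carried on each tool-invocation part, the step count of an assistant message is now derived from its "step-start" parts. A minimal sketch of that accounting (illustrative only, not code from the package):

    // A completed step is marked by a "step-start" part on the last assistant message.
    function countStepStarts(message) {
      return message.parts.filter((part) => part.type === "step-start").length;
    }

`shouldResubmitMessages` compares this count against `originalMaxToolInvocationStep` and `maxSteps`, and `isAssistantMessageWithCompletedToolCalls` only inspects tool invocations after the last "step-start" part.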
@@ -1722,14 +1477,14 @@ var ChatStore = class {
     return this.chats.size;
   }
   getStatus(id) {
-    return this.
+    return this.getChat(id).status;
   }
   setStatus({
     id,
     status,
     error
   }) {
-    const state = this.
+    const state = this.getChat(id);
     if (state.status === status)
       return;
     state.setStatus(status);
@@ -1737,13 +1492,13 @@ var ChatStore = class {
     this.emit({ type: "chat-status-changed", chatId: id, error });
   }
   getError(id) {
-    return this.
+    return this.getChat(id).error;
   }
   getMessages(id) {
-    return this.
+    return this.getChat(id).messages;
   }
   getLastMessage(id) {
-    const chat = this.
+    const chat = this.getChat(id);
     return chat.messages[chat.messages.length - 1];
   }
   subscribe(subscriber) {
@@ -1754,11 +1509,11 @@ var ChatStore = class {
     id,
     messages
   }) {
-    this.
+    this.getChat(id).setMessages(messages);
     this.emit({ type: "chat-messages-changed", chatId: id });
   }
   removeAssistantResponse(id) {
-    const chat = this.
+    const chat = this.getChat(id);
     const lastMessage = chat.messages[chat.messages.length - 1];
     if (lastMessage == null) {
       throw new Error("Cannot remove assistant response from empty chat");
@@ -1779,8 +1534,8 @@ var ChatStore = class {
     onFinish
   }) {
     var _a17;
-    const
-
+    const chat = this.getChat(chatId);
+    chat.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
     this.emit({
       type: "chat-messages-changed",
       chatId
@@ -1803,7 +1558,7 @@ var ChatStore = class {
     onToolCall,
     onFinish
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     if (chat.messages[chat.messages.length - 1].role === "assistant") {
       chat.popMessage();
       this.emit({
@@ -1847,7 +1602,7 @@ var ChatStore = class {
     toolCallId,
     result
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     chat.jobExecutor.run(async () => {
       updateToolCallResult({
         messages: chat.messages,
@@ -1872,7 +1627,7 @@ var ChatStore = class {
   }
   async stopStream({ chatId }) {
     var _a17;
-    const chat = this.
+    const chat = this.getChat(chatId);
     if (chat.status !== "streaming" && chat.status !== "submitted")
       return;
     if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
@@ -1885,7 +1640,7 @@ var ChatStore = class {
       subscriber.onChatChanged(event);
     }
   }
-
+  getChat(id) {
     if (!this.hasChat(id)) {
       this.addChat(id, []);
     }
@@ -1900,17 +1655,18 @@ var ChatStore = class {
     onToolCall,
     onFinish
   }) {
-    const chat = this.
+    const chat = this.getChat(chatId);
     this.setStatus({ id: chatId, status: "submitted", error: void 0 });
     const messageCount = chat.messages.length;
-    const
-
-
+    const lastMessage = chat.messages[chat.messages.length - 1];
+    const maxStep = lastMessage.parts.filter(
+      (part) => part.type === "step-start"
+    ).length;
     try {
-      const
+      const lastMessage2 = chat.messages[chat.messages.length - 1];
       const activeResponse = {
         state: createStreamingUIMessageState({
-          lastMessage: chat.snapshot ? chat.snapshot(
+          lastMessage: chat.snapshot ? chat.snapshot(lastMessage2) : lastMessage2,
          newMessageId: this.generateId()
        }),
        abortController: new AbortController()
@@ -1992,106 +1748,24 @@ var ChatStore = class {
     }
   }
 };
-
-
-
-
-
-
-
-
-
-
-
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchUIMessageStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
-  }
-};
-var TextStreamChatTransport = class {
-  constructor({
-    api,
-    credentials,
-    headers,
-    body,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchTextStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
+  );
+  if (invocationPart == null) {
+    return;
   }
-
+  invocationPart.toolInvocation = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
+  };
+}
 
 // src/ui/convert-file-list-to-file-ui-parts.ts
 async function convertFileListToFileUIParts(files) {
@@ -2125,7 +1799,7 @@ async function convertFileListToFileUIParts(files) {
 
 // src/ui/convert-to-model-messages.ts
 function convertToModelMessages(messages, options) {
-  var _a17
+  var _a17;
   const tools = (_a17 = options == null ? void 0 : options.tools) != null ? _a17 : {};
   const modelMessages = [];
   for (const message of messages) {
@@ -2156,6 +1830,9 @@ function convertToModelMessages(messages, options) {
       case "assistant": {
         if (message.parts != null) {
           let processBlock2 = function() {
+            if (block.length === 0) {
+              return;
+            }
             const content = [];
             for (const part of block) {
               switch (part.type) {
@@ -2230,33 +1907,20 @@ function convertToModelMessages(messages, options) {
             });
           }
           block = [];
-          blockHasToolInvocations = false;
-          currentStep++;
         };
         var processBlock = processBlock2;
-        let currentStep = 0;
-        let blockHasToolInvocations = false;
         let block = [];
         for (const part of message.parts) {
           switch (part.type) {
-            case "text":
-
-                processBlock2();
-              }
-              block.push(part);
-              break;
-            }
+            case "text":
+            case "reasoning":
             case "file":
-            case "
+            case "tool-invocation": {
               block.push(part);
               break;
             }
-            case "
-
-                processBlock2();
-              }
-              block.push(part);
-              blockHasToolInvocations = true;
+            case "step-start": {
+              processBlock2();
               break;
             }
           }
@@ -2275,12 +1939,117 @@ function convertToModelMessages(messages, options) {
     }
   }
 }
-  return modelMessages;
-}
-var convertToCoreMessages = convertToModelMessages;
+  return modelMessages;
+}
+var convertToCoreMessages = convertToModelMessages;
+
+// src/ui/default-chat-store-options.ts
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
+
+// src/ui/default-chat-transport.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+var getOriginalFetch2 = () => fetch;
+async function fetchUIMessageStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortController,
+  fetch: fetch2 = getOriginalFetch2(),
+  requestType = "generate"
+}) {
+  var _a17, _b, _c;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return (0, import_provider_utils5.parseJsonEventStream)({
+    stream: response.body,
+    schema: uiMessageStreamPartSchema
+  }).pipeThrough(
+    new TransformStream({
+      async transform(part, controller) {
+        if (!part.success) {
+          throw part.error;
+        }
+        controller.enqueue(part.value);
+      }
+    })
+  );
+}
+var DefaultChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequestBody
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequestBody = prepareRequestBody;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortController,
+    body,
+    headers,
+    requestType
+  }) {
+    var _a17, _b;
+    return fetchUIMessageStream({
+      api: this.api,
+      headers: {
+        ...this.headers,
+        ...headers
+      },
+      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      })) != null ? _b : {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      },
+      credentials: this.credentials,
+      abortController: () => abortController,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
 
 // src/ui/default-chat-store-options.ts
-var import_provider_utils6 = require("@ai-sdk/provider-utils");
 function defaultChatStoreOptions({
   api = "/api/chat",
   fetch: fetch2,
@@ -2311,6 +2080,119 @@ function defaultChatStoreOptions({
   });
 }
 
+// src/ui/transform-text-to-ui-message-stream.ts
+function transformTextToUiMessageStream({
+  stream
+}) {
+  return stream.pipeThrough(
+    new TransformStream({
+      start(controller) {
+        controller.enqueue({ type: "start" });
+        controller.enqueue({ type: "start-step" });
+      },
+      async transform(part, controller) {
+        controller.enqueue({ type: "text", text: part });
+      },
+      async flush(controller) {
+        controller.enqueue({ type: "finish-step" });
+        controller.enqueue({ type: "finish" });
+      }
+    })
+  );
+}
+
+// src/ui/text-stream-chat-transport.ts
+var getOriginalFetch3 = () => fetch;
+async function fetchTextStream({
+  api,
+  body,
+  credentials,
+  headers,
+  abortController,
+  fetch: fetch2 = getOriginalFetch3(),
+  requestType = "generate"
+}) {
+  var _a17, _b, _c;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_a17 = abortController == null ? void 0 : abortController()) == null ? void 0 : _a17.signal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: (_b = abortController == null ? void 0 : abortController()) == null ? void 0 : _b.signal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_c = await response.text()) != null ? _c : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return transformTextToUiMessageStream({
+    stream: response.body.pipeThrough(new TextDecoderStream())
+  });
+}
+var TextStreamChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequestBody
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequestBody = prepareRequestBody;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortController,
+    body,
+    headers,
+    requestType
+  }) {
+    var _a17, _b;
+    return fetchTextStream({
+      api: this.api,
+      headers: {
+        ...this.headers,
+        ...headers
+      },
+      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      })) != null ? _b : {
+        chatId,
+        messages,
+        ...this.body,
+        ...body
+      },
+      credentials: this.credentials,
+      abortController: () => abortController,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
+
 // src/ui-message-stream/handle-ui-message-stream-finish.ts
 function handleUIMessageStreamFinish({
   newMessageId,
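The two transports above share one constructor surface (`api`, `credentials`, `headers`, `body`, `fetch`, `prepareRequestBody`). A hedged usage sketch based only on the constructor options visible in this diff (header value and handler body are illustrative):

    // DefaultChatTransport POSTs the chat body as JSON and parses a UI message
    // event stream; TextStreamChatTransport treats the response as plain text.
    const transport = new DefaultChatTransport({
      api: "/api/chat",
      headers: { "x-example-header": "1" }, // merged with per-call headers
      // called with { chatId, messages, ...body }; its return value replaces the request body
      prepareRequestBody: ({ chatId, messages }) => ({ chatId, messages }),
    });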
@@ -3487,7 +3369,7 @@ async function invokeModelMaxImagesPerCall(model) {
 }
 
 // core/generate-object/generate-object.ts
-var
+var import_provider22 = require("@ai-sdk/provider");
 var import_provider_utils15 = require("@ai-sdk/provider-utils");
 
 // core/generate-text/extract-content-text.ts
@@ -3926,6 +3808,19 @@ function prepareCallSettings({
   };
 }
 
+// core/prompt/resolve-language-model.ts
+var import_gateway = require("@ai-sdk/gateway");
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : import_gateway.gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 var import_provider19 = require("@ai-sdk/provider");
 var import_provider_utils13 = require("@ai-sdk/provider-utils");
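The hunk above adds `GLOBAL_DEFAULT_PROVIDER`, a symbol-keyed slot on `globalThis` that `resolveLanguageModel` checks before falling back to the Vercel AI Gateway when a model is given as a string id. A sketch of how a global default provider could be registered (inferred from the symbol lookup above; a blessed public registration API is not part of this diff):

    import { GLOBAL_DEFAULT_PROVIDER, generateText } from "ai";
    import { openai } from "@ai-sdk/openai"; // any provider exposing .languageModel(id)

    globalThis[GLOBAL_DEFAULT_PROVIDER] = openai;

    // "gpt-4o-mini" now resolves via openai.languageModel("gpt-4o-mini")
    const { text } = await generateText({ model: "gpt-4o-mini", prompt: "Hello!" });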
@@ -4117,6 +4012,20 @@ async function standardizePrompt(prompt) {
   };
 }
 
+// core/prompt/wrap-gateway-error.ts
+var import_gateway2 = require("@ai-sdk/gateway");
+var import_provider20 = require("@ai-sdk/provider");
+function wrapGatewayError(error) {
+  if (import_gateway2.GatewayAuthenticationError.isInstance(error) || import_gateway2.GatewayModelNotFoundError.isInstance(error)) {
+    return new import_provider20.AISDKError({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
 // core/telemetry/stringify-for-telemetry.ts
 function stringifyForTelemetry(prompt) {
   return JSON.stringify(
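`wrapGatewayError` re-wraps gateway authentication and model-not-found failures as an `AISDKError` named "GatewayError", keeping the original error as `cause`. A sketch of what a caller might observe (the wrapper's call sites are only hinted at in this diff by the `try` block added to `generateObject` further down):

    import { generateObject } from "ai";
    import { z } from "zod";

    try {
      await generateObject({
        model: "openai/gpt-4o-mini", // string ids default to the gateway
        schema: z.object({ answer: z.string() }),
        prompt: "Answer briefly.",
      });
    } catch (error) {
      if (error instanceof Error && error.name === "GatewayError") {
        console.error(error.message, error.cause); // original gateway error preserved
      } else {
        throw error;
      }
    }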
@@ -4133,7 +4042,7 @@ function stringifyForTelemetry(prompt) {
 }
 
 // core/generate-object/output-strategy.ts
-var
+var import_provider21 = require("@ai-sdk/provider");
 var import_provider_utils14 = require("@ai-sdk/provider-utils");
 
 // src/util/async-iterable-stream.ts
@@ -4171,7 +4080,7 @@ var noSchemaOutputStrategy = {
     } : { success: true, value };
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
      functionality: "element streams in no-schema mode"
    });
  }
@@ -4193,7 +4102,7 @@ var objectOutputStrategy = (schema) => ({
     return (0, import_provider_utils14.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
      functionality: "element streams in object mode"
    });
  }
@@ -4221,10 +4130,10 @@ var arrayOutputStrategy = (schema) => {
       isFinalDelta
     }) {
       var _a17;
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: "value must be an object that contains an array of elements"
          })
@@ -4264,10 +4173,10 @@ var arrayOutputStrategy = (schema) => {
       };
     },
     async validateFinalResult(value) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: "value must be an object that contains an array of elements"
          })
@@ -4330,10 +4239,10 @@ var enumOutputStrategy = (enumValues) => {
       additionalProperties: false
     },
     async validateFinalResult(value) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: 'value must be an object that contains a string in the "result" property.'
          })
@@ -4342,17 +4251,17 @@ var enumOutputStrategy = (enumValues) => {
       const result = value.result;
       return enumValues.includes(result) ? { success: true, value: result } : {
         success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: "value must be a string in the enum"
        })
      };
    },
    async validatePartialResult({ value, textDelta }) {
-      if (!(0,
+      if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
        return {
          success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: 'value must be an object that contains a string in the "result" property.'
          })
@@ -4365,7 +4274,7 @@ var enumOutputStrategy = (enumValues) => {
       if (value.result.length === 0 || possibleEnumValues.length === 0) {
         return {
           success: false,
-          error: new
+          error: new import_provider21.TypeValidationError({
            value,
            cause: "value must be a string in the enum"
          })
@@ -4380,7 +4289,7 @@ var enumOutputStrategy = (enumValues) => {
       };
     },
     createElementStream() {
-      throw new
+      throw new import_provider21.UnsupportedFunctionalityError({
        functionality: "element streams in enum mode"
      });
    }
@@ -4525,12 +4434,6 @@ function validateObjectGenerationInput({
   }
 }
 
-// core/prompt/resolve-language-model.ts
-var import_gateway = require("@ai-sdk/gateway");
-function resolveLanguageModel(model) {
-  return typeof model === "string" ? import_gateway.gateway.languageModel(model) : model;
-}
-
 // core/generate-object/generate-object.ts
 var originalGenerateId = (0, import_provider_utils15.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
@@ -4580,208 +4483,212 @@ async function generateObject(options) {
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-        ...baseTelemetryAttributes,
-        // specific settings that only make sense on the outer level:
-        "ai.prompt": {
-          input: () => JSON.stringify({ system, prompt, messages })
-        },
-        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-        "ai.schema.name": schemaName,
-        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17;
-      let result;
-      let finishReason;
-      let usage;
-      let warnings;
-      let response;
-      let request;
-      let resultProviderMetadata;
-      const standardizedPrompt = await standardizePrompt({
-        system,
-        prompt,
-        messages
-      });
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: standardizedPrompt,
-        supportedUrls: await model.supportedUrls
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.messages": {
-                input: () => stringifyForTelemetry(promptMessages)
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
-              "gen_ai.request.temperature": callSettings.temperature,
-              "gen_ai.request.top_k": callSettings.topK,
-              "gen_ai.request.top_p": callSettings.topP
-            }
+  try {
+    return await recordSpan({
+      name: "ai.generateObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateObject",
+            telemetry
          }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17;
+        let result;
+        let finishReason;
+        let usage;
+        let warnings;
+        let response;
+        let request;
+        let resultProviderMetadata;
+        const standardizedPrompt = await standardizePrompt({
+          system,
+          prompt,
+          messages
+        });
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.supportedUrls
+        });
+        const generateResult = await retry(
+          () => recordSpan({
+            name: "ai.generateObject.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateObject.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b, _c, _d, _e, _f, _g, _h;
+              const result2 = await model.doGenerate({
+                responseFormat: {
+                  type: "json",
+                  schema: outputStrategy.jsonSchema,
+                  name: schemaName,
+                  description: schemaDescription
+                },
+                ...prepareCallSettings(settings),
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
              });
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+              const responseData = {
+                id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result2.response) == null ? void 0 : _g.headers,
+                body: (_h = result2.response) == null ? void 0 : _h.body
+              };
+              const text2 = extractContentText(result2.content);
+              if (text2 === void 0) {
+                throw new NoObjectGeneratedError({
+                  message: "No object generated: the model did not return a response.",
+                  response: responseData,
+                  usage: result2.usage,
+                  finishReason: result2.finishReason
+                });
+              }
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result2.finishReason,
+                    "ai.response.object": { output: () => text2 },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
|
4591
|
+
"ai.response.timestamp": responseData.timestamp.toISOString(),
|
4592
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
4593
|
+
"ai.usage.promptTokens": result2.usage.inputTokens,
|
4594
|
+
"ai.usage.completionTokens": result2.usage.outputTokens,
|
4595
|
+
// standardized gen-ai llm span attributes:
|
4596
|
+
"gen_ai.response.finish_reasons": [result2.finishReason],
|
4597
|
+
"gen_ai.response.id": responseData.id,
|
4598
|
+
"gen_ai.response.model": responseData.modelId,
|
4599
|
+
"gen_ai.usage.input_tokens": result2.usage.inputTokens,
|
4600
|
+
"gen_ai.usage.output_tokens": result2.usage.outputTokens
|
4601
|
+
}
|
4602
|
+
})
|
4603
|
+
);
|
4604
|
+
return { ...result2, objectText: text2, responseData };
|
4605
|
+
}
|
4606
|
+
})
|
4607
|
+
);
|
4608
|
+
result = generateResult.objectText;
|
4609
|
+
finishReason = generateResult.finishReason;
|
4610
|
+
usage = generateResult.usage;
|
4611
|
+
warnings = generateResult.warnings;
|
4612
|
+
resultProviderMetadata = generateResult.providerMetadata;
|
4613
|
+
request = (_a17 = generateResult.request) != null ? _a17 : {};
|
4614
|
+
response = generateResult.responseData;
|
4615
|
+
async function processResult(result2) {
|
4616
|
+
const parseResult = await (0, import_provider_utils15.safeParseJSON)({ text: result2 });
|
4617
|
+
if (!parseResult.success) {
|
4618
|
+
throw new NoObjectGeneratedError({
|
4619
|
+
message: "No object generated: could not parse the response.",
|
4620
|
+
cause: parseResult.error,
|
4621
|
+
text: result2,
|
4622
|
+
response,
|
4623
|
+
usage,
|
4624
|
+
finishReason
|
4625
|
+
});
|
4701
4626
|
}
|
4702
|
-
|
4703
|
-
|
4704
|
-
|
4705
|
-
|
4706
|
-
|
4707
|
-
|
4708
|
-
|
4709
|
-
|
4710
|
-
|
4711
|
-
|
4712
|
-
|
4713
|
-
|
4714
|
-
|
4715
|
-
|
4716
|
-
|
4717
|
-
|
4718
|
-
|
4719
|
-
usage,
|
4720
|
-
finishReason
|
4721
|
-
});
|
4722
|
-
}
|
4723
|
-
const validationResult = await outputStrategy.validateFinalResult(
|
4724
|
-
parseResult.value,
|
4725
|
-
{
|
4726
|
-
text: result2,
|
4727
|
-
response,
|
4728
|
-
usage
|
4627
|
+
const validationResult = await outputStrategy.validateFinalResult(
|
4628
|
+
parseResult.value,
|
4629
|
+
{
|
4630
|
+
text: result2,
|
4631
|
+
response,
|
4632
|
+
usage
|
4633
|
+
}
|
4634
|
+
);
|
4635
|
+
if (!validationResult.success) {
|
4636
|
+
throw new NoObjectGeneratedError({
|
4637
|
+
message: "No object generated: response did not match schema.",
|
4638
|
+
cause: validationResult.error,
|
4639
|
+
text: result2,
|
4640
|
+
response,
|
4641
|
+
usage,
|
4642
|
+
finishReason
|
4643
|
+
});
|
4729
4644
|
}
|
4730
|
-
|
4731
|
-
if (!validationResult.success) {
|
4732
|
-
throw new NoObjectGeneratedError({
|
4733
|
-
message: "No object generated: response did not match schema.",
|
4734
|
-
cause: validationResult.error,
|
4735
|
-
text: result2,
|
4736
|
-
response,
|
4737
|
-
usage,
|
4738
|
-
finishReason
|
4739
|
-
});
|
4645
|
+
return validationResult.value;
|
4740
4646
|
}
|
4741
|
-
|
4742
|
-
|
4743
|
-
|
4744
|
-
|
4745
|
-
|
4746
|
-
|
4747
|
-
|
4748
|
-
|
4749
|
-
|
4750
|
-
|
4751
|
-
|
4752
|
-
|
4647
|
+
let object2;
|
4648
|
+
try {
|
4649
|
+
object2 = await processResult(result);
|
4650
|
+
} catch (error) {
|
4651
|
+
if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider22.JSONParseError.isInstance(error.cause) || import_provider22.TypeValidationError.isInstance(error.cause))) {
|
4652
|
+
const repairedText = await repairText({
|
4653
|
+
text: result,
|
4654
|
+
error: error.cause
|
4655
|
+
});
|
4656
|
+
if (repairedText === null) {
|
4657
|
+
throw error;
|
4658
|
+
}
|
4659
|
+
object2 = await processResult(repairedText);
|
4660
|
+
} else {
|
4753
4661
|
throw error;
|
4754
4662
|
}
|
4755
|
-
object2 = await processResult(repairedText);
|
4756
|
-
} else {
|
4757
|
-
throw error;
|
4758
4663
|
}
|
4664
|
+
span.setAttributes(
|
4665
|
+
selectTelemetryAttributes({
|
4666
|
+
telemetry,
|
4667
|
+
attributes: {
|
4668
|
+
"ai.response.finishReason": finishReason,
|
4669
|
+
"ai.response.object": {
|
4670
|
+
output: () => JSON.stringify(object2)
|
4671
|
+
},
|
4672
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
4673
|
+
"ai.usage.promptTokens": usage.inputTokens,
|
4674
|
+
"ai.usage.completionTokens": usage.outputTokens
|
4675
|
+
}
|
4676
|
+
})
|
4677
|
+
);
|
4678
|
+
return new DefaultGenerateObjectResult({
|
4679
|
+
object: object2,
|
4680
|
+
finishReason,
|
4681
|
+
usage,
|
4682
|
+
warnings,
|
4683
|
+
request,
|
4684
|
+
response,
|
4685
|
+
providerMetadata: resultProviderMetadata
|
4686
|
+
});
|
4759
4687
|
}
|
4760
|
-
|
4761
|
-
|
4762
|
-
|
4763
|
-
|
4764
|
-
"ai.response.finishReason": finishReason,
|
4765
|
-
"ai.response.object": {
|
4766
|
-
output: () => JSON.stringify(object2)
|
4767
|
-
},
|
4768
|
-
// TODO rename telemetry attributes to inputTokens and outputTokens
|
4769
|
-
"ai.usage.promptTokens": usage.inputTokens,
|
4770
|
-
"ai.usage.completionTokens": usage.outputTokens
|
4771
|
-
}
|
4772
|
-
})
|
4773
|
-
);
|
4774
|
-
return new DefaultGenerateObjectResult({
|
4775
|
-
object: object2,
|
4776
|
-
finishReason,
|
4777
|
-
usage,
|
4778
|
-
warnings,
|
4779
|
-
request,
|
4780
|
-
response,
|
4781
|
-
providerMetadata: resultProviderMetadata
|
4782
|
-
});
|
4783
|
-
}
|
4784
|
-
});
|
4688
|
+
});
|
4689
|
+
} catch (error) {
|
4690
|
+
throw wrapGatewayError(error);
|
4691
|
+
}
|
4785
4692
|
}
|
4786
4693
|
var DefaultGenerateObjectResult = class {
|
4787
4694
|
constructor(options) {
|
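Net effect of this hunk: the entire generateObject body now runs inside a try/catch whose handler rethrows through wrapGatewayError, while parse and validation failures still surface as NoObjectGeneratedError. A minimal consumer-side sketch, assuming a configured model (the schema and prompt values are illustrative, not part of the diff):

import { generateObject, NoObjectGeneratedError, type LanguageModel } from "ai";
import { z } from "zod";

declare const model: LanguageModel; // assumption: a configured language model instance

async function main() {
  try {
    const { object } = await generateObject({
      model,
      schema: z.object({ title: z.string() }),
      prompt: "Suggest a title.",
    });
    console.log(object.title);
  } catch (error) {
    if (NoObjectGeneratedError.isInstance(error)) {
      // raised by the three NoObjectGeneratedError sites in the hunk above:
      // no response, unparsable JSON, or schema-invalid JSON
      console.error(error.message, error.finishReason);
    } else {
      throw error; // anything else now arrives rethrown through wrapGatewayError
    }
  }
}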
@@ -4962,7 +4869,9 @@ function streamObject(options) {
     headers,
     experimental_telemetry: telemetry,
     providerOptions,
-    onError
+    onError = ({ error }) => {
+      console.error(error);
+    },
     onFinish,
     _internal: {
       generateId: generateId3 = originalGenerateId2,
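The change above gives onError a default that logs via console.error instead of leaving stream errors unhandled. Overriding it works as before; a sketch, assuming a configured model (schema and prompt are illustrative):

import { streamObject, type LanguageModel } from "ai";
import { z } from "zod";

declare const model: LanguageModel; // assumption: provided elsewhere

const result = streamObject({
  model,
  schema: z.object({ steps: z.array(z.string()) }),
  prompt: "List the steps.",
  onError({ error }) {
    // replaces the new console.error default; the transform in the next hunk
    // delivers the error already passed through wrapGatewayError
    console.warn("streamObject error:", error);
  },
});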
@@ -5055,7 +4964,7 @@ var DefaultStreamObjectResult = class {
         transform(chunk, controller) {
           controller.enqueue(chunk);
           if (chunk.type === "error") {
-            onError
+            onError({ error: wrapGatewayError(chunk.error) });
           }
         }
       });
@@ -5455,8 +5364,8 @@ var DefaultStreamObjectResult = class {
 };
 
 // src/error/no-speech-generated-error.ts
-var
-var NoSpeechGeneratedError = class extends
+var import_provider23 = require("@ai-sdk/provider");
+var NoSpeechGeneratedError = class extends import_provider23.AISDKError {
   constructor(options) {
     super({
       name: "AI_NoSpeechGeneratedError",
@@ -5892,239 +5801,243 @@ async function generateText({
     messages
   });
   const tracer = getTracer(telemetry);
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17, _b, _c, _d, _e;
-      const callSettings2 = prepareCallSettings(settings);
-      let currentModelResponse;
-      let currentToolCalls = [];
-      let currentToolResults = [];
-      const responseMessages = [];
-      const steps = [];
-      do {
-        const stepInputMessages = [
-          ...initialPrompt.messages,
-          ...responseMessages
-        ];
-        const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
-          model,
-          steps,
-          stepNumber: steps.length
-        }));
-        const promptMessages = await convertToLanguageModelPrompt({
-          prompt: {
-            system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
-            messages: stepInputMessages
-          },
-          supportedUrls: await model.supportedUrls
-        });
-        const stepModel = resolveLanguageModel(
-          (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
-        );
-        const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
-          tools,
-          toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
-          activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
-        });
-        currentModelResponse = await retry(
-          () => {
-            var _a18;
-            return recordSpan({
-              name: "ai.generateText.doGenerate",
-              attributes: selectTelemetryAttributes({
-                telemetry,
-                attributes: {
-                  ...assembleOperationName({
-                    operationId: "ai.generateText.doGenerate",
-                    telemetry
-                  }),
-                  ...baseTelemetryAttributes,
-                  // model:
-                  "ai.model.provider": stepModel.provider,
-                  "ai.model.id": stepModel.modelId,
-                  // prompt:
-                  "ai.prompt.messages": {
-                    input: () => stringifyForTelemetry(promptMessages)
-                  },
-                  "ai.prompt.tools": {
-                    // convert the language model level tools:
-                    input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
-                  },
-                  "ai.prompt.toolChoice": {
-                    input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
-                  },
-                  // standardized gen-ai llm span attributes:
-                  "gen_ai.system": stepModel.provider,
-                  "gen_ai.request.model": stepModel.modelId,
-                  "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
-                  "gen_ai.request.max_tokens": settings.maxOutputTokens,
-                  "gen_ai.request.presence_penalty": settings.presencePenalty,
-                  "gen_ai.request.stop_sequences": settings.stopSequences,
-                  "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
-                  "gen_ai.request.top_k": settings.topK,
-                  "gen_ai.request.top_p": settings.topP
-                }
-              }),
-              tracer,
-              fn: async (span2) => {
-                var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
-                const result = await stepModel.doGenerate({
-                  ...callSettings2,
-                  tools: stepTools,
-                  toolChoice: stepToolChoice,
-                  responseFormat: output == null ? void 0 : output.responseFormat,
-                  prompt: promptMessages,
-                  providerOptions,
-                  abortSignal,
-                  headers
-                });
-                const responseData = {
-                  id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-                  timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
-                  modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
-                  headers: (_g = result.response) == null ? void 0 : _g.headers,
-                  body: (_h = result.response) == null ? void 0 : _h.body
-                };
-                span2.setAttributes(
-                  selectTelemetryAttributes({
-                    telemetry,
-                    attributes: {
-                      "ai.response.finishReason": result.finishReason,
-                      "ai.response.text": {
-                        output: () => extractContentText(result.content)
-                      },
-                      "ai.response.toolCalls": {
-                        output: () => {
-                          const toolCalls = asToolCalls(result.content);
-                          return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                        }
-                      },
-                      "ai.response.id": responseData.id,
-                      "ai.response.model": responseData.modelId,
-                      "ai.response.timestamp": responseData.timestamp.toISOString(),
-                      // TODO rename telemetry attributes to inputTokens and outputTokens
-                      "ai.usage.promptTokens": result.usage.inputTokens,
-                      "ai.usage.completionTokens": result.usage.outputTokens,
-                      // standardized gen-ai llm span attributes:
-                      "gen_ai.response.finish_reasons": [result.finishReason],
-                      "gen_ai.response.id": responseData.id,
-                      "gen_ai.response.model": responseData.modelId,
-                      "gen_ai.usage.input_tokens": result.usage.inputTokens,
-                      "gen_ai.usage.output_tokens": result.usage.outputTokens
-                    }
-                  })
-                );
-                return { ...result, response: responseData };
-              }
-            });
+  try {
+    return await recordSpan({
+      name: "ai.generateText",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateText",
+            telemetry
+          }),
+          ...baseTelemetryAttributes,
+          // model:
+          "ai.model.provider": model.provider,
+          "ai.model.id": model.modelId,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
           }
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17, _b, _c, _d, _e;
+        const callSettings2 = prepareCallSettings(settings);
+        let currentModelResponse;
+        let currentToolCalls = [];
+        let currentToolResults = [];
+        const responseMessages = [];
+        const steps = [];
+        do {
+          const stepInputMessages = [
+            ...initialPrompt.messages,
+            ...responseMessages
+          ];
+          const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+            model,
+            steps,
+            stepNumber: steps.length
+          }));
+          const promptMessages = await convertToLanguageModelPrompt({
+            prompt: {
+              system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
              messages: stepInputMessages
+            },
+            supportedUrls: await model.supportedUrls
+          });
+          const stepModel = resolveLanguageModel(
+            (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+          );
+          const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+            tools,
+            toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+            activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+          });
+          currentModelResponse = await retry(
+            () => {
+              var _a18;
+              return recordSpan({
+                name: "ai.generateText.doGenerate",
+                attributes: selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    ...assembleOperationName({
+                      operationId: "ai.generateText.doGenerate",
+                      telemetry
+                    }),
+                    ...baseTelemetryAttributes,
+                    // model:
+                    "ai.model.provider": stepModel.provider,
+                    "ai.model.id": stepModel.modelId,
+                    // prompt:
+                    "ai.prompt.messages": {
+                      input: () => stringifyForTelemetry(promptMessages)
+                    },
+                    "ai.prompt.tools": {
+                      // convert the language model level tools:
+                      input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+                    },
+                    "ai.prompt.toolChoice": {
+                      input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+                    },
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.system": stepModel.provider,
+                    "gen_ai.request.model": stepModel.modelId,
+                    "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+                    "gen_ai.request.max_tokens": settings.maxOutputTokens,
+                    "gen_ai.request.presence_penalty": settings.presencePenalty,
+                    "gen_ai.request.stop_sequences": settings.stopSequences,
+                    "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+                    "gen_ai.request.top_k": settings.topK,
+                    "gen_ai.request.top_p": settings.topP
+                  }
+                }),
+                tracer,
+                fn: async (span2) => {
+                  var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
+                  const result = await stepModel.doGenerate({
+                    ...callSettings2,
+                    tools: stepTools,
+                    toolChoice: stepToolChoice,
+                    responseFormat: output == null ? void 0 : output.responseFormat,
+                    prompt: promptMessages,
+                    providerOptions,
+                    abortSignal,
+                    headers
+                  });
+                  const responseData = {
+                    id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+                    timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+                    modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
+                    headers: (_g = result.response) == null ? void 0 : _g.headers,
+                    body: (_h = result.response) == null ? void 0 : _h.body
+                  };
+                  span2.setAttributes(
+                    selectTelemetryAttributes({
+                      telemetry,
+                      attributes: {
+                        "ai.response.finishReason": result.finishReason,
+                        "ai.response.text": {
+                          output: () => extractContentText(result.content)
+                        },
+                        "ai.response.toolCalls": {
+                          output: () => {
+                            const toolCalls = asToolCalls(result.content);
+                            return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                          }
+                        },
+                        "ai.response.id": responseData.id,
+                        "ai.response.model": responseData.modelId,
+                        "ai.response.timestamp": responseData.timestamp.toISOString(),
+                        // TODO rename telemetry attributes to inputTokens and outputTokens
+                        "ai.usage.promptTokens": result.usage.inputTokens,
+                        "ai.usage.completionTokens": result.usage.outputTokens,
+                        // standardized gen-ai llm span attributes:
+                        "gen_ai.response.finish_reasons": [result.finishReason],
+                        "gen_ai.response.id": responseData.id,
+                        "gen_ai.response.model": responseData.modelId,
+                        "gen_ai.usage.input_tokens": result.usage.inputTokens,
+                        "gen_ai.usage.output_tokens": result.usage.outputTokens
+                      }
+                    })
+                  );
+                  return { ...result, response: responseData };
+                }
+              });
+            }
+          );
+          currentToolCalls = await Promise.all(
+            currentModelResponse.content.filter(
+              (part) => part.type === "tool-call"
+            ).map(
+              (toolCall) => parseToolCall({
+                toolCall,
+                tools,
+                repairToolCall,
+                system,
+                messages: stepInputMessages
+              })
+            )
+          );
+          currentToolResults = tools == null ? [] : await executeTools({
+            toolCalls: currentToolCalls,
+            tools,
+            tracer,
+            telemetry,
+            messages: stepInputMessages,
+            abortSignal
+          });
+          const stepContent = asContent({
+            content: currentModelResponse.content,
+            toolCalls: currentToolCalls,
+            toolResults: currentToolResults
+          });
+          responseMessages.push(
+            ...toResponseMessages({
+              content: stepContent,
+              tools: tools != null ? tools : {}
             })
-          )
-          currentToolResults = tools == null ? [] : await executeTools({
-            toolCalls: currentToolCalls,
-            tools,
-            tracer,
-            telemetry,
-            messages: stepInputMessages,
-            abortSignal
-          });
-          const stepContent = asContent({
-            content: currentModelResponse.content,
-            toolCalls: currentToolCalls,
-            toolResults: currentToolResults
-          });
-          responseMessages.push(
-            ...toResponseMessages({
+          );
+          const currentStepResult = new DefaultStepResult({
              content: stepContent,
+            finishReason: currentModelResponse.finishReason,
+            usage: currentModelResponse.usage,
+            warnings: currentModelResponse.warnings,
+            providerMetadata: currentModelResponse.providerMetadata,
+            request: (_e = currentModelResponse.request) != null ? _e : {},
+            response: {
+              ...currentModelResponse.response,
+              // deep clone msgs to avoid mutating past messages in multi-step:
+              messages: structuredClone(responseMessages)
+            }
+          });
+          steps.push(currentStepResult);
+          await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+        } while (
+          // there are tool calls:
+          currentToolCalls.length > 0 && // all current tool calls have results:
+          currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+          !await isStopConditionMet({ stopConditions, steps })
+        );
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": currentModelResponse.finishReason,
+              "ai.response.text": {
+                output: () => extractContentText(currentModelResponse.content)
+              },
+              "ai.response.toolCalls": {
+                output: () => {
+                  const toolCalls = asToolCalls(currentModelResponse.content);
+                  return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+                }
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+              "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+            }
            })
          );
-          const
-        }
+        const lastStep = steps[steps.length - 1];
+        return new DefaultGenerateTextResult({
+          steps,
+          resolvedOutput: await (output == null ? void 0 : output.parseOutput(
+            { text: lastStep.text },
+            {
+              response: lastStep.response,
+              usage: lastStep.usage,
+              finishReason: lastStep.finishReason
+            }
+          ))
         });
-          currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
-          !await isStopConditionMet({ stopConditions, steps })
-        );
-        span.setAttributes(
-          selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              "ai.response.finishReason": currentModelResponse.finishReason,
-              "ai.response.text": {
-                output: () => extractContentText(currentModelResponse.content)
-              },
-              "ai.response.toolCalls": {
-                output: () => {
-                  const toolCalls = asToolCalls(currentModelResponse.content);
-                  return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
-                }
-              },
-              // TODO rename telemetry attributes to inputTokens and outputTokens
-              "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
-              "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
-            }
-          })
-        );
-        const lastStep = steps[steps.length - 1];
-        return new DefaultGenerateTextResult({
-          steps,
-          resolvedOutput: await (output == null ? void 0 : output.parseOutput(
-            { text: lastStep.text },
-            {
-              response: lastStep.response,
-              usage: lastStep.usage,
-              finishReason: lastStep.finishReason
-            }
-          ))
-        });
-      }
-    });
+      }
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 async function executeTools({
   toolCalls,
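As with generateObject, the whole generateText step loop is now wrapped so that any failure rethrows through wrapGatewayError, and onStepFinish fires once per iteration of the do/while above. A usage sketch, assuming a configured model (prompt text is illustrative):

import { generateText, type LanguageModel } from "ai";

declare const model: LanguageModel; // assumption: provided elsewhere

async function run() {
  try {
    const { text, steps } = await generateText({
      model,
      prompt: "Summarize the changelog.",
      onStepFinish(step) {
        // receives the DefaultStepResult constructed in the loop above
        console.log("step finished:", step.finishReason);
      },
    });
    console.log(text, `(${steps.length} steps)`);
  } catch (error) {
    console.error("generateText failed:", error); // gateway-wrapped
  }
}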
@@ -6365,7 +6278,7 @@ var object = ({
 
 // core/generate-text/smooth-stream.ts
 var import_provider_utils21 = require("@ai-sdk/provider-utils");
-var
+var import_provider24 = require("@ai-sdk/provider");
 var CHUNKING_REGEXPS = {
   word: /\S+\s+/m,
   line: /\n+/m
@@ -6395,7 +6308,7 @@ function smoothStream({
   } else {
     const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
     if (chunkingRegex == null) {
-      throw new
+      throw new import_provider24.InvalidArgumentError({
         argument: "chunking",
         message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
       });
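For reference, the arguments that avoid this InvalidArgumentError are "word", "line", or a custom RegExp. A sketch wiring smoothStream into streamText via the experimental_transform option that appears in the next hunk (the model is an assumption):

import { smoothStream, streamText, type LanguageModel } from "ai";

declare const model: LanguageModel; // assumption: provided elsewhere

const result = streamText({
  model,
  prompt: "Write a short paragraph.",
  // "word" and "line" select the CHUNKING_REGEXPS above; a RegExp is used as-is
  experimental_transform: smoothStream({ chunking: "word" }),
});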
@@ -6657,7 +6570,9 @@ function streamText({
     experimental_repairToolCall: repairToolCall,
     experimental_transform: transform,
     onChunk,
-    onError
+    onError = ({ error }) => {
+      console.error(error);
+    },
     onFinish,
     onStepFinish,
     _internal: {
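Same default-handler change as streamObject: omitting onError now logs stream errors with console.error rather than dropping them. Overriding it is unchanged; a sketch (the model and the monitoring sink are placeholders):

import { streamText, type LanguageModel } from "ai";

declare const model: LanguageModel;                   // assumption
declare function sendToMonitoring(e: unknown): void;  // placeholder sink

const result = streamText({
  model,
  prompt: "Stream something.",
  onError({ error }) {
    sendToMonitoring(error); // error is gateway-wrapped (see the next hunk)
  },
});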
@@ -6796,7 +6711,7 @@ var DefaultStreamTextResult = class {
           await (onChunk == null ? void 0 : onChunk({ chunk: part }));
         }
         if (part.type === "error") {
-          await
+          await onError({ error: wrapGatewayError(part.error) });
         }
         if (part.type === "text") {
           const latestContent = recordedContent[recordedContent.length - 1];
@@ -7452,7 +7367,7 @@ var DefaultStreamTextResult = class {
   } = {}) {
     const lastMessage = originalMessages[originalMessages.length - 1];
     const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-    const messageId = isContinuation ? lastMessage.id : newMessageId
+    const messageId = isContinuation ? lastMessage.id : newMessageId;
     const baseStream = this.fullStream.pipeThrough(
       new TransformStream({
         transform: async (part, controller) => {
@@ -7588,7 +7503,7 @@ var DefaultStreamTextResult = class {
     );
     return handleUIMessageStreamFinish({
       stream: baseStream,
-      newMessageId: messageId,
+      newMessageId: messageId != null ? messageId : this.generateId(),
       originalMessages,
       onFinish
     });
@@ -7892,7 +7807,7 @@ var doWrap = ({
 };
 
 // core/registry/custom-provider.ts
-var
+var import_provider25 = require("@ai-sdk/provider");
 function customProvider({
   languageModels,
   textEmbeddingModels,
@@ -7907,7 +7822,7 @@ function customProvider({
     if (fallbackProvider) {
       return fallbackProvider.languageModel(modelId);
     }
-    throw new
+    throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
   },
   textEmbeddingModel(modelId) {
     if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -7916,7 +7831,7 @@ function customProvider({
     if (fallbackProvider) {
       return fallbackProvider.textEmbeddingModel(modelId);
     }
-    throw new
+    throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
   },
   imageModel(modelId) {
     if (imageModels != null && modelId in imageModels) {
@@ -7925,19 +7840,19 @@ function customProvider({
     if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
       return fallbackProvider.imageModel(modelId);
     }
-    throw new
+    throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
   }
 };
 }
 var experimental_customProvider = customProvider;
 
 // core/registry/no-such-provider-error.ts
-var
+var import_provider26 = require("@ai-sdk/provider");
 var name16 = "AI_NoSuchProviderError";
 var marker16 = `vercel.ai.error.${name16}`;
 var symbol16 = Symbol.for(marker16);
 var _a16;
-var NoSuchProviderError = class extends
+var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
   constructor({
     modelId,
     modelType,
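These customProvider hunks only renumber the provider import, but they spell out the lookup behavior: each model type is served from its map, then from fallbackProvider, and otherwise throws NoSuchModelError. A sketch, assuming the @ai-sdk/openai provider package (alias and model ids are illustrative):

import { customProvider } from "ai";
import { openai } from "@ai-sdk/openai"; // assumption: installed provider package

const myProvider = customProvider({
  languageModels: { fast: openai("gpt-4o-mini") },
  fallbackProvider: openai,
});
myProvider.languageModel("fast");   // served from the languageModels map
myProvider.languageModel("gpt-4o"); // falls through to the fallback provider
// without fallbackProvider, an unknown id throws NoSuchModelError as shown above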
@@ -7951,13 +7866,13 @@ var NoSuchProviderError = class extends import_provider25.NoSuchModelError {
     this.availableProviders = availableProviders;
   }
   static isInstance(error) {
-    return
+    return import_provider26.AISDKError.hasMarker(error, marker16);
   }
 };
 _a16 = symbol16;
 
 // core/registry/provider-registry.ts
-var
+var import_provider27 = require("@ai-sdk/provider");
 function createProviderRegistry(providers, {
   separator = ":"
 } = {}) {
@@ -7996,7 +7911,7 @@ var DefaultProviderRegistry = class {
   splitId(id, modelType) {
     const index = id.indexOf(this.separator);
     if (index === -1) {
-      throw new
+      throw new import_provider27.NoSuchModelError({
         modelId: id,
         modelType,
         message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -8009,7 +7924,7 @@ var DefaultProviderRegistry = class {
     const [providerId, modelId] = this.splitId(id, "languageModel");
     const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
     }
     return model;
   }
@@ -8019,7 +7934,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider27.NoSuchModelError({
         modelId: id,
         modelType: "textEmbeddingModel"
       });
@@ -8032,7 +7947,7 @@ var DefaultProviderRegistry = class {
     const provider = this.getProvider(providerId);
     const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
     if (model == null) {
-      throw new
+      throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
     }
     return model;
   }
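The registry hunks likewise keep the logic and swap the import: ids must be "providerId" + separator + "modelId", and every unresolvable lookup throws NoSuchModelError. A sketch with the default ":" separator (the provider packages are assumptions):

import { createProviderRegistry } from "ai";
import { openai } from "@ai-sdk/openai";       // assumption
import { anthropic } from "@ai-sdk/anthropic"; // assumption

const registry = createProviderRegistry({ openai, anthropic });
const model = registry.languageModel("openai:gpt-4o"); // "providerId:modelId"
// an id without the separator throws the NoSuchModelError built in splitId above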
@@ -8606,8 +8521,8 @@ var MCPClient = class {
 };
 
 // src/error/no-transcript-generated-error.ts
-var
-var NoTranscriptGeneratedError = class extends
+var import_provider28 = require("@ai-sdk/provider");
+var NoTranscriptGeneratedError = class extends import_provider28.AISDKError {
   constructor(options) {
     super({
       name: "AI_NoTranscriptGeneratedError",
@@ -8676,6 +8591,7 @@ var DefaultTranscriptionResult = class {
   DefaultChatTransport,
   DownloadError,
   EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError,
   InvalidDataContentError,
   InvalidMessageRoleError,
@@ -8703,10 +8619,8 @@ var DefaultTranscriptionResult = class {
   ToolExecutionError,
   TypeValidationError,
   UnsupportedFunctionalityError,
-  appendClientMessage,
   asSchema,
   assistantModelMessageSchema,
-  callChatApi,
   callCompletionApi,
   convertFileListToFileUIParts,
   convertToCoreMessages,
@@ -8733,7 +8647,6 @@ var DefaultTranscriptionResult = class {
   experimental_generateImage,
   experimental_generateSpeech,
   experimental_transcribe,
-  extractMaxToolInvocationStep,
   extractReasoningMiddleware,
   generateId,
   generateObject,
@@ -8741,14 +8654,12 @@ var DefaultTranscriptionResult = class {
   getTextFromDataUrl,
   getToolInvocations,
   hasToolCall,
-  isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData,
   jsonSchema,
   modelMessageSchema,
   parsePartialJson,
   pipeTextStreamToResponse,
   pipeUIMessageStreamToResponse,
-  shouldResubmitMessages,
   simulateReadableStream,
   simulateStreamingMiddleware,
   smoothStream,
@@ -8758,7 +8669,6 @@ var DefaultTranscriptionResult = class {
   systemModelMessageSchema,
   tool,
   toolModelMessageSchema,
-  updateToolCallResult,
   userModelMessageSchema,
   wrapLanguageModel
 });
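Import-level summary of the export-map hunks above: six chat helpers are gone and one constant is new. A migration sketch (replacement APIs for the removed helpers, if any, are not part of this diff):

// no longer exported by "ai" in 5.0.0-alpha.8:
// appendClientMessage, callChatApi, extractMaxToolInvocationStep,
// isAssistantMessageWithCompletedToolCalls, shouldResubmitMessages,
// updateToolCallResult

// newly exported:
import { GLOBAL_DEFAULT_PROVIDER } from "ai";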