ai 5.0.0-alpha.1 → 5.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +146 -0
- package/dist/index.d.mts +404 -563
- package/dist/index.d.ts +404 -563
- package/dist/index.js +1416 -1439
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1350 -1362
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +3 -2
- package/dist/internal/index.d.ts +3 -2
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/package.json +6 -5
package/dist/index.js
CHANGED
@@ -22,10 +22,11 @@ var src_exports = {};
 __export(src_exports, {
   AISDKError: () => import_provider16.AISDKError,
   APICallError: () => import_provider16.APICallError,
-  ChatStore: () => ChatStore,
+  AbstractChat: () => AbstractChat,
   DefaultChatTransport: () => DefaultChatTransport,
   DownloadError: () => DownloadError,
   EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
+  GLOBAL_DEFAULT_PROVIDER: () => GLOBAL_DEFAULT_PROVIDER,
   InvalidArgumentError: () => InvalidArgumentError,
   InvalidDataContentError: () => InvalidDataContentError,
   InvalidMessageRoleError: () => InvalidMessageRoleError,
@@ -47,14 +48,14 @@ __export(src_exports, {
   NoSuchToolError: () => NoSuchToolError,
   Output: () => output_exports,
   RetryError: () => RetryError,
+  SerialJobExecutor: () => SerialJobExecutor,
+  TextStreamChatTransport: () => TextStreamChatTransport,
   ToolCallRepairError: () => ToolCallRepairError,
   ToolExecutionError: () => ToolExecutionError,
   TypeValidationError: () => import_provider16.TypeValidationError,
   UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
-  appendClientMessage: () => appendClientMessage,
-  asSchema: () => import_provider_utils26.asSchema,
+  asSchema: () => import_provider_utils25.asSchema,
   assistantModelMessageSchema: () => assistantModelMessageSchema,
-  callChatApi: () => callChatApi,
   callCompletionApi: () => callCompletionApi,
   convertFileListToFileUIParts: () => convertFileListToFileUIParts,
   convertToCoreMessages: () => convertToCoreMessages,
@@ -65,13 +66,12 @@ __export(src_exports, {
   coreToolMessageSchema: () => coreToolMessageSchema,
   coreUserMessageSchema: () => coreUserMessageSchema,
   cosineSimilarity: () => cosineSimilarity,
-  createIdGenerator: () => import_provider_utils26.createIdGenerator,
+  createIdGenerator: () => import_provider_utils25.createIdGenerator,
   createProviderRegistry: () => createProviderRegistry,
   createTextStreamResponse: () => createTextStreamResponse,
   createUIMessageStream: () => createUIMessageStream,
   createUIMessageStreamResponse: () => createUIMessageStreamResponse,
   customProvider: () => customProvider,
-  defaultChatStore: () => defaultChatStore,
   defaultSettingsMiddleware: () => defaultSettingsMiddleware,
   embed: () => embed,
   embedMany: () => embedMany,
@@ -81,37 +81,33 @@ __export(src_exports, {
   experimental_generateImage: () => generateImage,
   experimental_generateSpeech: () => generateSpeech,
   experimental_transcribe: () => transcribe,
-  extractMaxToolInvocationStep: () => extractMaxToolInvocationStep,
   extractReasoningMiddleware: () => extractReasoningMiddleware,
-  generateId: () => import_provider_utils26.generateId,
+  generateId: () => import_provider_utils25.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
   getTextFromDataUrl: () => getTextFromDataUrl,
   getToolInvocations: () => getToolInvocations,
   hasToolCall: () => hasToolCall,
-  isAssistantMessageWithCompletedToolCalls: () => isAssistantMessageWithCompletedToolCalls,
   isDeepEqualData: () => isDeepEqualData,
-  jsonSchema: () => import_provider_utils26.jsonSchema,
-  maxSteps: () => maxSteps,
+  jsonSchema: () => import_provider_utils25.jsonSchema,
   modelMessageSchema: () => modelMessageSchema,
   parsePartialJson: () => parsePartialJson,
   pipeTextStreamToResponse: () => pipeTextStreamToResponse,
   pipeUIMessageStreamToResponse: () => pipeUIMessageStreamToResponse,
-  shouldResubmitMessages: () => shouldResubmitMessages,
   simulateReadableStream: () => simulateReadableStream,
   simulateStreamingMiddleware: () => simulateStreamingMiddleware,
   smoothStream: () => smoothStream,
+  stepCountIs: () => stepCountIs,
   streamObject: () => streamObject,
   streamText: () => streamText,
   systemModelMessageSchema: () => systemModelMessageSchema,
   tool: () => tool,
   toolModelMessageSchema: () => toolModelMessageSchema,
-  updateToolCallResult: () => updateToolCallResult,
   userModelMessageSchema: () => userModelMessageSchema,
   wrapLanguageModel: () => wrapLanguageModel
 });
 module.exports = __toCommonJS(src_exports);
-var import_provider_utils26 = require("@ai-sdk/provider-utils");
+var import_provider_utils25 = require("@ai-sdk/provider-utils");
 
 // src/error/index.ts
 var import_provider16 = require("@ai-sdk/provider");
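Reading the four export-map hunks together: the alpha.1 UI helpers (`appendClientMessage`, `callChatApi`, `defaultChatStore`, `extractMaxToolInvocationStep`, `isAssistantMessageWithCompletedToolCalls`, `maxSteps`, `shouldResubmitMessages`, `updateToolCallResult`) are no longer exported, and the chat machinery is exposed through new primitives instead. A sketch of the new surface, using only names confirmed by the export map above (how they compose is illustrative, not documented here):

```js
// CommonJS, matching the dist/index.js shown in this diff.
const {
  AbstractChat,            // new: single-chat controller with pluggable transport
  DefaultChatTransport,    // POSTs UI messages to an API route
  TextStreamChatTransport, // new: consumes plain text streams
  SerialJobExecutor,       // new: serialized async job queue
  stepCountIs,             // new export, arriving as the maxSteps helper is removed
} = require("ai");
```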
@@ -538,19 +534,8 @@ function pipeTextStreamToResponse({
   });
 }
 
-// src/ui/append-client-message.ts
-function appendClientMessage({
-  messages,
-  message
-}) {
-  return [
-    ...messages.length > 0 && messages[messages.length - 1].id === message.id ? messages.slice(0, -1) : messages,
-    message
-  ];
-}
-
-// src/ui/call-chat-api.ts
-var import_provider_utils3 = require("@ai-sdk/provider-utils");
+// src/ui/call-completion-api.ts
+var import_provider_utils = require("@ai-sdk/provider-utils");
 
 // src/ui-message-stream/ui-message-stream-parts.ts
 var import_zod = require("zod");
@@ -591,9 +576,8 @@ var uiMessageStreamPartSchema = import_zod.z.union([
     providerMetadata: import_zod.z.record(import_zod.z.any()).optional()
   }),
   import_zod.z.object({
-    type: import_zod.z.literal("source"),
-    sourceType: import_zod.z.literal("url"),
-    id: import_zod.z.string(),
+    type: import_zod.z.literal("source-url"),
+    sourceId: import_zod.z.string(),
     url: import_zod.z.string(),
     title: import_zod.z.string().optional(),
     providerMetadata: import_zod.z.any().optional()
@@ -657,8 +641,170 @@ async function consumeStream({
   }
 }
 
+// src/ui/process-text-stream.ts
+async function processTextStream({
+  stream,
+  onTextPart
+}) {
+  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
+  while (true) {
+    const { done, value } = await reader.read();
+    if (done) {
+      break;
+    }
+    await onTextPart(value);
+  }
+}
+
+// src/ui/call-completion-api.ts
+var getOriginalFetch = () => fetch;
+async function callCompletionApi({
+  api,
+  prompt,
+  credentials,
+  headers,
+  body,
+  streamProtocol = "data",
+  setCompletion,
+  setLoading,
+  setError,
+  setAbortController,
+  onFinish,
+  onError,
+  fetch: fetch2 = getOriginalFetch()
+}) {
+  var _a17;
+  try {
+    setLoading(true);
+    setError(void 0);
+    const abortController = new AbortController();
+    setAbortController(abortController);
+    setCompletion("");
+    const response = await fetch2(api, {
+      method: "POST",
+      body: JSON.stringify({
+        prompt,
+        ...body
+      }),
+      credentials,
+      headers: {
+        "Content-Type": "application/json",
+        ...headers
+      },
+      signal: abortController.signal
+    }).catch((err) => {
+      throw err;
+    });
+    if (!response.ok) {
+      throw new Error(
+        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+      );
+    }
+    if (!response.body) {
+      throw new Error("The response body is empty.");
+    }
+    let result = "";
+    switch (streamProtocol) {
+      case "text": {
+        await processTextStream({
+          stream: response.body,
+          onTextPart: (chunk) => {
+            result += chunk;
+            setCompletion(result);
+          }
+        });
+        break;
+      }
+      case "data": {
+        await consumeStream({
+          stream: (0, import_provider_utils.parseJsonEventStream)({
+            stream: response.body,
+            schema: uiMessageStreamPartSchema
+          }).pipeThrough(
+            new TransformStream({
+              async transform(part) {
+                if (!part.success) {
+                  throw part.error;
+                }
+                const streamPart = part.value;
+                if (streamPart.type === "text") {
+                  result += streamPart.text;
+                  setCompletion(result);
+                } else if (streamPart.type === "error") {
+                  throw new Error(streamPart.errorText);
+                }
+              }
+            })
+          ),
+          onError: (error) => {
+            throw error;
+          }
+        });
+        break;
+      }
+      default: {
+        const exhaustiveCheck = streamProtocol;
+        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
+      }
+    }
+    if (onFinish) {
+      onFinish(prompt, result);
+    }
+    setAbortController(null);
+    return result;
+  } catch (err) {
+    if (err.name === "AbortError") {
+      setAbortController(null);
+      return null;
+    }
+    if (err instanceof Error) {
+      if (onError) {
+        onError(err);
+      }
+    }
+    setError(err);
+  } finally {
+    setLoading(false);
+  }
+}
+
+// src/ui/chat.ts
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
+
+// src/util/serial-job-executor.ts
+var SerialJobExecutor = class {
+  constructor() {
+    this.queue = [];
+    this.isProcessing = false;
+  }
+  async processQueue() {
+    if (this.isProcessing) {
+      return;
+    }
+    this.isProcessing = true;
+    while (this.queue.length > 0) {
+      await this.queue[0]();
+      this.queue.shift();
+    }
+    this.isProcessing = false;
+  }
+  async run(job) {
+    return new Promise((resolve, reject) => {
+      this.queue.push(async () => {
+        try {
+          await job();
+          resolve();
+        } catch (error) {
+          reject(error);
+        }
+      });
+      void this.processQueue();
+    });
+  }
+};
+
 // src/ui/process-ui-message-stream.ts
-var
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
 
 // src/util/merge-objects.ts
 function mergeObjects(base, overrides) {
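`SerialJobExecutor`, added above (and now exported), runs queued async jobs strictly one at a time: `run()` pushes a job and resolves once that job has finished, while `processQueue` drains the queue serially. A minimal usage sketch (the simulated work is hypothetical):

```js
const { SerialJobExecutor } = require("ai");

const executor = new SerialJobExecutor();
const log = [];

// Queue two jobs back to back without awaiting the first;
// the executor still runs them one at a time, in order.
void executor.run(async () => {
  await new Promise((resolve) => setTimeout(resolve, 50)); // simulated slow work
  log.push("first");
});
await executor.run(async () => {
  log.push("second"); // starts only after the first job completed
});

console.log(log); // ["first", "second"]
```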
@@ -694,7 +840,7 @@ function mergeObjects(base, overrides) {
 }
 
 // src/util/parse-partial-json.ts
-var
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
 
 // src/util/fix-json.ts
 function fixJson(input) {
@@ -1019,25 +1165,17 @@ async function parsePartialJson(jsonText) {
   if (jsonText === void 0) {
     return { value: void 0, state: "undefined-input" };
   }
-  let result = await (0,
+  let result = await (0, import_provider_utils2.safeParseJSON)({ text: jsonText });
   if (result.success) {
     return { value: result.value, state: "successful-parse" };
   }
-  result = await (0,
+  result = await (0, import_provider_utils2.safeParseJSON)({ text: fixJson(jsonText) });
   if (result.success) {
     return { value: result.value, state: "repaired-parse" };
   }
   return { value: void 0, state: "failed-parse" };
 }
 
-// src/ui/extract-max-tool-invocation-step.ts
-function extractMaxToolInvocationStep(toolInvocations) {
-  return toolInvocations == null ? void 0 : toolInvocations.reduce((max, toolInvocation) => {
-    var _a17;
-    return Math.max(max, (_a17 = toolInvocation.step) != null ? _a17 : 0);
-  }, 0);
-}
-
 // src/ui/get-tool-invocations.ts
 function getToolInvocations(message) {
   return message.parts.filter(
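`parsePartialJson` now delegates to `safeParseJSON` from `@ai-sdk/provider-utils`: it first attempts a strict parse, then retries on a `fixJson`-repaired copy, and reports which path succeeded. A sketch of the observable states (return shapes taken from the code above):

```js
const { parsePartialJson } = require("ai");

// Complete JSON parses on the first attempt:
await parsePartialJson('{"a": 1}');
// -> { value: { a: 1 }, state: "successful-parse" }

// Truncated JSON is repaired by fixJson before the second attempt:
await parsePartialJson('{"a": 1, "b": 2');
// -> { value: { a: 1, b: 2 }, state: "repaired-parse" }

// undefined input short-circuits:
await parsePartialJson(undefined);
// -> { value: undefined, state: "undefined-input" }
```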
@@ -1048,12 +1186,10 @@ function getToolInvocations(message) {
 // src/ui/process-ui-message-stream.ts
 function createStreamingUIMessageState({
   lastMessage,
-  newMessageId = "
+  newMessageId = ""
 } = {}) {
-  var _a17;
   const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
-  const
-  const message = isContinuation ? structuredClone(lastMessage) : {
+  const message = isContinuation ? lastMessage : {
     id: newMessageId,
     metadata: {},
     role: "assistant",
@@ -1063,8 +1199,7 @@ function createStreamingUIMessageState({
     message,
     activeTextPart: void 0,
     activeReasoningPart: void 0,
-    partialToolCalls: {},
-    step
+    partialToolCalls: {}
   };
 }
 function processUIMessageStream({
@@ -1095,7 +1230,7 @@ function processUIMessageStream({
       if (metadata != null) {
         const mergedMetadata = state.message.metadata != null ? mergeObjects(state.message.metadata, metadata) : metadata;
         if (messageMetadataSchema != null) {
-          await (0,
+          await (0, import_provider_utils3.validateTypes)({
             value: mergedMetadata,
             schema: messageMetadataSchema
           });
@@ -1147,16 +1282,13 @@
           write();
           break;
         }
-        case "source": {
+        case "source-url": {
           state.message.parts.push({
-            type: "source",
-
-
-
-
-            title: part.title,
-            providerMetadata: part.providerMetadata
-          }
+            type: "source-url",
+            sourceId: part.sourceId,
+            url: part.url,
+            title: part.title,
+            providerMetadata: part.providerMetadata
           });
           write();
           break;
@@ -1165,13 +1297,11 @@
           const toolInvocations = getToolInvocations(state.message);
           state.partialToolCalls[part.toolCallId] = {
             text: "",
-            step: state.step,
             toolName: part.toolName,
             index: toolInvocations.length
           };
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: void 0
@@ -1187,7 +1317,6 @@
           );
           updateToolInvocationPart(part.toolCallId, {
             state: "partial-call",
-            step: partialToolCall.step,
             toolCallId: part.toolCallId,
             toolName: partialToolCall.toolName,
             args: partialArgs
@@ -1198,7 +1327,6 @@
         case "tool-call": {
           updateToolInvocationPart(part.toolCallId, {
             state: "call",
-            step: state.step,
             toolCallId: part.toolCallId,
             toolName: part.toolName,
             args: part.args
@@ -1211,7 +1339,6 @@
           if (result != null) {
             updateToolInvocationPart(part.toolCallId, {
               state: "result",
-              step: state.step,
               toolCallId: part.toolCallId,
               toolName: part.toolName,
               args: part.args,
@@ -1250,7 +1377,6 @@
           break;
         }
         case "finish-step": {
-          state.step += 1;
           state.activeTextPart = void 0;
           state.activeReasoningPart = void 0;
           await updateMessageMetadata(part.metadata);
@@ -1292,14 +1418,7 @@
             (partArg) => part.type === partArg.type && part.id === partArg.id
           ) : void 0;
           if (existingPart != null) {
-
-            existingPart.value = mergeObjects(
-              existingPart.data,
-              part.data
-            );
-          } else {
-            existingPart.data = part.data;
-          }
+            existingPart.data = isObject(existingPart.data) && isObject(part.data) ? mergeObjects(existingPart.data, part.data) : part.data;
           } else {
             state.message.parts.push(part);
           }
@@ -1320,47 +1439,60 @@ function isObject(value) {
   return typeof value === "object" && value !== null;
 }
 
-// src/ui/transform-text-to-ui-message-stream.ts
-function transformTextToUiMessageStream({
-  stream
+// src/ui/should-resubmit-messages.ts
+function shouldResubmitMessages({
+  originalMaxToolInvocationStep,
+  originalMessageCount,
+  maxSteps,
+  messages
 }) {
-  return stream.pipeThrough(
-    new TransformStream({
-      start(controller) {
-        controller.enqueue({ type: "start" });
-        controller.enqueue({ type: "start-step" });
-      },
-      async transform(part, controller) {
-        controller.enqueue({ type: "text", text: part });
-      },
-      async flush(controller) {
-        controller.enqueue({ type: "finish-step" });
-        controller.enqueue({ type: "finish" });
-      }
-    })
+  const lastMessage = messages[messages.length - 1];
+  const lastMessageStepStartCount = lastMessage.parts.filter(
+    (part) => part.type === "step-start"
+  ).length;
+  return (
+    // check if the feature is enabled:
+    maxSteps > 1 && // ensure there is a last message:
+    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
+    (messages.length > originalMessageCount || lastMessageStepStartCount !== originalMaxToolInvocationStep) && // check that next step is possible:
+    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
+    lastMessageStepStartCount < maxSteps
   );
 }
+function isAssistantMessageWithCompletedToolCalls(message) {
+  if (!message) {
+    return false;
+  }
+  if (message.role !== "assistant") {
+    return false;
+  }
+  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
+    return part.type === "step-start" ? index : lastIndex;
+  }, -1);
+  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
+  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
+}
 
-// src/ui/
-var
+// src/ui/default-chat-transport.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var getOriginalFetch2 = () => fetch;
 async function fetchUIMessageStream({
   api,
   body,
-  streamProtocol = "ui-message",
   credentials,
   headers,
-  abortController,
-  fetch: fetch2 =
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch2(),
   requestType = "generate"
 }) {
-  var _a17
-  const response = requestType === "resume" ? await fetch2(`${api}?
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?id=${body.id}`, {
     method: "GET",
     headers: {
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   }) : await fetch2(api, {
     method: "POST",
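Note the accounting change in the rewritten `shouldResubmitMessages`: alpha.1 compared per-tool-invocation `step` counters (via the removed `extractMaxToolInvocationStep`), while alpha.10 counts `"step-start"` parts on the last assistant message. A sketch of the new check in isolation (`messages`, `maxSteps`, and the two `original*` snapshots are assumed inputs captured before the request):

```js
// Mirrors the internal logic above; shouldResubmitMessages itself
// is no longer exported in alpha.10.
const lastMessage = messages[messages.length - 1];
const stepStartCount = lastMessage.parts.filter(
  (part) => part.type === "step-start"
).length;

const shouldResubmit =
  maxSteps > 1 && // multi-step behavior enabled
  lastMessage != null &&
  // new steps actually appeared (guards against infinite retry loops):
  (messages.length > originalMessageCount ||
    stepStartCount !== originalMaxToolInvocationStep) &&
  // step budget not yet exhausted:
  stepStartCount < maxSteps;
// The real check additionally requires every tool invocation in the
// last step to carry a result (isAssistantMessageWithCompletedToolCalls).
```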
@@ -1369,20 +1501,18 @@ async function fetchUIMessageStream({
       "Content-Type": "application/json",
       ...headers
     },
-    signal:
+    signal: abortSignal,
     credentials
   });
   if (!response.ok) {
     throw new Error(
-      (
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
     );
   }
   if (!response.body) {
     throw new Error("The response body is empty.");
   }
-  return streamProtocol === "text" ? transformTextToUiMessageStream({
-    stream: response.body.pipeThrough(new TextDecoderStream())
-  }) : (0, import_provider_utils3.parseJsonEventStream)({
+  return (0, import_provider_utils4.parseJsonEventStream)({
     stream: response.body,
     schema: uiMessageStreamPartSchema
   }).pipeThrough(
@@ -1396,552 +1526,291 @@ async function fetchUIMessageStream({
     })
   );
 }
-
-
-
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  lastMessage,
-  messageMetadataSchema
-}) {
-  const state = createStreamingUIMessageState({
-    lastMessage,
-    newMessageId: generateId3()
-  });
-  const runUpdateMessageJob = async (job) => {
-    await job({
-      state,
-      write: () => {
-        onUpdate({ message: state.message });
-      }
-    });
-  };
-  await consumeStream({
-    stream: processUIMessageStream({
-      stream,
-      onToolCall,
-      messageMetadataSchema,
-      runUpdateMessageJob
-    }),
-    onError: (error) => {
-      throw error;
-    }
-  });
-  onFinish == null ? void 0 : onFinish({ message: state.message });
-}
-async function callChatApi({
-  api,
-  body,
-  streamProtocol = "ui-message",
-  credentials,
-  headers,
-  abortController,
-  onUpdate,
-  onFinish,
-  onToolCall,
-  generateId: generateId3,
-  fetch: fetch2 = getOriginalFetch(),
-  lastMessage,
-  requestType = "generate",
-  messageMetadataSchema
-}) {
-  const stream = await fetchUIMessageStream({
-    api,
-    body,
-    streamProtocol,
+var DefaultChatTransport = class {
+  constructor({
+    api = "/api/chat",
     credentials,
     headers,
-
+    body,
     fetch: fetch2,
-
-  })
-
-
-
-
-
-
-    lastMessage,
-    messageMetadataSchema
-  });
-}
-
-// src/ui/call-completion-api.ts
-var import_provider_utils4 = require("@ai-sdk/provider-utils");
-
-// src/ui/process-text-stream.ts
-async function processTextStream({
-  stream,
-  onTextPart
-}) {
-  const reader = stream.pipeThrough(new TextDecoderStream()).getReader();
-  while (true) {
-    const { done, value } = await reader.read();
-    if (done) {
-      break;
-    }
-    await onTextPart(value);
+    prepareRequest
+  } = {}) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
   }
-}
-
-// src/ui/call-completion-api.ts
-var getOriginalFetch2 = () => fetch;
-async function callCompletionApi({
-  api,
-  prompt,
-  credentials,
-  headers,
-  body,
-  streamProtocol = "data",
-  setCompletion,
-  setLoading,
-  setError,
-  setAbortController,
-  onFinish,
-  onError,
-  fetch: fetch2 = getOriginalFetch2()
-}) {
-  var _a17;
-  try {
-    setLoading(true);
-    setError(void 0);
-    const abortController = new AbortController();
-    setAbortController(abortController);
-    setCompletion("");
-    const response = await fetch2(api, {
-      method: "POST",
-      body: JSON.stringify({
-        prompt,
-        ...body
-      }),
-      credentials,
-      headers: {
-        "Content-Type": "application/json",
-        ...headers
-      },
-      signal: abortController.signal
-    }).catch((err) => {
-      throw err;
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
     });
-    if (!response.ok) {
-      throw new Error(
-        (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
-      );
-    }
-    if (!response.body) {
-      throw new Error("The response body is empty.");
-    }
-    let result = "";
-    switch (streamProtocol) {
-      case "text": {
-        await processTextStream({
-          stream: response.body,
-          onTextPart: (chunk) => {
-            result += chunk;
-            setCompletion(result);
-          }
-        });
-        break;
-      }
-      case "data": {
-        await consumeStream({
-          stream: (0, import_provider_utils4.parseJsonEventStream)({
-            stream: response.body,
-            schema: uiMessageStreamPartSchema
-          }).pipeThrough(
-            new TransformStream({
-              async transform(part) {
-                if (!part.success) {
-                  throw part.error;
-                }
-                const streamPart = part.value;
-                if (streamPart.type === "text") {
-                  result += streamPart.text;
-                  setCompletion(result);
-                } else if (streamPart.type === "error") {
-                  throw new Error(streamPart.errorText);
-                }
-              }
-            })
-          ),
-          onError: (error) => {
-            throw error;
-          }
-        });
-        break;
-      }
-      default: {
-        const exhaustiveCheck = streamProtocol;
-        throw new Error(`Unknown stream protocol: ${exhaustiveCheck}`);
-      }
-    }
-    if (onFinish) {
-      onFinish(prompt, result);
-    }
-    setAbortController(null);
-    return result;
-  } catch (err) {
-    if (err.name === "AbortError") {
-      setAbortController(null);
-      return null;
-    }
-    if (err instanceof Error) {
-      if (onError) {
-        onError(err);
-      }
-    }
-    setError(err);
-  } finally {
-    setLoading(false);
-  }
-}
-
-// src/ui/chat-store.ts
-var import_provider_utils5 = require("@ai-sdk/provider-utils");
-
-// src/util/serial-job-executor.ts
-var SerialJobExecutor = class {
-  constructor() {
-    this.queue = [];
-    this.isProcessing = false;
-  }
-  async processQueue() {
-    if (this.isProcessing) {
-      return;
-    }
-    this.isProcessing = true;
-    while (this.queue.length > 0) {
-      await this.queue[0]();
-      this.queue.shift();
-    }
-    this.isProcessing = false;
-  }
-  async run(job) {
-    return new Promise((resolve, reject) => {
-      this.queue.push(async () => {
-        try {
-          await job();
-          resolve();
-        } catch (error) {
-          reject(error);
-        }
-      });
-      void this.processQueue();
+    return fetchUIMessageStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body, id: chatId, messages },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
     });
   }
 };
 
-// src/ui/should-resubmit-messages.ts
-function shouldResubmitMessages({
-  originalMaxToolInvocationStep,
-  originalMessageCount,
-  maxSteps: maxSteps2,
-  messages
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  return (
-    // check if the feature is enabled:
-    maxSteps2 > 1 && // ensure there is a last message:
-    lastMessage != null && // ensure we actually have new steps (to prevent infinite loops in case of errors):
-    (messages.length > originalMessageCount || extractMaxToolInvocationStep(getToolInvocations(lastMessage)) !== originalMaxToolInvocationStep) && // check that next step is possible:
-    isAssistantMessageWithCompletedToolCalls(lastMessage) && // limit the number of automatic steps:
-    ((_a17 = extractMaxToolInvocationStep(getToolInvocations(lastMessage))) != null ? _a17 : 0) < maxSteps2
-  );
-}
-function isAssistantMessageWithCompletedToolCalls(message) {
-  if (message.role !== "assistant") {
-    return false;
+// src/ui/convert-file-list-to-file-ui-parts.ts
+async function convertFileListToFileUIParts(files) {
+  if (files == null) {
+    return [];
   }
-  const lastStepStartIndex = message.parts.reduce((lastIndex, part, index) => {
-    return part.type === "step-start" ? index : lastIndex;
-  }, -1);
-  const lastStepToolInvocations = message.parts.slice(lastStepStartIndex + 1).filter((part) => part.type === "tool-invocation");
-  return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
-}
-
-// src/ui/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
+  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
+    throw new Error("FileList is not supported in the current environment");
   }
-  invocationPart.toolInvocation = {
-    ...invocationPart.toolInvocation,
-    state: "result",
-    result
-  };
+  return Promise.all(
+    Array.from(files).map(async (file) => {
+      const { name: name17, type } = file;
+      const dataUrl = await new Promise((resolve, reject) => {
+        const reader = new FileReader();
+        reader.onload = (readerEvent) => {
+          var _a17;
+          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
+        };
+        reader.onerror = (error) => reject(error);
+        reader.readAsDataURL(file);
+      });
+      return {
+        type: "file",
+        mediaType: type,
+        filename: name17,
+        url: dataUrl
+      };
+    })
+  );
 }
 
-// src/ui/chat-store.ts
-var ChatStore = class {
+// src/ui/chat.ts
+var AbstractChat = class {
   constructor({
-
-
-    transport,
-    maxSteps
+    generateId: generateId3 = import_provider_utils5.generateId,
+    id = generateId3(),
+    transport = new DefaultChatTransport(),
+    maxSteps = 1,
     messageMetadataSchema,
-    dataPartSchemas
-
-    this.chats = new Map(
-      Object.entries(chats).map(([id, state]) => [
-        id,
-        {
-          messages: [...state.messages],
-          status: "ready",
-          activeResponse: void 0,
-          error: void 0,
-          jobExecutor: new SerialJobExecutor()
-        }
-      ])
-    );
-    this.maxSteps = maxSteps2;
-    this.transport = transport;
-    this.subscribers = /* @__PURE__ */ new Set();
-    this.generateId = generateId3 != null ? generateId3 : import_provider_utils5.generateId;
-    this.messageMetadataSchema = messageMetadataSchema;
-    this.dataPartSchemas = dataPartSchemas;
-  }
-  hasChat(id) {
-    return this.chats.has(id);
-  }
-  addChat(id, messages) {
-    this.chats.set(id, {
-      messages,
-      status: "ready",
-      jobExecutor: new SerialJobExecutor()
-    });
-  }
-  getChats() {
-    return Array.from(this.chats.entries());
-  }
-  get chatCount() {
-    return this.chats.size;
-  }
-  getStatus(id) {
-    return this.getChat(id).status;
-  }
-  setStatus({
-    id,
-    status,
-    error
-  }) {
-    const chat = this.getChat(id);
-    if (chat.status === status)
-      return;
-    chat.status = status;
-    chat.error = error;
-    this.emit({ type: "chat-status-changed", chatId: id, error });
-  }
-  getError(id) {
-    return this.getChat(id).error;
-  }
-  getMessages(id) {
-    return this.getChat(id).messages;
-  }
-  getLastMessage(id) {
-    const chat = this.getChat(id);
-    return chat.messages[chat.messages.length - 1];
-  }
-  subscribe(subscriber) {
-    this.subscribers.add(subscriber);
-    return () => this.subscribers.delete(subscriber);
-  }
-  setMessages({
-    id,
-    messages
-  }) {
-    this.getChat(id).messages = [...messages];
-    this.emit({ type: "chat-messages-changed", chatId: id });
-  }
-  removeAssistantResponse(id) {
-    const chat = this.getChat(id);
-    const lastMessage = chat.messages[chat.messages.length - 1];
-    if (lastMessage == null) {
-      throw new Error("Cannot remove assistant response from empty chat");
-    }
-    if (lastMessage.role !== "assistant") {
-      throw new Error("Last message is not an assistant message");
-    }
-    this.setMessages({ id, messages: chat.messages.slice(0, -1) });
-  }
-  async submitMessage({
-    chatId,
-    message,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    var _a17;
-    const chat = this.getChat(chatId);
-    const currentMessages = chat.messages;
-    await this.triggerRequest({
-      chatId,
-      messages: currentMessages.concat({
-        ...message,
-        id: (_a17 = message.id) != null ? _a17 : this.generateId()
-      }),
-      headers,
-      body,
-      requestType: "generate",
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resubmitLastUserMessage({
-    chatId,
-    headers,
-    body,
-    onError,
-    onToolCall,
-    onFinish
-  }) {
-    const messages = this.getChat(chatId).messages;
-    const messagesToSubmit = messages[messages.length - 1].role === "assistant" ? messages.slice(0, -1) : messages;
-    if (messagesToSubmit.length === 0) {
-      return;
-    }
-    return this.triggerRequest({
-      chatId,
-      requestType: "generate",
-      messages: messagesToSubmit,
-      headers,
-      body,
-      onError,
-      onToolCall,
-      onFinish
-    });
-  }
-  async resumeStream({
-    chatId,
-    headers,
-    body,
+    dataPartSchemas,
+    state,
     onError,
     onToolCall,
     onFinish
   }) {
-
-
-
-
-    messages
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    this.subscribers = /* @__PURE__ */ new Set();
+    this.activeResponse = void 0;
+    this.jobExecutor = new SerialJobExecutor();
+    this.removeAssistantResponse = () => {
+      const lastMessage = this.state.messages[this.state.messages.length - 1];
+      if (lastMessage == null) {
+        throw new Error("Cannot remove assistant response from empty chat");
+      }
+      if (lastMessage.role !== "assistant") {
+        throw new Error("Last message is not an assistant message");
+      }
+      this.state.popMessage();
+      this.emit({ type: "messages-changed" });
+    };
+    /**
+     * Append a user message to the chat list. This triggers the API call to fetch
+     * the assistant's response.
+     */
+    this.sendMessage = async (message, options = {}) => {
+      var _a17, _b;
+      let uiMessage;
+      if ("text" in message || "files" in message) {
+        const fileParts = Array.isArray(message.files) ? message.files : await convertFileListToFileUIParts(message.files);
+        uiMessage = {
+          parts: [
+            ...fileParts,
+            ..."text" in message && message.text != null ? [{ type: "text", text: message.text }] : []
+          ]
+        };
+      } else {
+        uiMessage = message;
+      }
+      this.state.pushMessage({
+        ...uiMessage,
+        id: (_a17 = uiMessage.id) != null ? _a17 : this.generateId(),
+        role: (_b = uiMessage.role) != null ? _b : "user"
       });
-      this.
-
+      this.emit({ type: "messages-changed" });
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Regenerate the last assistant message.
+     */
+    this.reload = async (options = {}) => {
+      if (this.lastMessage === void 0) {
         return;
       }
-
-
-
-
-
-
+      if (this.lastMessage.role === "assistant") {
+        this.state.popMessage();
+        this.emit({ type: "messages-changed" });
+      }
+      await this.triggerRequest({ requestType: "generate", ...options });
+    };
+    /**
+     * Resume an ongoing chat generation stream. This does not resume an aborted generation.
+     */
+    this.experimental_resume = async (options = {}) => {
+      await this.triggerRequest({ requestType: "resume", ...options });
+    };
+    this.addToolResult = async ({
+      toolCallId,
+      result
+    }) => {
+      this.jobExecutor.run(async () => {
+        updateToolCallResult({
+          messages: this.state.messages,
+          toolCallId,
+          toolResult: result
         });
+        this.messages = this.state.messages;
+        if (this.status === "submitted" || this.status === "streaming") {
+          return;
+        }
+        const lastMessage = this.lastMessage;
+        if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
+          this.triggerRequest({
+            requestType: "generate"
+          });
+        }
+      });
+    };
+    /**
+     * Abort the current request immediately, keep the generated tokens if any.
+     */
+    this.stop = async () => {
+      var _a17;
+      if (this.status !== "streaming" && this.status !== "submitted")
+        return;
+      if ((_a17 = this.activeResponse) == null ? void 0 : _a17.abortController) {
+        this.activeResponse.abortController.abort();
+        this.activeResponse.abortController = void 0;
       }
-    }
+    };
+    this.id = id;
+    this.maxSteps = maxSteps;
+    this.transport = transport;
+    this.generateId = generateId3;
+    this.messageMetadataSchema = messageMetadataSchema;
+    this.dataPartSchemas = dataPartSchemas;
+    this.state = state;
+    this.onError = onError;
+    this.onToolCall = onToolCall;
+    this.onFinish = onFinish;
   }
-
-
-
-
+  /**
+   * Hook status:
+   *
+   * - `submitted`: The message has been sent to the API and we're awaiting the start of the response stream.
+   * - `streaming`: The response is actively streaming in from the API, receiving chunks of data.
+   * - `ready`: The full response has been received and processed; a new user message can be submitted.
+   * - `error`: An error occurred during the API request, preventing successful completion.
+   */
+  get status() {
+    return this.state.status;
+  }
+  setStatus({
+    status,
+    error
+  }) {
+    if (this.status === status)
       return;
-
-
-
-
+    this.state.status = status;
+    this.state.error = error;
+    this.emit({ type: "status-changed" });
+  }
+  get error() {
+    return this.state.error;
+  }
+  get messages() {
+    return this.state.messages;
+  }
+  get lastMessage() {
+    return this.state.messages[this.state.messages.length - 1];
+  }
+  subscribe(subscriber) {
+    this.subscribers.add(subscriber);
+    return () => this.subscribers.delete(subscriber);
+  }
+  set messages(messages) {
+    this.state.messages = messages;
+    this.emit({ type: "messages-changed" });
   }
   emit(event) {
     for (const subscriber of this.subscribers) {
-      subscriber.
-    }
-  }
-  getChat(id) {
-    if (!this.hasChat(id)) {
-      throw new Error(`chat '${id}' not found`);
+      subscriber.onChange(event);
     }
-    return this.chats.get(id);
   }
   async triggerRequest({
-    chatId,
-    messages: chatMessages,
     requestType,
+    metadata,
     headers,
-    body
-    onError,
-    onToolCall,
-    onFinish
+    body
   }) {
-
-
-
-
-    const
-    const maxStep = extractMaxToolInvocationStep(
-      getToolInvocations(chatMessages[chatMessages.length - 1])
-    );
+    var _a17, _b;
+    this.setStatus({ status: "submitted", error: void 0 });
+    const messageCount = this.state.messages.length;
+    const lastMessage = this.lastMessage;
+    const maxStep = (_a17 = lastMessage == null ? void 0 : lastMessage.parts.filter((part) => part.type === "step-start").length) != null ? _a17 : 0;
     try {
       const activeResponse = {
        state: createStreamingUIMessageState({
-          lastMessage:
-          newMessageId:
+          lastMessage: this.state.snapshot(lastMessage),
+          newMessageId: this.generateId()
        }),
        abortController: new AbortController()
      };
-
-      const stream = await
-        chatId,
-        messages:
-
+      this.activeResponse = activeResponse;
+      const stream = await this.transport.submitMessages({
+        chatId: this.id,
+        messages: this.state.messages,
+        abortSignal: activeResponse.abortController.signal,
+        metadata,
        headers,
-
+        body,
        requestType
      });
      const runUpdateMessageJob = (job) => (
        // serialize the job execution to avoid race conditions:
-
+        this.jobExecutor.run(
          () => job({
            state: activeResponse.state,
            write: () => {
-
-
-              const
-
-
-
-
-
-
+              var _a18;
+              this.setStatus({ status: "streaming" });
+              const replaceLastMessage = activeResponse.state.message.id === ((_a18 = this.lastMessage) == null ? void 0 : _a18.id);
+              if (replaceLastMessage) {
+                this.state.replaceMessage(
+                  this.state.messages.length - 1,
+                  activeResponse.state.message
+                );
+              } else {
+                this.state.pushMessage(activeResponse.state.message);
+              }
+              this.emit({
+                type: "messages-changed"
              });
            }
          })
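The hunk above replaces the chat-id-keyed `ChatStore` with a single-chat `AbstractChat` plus a pluggable transport. `DefaultChatTransport` centralizes request construction, and its optional `prepareRequest` hook can override `body`, `headers`, or `credentials` per call; whatever it leaves `undefined` falls back to the merged defaults shown in `submitMessages`. A hypothetical wiring sketch (route, header, and extra body field are placeholders):

```js
const { DefaultChatTransport } = require("ai");

const transport = new DefaultChatTransport({
  api: "/api/chat", // also the constructor default shown above
  headers: { Authorization: "Bearer <token>" }, // hypothetical static header
  prepareRequest: ({ id, messages, body }) => ({
    // Returning a body overrides the default { ...body, id, messages } payload:
    body: { ...body, id, messages, sessionId: "demo-session" }, // hypothetical field
  }),
});
// AbstractChat calls transport.submitMessages(...) internally; per the
// constructor above, `new DefaultChatTransport()` is also the default
// transport when none is passed in.
```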
@@ -1950,137 +1819,67 @@ var ChatStore = class
       await consumeStream({
         stream: processUIMessageStream({
           stream,
-          onToolCall,
-          messageMetadataSchema:
-          dataPartSchemas:
+          onToolCall: this.onToolCall,
+          messageMetadataSchema: this.messageMetadataSchema,
+          dataPartSchemas: this.dataPartSchemas,
           runUpdateMessageJob
         }),
         onError: (error) => {
           throw error;
         }
       });
-      onFinish == null ? void 0 :
-      this.setStatus({
+      (_b = this.onFinish) == null ? void 0 : _b.call(this, { message: activeResponse.state.message });
+      this.setStatus({ status: "ready" });
     } catch (err) {
+      console.error(err);
       if (err.name === "AbortError") {
-        this.setStatus({
+        this.setStatus({ status: "ready" });
         return null;
       }
-      if (onError && err instanceof Error) {
-        onError(err);
+      if (this.onError && err instanceof Error) {
+        this.onError(err);
       }
-      this.setStatus({
+      this.setStatus({ status: "error", error: err });
     } finally {
-
+      this.activeResponse = void 0;
     }
-    const currentMessages = self.getMessages(chatId);
     if (shouldResubmitMessages({
       originalMaxToolInvocationStep: maxStep,
       originalMessageCount: messageCount,
-      maxSteps:
-      messages:
+      maxSteps: this.maxSteps,
+      messages: this.state.messages
     })) {
-      await
-        chatId,
+      await this.triggerRequest({
        requestType,
-
-        onToolCall,
-        onFinish,
+        metadata,
        headers,
-        body
-        messages: currentMessages
+        body
      });
    }
  }
};
-
-
-var DefaultChatTransport = class {
-  constructor({
-    api,
-    credentials,
-    headers,
-    body,
-    streamProtocol,
-    fetch: fetch2,
-    prepareRequestBody
-  }) {
-    this.api = api;
-    this.credentials = credentials;
-    this.headers = headers;
-    this.body = body;
-    this.streamProtocol = streamProtocol;
-    this.fetch = fetch2;
-    this.prepareRequestBody = prepareRequestBody;
-  }
-  submitMessages({
-    chatId,
-    messages,
-    abortController,
-    body,
-    headers,
-    requestType
-  }) {
-    var _a17, _b;
-    return fetchUIMessageStream({
-      api: this.api,
-      headers: {
-        ...this.headers,
-        ...headers
-      },
-      body: (_b = (_a17 = this.prepareRequestBody) == null ? void 0 : _a17.call(this, {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      })) != null ? _b : {
-        chatId,
-        messages,
-        ...this.body,
-        ...body
-      },
-      streamProtocol: this.streamProtocol,
-      credentials: this.credentials,
-      abortController: () => abortController,
-      fetch: this.fetch,
-      requestType
-    });
-  }
-};
-
-// src/ui/convert-file-list-to-file-ui-parts.ts
-async function convertFileListToFileUIParts(files) {
-  if (files == null) {
-    return [];
-  }
-  if (!globalThis.FileList || !(files instanceof globalThis.FileList)) {
-    throw new Error("FileList is not supported in the current environment");
-  }
-  return Promise.all(
-    Array.from(files).map(async (file) => {
-      const { name: name17, type } = file;
-      const dataUrl = await new Promise((resolve, reject) => {
-        const reader = new FileReader();
-        reader.onload = (readerEvent) => {
-          var _a17;
-          resolve((_a17 = readerEvent.target) == null ? void 0 : _a17.result);
-        };
-        reader.onerror = (error) => reject(error);
-        reader.readAsDataURL(file);
-      });
-      return {
-        type: "file",
-        mediaType: type,
-        filename: name17,
-        url: dataUrl
-      };
-    })
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
   );
+  if (invocationPart == null) {
+    return;
+  }
+  invocationPart.toolInvocation = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
+  };
 }
 
 // src/ui/convert-to-model-messages.ts
 function convertToModelMessages(messages, options) {
-  var _a17
+  var _a17;
   const tools = (_a17 = options == null ? void 0 : options.tools) != null ? _a17 : {};
   const modelMessages = [];
   for (const message of messages) {
@@ -2111,6 +1910,9 @@ function convertToModelMessages(messages, options) {
       case "assistant": {
         if (message.parts != null) {
           let processBlock2 = function() {
+            if (block.length === 0) {
+              return;
+            }
             const content = [];
             for (const part of block) {
               switch (part.type) {
@@ -2185,33 +1987,20 @@ function convertToModelMessages(messages, options) {
             });
           }
           block = [];
-          blockHasToolInvocations = false;
-          currentStep++;
         };
         var processBlock = processBlock2;
-        let currentStep = 0;
-        let blockHasToolInvocations = false;
         let block = [];
         for (const part of message.parts) {
           switch (part.type) {
-            case "text":
-
-              processBlock2();
-            }
-              block.push(part);
-              break;
-            }
+            case "text":
+            case "reasoning":
             case "file":
-            case "
+            case "tool-invocation": {
               block.push(part);
               break;
             }
-            case "
-
-              processBlock2();
-              }
-              block.push(part);
-              blockHasToolInvocations = true;
+            case "step-start": {
+              processBlock2();
               break;
             }
           }
@@ -2234,45 +2023,166 @@ function convertToModelMessages(messages, options) {
 }
 var convertToCoreMessages = convertToModelMessages;
 
-// src/ui/
-
-
+// src/ui/transform-text-to-ui-message-stream.ts
+function transformTextToUiMessageStream({
+  stream
+}) {
+  return stream.pipeThrough(
+    new TransformStream({
+      start(controller) {
+        controller.enqueue({ type: "start" });
+        controller.enqueue({ type: "start-step" });
+      },
+      async transform(part, controller) {
+        controller.enqueue({ type: "text", text: part });
+      },
+      async flush(controller) {
+        controller.enqueue({ type: "finish-step" });
+        controller.enqueue({ type: "finish" });
+      }
+    })
+  );
+}
+
+// src/ui/text-stream-chat-transport.ts
+var getOriginalFetch3 = () => fetch;
+async function fetchTextStream({
   api,
-
-  streamProtocol = "ui-message",
+  body,
   credentials,
   headers,
-
-
-
-  dataPartSchemas,
-  messageMetadataSchema,
-  maxSteps: maxSteps2 = 1,
-  chats
+  abortSignal,
+  fetch: fetch2 = getOriginalFetch3(),
+  requestType = "generate"
 }) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  var _a17;
+  const response = requestType === "resume" ? await fetch2(`${api}?chatId=${body.chatId}`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: abortSignal,
+    credentials
+  }) : await fetch2(api, {
+    method: "POST",
+    body: JSON.stringify(body),
+    headers: {
+      "Content-Type": "application/json",
+      ...headers
+    },
+    signal: abortSignal,
+    credentials
+  });
+  if (!response.ok) {
+    throw new Error(
+      (_a17 = await response.text()) != null ? _a17 : "Failed to fetch the chat response."
+    );
+  }
+  if (!response.body) {
+    throw new Error("The response body is empty.");
+  }
+  return transformTextToUiMessageStream({
+    stream: response.body.pipeThrough(new TextDecoderStream())
+  });
+}
+var TextStreamChatTransport = class {
+  constructor({
+    api,
+    credentials,
+    headers,
+    body,
+    fetch: fetch2,
+    prepareRequest
+  }) {
+    this.api = api;
+    this.credentials = credentials;
+    this.headers = headers;
+    this.body = body;
+    this.fetch = fetch2;
+    this.prepareRequest = prepareRequest;
+  }
+  submitMessages({
+    chatId,
+    messages,
+    abortSignal,
+    metadata,
+    headers,
+    body,
+    requestType
+  }) {
+    var _a17, _b;
+    const preparedRequest = (_a17 = this.prepareRequest) == null ? void 0 : _a17.call(this, {
+      id: chatId,
+      messages,
+      body: { ...this.body, ...body },
+      headers: { ...this.headers, ...headers },
+      credentials: this.credentials,
+      requestMetadata: metadata
+    });
+    return fetchTextStream({
+      api: this.api,
+      body: (preparedRequest == null ? void 0 : preparedRequest.body) !== void 0 ? preparedRequest.body : { ...this.body, ...body },
+      headers: (preparedRequest == null ? void 0 : preparedRequest.headers) !== void 0 ? preparedRequest.headers : { ...this.headers, ...headers },
+      credentials: (_b = preparedRequest == null ? void 0 : preparedRequest.credentials) != null ? _b : this.credentials,
+      abortSignal,
+      fetch: this.fetch,
+      requestType
+    });
+  }
+};
+
+// src/ui-message-stream/handle-ui-message-stream-finish.ts
+function handleUIMessageStreamFinish({
+  newMessageId,
+  originalMessages = [],
+  onFinish,
+  stream
+}) {
+  if (onFinish == null) {
+    return stream;
+  }
+  const lastMessage = originalMessages[originalMessages.length - 1];
+  const isContinuation = (lastMessage == null ? void 0 : lastMessage.role) === "assistant";
+  const messageId = isContinuation ? lastMessage.id : newMessageId;
+  const state = createStreamingUIMessageState({
+    lastMessage: structuredClone(lastMessage),
+    newMessageId: messageId
   });
+  const runUpdateMessageJob = async (job) => {
+    await job({ state, write: () => {
+    } });
+  };
+  return processUIMessageStream({
+    stream,
+    runUpdateMessageJob
+  }).pipeThrough(
+    new TransformStream({
+      transform(chunk, controller) {
+        controller.enqueue(chunk);
+      },
+      flush() {
+        const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
+        onFinish({
+          isContinuation: isContinuation2,
+          responseMessage: state.message,
+          messages: [
+            ...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
+            state.message
+          ]
+        });
+      }
+    })
+  );
 }
 
 // src/ui-message-stream/create-ui-message-stream.ts
 function createUIMessageStream({
   execute,
-  onError = () => "An error occurred."
+  onError = () => "An error occurred.",
   // mask error messages for safety by default
+  originalMessages,
+  onFinish
 }) {
   let controller;
   const ongoingStreamPromises = [];
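
For orientation, a minimal usage sketch of the new TextStreamChatTransport (not taken from the diff; the /api/chat endpoint is an assumption, and the hook import follows the v5 @ai-sdk/react package):

import { useChat } from '@ai-sdk/react';
import { TextStreamChatTransport } from 'ai';

// The transport fetches a plain text stream and converts it into
// start/text/finish UI message parts via transformTextToUiMessageStream.
const chat = useChat({
  transport: new TextStreamChatTransport({ api: '/api/chat' }),
});
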
@@ -2289,25 +2199,27 @@ function createUIMessageStream({
     }
     try {
       const result = execute({
-
-
-
-
-
-        (
-
-
-
-
-
-
-
-
-
-
+        writer: {
+          write(part) {
+            safeEnqueue(part);
+          },
+          merge(streamArg) {
+            ongoingStreamPromises.push(
+              (async () => {
+                const reader = streamArg.getReader();
+                while (true) {
+                  const { done, value } = await reader.read();
+                  if (done)
+                    break;
+                  safeEnqueue(value);
+                }
+              })().catch((error) => {
+                safeEnqueue({ type: "error", errorText: onError(error) });
+              })
+            );
+          },
+          onError
+        }
       });
       if (result) {
         ongoingStreamPromises.push(
@@ -2331,7 +2243,12 @@ function createUIMessageStream({
     } catch (error) {
     }
   });
-  return
+  return handleUIMessageStreamFinish({
+    stream,
+    newMessageId: "",
+    originalMessages,
+    onFinish
+  });
 }
 
 // src/ui-message-stream/ui-message-stream-headers.ts
@@ -2396,6 +2313,32 @@ function pipeUIMessageStreamToResponse({
   });
 }
 
+// src/util/cosine-similarity.ts
+function cosineSimilarity(vector1, vector2) {
+  if (vector1.length !== vector2.length) {
+    throw new InvalidArgumentError({
+      parameter: "vector1,vector2",
+      value: { vector1Length: vector1.length, vector2Length: vector2.length },
+      message: `Vectors must have the same length`
+    });
+  }
+  const n = vector1.length;
+  if (n === 0) {
+    return 0;
+  }
+  let magnitudeSquared1 = 0;
+  let magnitudeSquared2 = 0;
+  let dotProduct = 0;
+  for (let i = 0; i < n; i++) {
+    const value1 = vector1[i];
+    const value2 = vector2[i];
+    magnitudeSquared1 += value1 * value1;
+    magnitudeSquared2 += value2 * value2;
+    dotProduct += value1 * value2;
+  }
+  return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
+}
+
 // src/util/data-url.ts
 function getTextFromDataUrl(dataUrl) {
   const [header, base64Content] = dataUrl.split(",");
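
The relocated cosineSimilarity computes dotProduct / (|v1| * |v2|), returns 0 for empty or zero-magnitude vectors, and throws InvalidArgumentError on a length mismatch. A quick sketch using the public export:

import { cosineSimilarity } from 'ai';

cosineSimilarity([1, 2, 3], [2, 4, 6]); // 1 (same direction)
cosineSimilarity([1, 0], [0, 1]);       // 0 (orthogonal)
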
@@ -2445,34 +2388,8 @@ function isDeepEqualData(obj1, obj2) {
   return true;
 }
 
-// src/util/cosine-similarity.ts
-function cosineSimilarity(vector1, vector2) {
-  if (vector1.length !== vector2.length) {
-    throw new InvalidArgumentError({
-      parameter: "vector1,vector2",
-      value: { vector1Length: vector1.length, vector2Length: vector2.length },
-      message: `Vectors must have the same length`
-    });
-  }
-  const n = vector1.length;
-  if (n === 0) {
-    return 0;
-  }
-  let magnitudeSquared1 = 0;
-  let magnitudeSquared2 = 0;
-  let dotProduct = 0;
-  for (let i = 0; i < n; i++) {
-    const value1 = vector1[i];
-    const value2 = vector2[i];
-    magnitudeSquared1 += value1 * value1;
-    magnitudeSquared2 += value2 * value2;
-    dotProduct += value1 * value2;
-  }
-  return magnitudeSquared1 === 0 || magnitudeSquared2 === 0 ? 0 : dotProduct / (Math.sqrt(magnitudeSquared1) * Math.sqrt(magnitudeSquared2));
-}
-
 // src/util/simulate-readable-stream.ts
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
 function simulateReadableStream({
   chunks,
   initialDelayInMs = 0,
@@ -2480,7 +2397,7 @@ function simulateReadableStream({
   _internal
 }) {
   var _a17;
-  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 :
+  const delay2 = (_a17 = _internal == null ? void 0 : _internal.delay) != null ? _a17 : import_provider_utils6.delay;
   let index = 0;
   return new ReadableStream({
     async pull(controller) {
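
simulateReadableStream now takes its delay helper from @ai-sdk/provider-utils; the public signature is unchanged. A sketch (chunkDelayInMs is part of the existing options, not visible in this hunk):

import { simulateReadableStream } from 'ai';

const stream = simulateReadableStream({
  chunks: ['Hello', ' ', 'world'],
  initialDelayInMs: 100, // delay before the first chunk
  chunkDelayInMs: 50     // delay between subsequent chunks
});
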
@@ -2496,7 +2413,7 @@ function simulateReadableStream({
 
 // src/util/retry-with-exponential-backoff.ts
 var import_provider17 = require("@ai-sdk/provider");
-var
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
 var retryWithExponentialBackoff = ({
   maxRetries = 2,
   initialDelayInMs = 2e3,
@@ -2514,13 +2431,13 @@ async function _retryWithExponentialBackoff(f, {
   try {
     return await f();
   } catch (error) {
-    if ((0,
+    if ((0, import_provider_utils7.isAbortError)(error)) {
       throw error;
     }
     if (maxRetries === 0) {
       throw error;
     }
-    const errorMessage = (0,
+    const errorMessage = (0, import_provider_utils7.getErrorMessage)(error);
     const newErrors = [...errors, error];
     const tryNumber = newErrors.length;
     if (tryNumber > maxRetries) {
@@ -2531,7 +2448,7 @@ async function _retryWithExponentialBackoff(f, {
       });
     }
     if (error instanceof Error && import_provider17.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
-      await (0,
+      await (0, import_provider_utils7.delay)(delayInMs);
       return _retryWithExponentialBackoff(
         f,
         { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
@@ -3090,7 +3007,7 @@ var DefaultEmbedManyResult = class {
 };
 
 // src/util/detect-media-type.ts
-var
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
 var imageMediaTypeSignatures = [
   {
     mediaType: "image/gif",
@@ -3197,7 +3114,7 @@ var audioMediaTypeSignatures = [
   }
 ];
 var stripID3 = (data) => {
-  const bytes = typeof data === "string" ? (0,
+  const bytes = typeof data === "string" ? (0, import_provider_utils8.convertBase64ToUint8Array)(data) : data;
   const id3Size = (bytes[6] & 127) << 21 | (bytes[7] & 127) << 14 | (bytes[8] & 127) << 7 | bytes[9] & 127;
   return bytes.slice(id3Size + 10);
 };
@@ -3223,7 +3140,7 @@ function detectMediaType({
 }
 
 // core/generate-text/generated-file.ts
-var
+var import_provider_utils9 = require("@ai-sdk/provider-utils");
 var DefaultGeneratedFile = class {
   constructor({
     data,
@@ -3237,14 +3154,14 @@ var DefaultGeneratedFile = class {
   // lazy conversion with caching to avoid unnecessary conversion overhead:
   get base64() {
     if (this.base64Data == null) {
-      this.base64Data = (0,
+      this.base64Data = (0, import_provider_utils9.convertUint8ArrayToBase64)(this.uint8ArrayData);
     }
     return this.base64Data;
   }
   // lazy conversion with caching to avoid unnecessary conversion overhead:
   get uint8Array() {
     if (this.uint8ArrayData == null) {
-      this.uint8ArrayData = (0,
+      this.uint8ArrayData = (0, import_provider_utils9.convertBase64ToUint8Array)(this.base64Data);
     }
     return this.uint8ArrayData;
   }
@@ -3359,8 +3276,8 @@ async function invokeModelMaxImagesPerCall(model) {
 }
 
 // core/generate-object/generate-object.ts
-var
-var
+var import_provider22 = require("@ai-sdk/provider");
+var import_provider_utils14 = require("@ai-sdk/provider-utils");
 
 // core/generate-text/extract-content-text.ts
 function extractContentText(content) {
@@ -3374,7 +3291,7 @@ function extractContentText(content) {
 }
 
 // core/prompt/convert-to-language-model-prompt.ts
-var
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 
 // src/util/download.ts
 async function download({ url }) {
@@ -3403,7 +3320,7 @@ async function download({ url }) {
 
 // core/prompt/data-content.ts
 var import_provider18 = require("@ai-sdk/provider");
-var
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
 var import_zod2 = require("zod");
 
 // core/prompt/split-data-url.ts
@@ -3463,13 +3380,22 @@ function convertToLanguageModelV2DataContent(content) {
   }
   return { data: content, mediaType: void 0 };
 }
+function convertDataContentToBase64String(content) {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (content instanceof ArrayBuffer) {
+    return (0, import_provider_utils10.convertUint8ArrayToBase64)(new Uint8Array(content));
+  }
+  return (0, import_provider_utils10.convertUint8ArrayToBase64)(content);
+}
 function convertDataContentToUint8Array(content) {
   if (content instanceof Uint8Array) {
     return content;
   }
   if (typeof content === "string") {
     try {
-      return (0,
+      return (0, import_provider_utils10.convertBase64ToUint8Array)(content);
     } catch (error) {
       throw new InvalidDataContentError({
         message: "Invalid data content. Content string is not a base64-encoded media.",
@@ -3620,7 +3546,7 @@ async function downloadAssets(messages, downloadImplementation, supportedUrls) {
     }
     return { mediaType, data };
   }).filter(
-    (part) => part.data instanceof URL && part.mediaType != null && !(0,
+    (part) => part.data instanceof URL && part.mediaType != null && !(0, import_provider_utils11.isUrlSupported)({
       url: part.data.toString(),
       mediaType: part.mediaType,
       supportedUrls
@@ -3789,9 +3715,22 @@ function prepareCallSettings({
   };
 }
 
+// core/prompt/resolve-language-model.ts
+var import_gateway = require("@ai-sdk/gateway");
+var GLOBAL_DEFAULT_PROVIDER = Symbol(
+  "vercel.ai.global.defaultProvider"
+);
+function resolveLanguageModel(model) {
+  if (typeof model !== "string") {
+    return model;
+  }
+  const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+  return (globalProvider != null ? globalProvider : import_gateway.gateway).languageModel(model);
+}
+
 // core/prompt/standardize-prompt.ts
 var import_provider19 = require("@ai-sdk/provider");
-var
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 var import_zod8 = require("zod");
 
 // core/prompt/message.ts
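
resolveLanguageModel is what lets plain string model ids be passed wherever a model is accepted: the string resolves through a provider stored on globalThis under the exported GLOBAL_DEFAULT_PROVIDER symbol, falling back to the Vercel AI Gateway. A hedged sketch of overriding that default (the openai provider here is illustrative):

import { openai } from '@ai-sdk/openai';
import { GLOBAL_DEFAULT_PROVIDER } from 'ai';

// Any provider exposing languageModel(id) should work here.
globalThis[GLOBAL_DEFAULT_PROVIDER] = openai;
// generateText({ model: 'gpt-4o', ... }) now resolves via openai.languageModel('gpt-4o').
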
@@ -3963,7 +3902,7 @@ async function standardizePrompt(prompt) {
       message: "messages must not be empty"
     });
   }
-  const validationResult = await (0,
+  const validationResult = await (0, import_provider_utils12.safeValidateTypes)({
     value: messages,
     schema: import_zod8.z.array(modelMessageSchema)
   });
@@ -3980,9 +3919,38 @@ async function standardizePrompt(prompt) {
   };
 }
 
-// core/
+// core/prompt/wrap-gateway-error.ts
+var import_gateway2 = require("@ai-sdk/gateway");
 var import_provider20 = require("@ai-sdk/provider");
-
+function wrapGatewayError(error) {
+  if (import_gateway2.GatewayAuthenticationError.isInstance(error) || import_gateway2.GatewayModelNotFoundError.isInstance(error)) {
+    return new import_provider20.AISDKError({
+      name: "GatewayError",
+      message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+      cause: error
+    });
+  }
+  return error;
+}
+
+// core/telemetry/stringify-for-telemetry.ts
+function stringifyForTelemetry(prompt) {
+  return JSON.stringify(
+    prompt.map((message) => ({
+      ...message,
+      content: typeof message.content === "string" ? message.content : message.content.map(
+        (part) => part.type === "file" ? {
+          ...part,
+          data: part.data instanceof Uint8Array ? convertDataContentToBase64String(part.data) : part.data
+        } : part
+      )
+    }))
+  );
+}
+
+// core/generate-object/output-strategy.ts
+var import_provider21 = require("@ai-sdk/provider");
+var import_provider_utils13 = require("@ai-sdk/provider-utils");
 
 // src/util/async-iterable-stream.ts
 function createAsyncIterableStream(source) {
@@ -4019,7 +3987,7 @@ var noSchemaOutputStrategy = {
     } : { success: true, value };
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
      functionality: "element streams in no-schema mode"
    });
  }
@@ -4038,10 +4006,10 @@ var objectOutputStrategy = (schema) => ({
     };
   },
   async validateFinalResult(value) {
-    return (0,
+    return (0, import_provider_utils13.safeValidateTypes)({ value, schema });
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
      functionality: "element streams in object mode"
    });
  }
@@ -4069,10 +4037,10 @@ var arrayOutputStrategy = (schema) => {
     isFinalDelta
   }) {
     var _a17;
-    if (!(0,
+    if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
       return {
         success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: "value must be an object that contains an array of elements"
        })
@@ -4082,7 +4050,7 @@ var arrayOutputStrategy = (schema) => {
     const resultArray = [];
     for (let i = 0; i < inputArray.length; i++) {
       const element = inputArray[i];
-      const result = await (0,
+      const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
       if (i === inputArray.length - 1 && !isFinalDelta) {
         continue;
       }
@@ -4112,10 +4080,10 @@ var arrayOutputStrategy = (schema) => {
     };
   },
   async validateFinalResult(value) {
-    if (!(0,
+    if (!(0, import_provider21.isJSONObject)(value) || !(0, import_provider21.isJSONArray)(value.elements)) {
       return {
         success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: "value must be an object that contains an array of elements"
        })
@@ -4123,7 +4091,7 @@ var arrayOutputStrategy = (schema) => {
     }
     const inputArray = value.elements;
     for (const element of inputArray) {
-      const result = await (0,
+      const result = await (0, import_provider_utils13.safeValidateTypes)({ value: element, schema });
       if (!result.success) {
         return result;
       }
@@ -4178,10 +4146,10 @@ var enumOutputStrategy = (enumValues) => {
     additionalProperties: false
   },
   async validateFinalResult(value) {
-    if (!(0,
+    if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
       return {
         success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: 'value must be an object that contains a string in the "result" property.'
        })
@@ -4190,17 +4158,17 @@ var enumOutputStrategy = (enumValues) => {
     const result = value.result;
     return enumValues.includes(result) ? { success: true, value: result } : {
       success: false,
-      error: new
+      error: new import_provider21.TypeValidationError({
        value,
        cause: "value must be a string in the enum"
      })
    };
  },
  async validatePartialResult({ value, textDelta }) {
-    if (!(0,
+    if (!(0, import_provider21.isJSONObject)(value) || typeof value.result !== "string") {
      return {
        success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: 'value must be an object that contains a string in the "result" property.'
        })
@@ -4213,7 +4181,7 @@ var enumOutputStrategy = (enumValues) => {
     if (value.result.length === 0 || possibleEnumValues.length === 0) {
       return {
         success: false,
-        error: new
+        error: new import_provider21.TypeValidationError({
          value,
          cause: "value must be a string in the enum"
        })
@@ -4228,7 +4196,7 @@ var enumOutputStrategy = (enumValues) => {
     };
   },
   createElementStream() {
-    throw new
+    throw new import_provider21.UnsupportedFunctionalityError({
      functionality: "element streams in enum mode"
    });
  }
@@ -4241,9 +4209,9 @@ function getOutputStrategy({
 }) {
   switch (output) {
     case "object":
-      return objectOutputStrategy((0,
+      return objectOutputStrategy((0, import_provider_utils13.asSchema)(schema));
     case "array":
-      return arrayOutputStrategy((0,
+      return arrayOutputStrategy((0, import_provider_utils13.asSchema)(schema));
     case "enum":
       return enumOutputStrategy(enumValues);
     case "no-schema":
@@ -4374,10 +4342,10 @@ function validateObjectGenerationInput({
 }
 
 // core/generate-object/generate-object.ts
-var originalGenerateId = (0,
+var originalGenerateId = (0, import_provider_utils14.createIdGenerator)({ prefix: "aiobj", size: 24 });
 async function generateObject(options) {
   const {
-    model,
+    model: modelArg,
     output = "object",
     system,
     prompt,
@@ -4394,6 +4362,7 @@ async function generateObject(options) {
     } = {},
     ...settings
   } = options;
+  const model = resolveLanguageModel(modelArg);
   const enumValues = "enum" in options ? options.enum : void 0;
   const {
     schema: inputSchema,
@@ -4421,208 +4390,212 @@ async function generateObject(options) {
     settings: { ...callSettings, maxRetries }
   });
   const tracer = getTracer(telemetry);
-
-
-
-
-
-
-
-
-
-        ...baseTelemetryAttributes,
-        // specific settings that only make sense on the outer level:
-        "ai.prompt": {
-          input: () => JSON.stringify({ system, prompt, messages })
-        },
-        "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
-        "ai.schema.name": schemaName,
-        "ai.schema.description": schemaDescription,
-        "ai.settings.output": outputStrategy.type
-      }
-    }),
-    tracer,
-    fn: async (span) => {
-      var _a17;
-      let result;
-      let finishReason;
-      let usage;
-      let warnings;
-      let response;
-      let request;
-      let resultProviderMetadata;
-      const standardizedPrompt = await standardizePrompt({
-        system,
-        prompt,
-        messages
-      });
-      const promptMessages = await convertToLanguageModelPrompt({
-        prompt: standardizedPrompt,
-        supportedUrls: await model.supportedUrls
-      });
-      const generateResult = await retry(
-        () => recordSpan({
-          name: "ai.generateObject.doGenerate",
-          attributes: selectTelemetryAttributes({
-            telemetry,
-            attributes: {
-              ...assembleOperationName({
-                operationId: "ai.generateObject.doGenerate",
-                telemetry
-              }),
-              ...baseTelemetryAttributes,
-              "ai.prompt.messages": {
-                input: () => JSON.stringify(promptMessages)
-              },
-              // standardized gen-ai llm span attributes:
-              "gen_ai.system": model.provider,
-              "gen_ai.request.model": model.modelId,
-              "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
-              "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
-              "gen_ai.request.presence_penalty": callSettings.presencePenalty,
-              "gen_ai.request.temperature": callSettings.temperature,
-              "gen_ai.request.top_k": callSettings.topK,
-              "gen_ai.request.top_p": callSettings.topP
-            }
+  try {
+    return await recordSpan({
+      name: "ai.generateObject",
+      attributes: selectTelemetryAttributes({
+        telemetry,
+        attributes: {
+          ...assembleOperationName({
+            operationId: "ai.generateObject",
+            telemetry
           }),
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          ...baseTelemetryAttributes,
+          // specific settings that only make sense on the outer level:
+          "ai.prompt": {
+            input: () => JSON.stringify({ system, prompt, messages })
+          },
+          "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+          "ai.schema.name": schemaName,
+          "ai.schema.description": schemaDescription,
+          "ai.settings.output": outputStrategy.type
+        }
+      }),
+      tracer,
+      fn: async (span) => {
+        var _a17;
+        let result;
+        let finishReason;
+        let usage;
+        let warnings;
+        let response;
+        let request;
+        let resultProviderMetadata;
+        const standardizedPrompt = await standardizePrompt({
+          system,
+          prompt,
+          messages
+        });
+        const promptMessages = await convertToLanguageModelPrompt({
+          prompt: standardizedPrompt,
+          supportedUrls: await model.supportedUrls
+        });
+        const generateResult = await retry(
+          () => recordSpan({
+            name: "ai.generateObject.doGenerate",
+            attributes: selectTelemetryAttributes({
+              telemetry,
+              attributes: {
+                ...assembleOperationName({
+                  operationId: "ai.generateObject.doGenerate",
+                  telemetry
+                }),
+                ...baseTelemetryAttributes,
+                "ai.prompt.messages": {
+                  input: () => stringifyForTelemetry(promptMessages)
+                },
+                // standardized gen-ai llm span attributes:
+                "gen_ai.system": model.provider,
+                "gen_ai.request.model": model.modelId,
+                "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+                "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+                "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+                "gen_ai.request.temperature": callSettings.temperature,
+                "gen_ai.request.top_k": callSettings.topK,
+                "gen_ai.request.top_p": callSettings.topP
+              }
+            }),
+            tracer,
+            fn: async (span2) => {
+              var _a18, _b, _c, _d, _e, _f, _g, _h;
+              const result2 = await model.doGenerate({
+                responseFormat: {
+                  type: "json",
+                  schema: outputStrategy.jsonSchema,
+                  name: schemaName,
+                  description: schemaDescription
+                },
+                ...prepareCallSettings(settings),
+                prompt: promptMessages,
+                providerOptions,
+                abortSignal,
+                headers
              });
+              const responseData = {
+                id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+                timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+                modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+                headers: (_g = result2.response) == null ? void 0 : _g.headers,
+                body: (_h = result2.response) == null ? void 0 : _h.body
+              };
+              const text2 = extractContentText(result2.content);
+              if (text2 === void 0) {
+                throw new NoObjectGeneratedError({
+                  message: "No object generated: the model did not return a response.",
+                  response: responseData,
+                  usage: result2.usage,
+                  finishReason: result2.finishReason
+                });
+              }
+              span2.setAttributes(
+                selectTelemetryAttributes({
+                  telemetry,
+                  attributes: {
+                    "ai.response.finishReason": result2.finishReason,
+                    "ai.response.object": { output: () => text2 },
+                    "ai.response.id": responseData.id,
+                    "ai.response.model": responseData.modelId,
+                    "ai.response.timestamp": responseData.timestamp.toISOString(),
+                    // TODO rename telemetry attributes to inputTokens and outputTokens
+                    "ai.usage.promptTokens": result2.usage.inputTokens,
+                    "ai.usage.completionTokens": result2.usage.outputTokens,
+                    // standardized gen-ai llm span attributes:
+                    "gen_ai.response.finish_reasons": [result2.finishReason],
+                    "gen_ai.response.id": responseData.id,
+                    "gen_ai.response.model": responseData.modelId,
+                    "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+                    "gen_ai.usage.output_tokens": result2.usage.outputTokens
+                  }
+                })
+              );
+              return { ...result2, objectText: text2, responseData };
            }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      );
-      return { ...result2, objectText: text2, responseData };
+          })
+        );
+        result = generateResult.objectText;
+        finishReason = generateResult.finishReason;
+        usage = generateResult.usage;
+        warnings = generateResult.warnings;
+        resultProviderMetadata = generateResult.providerMetadata;
+        request = (_a17 = generateResult.request) != null ? _a17 : {};
+        response = generateResult.responseData;
+        async function processResult(result2) {
+          const parseResult = await (0, import_provider_utils14.safeParseJSON)({ text: result2 });
+          if (!parseResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: could not parse the response.",
+              cause: parseResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
          }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        usage,
-        finishReason
-      });
-    }
-    const validationResult = await outputStrategy.validateFinalResult(
-      parseResult.value,
-      {
-        text: result2,
-        response,
-        usage
+          const validationResult = await outputStrategy.validateFinalResult(
+            parseResult.value,
+            {
+              text: result2,
+              response,
+              usage
+            }
+          );
+          if (!validationResult.success) {
+            throw new NoObjectGeneratedError({
+              message: "No object generated: response did not match schema.",
+              cause: validationResult.error,
+              text: result2,
+              response,
+              usage,
+              finishReason
+            });
          }
-
-    if (!validationResult.success) {
-      throw new NoObjectGeneratedError({
-        message: "No object generated: response did not match schema.",
-        cause: validationResult.error,
-        text: result2,
-        response,
-        usage,
-        finishReason
-      });
+          return validationResult.value;
        }
-
-
-
-
-
-
-
-
-
-
-
-
+        let object2;
+        try {
+          object2 = await processResult(result);
+        } catch (error) {
+          if (repairText != null && NoObjectGeneratedError.isInstance(error) && (import_provider22.JSONParseError.isInstance(error.cause) || import_provider22.TypeValidationError.isInstance(error.cause))) {
+            const repairedText = await repairText({
+              text: result,
+              error: error.cause
+            });
+            if (repairedText === null) {
+              throw error;
+            }
+            object2 = await processResult(repairedText);
+          } else {
            throw error;
          }
-          object2 = await processResult(repairedText);
-        } else {
-          throw error;
        }
+        span.setAttributes(
+          selectTelemetryAttributes({
+            telemetry,
+            attributes: {
+              "ai.response.finishReason": finishReason,
+              "ai.response.object": {
+                output: () => JSON.stringify(object2)
+              },
+              // TODO rename telemetry attributes to inputTokens and outputTokens
+              "ai.usage.promptTokens": usage.inputTokens,
+              "ai.usage.completionTokens": usage.outputTokens
+            }
+          })
+        );
+        return new DefaultGenerateObjectResult({
+          object: object2,
+          finishReason,
+          usage,
+          warnings,
+          request,
+          response,
+          providerMetadata: resultProviderMetadata
+        });
      }
-
-
-
-
-        "ai.response.finishReason": finishReason,
-        "ai.response.object": {
-          output: () => JSON.stringify(object2)
-        },
-        // TODO rename telemetry attributes to inputTokens and outputTokens
-        "ai.usage.promptTokens": usage.inputTokens,
-        "ai.usage.completionTokens": usage.outputTokens
-      }
-    })
-  );
-  return new DefaultGenerateObjectResult({
-    object: object2,
-    finishReason,
-    usage,
-    warnings,
-    request,
-    response,
-    providerMetadata: resultProviderMetadata
-  });
-}
-});
+    });
+  } catch (error) {
+    throw wrapGatewayError(error);
+  }
 }
 var DefaultGenerateObjectResult = class {
   constructor(options) {
@@ -4646,7 +4619,7 @@ var DefaultGenerateObjectResult = class {
 };
 
 // core/generate-object/stream-object.ts
-var
+var import_provider_utils15 = require("@ai-sdk/provider-utils");
 
 // src/util/create-resolvable-promise.ts
 function createResolvablePromise() {
@@ -4752,11 +4725,11 @@ var DelayedPromise = class {
     this._resolve = void 0;
     this._reject = void 0;
   }
-  get
-    if (this.
-      return this.
+  get promise() {
+    if (this._promise) {
+      return this._promise;
     }
-    this.
+    this._promise = new Promise((resolve, reject) => {
       if (this.status.type === "resolved") {
         resolve(this.status.value);
       } else if (this.status.type === "rejected") {
@@ -4765,19 +4738,19 @@ var DelayedPromise = class {
       this._resolve = resolve;
       this._reject = reject;
     });
-    return this.
+    return this._promise;
   }
   resolve(value) {
     var _a17;
     this.status = { type: "resolved", value };
-    if (this.
+    if (this._promise) {
       (_a17 = this._resolve) == null ? void 0 : _a17.call(this, value);
     }
   }
   reject(error) {
     var _a17;
     this.status = { type: "rejected", error };
-    if (this.
+    if (this._promise) {
       (_a17 = this._reject) == null ? void 0 : _a17.call(this, error);
     }
   }
@@ -4790,7 +4763,7 @@ function now() {
 }
 
 // core/generate-object/stream-object.ts
-var originalGenerateId2 = (0,
+var originalGenerateId2 = (0, import_provider_utils15.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject(options) {
   const {
     model,
@@ -4803,7 +4776,9 @@ function streamObject(options) {
   headers,
   experimental_telemetry: telemetry,
   providerOptions,
-  onError
+  onError = ({ error }) => {
+    console.error(error);
+  },
   onFinish,
   _internal: {
     generateId: generateId3 = originalGenerateId2,
|
|
4853
4828
|
}
|
4854
4829
|
var DefaultStreamObjectResult = class {
|
4855
4830
|
constructor({
|
4856
|
-
model,
|
4831
|
+
model: modelArg,
|
4857
4832
|
headers,
|
4858
4833
|
telemetry,
|
4859
4834
|
settings,
|
@@ -4872,12 +4847,13 @@ var DefaultStreamObjectResult = class {
|
|
4872
4847
|
currentDate,
|
4873
4848
|
now: now2
|
4874
4849
|
}) {
|
4875
|
-
this.
|
4876
|
-
this.
|
4877
|
-
this.
|
4878
|
-
this.
|
4879
|
-
this.
|
4880
|
-
this.
|
4850
|
+
this._object = new DelayedPromise();
|
4851
|
+
this._usage = new DelayedPromise();
|
4852
|
+
this._providerMetadata = new DelayedPromise();
|
4853
|
+
this._warnings = new DelayedPromise();
|
4854
|
+
this._request = new DelayedPromise();
|
4855
|
+
this._response = new DelayedPromise();
|
4856
|
+
const model = resolveLanguageModel(modelArg);
|
4881
4857
|
const { maxRetries, retry } = prepareRetries({
|
4882
4858
|
maxRetries: maxRetriesArg
|
4883
4859
|
});
|
@@ -4895,7 +4871,7 @@ var DefaultStreamObjectResult = class {
|
|
4895
4871
|
transform(chunk, controller) {
|
4896
4872
|
controller.enqueue(chunk);
|
4897
4873
|
if (chunk.type === "error") {
|
4898
|
-
onError
|
4874
|
+
onError({ error: wrapGatewayError(chunk.error) });
|
4899
4875
|
}
|
4900
4876
|
}
|
4901
4877
|
});
|
@@ -4974,7 +4950,7 @@ var DefaultStreamObjectResult = class {
|
|
4974
4950
|
}),
|
4975
4951
|
...baseTelemetryAttributes,
|
4976
4952
|
"ai.prompt.messages": {
|
4977
|
-
input: () =>
|
4953
|
+
input: () => stringifyForTelemetry(callOptions.prompt)
|
4978
4954
|
},
|
4979
4955
|
// standardized gen-ai llm span attributes:
|
4980
4956
|
"gen_ai.system": model.provider,
|
@@ -4996,7 +4972,7 @@ var DefaultStreamObjectResult = class {
|
|
4996
4972
|
})
|
4997
4973
|
})
|
4998
4974
|
);
|
4999
|
-
self.
|
4975
|
+
self._request.resolve(request != null ? request : {});
|
5000
4976
|
let warnings;
|
5001
4977
|
let usage = {
|
5002
4978
|
inputTokens: void 0,
|
@@ -5089,9 +5065,9 @@ var DefaultStreamObjectResult = class {
|
|
5089
5065
|
usage,
|
5090
5066
|
response: fullResponse
|
5091
5067
|
});
|
5092
|
-
self.
|
5093
|
-
self.
|
5094
|
-
self.
|
5068
|
+
self._usage.resolve(usage);
|
5069
|
+
self._providerMetadata.resolve(providerMetadata);
|
5070
|
+
self._response.resolve({
|
5095
5071
|
...fullResponse,
|
5096
5072
|
headers: response == null ? void 0 : response.headers
|
5097
5073
|
});
|
@@ -5105,7 +5081,7 @@ var DefaultStreamObjectResult = class {
|
|
5105
5081
|
);
|
5106
5082
|
if (validationResult.success) {
|
5107
5083
|
object2 = validationResult.value;
|
5108
|
-
self.
|
5084
|
+
self._object.resolve(object2);
|
5109
5085
|
} else {
|
5110
5086
|
error = new NoObjectGeneratedError({
|
5111
5087
|
message: "No object generated: response did not match schema.",
|
@@ -5115,7 +5091,7 @@ var DefaultStreamObjectResult = class {
|
|
5115
5091
|
usage,
|
5116
5092
|
finishReason
|
5117
5093
|
});
|
5118
|
-
self.
|
5094
|
+
self._object.reject(error);
|
5119
5095
|
}
|
5120
5096
|
break;
|
5121
5097
|
}
|
@@ -5210,22 +5186,22 @@ var DefaultStreamObjectResult = class {
|
|
5210
5186
|
this.outputStrategy = outputStrategy;
|
5211
5187
|
}
|
5212
5188
|
get object() {
|
5213
|
-
return this.
|
5189
|
+
return this._object.promise;
|
5214
5190
|
}
|
5215
5191
|
get usage() {
|
5216
|
-
return this.
|
5192
|
+
return this._usage.promise;
|
5217
5193
|
}
|
5218
5194
|
get providerMetadata() {
|
5219
|
-
return this.
|
5195
|
+
return this._providerMetadata.promise;
|
5220
5196
|
}
|
5221
5197
|
get warnings() {
|
5222
|
-
return this.
|
5198
|
+
return this._warnings.promise;
|
5223
5199
|
}
|
5224
5200
|
get request() {
|
5225
|
-
return this.
|
5201
|
+
return this._request.promise;
|
5226
5202
|
}
|
5227
5203
|
get response() {
|
5228
|
-
return this.
|
5204
|
+
return this._response.promise;
|
5229
5205
|
}
|
5230
5206
|
get partialObjectStream() {
|
5231
5207
|
return createAsyncIterableStream(
|
@@ -5295,8 +5271,8 @@ var DefaultStreamObjectResult = class {
|
|
5295
5271
|
};
|
5296
5272
|
|
5297
5273
|
// src/error/no-speech-generated-error.ts
|
5298
|
-
var
|
5299
|
-
var NoSpeechGeneratedError = class extends
|
5274
|
+
var import_provider23 = require("@ai-sdk/provider");
|
5275
|
+
var NoSpeechGeneratedError = class extends import_provider23.AISDKError {
|
5300
5276
|
constructor(options) {
|
5301
5277
|
super({
|
5302
5278
|
name: "AI_NoSpeechGeneratedError",
|
@@ -5385,10 +5361,15 @@ var DefaultSpeechResult = class {
|
|
5385
5361
|
};
|
5386
5362
|
|
5387
5363
|
// core/generate-text/generate-text.ts
|
5388
|
-
var
|
5364
|
+
var import_provider_utils18 = require("@ai-sdk/provider-utils");
|
5365
|
+
|
5366
|
+
// src/util/as-array.ts
|
5367
|
+
function asArray(value) {
|
5368
|
+
return value === void 0 ? [] : Array.isArray(value) ? value : [value];
|
5369
|
+
}
|
5389
5370
|
|
5390
5371
|
// core/prompt/prepare-tools-and-tool-choice.ts
|
5391
|
-
var
|
5372
|
+
var import_provider_utils16 = require("@ai-sdk/provider-utils");
|
5392
5373
|
|
5393
5374
|
// src/util/is-non-empty-object.ts
|
5394
5375
|
function isNonEmptyObject(object2) {
|
@@ -5420,7 +5401,7 @@ function prepareToolsAndToolChoice({
|
|
5420
5401
|
type: "function",
|
5421
5402
|
name: name17,
|
5422
5403
|
description: tool2.description,
|
5423
|
-
parameters: (0,
|
5404
|
+
parameters: (0, import_provider_utils16.asSchema)(tool2.parameters).jsonSchema
|
5424
5405
|
};
|
5425
5406
|
case "provider-defined":
|
5426
5407
|
return {
|
@@ -5490,7 +5471,7 @@ function asContent({
|
|
5490
5471
|
}
|
5491
5472
|
|
5492
5473
|
// core/generate-text/parse-tool-call.ts
|
5493
|
-
var
|
5474
|
+
var import_provider_utils17 = require("@ai-sdk/provider-utils");
|
5494
5475
|
async function parseToolCall({
|
5495
5476
|
toolCall,
|
5496
5477
|
tools,
|
@@ -5514,7 +5495,7 @@ async function parseToolCall({
|
|
5514
5495
|
tools,
|
5515
5496
|
parameterSchema: ({ toolName }) => {
|
5516
5497
|
const { parameters } = tools[toolName];
|
5517
|
-
return (0,
|
5498
|
+
return (0, import_provider_utils17.asSchema)(parameters).jsonSchema;
|
5518
5499
|
},
|
5519
5500
|
system,
|
5520
5501
|
messages,
|
@@ -5544,8 +5525,8 @@ async function doParseToolCall({
|
|
5544
5525
|
availableTools: Object.keys(tools)
|
5545
5526
|
});
|
5546
5527
|
}
|
5547
|
-
const schema = (0,
|
5548
|
-
const parseResult = toolCall.args.trim() === "" ? await (0,
|
5528
|
+
const schema = (0, import_provider_utils17.asSchema)(tool2.parameters);
|
5529
|
+
const parseResult = toolCall.args.trim() === "" ? await (0, import_provider_utils17.safeValidateTypes)({ value: {}, schema }) : await (0, import_provider_utils17.safeParseJSON)({ text: toolCall.args, schema });
|
5549
5530
|
if (parseResult.success === false) {
|
5550
5531
|
throw new InvalidToolArgumentsError({
|
5551
5532
|
toolName,
|
@@ -5604,8 +5585,8 @@ var DefaultStepResult = class {
|
|
5604
5585
|
};
|
5605
5586
|
|
5606
5587
|
// core/generate-text/stop-condition.ts
|
5607
|
-
function
|
5608
|
-
return ({ steps }) => steps.length
|
5588
|
+
function stepCountIs(stepCount) {
|
5589
|
+
return ({ steps }) => steps.length === stepCount;
|
5609
5590
|
}
|
5610
5591
|
function hasToolCall(toolName) {
|
5611
5592
|
return ({ steps }) => {
|
@@ -5615,6 +5596,12 @@ function hasToolCall(toolName) {
|
|
5615
5596
|
)) != null ? _c : false;
|
5616
5597
|
};
|
5617
5598
|
}
|
5599
|
+
async function isStopConditionMet({
|
5600
|
+
stopConditions,
|
5601
|
+
steps
|
5602
|
+
}) {
|
5603
|
+
return (await Promise.all(stopConditions.map((condition) => condition({ steps })))).some((result) => result);
|
5604
|
+
}
|
5618
5605
|
|
5619
5606
|
// core/generate-text/to-response-messages.ts
|
5620
5607
|
function toResponseMessages({
|
@@ -5675,12 +5662,12 @@ function toResponseMessages({
|
|
5675
5662
|
}
|
5676
5663
|
|
5677
5664
|
// core/generate-text/generate-text.ts
|
5678
|
-
var originalGenerateId3 = (0,
|
5665
|
+
var originalGenerateId3 = (0, import_provider_utils18.createIdGenerator)({
|
5679
5666
|
prefix: "aitxt",
|
5680
5667
|
size: 24
|
5681
5668
|
});
|
5682
5669
|
async function generateText({
|
5683
|
-
model,
|
5670
|
+
model: modelArg,
|
5684
5671
|
tools,
|
5685
5672
|
toolChoice,
|
5686
5673
|
system,
|
@@ -5689,12 +5676,14 @@ async function generateText({
|
|
5689
5676
|
maxRetries: maxRetriesArg,
|
5690
5677
|
abortSignal,
|
5691
5678
|
headers,
|
5692
|
-
|
5679
|
+
stopWhen = stepCountIs(1),
|
5693
5680
|
experimental_output: output,
|
5694
5681
|
experimental_telemetry: telemetry,
|
5695
5682
|
providerOptions,
|
5696
|
-
experimental_activeTools
|
5697
|
-
|
5683
|
+
experimental_activeTools,
|
5684
|
+
activeTools = experimental_activeTools,
|
5685
|
+
experimental_prepareStep,
|
5686
|
+
prepareStep = experimental_prepareStep,
|
5698
5687
|
experimental_repairToolCall: repairToolCall,
|
5699
5688
|
_internal: {
|
5700
5689
|
generateId: generateId3 = originalGenerateId3,
|
@@ -5703,6 +5692,8 @@ async function generateText({
|
|
5703
5692
|
onStepFinish,
|
5704
5693
|
...settings
|
5705
5694
|
}) {
|
5695
|
+
const model = resolveLanguageModel(modelArg);
|
5696
|
+
const stopConditions = asArray(stopWhen);
|
5706
5697
|
const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
|
5707
5698
|
const callSettings = prepareCallSettings(settings);
|
5708
5699
|
const baseTelemetryAttributes = getBaseTelemetryAttributes({
|
@@ -5717,237 +5708,243 @@ async function generateText({
|
|
5717
5708
|
messages
|
5718
5709
|
});
|
5719
5710
|
const tracer = getTracer(telemetry);
|
5720
|
-
|
5721
|
-
|
5722
|
-
|
5723
|
-
|
5724
|
-
|
5725
|
-
|
5726
|
-
|
5727
|
-
|
5728
|
-
|
5729
|
-
|
5730
|
-
|
5731
|
-
|
5732
|
-
|
5733
|
-
|
5734
|
-
|
5735
|
-
|
5736
|
-
|
5737
|
-
}
|
5738
|
-
}),
|
5739
|
-
tracer,
|
5740
|
-
fn: async (span) => {
|
5741
|
-
var _a17, _b, _c, _d;
|
5742
|
-
const callSettings2 = prepareCallSettings(settings);
|
5743
|
-
let currentModelResponse;
|
5744
|
-
let currentToolCalls = [];
|
5745
|
-
let currentToolResults = [];
|
5746
|
-
const responseMessages = [];
|
5747
|
-
const steps = [];
|
5748
|
-
do {
|
5749
|
-
const stepInputMessages = [
|
5750
|
-
...initialPrompt.messages,
|
5751
|
-
...responseMessages
|
5752
|
-
];
|
5753
|
-
const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
|
5754
|
-
model,
|
5755
|
-
steps,
|
5756
|
-
stepNumber: steps.length
|
5757
|
-
}));
|
5758
|
-
const promptMessages = await convertToLanguageModelPrompt({
|
5759
|
-
prompt: {
|
5760
|
-
system: initialPrompt.system,
|
5761
|
-
messages: stepInputMessages
|
5762
|
-
},
|
5763
|
-
supportedUrls: await model.supportedUrls
|
5764
|
-
});
|
5765
|
-
const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
|
5766
|
-
const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
|
5767
|
-
tools,
|
5768
|
-
toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
|
5769
|
-
activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
|
5770
|
-
});
|
5771
|
-
currentModelResponse = await retry(
|
5772
|
-
() => {
|
5773
|
-
var _a18;
|
5774
|
-
return recordSpan({
|
5775
|
-
name: "ai.generateText.doGenerate",
|
5776
|
-
attributes: selectTelemetryAttributes({
|
5777
|
-
telemetry,
|
5778
|
-
attributes: {
|
5779
|
-
...assembleOperationName({
|
5780
|
-
operationId: "ai.generateText.doGenerate",
|
5781
|
-
telemetry
|
5782
|
-
}),
|
5783
|
-
...baseTelemetryAttributes,
|
5784
|
-
// model:
|
5785
|
-
"ai.model.provider": stepModel.provider,
|
5786
|
-
"ai.model.id": stepModel.modelId,
|
5787
|
-
// prompt:
|
5788
|
-
"ai.prompt.messages": {
|
5789
|
-
input: () => JSON.stringify(promptMessages)
|
5790
|
-
},
|
5791
|
-
"ai.prompt.tools": {
|
5792
|
-
// convert the language model level tools:
|
5793
|
-
input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
|
5794
|
-
},
|
5795
|
-
"ai.prompt.toolChoice": {
|
5796
|
-
input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
|
5797
|
-
},
|
5798
|
-
// standardized gen-ai llm span attributes:
|
5799
|
-
"gen_ai.system": stepModel.provider,
|
5800
|
-
"gen_ai.request.model": stepModel.modelId,
|
5801
|
-
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
5802
|
-
"gen_ai.request.max_tokens": settings.maxOutputTokens,
|
5803
|
-
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
5804
|
-
"gen_ai.request.stop_sequences": settings.stopSequences,
|
5805
|
-
"gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
|
5806
|
-
"gen_ai.request.top_k": settings.topK,
|
5807
|
-
"gen_ai.request.top_p": settings.topP
|
5808
|
-
}
|
5809
|
-
}),
|
5810
|
-
tracer,
|
5811
|
-
fn: async (span2) => {
|
5812
|
-
var _a19, _b2, _c2, _d2, _e, _f, _g, _h;
|
5813
|
-
const result = await stepModel.doGenerate({
|
5814
|
-
...callSettings2,
|
5815
|
-
tools: stepTools,
|
5816
|
-
toolChoice: stepToolChoice,
|
5817
|
-
responseFormat: output == null ? void 0 : output.responseFormat,
|
5818
|
-
prompt: promptMessages,
|
5819
|
-
providerOptions,
|
5820
|
-
abortSignal,
|
5821
|
-
headers
|
5822
|
-
});
|
5823
|
-
const responseData = {
|
5824
|
-
id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
|
5825
|
-
timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
|
5826
|
-
modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : stepModel.modelId,
|
5827
|
-
headers: (_g = result.response) == null ? void 0 : _g.headers,
|
5828
|
-
body: (_h = result.response) == null ? void 0 : _h.body
|
5829
|
-
};
|
5830
|
-
span2.setAttributes(
|
5831
|
-
selectTelemetryAttributes({
|
5832
|
-
telemetry,
|
5833
|
-
attributes: {
|
5834
|
-
"ai.response.finishReason": result.finishReason,
|
5835
|
-
"ai.response.text": {
|
5836
|
-
output: () => extractContentText(result.content)
|
5837
|
-
},
|
5838
|
-
"ai.response.toolCalls": {
|
5839
|
-
output: () => {
|
5840
|
-
const toolCalls = asToolCalls(result.content);
|
5841
|
-
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5842
|
-
}
|
5843
|
-
},
|
5844
|
-
"ai.response.id": responseData.id,
|
5845
|
-
"ai.response.model": responseData.modelId,
|
5846
|
-
"ai.response.timestamp": responseData.timestamp.toISOString(),
|
5847
|
-
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5848
|
-
"ai.usage.promptTokens": result.usage.inputTokens,
|
5849
|
-
"ai.usage.completionTokens": result.usage.outputTokens,
|
5850
|
-
// standardized gen-ai llm span attributes:
|
5851
|
-
"gen_ai.response.finish_reasons": [result.finishReason],
|
5852
|
-
"gen_ai.response.id": responseData.id,
|
5853
|
-
"gen_ai.response.model": responseData.modelId,
|
5854
|
-
"gen_ai.usage.input_tokens": result.usage.inputTokens,
|
5855
|
-
"gen_ai.usage.output_tokens": result.usage.outputTokens
|
5856
|
-
}
|
5857
|
-
})
|
5858
|
-
);
|
5859
|
-
return { ...result, response: responseData };
|
5860
|
-
}
|
5861
|
-
});
|
5711
|
+
try {
|
5712
|
+
return await recordSpan({
|
5713
|
+
name: "ai.generateText",
|
5714
|
+
attributes: selectTelemetryAttributes({
|
5715
|
+
telemetry,
|
5716
|
+
attributes: {
|
5717
|
+
...assembleOperationName({
|
5718
|
+
operationId: "ai.generateText",
|
5719
|
+
telemetry
|
5720
|
+
}),
|
5721
|
+
...baseTelemetryAttributes,
|
5722
|
+
// model:
|
5723
|
+
"ai.model.provider": model.provider,
|
5724
|
+
"ai.model.id": model.modelId,
|
5725
|
+
// specific settings that only make sense on the outer level:
|
5726
|
+
"ai.prompt": {
|
5727
|
+
input: () => JSON.stringify({ system, prompt, messages })
|
5862
5728
|
}
|
5863
|
-
|
5864
|
-
|
5865
|
-
|
5866
|
-
|
5867
|
-
|
5868
|
-
|
5869
|
-
|
5870
|
-
|
5871
|
-
|
5872
|
-
|
5729
|
+
}
|
5730
|
+
}),
|
5731
|
+
tracer,
|
5732
|
+
fn: async (span) => {
|
5733
|
+
var _a17, _b, _c, _d, _e;
|
5734
|
+
const callSettings2 = prepareCallSettings(settings);
|
5735
|
+
let currentModelResponse;
|
5736
|
+
let currentToolCalls = [];
|
5737
|
+
let currentToolResults = [];
|
5738
|
+
const responseMessages = [];
|
5739
|
+
const steps = [];
|
5740
|
+
do {
|
5741
|
+
const stepInputMessages = [
|
5742
|
+
...initialPrompt.messages,
|
5743
|
+
...responseMessages
|
5744
|
+
];
|
5745
|
+
const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
|
5746
|
+
model,
|
5747
|
+
steps,
|
5748
|
+
stepNumber: steps.length
|
5749
|
+
}));
|
5750
|
+
const promptMessages = await convertToLanguageModelPrompt({
|
5751
|
+
prompt: {
|
5752
|
+
system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
|
5873
5753
|
messages: stepInputMessages
|
5754
|
+
},
|
5755
|
+
supportedUrls: await model.supportedUrls
|
5756
|
+
});
|
5757
|
+
const stepModel = resolveLanguageModel(
|
5758
|
+
(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
|
5759
|
+
);
|
5760
|
+
const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
|
5761
|
+
tools,
|
5762
|
+
toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
|
5763
|
+
activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
|
5764
|
+
});
|
5765
|
+
currentModelResponse = await retry(
|
5766
|
+
() => {
|
5767
|
+
var _a18;
|
5768
|
+
return recordSpan({
|
5769
|
+
name: "ai.generateText.doGenerate",
|
5770
|
+
attributes: selectTelemetryAttributes({
|
5771
|
+
telemetry,
|
5772
|
+
attributes: {
|
5773
|
+
...assembleOperationName({
|
5774
|
+
operationId: "ai.generateText.doGenerate",
|
5775
|
+
telemetry
|
5776
|
+
}),
|
5777
|
+
...baseTelemetryAttributes,
|
5778
|
+
// model:
|
5779
|
+
"ai.model.provider": stepModel.provider,
|
5780
|
+
"ai.model.id": stepModel.modelId,
|
5781
|
+
// prompt:
|
5782
|
+
"ai.prompt.messages": {
|
5783
|
+
input: () => stringifyForTelemetry(promptMessages)
|
5784
|
+
},
|
5785
|
+
"ai.prompt.tools": {
|
5786
|
+
// convert the language model level tools:
|
5787
|
+
input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
|
5788
|
+
},
|
5789
|
+
"ai.prompt.toolChoice": {
|
5790
|
+
input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
|
5791
|
+
},
|
5792
|
+
// standardized gen-ai llm span attributes:
|
5793
|
+
"gen_ai.system": stepModel.provider,
|
5794
|
+
"gen_ai.request.model": stepModel.modelId,
|
5795
|
+
"gen_ai.request.frequency_penalty": settings.frequencyPenalty,
|
5796
|
+
"gen_ai.request.max_tokens": settings.maxOutputTokens,
|
5797
|
+
"gen_ai.request.presence_penalty": settings.presencePenalty,
|
5798
|
+
"gen_ai.request.stop_sequences": settings.stopSequences,
|
5799
|
+
"gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
|
5800
|
+
"gen_ai.request.top_k": settings.topK,
|
5801
|
+
"gen_ai.request.top_p": settings.topP
|
5802
|
+
}
|
5803
|
+
}),
|
5804
|
+
tracer,
|
5805
|
+
fn: async (span2) => {
|
5806
|
+
var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
|
5807
|
+
const result = await stepModel.doGenerate({
|
5808
|
+
...callSettings2,
|
5809
|
+
tools: stepTools,
|
5810
|
+
toolChoice: stepToolChoice,
|
5811
|
+
responseFormat: output == null ? void 0 : output.responseFormat,
|
5812
|
+
prompt: promptMessages,
|
5813
|
+
providerOptions,
|
5814
|
+
abortSignal,
|
5815
|
+
headers
|
5816
|
+
});
|
5817
|
+
const responseData = {
|
5818
|
+
id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
|
5819
|
+
timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
|
5820
|
+
modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
|
5821
|
+
headers: (_g = result.response) == null ? void 0 : _g.headers,
|
5822
|
+
body: (_h = result.response) == null ? void 0 : _h.body
|
5823
|
+
};
|
5824
|
+
span2.setAttributes(
|
5825
|
+
selectTelemetryAttributes({
|
5826
|
+
telemetry,
|
5827
|
+
attributes: {
|
5828
|
+
"ai.response.finishReason": result.finishReason,
|
5829
|
+
"ai.response.text": {
|
5830
|
+
output: () => extractContentText(result.content)
|
5831
|
+
},
|
5832
|
+
"ai.response.toolCalls": {
|
5833
|
+
output: () => {
|
5834
|
+
const toolCalls = asToolCalls(result.content);
|
5835
|
+
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5836
|
+
}
|
5837
|
+
},
|
5838
|
+
"ai.response.id": responseData.id,
|
5839
|
+
"ai.response.model": responseData.modelId,
|
5840
|
+
"ai.response.timestamp": responseData.timestamp.toISOString(),
|
5841
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5842
|
+
"ai.usage.promptTokens": result.usage.inputTokens,
|
5843
|
+
"ai.usage.completionTokens": result.usage.outputTokens,
|
5844
|
+
// standardized gen-ai llm span attributes:
|
5845
|
+
"gen_ai.response.finish_reasons": [result.finishReason],
|
5846
|
+
"gen_ai.response.id": responseData.id,
|
5847
|
+
"gen_ai.response.model": responseData.modelId,
|
5848
|
+
"gen_ai.usage.input_tokens": result.usage.inputTokens,
|
5849
|
+
"gen_ai.usage.output_tokens": result.usage.outputTokens
|
5850
|
+
}
|
5851
|
+
})
|
5852
|
+
);
|
5853
|
+
return { ...result, response: responseData };
|
5854
|
+
}
|
5855
|
+
});
|
5856
|
+
}
|
5857
|
+
);
|
5858
|
+
currentToolCalls = await Promise.all(
|
5859
|
+
currentModelResponse.content.filter(
|
5860
|
+
(part) => part.type === "tool-call"
|
5861
|
+
).map(
|
5862
|
+
(toolCall) => parseToolCall({
|
5863
|
+
toolCall,
|
5864
|
+
tools,
|
5865
|
+
repairToolCall,
|
5866
|
+
system,
|
5867
|
+
messages: stepInputMessages
|
5868
|
+
})
|
5869
|
+
)
|
5870
|
+
);
|
5871
|
+
currentToolResults = tools == null ? [] : await executeTools({
|
5872
|
+
toolCalls: currentToolCalls,
|
5873
|
+
tools,
|
5874
|
+
tracer,
|
5875
|
+
telemetry,
|
5876
|
+
messages: stepInputMessages,
|
5877
|
+
abortSignal
|
5878
|
+
});
|
5879
|
+
const stepContent = asContent({
|
5880
|
+
content: currentModelResponse.content,
|
5881
|
+
toolCalls: currentToolCalls,
|
5882
|
+
toolResults: currentToolResults
|
5883
|
+
});
|
5884
|
+
responseMessages.push(
|
5885
|
+
...toResponseMessages({
|
5886
|
+
content: stepContent,
|
5887
|
+
tools: tools != null ? tools : {}
|
5874
5888
|
})
|
5875
|
-
)
|
5876
|
-
|
5877
|
-
currentToolResults = tools == null ? [] : await executeTools({
|
5878
|
-
toolCalls: currentToolCalls,
|
5879
|
-
tools,
|
5880
|
-
tracer,
|
5881
|
-
telemetry,
|
5882
|
-
messages: stepInputMessages,
|
5883
|
-
abortSignal
|
5884
|
-
});
|
5885
|
-
const stepContent = asContent({
|
5886
|
-
content: currentModelResponse.content,
|
5887
|
-
toolCalls: currentToolCalls,
|
5888
|
-
toolResults: currentToolResults
|
5889
|
-
});
|
5890
|
-
responseMessages.push(
|
5891
|
-
...toResponseMessages({
|
5889
|
+
);
|
5890
|
+
const currentStepResult = new DefaultStepResult({
|
5892
5891
|
content: stepContent,
|
5893
|
-
|
5892
|
+
finishReason: currentModelResponse.finishReason,
|
5893
|
+
usage: currentModelResponse.usage,
|
5894
|
+
warnings: currentModelResponse.warnings,
|
5895
|
+
providerMetadata: currentModelResponse.providerMetadata,
|
5896
|
+
request: (_e = currentModelResponse.request) != null ? _e : {},
|
5897
|
+
response: {
|
5898
|
+
...currentModelResponse.response,
|
5899
|
+
// deep clone msgs to avoid mutating past messages in multi-step:
|
5900
|
+
messages: structuredClone(responseMessages)
|
5901
|
+
}
|
5902
|
+
});
|
5903
|
+
steps.push(currentStepResult);
|
5904
|
+
await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
|
5905
|
+
} while (
|
5906
|
+
// there are tool calls:
|
5907
|
+
currentToolCalls.length > 0 && // all current tool calls have results:
|
5908
|
+
currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
|
5909
|
+
!await isStopConditionMet({ stopConditions, steps })
|
5910
|
+
);
|
5911
|
+
span.setAttributes(
|
5912
|
+
selectTelemetryAttributes({
|
5913
|
+
telemetry,
|
5914
|
+
attributes: {
|
5915
|
+
"ai.response.finishReason": currentModelResponse.finishReason,
|
5916
|
+
"ai.response.text": {
|
5917
|
+
output: () => extractContentText(currentModelResponse.content)
|
5918
|
+
},
|
5919
|
+
"ai.response.toolCalls": {
|
5920
|
+
output: () => {
|
5921
|
+
const toolCalls = asToolCalls(currentModelResponse.content);
|
5922
|
+
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5923
|
+
}
|
5924
|
+
},
|
5925
|
+
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5926
|
+
"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
|
5927
|
+
"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
|
5928
|
+
}
|
5894
5929
|
})
|
5895
5930
|
);
|
5896
|
-
const
|
5897
|
-
|
5898
|
-
|
5899
|
-
|
5900
|
-
|
5901
|
-
|
5902
|
-
|
5903
|
-
|
5904
|
-
|
5905
|
-
|
5906
|
-
|
5907
|
-
}
|
5931
|
+
const lastStep = steps[steps.length - 1];
|
5932
|
+
return new DefaultGenerateTextResult({
|
5933
|
+
steps,
|
5934
|
+
resolvedOutput: await (output == null ? void 0 : output.parseOutput(
|
5935
|
+
{ text: lastStep.text },
|
5936
|
+
{
|
5937
|
+
response: lastStep.response,
|
5938
|
+
usage: lastStep.usage,
|
5939
|
+
finishReason: lastStep.finishReason
|
5940
|
+
}
|
5941
|
+
))
|
5908
5942
|
});
|
5909
|
-
|
5910
|
-
|
5911
|
-
|
5912
|
-
|
5913
|
-
|
5914
|
-
currentToolResults.length === currentToolCalls.length && // continue until the stop condition is met:
|
5915
|
-
!await continueUntil({ steps })
|
5916
|
-
);
|
5917
|
-
span.setAttributes(
|
5918
|
-
selectTelemetryAttributes({
|
5919
|
-
telemetry,
|
5920
|
-
attributes: {
|
5921
|
-
"ai.response.finishReason": currentModelResponse.finishReason,
|
5922
|
-
"ai.response.text": {
|
5923
|
-
output: () => extractContentText(currentModelResponse.content)
|
5924
|
-
},
|
5925
|
-
"ai.response.toolCalls": {
|
5926
|
-
output: () => {
|
5927
|
-
const toolCalls = asToolCalls(currentModelResponse.content);
|
5928
|
-
return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
|
5929
|
-
}
|
5930
|
-
},
|
5931
|
-
// TODO rename telemetry attributes to inputTokens and outputTokens
|
5932
|
-
"ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
|
5933
|
-
"ai.usage.completionTokens": currentModelResponse.usage.outputTokens
|
5934
|
-
}
|
5935
|
-
})
|
5936
|
-
);
|
5937
|
-
const lastStep = steps[steps.length - 1];
|
5938
|
-
return new DefaultGenerateTextResult({
|
5939
|
-
steps,
|
5940
|
-
resolvedOutput: await (output == null ? void 0 : output.parseOutput(
|
5941
|
-
{ text: lastStep.text },
|
5942
|
-
{
|
5943
|
-
response: lastStep.response,
|
5944
|
-
usage: lastStep.usage,
|
5945
|
-
finishReason: lastStep.finishReason
|
5946
|
-
}
|
5947
|
-
))
|
5948
|
-
});
|
5949
|
-
}
|
5950
|
-
});
|
5943
|
+
}
|
5944
|
+
});
|
5945
|
+
} catch (error) {
|
5946
|
+
throw wrapGatewayError(error);
|
5947
|
+
}
|
5951
5948
|
}
|
5952
5949
|
async function executeTools({
|
5953
5950
|
toolCalls,
|
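
The rewritten generateText loop above replaces the old numeric step limit with an array of stop conditions (`stopConditions`, checked via `isStopConditionMet`) and consults an optional `prepareStep` hook before each step, which may override the system prompt, model, tool choice, or active tools; the whole call is also wrapped so that errors are rethrown through `wrapGatewayError`. A minimal usage sketch, assuming an `@ai-sdk/openai` provider, that the public option is `stopWhen` (as in the streamText signature later in this diff), and that these alphas still use the v4-style `parameters` key in `tool()`:

```ts
import { generateText, stepCountIs, hasToolCall, tool } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4o'),
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }), // assumed alpha-era tool shape
      execute: async ({ city }) => `It is sunny in ${city}.`,
    }),
  },
  // the step loop keeps running until any stop condition matches:
  stopWhen: [stepCountIs(5), hasToolCall('weather')],
  // per-step overrides; returning undefined keeps the defaults:
  prepareStep: ({ stepNumber }) =>
    stepNumber === 0 ? { toolChoice: 'required' } : undefined,
  prompt: 'What is the weather in Berlin?',
});

console.log(result.steps.length, result.text);
```
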
@@ -6116,7 +6113,7 @@ __export(output_exports, {
 object: () => object,
 text: () => text
 });
-var
+var import_provider_utils19 = require("@ai-sdk/provider-utils");
 var text = () => ({
 type: "text",
 responseFormat: { type: "text" },
@@ -6130,7 +6127,7 @@ var text = () => ({
 var object = ({
 schema: inputSchema
 }) => {
-const schema = (0,
+const schema = (0, import_provider_utils19.asSchema)(inputSchema);
 return {
 type: "object",
 responseFormat: {
@@ -6156,7 +6153,7 @@ var object = ({
 }
 },
 async parseOutput({ text: text2 }, context) {
-const parseResult = await (0,
+const parseResult = await (0, import_provider_utils19.safeParseJSON)({ text: text2 });
 if (!parseResult.success) {
 throw new NoObjectGeneratedError({
 message: "No object generated: could not parse the response.",
@@ -6167,7 +6164,7 @@ var object = ({
 finishReason: context.finishReason
 });
 }
-const validationResult = await (0,
+const validationResult = await (0, import_provider_utils19.safeValidateTypes)({
 value: parseResult.value,
 schema
 });
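
These Output hunks only renumber the provider-utils import, but they make the contract visible: `Output.object` resolves the schema with `asSchema`, then `parseOutput` parses the final text with `safeParseJSON` and validates it with `safeValidateTypes`, throwing `NoObjectGeneratedError` on either failure. A hedged usage sketch (provider assumed):

```ts
import { generateText, Output } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider
import { z } from 'zod';

const { experimental_output } = await generateText({
  model: openai('gpt-4o'),
  // parsed and validated against the schema once the call finishes;
  // unparseable or invalid output raises NoObjectGeneratedError:
  experimental_output: Output.object({
    schema: z.object({ city: z.string(), temperatureC: z.number() }),
  }),
  prompt: 'Report the current weather in Berlin as JSON.',
});

console.log(experimental_output.city);
```
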
@@ -6187,8 +6184,8 @@ var object = ({
 };
 
 // core/generate-text/smooth-stream.ts
-var
-var
+var import_provider_utils20 = require("@ai-sdk/provider-utils");
+var import_provider24 = require("@ai-sdk/provider");
 var CHUNKING_REGEXPS = {
 word: /\S+\s+/m,
 line: /\n+/m
@@ -6196,7 +6193,7 @@ var CHUNKING_REGEXPS = {
 function smoothStream({
 delayInMs = 10,
 chunking = "word",
-_internal: { delay: delay2 =
+_internal: { delay: delay2 = import_provider_utils20.delay } = {}
 } = {}) {
 let detectChunk;
 if (typeof chunking === "function") {
@@ -6218,7 +6215,7 @@ function smoothStream({
 } else {
 const chunkingRegex = typeof chunking === "string" ? CHUNKING_REGEXPS[chunking] : chunking;
 if (chunkingRegex == null) {
-throw new
+throw new import_provider24.InvalidArgumentError({
 argument: "chunking",
 message: `Chunking must be "word" or "line" or a RegExp. Received: ${chunking}`
 });
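
`smoothStream` now pulls `delay` from `@ai-sdk/provider-utils` and throws `InvalidArgumentError` from `@ai-sdk/provider` for an unknown `chunking` value, so only `"word"`, `"line"`, a RegExp, or a custom chunk-detector function are accepted. Usage sketch (provider assumed):

```ts
import { streamText, smoothStream } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku about diffs.',
  // emit line-sized chunks every 20 ms; anything other than "word",
  // "line", a RegExp, or a function throws InvalidArgumentError:
  experimental_transform: smoothStream({ delayInMs: 20, chunking: 'line' }),
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}
```
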
@@ -6256,15 +6253,10 @@ function smoothStream({
 }
 
 // core/generate-text/stream-text.ts
-var
-
-// src/util/as-array.ts
-function asArray(value) {
-return value === void 0 ? [] : Array.isArray(value) ? value : [value];
-}
+var import_provider_utils22 = require("@ai-sdk/provider-utils");
 
 // core/generate-text/run-tools-transformation.ts
-var
+var import_provider_utils21 = require("@ai-sdk/provider-utils");
 function runToolsTransformation({
 tools,
 generatorStream,
@@ -6350,7 +6342,7 @@ function runToolsTransformation({
 controller.enqueue(toolCall);
 const tool2 = tools[toolCall.toolName];
 if (tool2.execute != null) {
-const toolExecutionId = (0,
+const toolExecutionId = (0, import_provider_utils21.generateId)();
 outstandingToolResults.add(toolExecutionId);
 recordSpan({
 name: "ai.toolCall",
@@ -6459,7 +6451,7 @@ function runToolsTransformation({
 }
 
 // core/generate-text/stream-text.ts
-var originalGenerateId4 = (0,
+var originalGenerateId4 = (0, import_provider_utils22.createIdGenerator)({
 prefix: "aitxt",
 size: 24
 });
@@ -6473,17 +6465,21 @@ function streamText({
 maxRetries,
 abortSignal,
 headers,
-
+stopWhen = stepCountIs(1),
 experimental_output: output,
 experimental_telemetry: telemetry,
+prepareStep,
 providerOptions,
 experimental_toolCallStreaming = false,
 toolCallStreaming = experimental_toolCallStreaming,
-experimental_activeTools
+experimental_activeTools,
+activeTools = experimental_activeTools,
 experimental_repairToolCall: repairToolCall,
 experimental_transform: transform,
 onChunk,
-onError
+onError = ({ error }) => {
+console.error(error);
+},
 onFinish,
 onStepFinish,
 _internal: {
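
The streamText signature gains the same multi-step controls as generateText: `stopWhen` (defaulting to `stepCountIs(1)`), a `prepareStep` hook, a stable `activeTools` alias for `experimental_activeTools`, and an `onError` callback that now defaults to logging via `console.error` instead of silently swallowing stream errors. Sketch (provider assumed):

```ts
import { streamText, stepCountIs } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Plan a weekend in Berlin.',
  stopWhen: stepCountIs(3), // default is stepCountIs(1)
  onError: ({ error }) => {
    // the built-in default is ({ error }) => console.error(error)
    console.error('stream error:', error);
  },
});
```
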
@@ -6494,7 +6490,7 @@ function streamText({
 ...settings
 }) {
 return new DefaultStreamTextResult({
-model,
+model: resolveLanguageModel(model),
 telemetry,
 headers,
 settings,
@@ -6509,9 +6505,10 @@ function streamText({
 transforms: asArray(transform),
 activeTools,
 repairToolCall,
-
+stopConditions: asArray(stopWhen),
 output,
 providerOptions,
+prepareStep,
 onChunk,
 onError,
 onFinish,
@@ -6586,9 +6583,10 @@ var DefaultStreamTextResult = class {
 transforms,
 activeTools,
 repairToolCall,
-
+stopConditions,
 output,
 providerOptions,
+prepareStep,
 now: now2,
 currentDate,
 generateId: generateId3,
@@ -6597,18 +6595,12 @@ var DefaultStreamTextResult = class {
 onFinish,
 onStepFinish
 }) {
-this.
-this.
-this.
-if (maxSteps2 < 1) {
-throw new InvalidArgumentError({
-parameter: "maxSteps",
-value: maxSteps2,
-message: "maxSteps must be at least 1"
-});
-}
+this._totalUsage = new DelayedPromise();
+this._finishReason = new DelayedPromise();
+this._steps = new DelayedPromise();
 this.output = output;
 this.generateId = generateId3;
+let stepFinish;
 let activeReasoningPart = void 0;
 let recordedContent = [];
 const recordedResponseMessages = [];
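
With the `maxSteps` option gone, its `maxSteps < 1` validation disappears too, and `steps`, `finishReason`, and `totalUsage` are now backed by `DelayedPromise` fields (`_steps`, `_finishReason`, `_totalUsage`) that resolve when the stream finishes. Consuming code simply awaits the getters; sketch (provider assumed):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

const result = streamText({ model: openai('gpt-4o'), prompt: 'Hello!' });

for await (const text of result.textStream) {
  process.stdout.write(text);
}

// these promises resolve once the "finish" part has been processed:
console.log(await result.finishReason);
console.log(await result.totalUsage);
console.log((await result.steps).length);
```
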
@@ -6626,7 +6618,7 @@ var DefaultStreamTextResult = class {
 await (onChunk == null ? void 0 : onChunk({ chunk: part }));
 }
 if (part.type === "error") {
-await
+await onError({ error: wrapGatewayError(part.error) });
 }
 if (part.type === "text") {
 const latestContent = recordedContent[recordedContent.length - 1];
@@ -6690,6 +6682,7 @@ var DefaultStreamTextResult = class {
 recordedContent = [];
 activeReasoningPart = void 0;
 recordedResponseMessages.push(...stepMessages);
+stepFinish.resolve();
 }
 if (part.type === "finish") {
 recordedTotalUsage = part.totalUsage;
@@ -6707,9 +6700,9 @@ var DefaultStreamTextResult = class {
 outputTokens: void 0,
 totalTokens: void 0
 };
-self.
-self.
-self.
+self._finishReason.resolve(finishReason);
+self._totalUsage.resolve(totalUsage);
+self._steps.resolve(recordedSteps);
 const finalStep = recordedSteps[recordedSteps.length - 1];
 await (onFinish == null ? void 0 : onFinish({
 finishReason,
@@ -6800,8 +6793,7 @@ var DefaultStreamTextResult = class {
 // specific settings that only make sense on the outer level:
 "ai.prompt": {
 input: () => JSON.stringify({ system, prompt, messages })
-}
-"ai.settings.maxSteps": maxSteps2
+}
 }
 }),
 tracer,
@@ -6813,6 +6805,8 @@ var DefaultStreamTextResult = class {
 responseMessages,
 usage
 }) {
+var _a17, _b, _c, _d;
+stepFinish = new DelayedPromise();
 const initialPrompt = await standardizePrompt({
 system,
 prompt,
@@ -6822,16 +6816,26 @@ var DefaultStreamTextResult = class {
 ...initialPrompt.messages,
 ...responseMessages
 ];
+const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+model,
+steps: recordedSteps,
+stepNumber: recordedSteps.length
+}));
 const promptMessages = await convertToLanguageModelPrompt({
 prompt: {
-system: initialPrompt.system,
+system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
 messages: stepInputMessages
 },
 supportedUrls: await model.supportedUrls
 });
-const
-
-
+const stepModel = resolveLanguageModel(
+(_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+);
+const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+tools,
+toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+});
 const {
 result: { stream: stream2, response, request },
 doStreamSpan,
@@ -6847,24 +6851,23 @@ var DefaultStreamTextResult = class {
 telemetry
 }),
 ...baseTelemetryAttributes,
+// model:
+"ai.model.provider": stepModel.provider,
+"ai.model.id": stepModel.modelId,
+// prompt:
 "ai.prompt.messages": {
-input: () =>
+input: () => stringifyForTelemetry(promptMessages)
 },
 "ai.prompt.tools": {
 // convert the language model level tools:
-input: () =>
-var _a17;
-return (_a17 = toolsAndToolChoice.tools) == null ? void 0 : _a17.map(
-(tool2) => JSON.stringify(tool2)
-);
-}
+input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
 },
 "ai.prompt.toolChoice": {
-input: () =>
+input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
 },
 // standardized gen-ai llm span attributes:
-"gen_ai.system":
-"gen_ai.request.model":
+"gen_ai.system": stepModel.provider,
+"gen_ai.request.model": stepModel.modelId,
 "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
 "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
 "gen_ai.request.presence_penalty": callSettings.presencePenalty,
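
Per-step telemetry in streamText now reports the step model (`stepModel`), which `prepareStep` may have swapped, instead of the outer model, and serializes prompt messages via `stringifyForTelemetry`. Telemetry stays opt-in; sketch (provider assumed):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Hello!',
  // per-step spans then carry ai.model.id / gen_ai.request.model
  // for the model that actually ran the step:
  experimental_telemetry: { isEnabled: true, functionId: 'demo' },
});
```
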
@@ -6881,9 +6884,10 @@ var DefaultStreamTextResult = class {
 startTimestampMs: now2(),
 // get before the call
 doStreamSpan: doStreamSpan2,
-result: await
+result: await stepModel.doStream({
 ...callSettings,
-
+tools: stepTools,
+toolChoice: stepToolChoice,
 responseFormat: output == null ? void 0 : output.responseFormat,
 prompt: promptMessages,
 providerOptions,
@@ -6894,7 +6898,7 @@ var DefaultStreamTextResult = class {
 }
 })
 );
-const
+const streamWithToolResults = runToolsTransformation({
 tools,
 generatorStream: stream2,
 toolCallStreaming,
@@ -6933,10 +6937,10 @@ var DefaultStreamTextResult = class {
 stepText += chunk.text;
 }
 self.addStream(
-
+streamWithToolResults.pipeThrough(
 new TransformStream({
 async transform(chunk, controller) {
-var
+var _a18, _b2, _c2, _d2;
 if (chunk.type === "stream-start") {
 warnings = chunk.warnings;
 return;
@@ -6999,9 +7003,9 @@ var DefaultStreamTextResult = class {
 }
 case "response-metadata": {
 stepResponse = {
-id: (
-timestamp: (
-modelId: (
+id: (_a18 = chunk.id) != null ? _a18 : stepResponse.id,
+timestamp: (_b2 = chunk.timestamp) != null ? _b2 : stepResponse.timestamp,
+modelId: (_c2 = chunk.modelId) != null ? _c2 : stepResponse.modelId
 };
 break;
 }
@@ -7013,7 +7017,7 @@ var DefaultStreamTextResult = class {
 doStreamSpan.addEvent("ai.stream.finish");
 doStreamSpan.setAttributes({
 "ai.response.msToFinish": msToFinish,
-"ai.response.avgOutputTokensPerSecond": 1e3 * ((
+"ai.response.avgOutputTokensPerSecond": 1e3 * ((_d2 = stepUsage.outputTokens) != null ? _d2 : 0) / msToFinish
 });
 break;
 }
@@ -7088,9 +7092,13 @@ var DefaultStreamTextResult = class {
 }
 });
 const combinedUsage = addLanguageModelUsage(usage, stepUsage);
-
-stepToolCalls.length > 0 && // all current tool calls have results:
-stepToolResults.length === stepToolCalls.length
+await stepFinish.promise;
+if (stepToolCalls.length > 0 && // all current tool calls have results:
+stepToolResults.length === stepToolCalls.length && // continue until a stop condition is met:
+!await isStopConditionMet({
+stopConditions,
+steps: recordedSteps
+})) {
 responseMessages.push(
 ...toResponseMessages({
 content: stepContent,
@@ -7138,7 +7146,7 @@ var DefaultStreamTextResult = class {
 });
 }
 get steps() {
-return this.
+return this._steps.promise;
 }
 get finalStep() {
 return this.steps.then((steps) => steps[steps.length - 1]);
@@ -7183,10 +7191,10 @@ var DefaultStreamTextResult = class {
 return this.finalStep.then((step) => step.response);
 }
 get totalUsage() {
-return this.
+return this._totalUsage.promise;
 }
 get finishReason() {
-return this.
+return this._finishReason.promise;
 }
 /**
 Split out a new stream from the original stream.
@@ -7259,8 +7267,8 @@ var DefaultStreamTextResult = class {
 messageMetadata,
 sendReasoning = false,
 sendSources = false,
-
-
+sendStart = true,
+sendFinish = true,
 onError = () => "An error occurred."
 // mask error messages for safety by default
 } = {}) {
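
The `toUIMessageStream` options drop the `experimental_` prefix from the start/finish flags: `sendStart` and `sendFinish` both default to `true`. In a route handler (sketch, provider assumed):

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

export async function POST(req: Request) {
  const { prompt } = await req.json();
  const result = streamText({ model: openai('gpt-4o'), prompt });
  // suppress the "start" part, e.g. when a wrapping stream has
  // already emitted one; both flags default to true:
  return result.toUIMessageStreamResponse({ sendStart: false });
}
```
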
@@ -7306,9 +7314,8 @@ var DefaultStreamTextResult = class {
 case "source": {
 if (sendSources) {
 controller.enqueue({
-type: "source",
-
-id: part.id,
+type: "source-url",
+sourceId: part.id,
 url: part.url,
 title: part.title,
 providerMetadata: part.providerMetadata
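
Source parts are renamed on the wire: `{ type: "source", id }` becomes `{ type: "source-url", sourceId }`, with `url`, `title`, and `providerMetadata` unchanged. A client-side rendering sketch, assuming the UI message parts mirror this stream shape:

```ts
import type { UIMessage } from 'ai';

// filter for the renamed part type; `sourceId` replaces the old `id`:
function sourceLabels(message: UIMessage): string[] {
  return message.parts
    .filter((part) => part.type === 'source-url')
    .map((part) => `${part.title ?? part.url} [${part.sourceId}]`);
}
```
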
@@ -7373,7 +7380,7 @@ var DefaultStreamTextResult = class {
 break;
 }
 case "start": {
-if (
+if (sendStart) {
 const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
 controller.enqueue({
 type: "start",
@@ -7384,7 +7391,7 @@ var DefaultStreamTextResult = class {
 break;
 }
 case "finish": {
-if (
+if (sendFinish) {
 const metadata = messageMetadata == null ? void 0 : messageMetadata({ part });
 controller.enqueue({
 type: "finish",
@@ -7401,38 +7408,12 @@ var DefaultStreamTextResult = class {
 }
 })
 );
-
-return baseStream;
-}
-const state = createStreamingUIMessageState({
-lastMessage,
-newMessageId: messageId != null ? messageId : this.generateId()
-});
-const runUpdateMessageJob = async (job) => {
-await job({ state, write: () => {
-} });
-};
-return processUIMessageStream({
+return handleUIMessageStreamFinish({
 stream: baseStream,
-
-
-
-
-controller.enqueue(chunk);
-},
-flush() {
-const isContinuation2 = state.message.id === (lastMessage == null ? void 0 : lastMessage.id);
-onFinish({
-isContinuation: isContinuation2,
-responseMessage: state.message,
-messages: [
-...isContinuation2 ? originalMessages.slice(0, -1) : originalMessages,
-state.message
-]
-});
-}
-})
-);
+newMessageId: messageId != null ? messageId : this.generateId(),
+originalMessages,
+onFinish
+});
 }
 pipeUIMessageStreamToResponse(response, {
 newMessageId,
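
The hand-rolled `processUIMessageStream`/flush logic is folded into `handleUIMessageStreamFinish`, so `toUIMessageStream` just forwards `originalMessages` and `onFinish`; judging from the removed code, the callback still receives `isContinuation`, `responseMessage`, and the combined `messages` array. A persistence sketch (provider assumed; `convertToCoreMessages` accepting UI messages is an assumption; storage left to the caller):

```ts
import { streamText, convertToCoreMessages, type UIMessage } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

export async function POST(req: Request) {
  const { messages }: { messages: UIMessage[] } = await req.json();
  const result = streamText({
    model: openai('gpt-4o'),
    messages: convertToCoreMessages(messages),
  });
  return result.toUIMessageStreamResponse({
    originalMessages: messages,
    onFinish: ({ messages, responseMessage }) => {
      // `messages` already ends with the completed assistant
      // message; persist it with your own storage layer here.
    },
  });
}
```
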
@@ -7441,8 +7422,8 @@ var DefaultStreamTextResult = class {
 messageMetadata,
 sendReasoning,
 sendSources,
-
-
+sendFinish,
+sendStart,
 onError,
 ...init
 } = {}) {
@@ -7455,8 +7436,8 @@ var DefaultStreamTextResult = class {
 messageMetadata,
 sendReasoning,
 sendSources,
-
-
+sendFinish,
+sendStart,
 onError
 }),
 ...init
@@ -7476,8 +7457,8 @@ var DefaultStreamTextResult = class {
 messageMetadata,
 sendReasoning,
 sendSources,
-
-
+sendFinish,
+sendStart,
 onError,
 ...init
 } = {}) {
@@ -7489,8 +7470,8 @@ var DefaultStreamTextResult = class {
 messageMetadata,
 sendReasoning,
 sendSources,
-
-
+sendFinish,
+sendStart,
 onError
 }),
 ...init
@@ -7733,7 +7714,7 @@ var doWrap = ({
 };
 
 // core/registry/custom-provider.ts
-var
+var import_provider25 = require("@ai-sdk/provider");
 function customProvider({
 languageModels,
 textEmbeddingModels,
@@ -7748,7 +7729,7 @@ function customProvider({
 if (fallbackProvider) {
 return fallbackProvider.languageModel(modelId);
 }
-throw new
+throw new import_provider25.NoSuchModelError({ modelId, modelType: "languageModel" });
 },
 textEmbeddingModel(modelId) {
 if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
@@ -7757,7 +7738,7 @@ function customProvider({
 if (fallbackProvider) {
 return fallbackProvider.textEmbeddingModel(modelId);
 }
-throw new
+throw new import_provider25.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
 },
 imageModel(modelId) {
 if (imageModels != null && modelId in imageModels) {
@@ -7766,19 +7747,19 @@ function customProvider({
 if (fallbackProvider == null ? void 0 : fallbackProvider.imageModel) {
 return fallbackProvider.imageModel(modelId);
 }
-throw new
+throw new import_provider25.NoSuchModelError({ modelId, modelType: "imageModel" });
 }
 };
 }
 var experimental_customProvider = customProvider;
 
 // core/registry/no-such-provider-error.ts
-var
+var import_provider26 = require("@ai-sdk/provider");
 var name16 = "AI_NoSuchProviderError";
 var marker16 = `vercel.ai.error.${name16}`;
 var symbol16 = Symbol.for(marker16);
 var _a16;
-var NoSuchProviderError = class extends
+var NoSuchProviderError = class extends import_provider26.NoSuchModelError {
 constructor({
 modelId,
 modelType,
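
`customProvider` keeps its behavior under the renumbered imports: aliases resolve from the model maps, otherwise the `fallbackProvider` is consulted, and a miss throws `NoSuchModelError`. Sketch (underlying provider assumed):

```ts
import { customProvider } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

const myProvider = customProvider({
  languageModels: {
    fast: openai('gpt-4o-mini'),
    smart: openai('gpt-4o'),
  },
  // unknown ids fall through here; without a fallback,
  // NoSuchModelError is thrown instead:
  fallbackProvider: openai,
});

const model = myProvider.languageModel('fast');
```
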
@@ -7792,13 +7773,13 @@ var NoSuchProviderError = class extends import_provider25.NoSuchModelError {
 this.availableProviders = availableProviders;
 }
 static isInstance(error) {
-return
+return import_provider26.AISDKError.hasMarker(error, marker16);
 }
 };
 _a16 = symbol16;
 
 // core/registry/provider-registry.ts
-var
+var import_provider27 = require("@ai-sdk/provider");
 function createProviderRegistry(providers, {
 separator = ":"
 } = {}) {
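
`createProviderRegistry` takes a configurable `separator` (default `":"`), and `splitId` below throws `NoSuchModelError` when an id does not contain it. Sketch (providers assumed):

```ts
import { createProviderRegistry } from 'ai';
import { openai } from '@ai-sdk/openai';       // assumed provider
import { anthropic } from '@ai-sdk/anthropic'; // assumed provider

const registry = createProviderRegistry(
  { openai, anthropic },
  { separator: ':' } // ":" is the default
);

// ids must be "providerId:modelId"; otherwise NoSuchModelError:
const model = registry.languageModel('openai:gpt-4o');
```
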
@@ -7837,7 +7818,7 @@ var DefaultProviderRegistry = class {
 splitId(id, modelType) {
 const index = id.indexOf(this.separator);
 if (index === -1) {
-throw new
+throw new import_provider27.NoSuchModelError({
 modelId: id,
 modelType,
 message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId${this.separator}modelId")`
@@ -7850,7 +7831,7 @@ var DefaultProviderRegistry = class {
 const [providerId, modelId] = this.splitId(id, "languageModel");
 const model = (_b = (_a17 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a17, modelId);
 if (model == null) {
-throw new
+throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "languageModel" });
 }
 return model;
 }
@@ -7860,7 +7841,7 @@ var DefaultProviderRegistry = class {
 const provider = this.getProvider(providerId);
 const model = (_a17 = provider.textEmbeddingModel) == null ? void 0 : _a17.call(provider, modelId);
 if (model == null) {
-throw new
+throw new import_provider27.NoSuchModelError({
 modelId: id,
 modelType: "textEmbeddingModel"
 });
@@ -7873,14 +7854,14 @@ var DefaultProviderRegistry = class {
 const provider = this.getProvider(providerId);
 const model = (_a17 = provider.imageModel) == null ? void 0 : _a17.call(provider, modelId);
 if (model == null) {
-throw new
+throw new import_provider27.NoSuchModelError({ modelId: id, modelType: "imageModel" });
 }
 return model;
 }
 };
 
 // core/tool/mcp/mcp-client.ts
-var
+var import_provider_utils24 = require("@ai-sdk/provider-utils");
 
 // core/tool/tool.ts
 function tool(tool2) {
@@ -7888,7 +7869,7 @@ function tool(tool2) {
 }
 
 // core/tool/mcp/mcp-sse-transport.ts
-var
+var import_provider_utils23 = require("@ai-sdk/provider-utils");
 
 // core/tool/mcp/json-rpc-message.ts
 var import_zod10 = require("zod");
@@ -8059,7 +8040,7 @@ var SseMCPTransport = class {
 (_b = this.onerror) == null ? void 0 : _b.call(this, error);
 return reject(error);
 }
-const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0,
+const stream = response.body.pipeThrough(new TextDecoderStream()).pipeThrough((0, import_provider_utils23.createEventSourceParserStream)());
 const reader = stream.getReader();
 const processEvents = async () => {
 var _a18, _b2, _c2;
@@ -8383,7 +8364,7 @@ var MCPClient = class {
 if (schemas !== "automatic" && !(name17 in schemas)) {
 continue;
 }
-const parameters = schemas === "automatic" ? (0,
+const parameters = schemas === "automatic" ? (0, import_provider_utils24.jsonSchema)({
 ...inputSchema,
 properties: (_a17 = inputSchema.properties) != null ? _a17 : {},
 additionalProperties: false
@@ -8447,8 +8428,8 @@ var MCPClient = class {
 };
 
 // src/error/no-transcript-generated-error.ts
-var
-var NoTranscriptGeneratedError = class extends
+var import_provider28 = require("@ai-sdk/provider");
+var NoTranscriptGeneratedError = class extends import_provider28.AISDKError {
 constructor(options) {
 super({
 name: "AI_NoTranscriptGeneratedError",
@@ -8513,10 +8494,11 @@ var DefaultTranscriptionResult = class {
 0 && (module.exports = {
 AISDKError,
 APICallError,
-
+AbstractChat,
 DefaultChatTransport,
 DownloadError,
 EmptyResponseBodyError,
+GLOBAL_DEFAULT_PROVIDER,
 InvalidArgumentError,
 InvalidDataContentError,
 InvalidMessageRoleError,
@@ -8538,14 +8520,14 @@ var DefaultTranscriptionResult = class {
 NoSuchToolError,
 Output,
 RetryError,
+SerialJobExecutor,
+TextStreamChatTransport,
 ToolCallRepairError,
 ToolExecutionError,
 TypeValidationError,
 UnsupportedFunctionalityError,
-appendClientMessage,
 asSchema,
 assistantModelMessageSchema,
-callChatApi,
 callCompletionApi,
 convertFileListToFileUIParts,
 convertToCoreMessages,
@@ -8562,7 +8544,6 @@ var DefaultTranscriptionResult = class {
 createUIMessageStream,
 createUIMessageStreamResponse,
 customProvider,
-defaultChatStore,
 defaultSettingsMiddleware,
 embed,
 embedMany,
@@ -8572,7 +8553,6 @@ var DefaultTranscriptionResult = class {
 experimental_generateImage,
 experimental_generateSpeech,
 experimental_transcribe,
-extractMaxToolInvocationStep,
 extractReasoningMiddleware,
 generateId,
 generateObject,
@@ -8580,24 +8560,21 @@ var DefaultTranscriptionResult = class {
 getTextFromDataUrl,
 getToolInvocations,
 hasToolCall,
-isAssistantMessageWithCompletedToolCalls,
 isDeepEqualData,
 jsonSchema,
-maxSteps,
 modelMessageSchema,
 parsePartialJson,
 pipeTextStreamToResponse,
 pipeUIMessageStreamToResponse,
-shouldResubmitMessages,
 simulateReadableStream,
 simulateStreamingMiddleware,
 smoothStream,
+stepCountIs,
 streamObject,
 streamText,
 systemModelMessageSchema,
 tool,
 toolModelMessageSchema,
-updateToolCallResult,
 userModelMessageSchema,
 wrapLanguageModel
 });
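
The refreshed export map summarizes this release's public-API churn: `AbstractChat`, `GLOBAL_DEFAULT_PROVIDER`, `SerialJobExecutor`, `TextStreamChatTransport`, and `stepCountIs` are new, while the chat-store-era helpers (`appendClientMessage`, `callChatApi`, `defaultChatStore`, `extractMaxToolInvocationStep`, `isAssistantMessageWithCompletedToolCalls`, `maxSteps`, `shouldResubmitMessages`, `updateToolCallResult`) are gone. Migration-oriented import sketch:

```ts
// new in 5.0.0-alpha.10 (per the export map above):
import {
  AbstractChat,
  GLOBAL_DEFAULT_PROVIDER,
  SerialJobExecutor,
  TextStreamChatTransport,
  stepCountIs,
} from 'ai';

// removed: appendClientMessage, callChatApi, defaultChatStore,
// extractMaxToolInvocationStep, isAssistantMessageWithCompletedToolCalls,
// maxSteps, shouldResubmitMessages, updateToolCallResult
```
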