ai 4.0.5 → 4.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/index.d.mts +42 -8
- package/dist/index.d.ts +42 -8
- package/dist/index.js +272 -208
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +245 -182
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/rsc/dist/rsc-server.mjs +138 -129
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.js CHANGED
@@ -41,6 +41,7 @@ __export(streams_exports, {
   NoSuchModelError: () => import_provider13.NoSuchModelError,
   NoSuchProviderError: () => NoSuchProviderError,
   NoSuchToolError: () => NoSuchToolError,
+  Output: () => output_exports,
   RetryError: () => RetryError,
   StreamData: () => StreamData,
   TypeValidationError: () => import_provider13.TypeValidationError,
@@ -53,29 +54,55 @@ __export(streams_exports, {
   experimental_createProviderRegistry: () => experimental_createProviderRegistry,
   experimental_customProvider: () => experimental_customProvider,
   experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
-  formatAssistantStreamPart: () =>
-  formatDataStreamPart: () =>
-  generateId: () =>
+  formatAssistantStreamPart: () => import_ui_utils11.formatAssistantStreamPart,
+  formatDataStreamPart: () => import_ui_utils11.formatDataStreamPart,
+  generateId: () => import_provider_utils12.generateId,
   generateObject: () => generateObject,
   generateText: () => generateText,
-  jsonSchema: () =>
-  parseAssistantStreamPart: () =>
-  parseDataStreamPart: () =>
-  processDataStream: () =>
-  processTextStream: () =>
+  jsonSchema: () => import_ui_utils8.jsonSchema,
+  parseAssistantStreamPart: () => import_ui_utils11.parseAssistantStreamPart,
+  parseDataStreamPart: () => import_ui_utils11.parseDataStreamPart,
+  processDataStream: () => import_ui_utils11.processDataStream,
+  processTextStream: () => import_ui_utils11.processTextStream,
   streamObject: () => streamObject,
   streamText: () => streamText,
   tool: () => tool
 });
 module.exports = __toCommonJS(streams_exports);
-var
-var
+var import_ui_utils11 = require("@ai-sdk/ui-utils");
+var import_provider_utils12 = require("@ai-sdk/provider-utils");
 
 // core/index.ts
-var
+var import_ui_utils8 = require("@ai-sdk/ui-utils");
+
+// errors/invalid-argument-error.ts
+var import_provider = require("@ai-sdk/provider");
+var name = "AI_InvalidArgumentError";
+var marker = `vercel.ai.error.${name}`;
+var symbol = Symbol.for(marker);
+var _a;
+var InvalidArgumentError = class extends import_provider.AISDKError {
+  constructor({
+    parameter,
+    value,
+    message
+  }) {
+    super({
+      name,
+      message: `Invalid argument for parameter ${parameter}: ${message}`
+    });
+    this[_a] = true;
+    this.parameter = parameter;
+    this.value = value;
+  }
+  static isInstance(error) {
+    return import_provider.AISDKError.hasMarker(error, marker);
+  }
+};
+_a = symbol;
 
 // util/retry-with-exponential-backoff.ts
-var
+var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils = require("@ai-sdk/provider-utils");
 
 // util/delay.ts
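Note on the hunk above: `InvalidArgumentError` is hoisted to the top of the bundle (its old definition further down is removed in a later hunk) so that the new `prepareRetries` helper can reference it. Identity checks use a `Symbol.for` marker instead of the prototype chain, which keeps `isInstance` working even when two copies of the bundle are loaded. A minimal sketch of the intended check, assuming `InvalidArgumentError` is re-exported from the package root as the export map suggests; the model wiring and the invalid `maxRetries` value are made-up examples:

    const { generateText, InvalidArgumentError } = require("ai");

    async function demo(model) {
      try {
        // -1 fails the validation in prepareRetries (added in a later hunk).
        await generateText({ model, prompt: "Hi", maxRetries: -1 });
      } catch (error) {
        // isInstance calls AISDKError.hasMarker with the Symbol.for marker,
        // so it matches across duplicated bundle copies where instanceof fails.
        if (InvalidArgumentError.isInstance(error)) {
          console.error(error.parameter, error.value); // "maxRetries" -1
        }
      }
    }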
@@ -84,28 +111,28 @@ async function delay(delayInMs) {
 }
 
 // util/retry-error.ts
-var
-var
-var
-var
-var
-var RetryError = class extends
+var import_provider2 = require("@ai-sdk/provider");
+var name2 = "AI_RetryError";
+var marker2 = `vercel.ai.error.${name2}`;
+var symbol2 = Symbol.for(marker2);
+var _a2;
+var RetryError = class extends import_provider2.AISDKError {
   constructor({
     message,
     reason,
     errors
   }) {
-    super({ name, message });
-    this[
+    super({ name: name2, message });
+    this[_a2] = true;
     this.reason = reason;
     this.errors = errors;
     this.lastError = errors[errors.length - 1];
   }
   static isInstance(error) {
-    return
+    return import_provider2.AISDKError.hasMarker(error, marker2);
   }
 };
-
+_a2 = symbol2;
 
 // util/retry-with-exponential-backoff.ts
 var retryWithExponentialBackoff = ({
@@ -141,7 +168,7 @@ async function _retryWithExponentialBackoff(f, {
       errors: newErrors
     });
   }
-  if (error instanceof Error &&
+  if (error instanceof Error && import_provider3.APICallError.isInstance(error) && error.isRetryable === true && tryNumber <= maxRetries) {
     await delay(delayInMs);
     return _retryWithExponentialBackoff(
       f,
@@ -160,6 +187,33 @@ async function _retryWithExponentialBackoff(f, {
   }
 }
 
+// core/prompt/prepare-retries.ts
+function prepareRetries({
+  maxRetries
+}) {
+  if (maxRetries != null) {
+    if (!Number.isInteger(maxRetries)) {
+      throw new InvalidArgumentError({
+        parameter: "maxRetries",
+        value: maxRetries,
+        message: "maxRetries must be an integer"
+      });
+    }
+    if (maxRetries < 0) {
+      throw new InvalidArgumentError({
+        parameter: "maxRetries",
+        value: maxRetries,
+        message: "maxRetries must be >= 0"
+      });
+    }
+  }
+  const maxRetriesResult = maxRetries != null ? maxRetries : 2;
+  return {
+    maxRetries: maxRetriesResult,
+    retry: retryWithExponentialBackoff({ maxRetries: maxRetriesResult })
+  };
+}
+
 // core/telemetry/assemble-operation-name.ts
 function assembleOperationName({
   operationId,
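The `prepareRetries` helper added above consolidates retry handling: the `maxRetries` validation that `prepareCallSettings` used to perform (removed in a later hunk) and the per-call-site `retryWithExponentialBackoff({ maxRetries })` setup now live in one place, and every entry point (`embed`, `embedMany`, `generateObject`, `streamObject`, `generateText`, `streamText`) destructures `{ maxRetries, retry }` from it, as the following hunks show. The helper is internal, so this sketch only illustrates its contract using the bundled names:

    // Illustrative only: prepareRetries is not part of the public API.
    async function example(doRequest) {
      const { maxRetries, retry } = prepareRetries({ maxRetries: void 0 });
      // maxRetries === 2 (the default); -1 or 1.5 would instead throw the
      // InvalidArgumentError hoisted earlier in the bundle.
      console.log(maxRetries);
      // retry re-invokes doRequest with exponential backoff, but only for
      // errors where APICallError.isInstance(error) && error.isRetryable.
      return retry(doRequest);
    }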
@@ -357,11 +411,12 @@ function selectTelemetryAttributes({
 async function embed({
   model,
   value,
-  maxRetries,
+  maxRetries: maxRetriesArg,
   abortSignal,
   headers,
   experimental_telemetry: telemetry
 }) {
+  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
@@ -381,7 +436,6 @@ async function embed({
     }),
     tracer,
     fn: async (span) => {
-      const retry = retryWithExponentialBackoff({ maxRetries });
       const { embedding, usage, rawResponse } = await retry(
         () => (
           // nested spans to align with the embedMany telemetry data:
@@ -469,11 +523,12 @@ function splitArray(array, chunkSize) {
 async function embedMany({
   model,
   values,
-  maxRetries,
+  maxRetries: maxRetriesArg,
   abortSignal,
   headers,
   experimental_telemetry: telemetry
 }) {
+  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
@@ -496,7 +551,6 @@ async function embedMany({
     }),
     tracer,
     fn: async (span) => {
-      const retry = retryWithExponentialBackoff({ maxRetries });
       const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
       if (maxEmbeddingsPerCall == null) {
         const { embeddings: embeddings2, usage } = await retry(() => {
@@ -634,12 +688,12 @@ var DefaultEmbedManyResult = class {
 var import_provider_utils5 = require("@ai-sdk/provider-utils");
 
 // util/download-error.ts
-var
-var
-var
-var
-var
-var DownloadError = class extends
+var import_provider4 = require("@ai-sdk/provider");
+var name3 = "AI_DownloadError";
+var marker3 = `vercel.ai.error.${name3}`;
+var symbol3 = Symbol.for(marker3);
+var _a3;
+var DownloadError = class extends import_provider4.AISDKError {
   constructor({
     url,
     statusCode,
@@ -647,17 +701,17 @@ var DownloadError = class extends import_provider3.AISDKError {
     cause,
     message = cause == null ? `Failed to download ${url}: ${statusCode} ${statusText}` : `Failed to download ${url}: ${cause}`
   }) {
-    super({ name:
-    this[
+    super({ name: name3, message, cause });
+    this[_a3] = true;
     this.url = url;
     this.statusCode = statusCode;
     this.statusText = statusText;
   }
   static isInstance(error) {
-    return
+    return import_provider4.AISDKError.hasMarker(error, marker3);
   }
 };
-
+_a3 = symbol3;
 
 // util/download.ts
 async function download({
@@ -707,26 +761,26 @@ function detectImageMimeType(image) {
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
 
 // core/prompt/invalid-data-content-error.ts
-var
-var
-var
-var
-var
-var InvalidDataContentError = class extends
+var import_provider5 = require("@ai-sdk/provider");
+var name4 = "AI_InvalidDataContentError";
+var marker4 = `vercel.ai.error.${name4}`;
+var symbol4 = Symbol.for(marker4);
+var _a4;
+var InvalidDataContentError = class extends import_provider5.AISDKError {
   constructor({
     content,
     cause,
     message = `Invalid data content. Expected a base64 string, Uint8Array, ArrayBuffer, or Buffer, but got ${typeof content}.`
   }) {
-    super({ name:
-    this[
+    super({ name: name4, message, cause });
+    this[_a4] = true;
     this.content = content;
   }
   static isInstance(error) {
-    return
+    return import_provider5.AISDKError.hasMarker(error, marker4);
   }
 };
-
+_a4 = symbol4;
 
 // core/prompt/data-content.ts
 var import_zod = require("zod");
@@ -781,25 +835,25 @@ function convertUint8ArrayToText(uint8Array) {
 }
 
 // core/prompt/invalid-message-role-error.ts
-var
-var
-var
-var
-var
-var InvalidMessageRoleError = class extends
+var import_provider6 = require("@ai-sdk/provider");
+var name5 = "AI_InvalidMessageRoleError";
+var marker5 = `vercel.ai.error.${name5}`;
+var symbol5 = Symbol.for(marker5);
+var _a5;
+var InvalidMessageRoleError = class extends import_provider6.AISDKError {
   constructor({
     role,
     message = `Invalid message role: '${role}'. Must be one of: "system", "user", "assistant", "tool".`
   }) {
-    super({ name:
-    this[
+    super({ name: name5, message });
+    this[_a5] = true;
     this.role = role;
   }
   static isInstance(error) {
-    return
+    return import_provider6.AISDKError.hasMarker(error, marker5);
   }
 };
-
+_a5 = symbol5;
 
 // core/prompt/split-data-url.ts
 function splitDataUrl(dataUrl) {
@@ -1002,32 +1056,6 @@ function convertPartToLanguageModelPart(part, downloadedAssets) {
   }
 }
 
-// errors/invalid-argument-error.ts
-var import_provider6 = require("@ai-sdk/provider");
-var name5 = "AI_InvalidArgumentError";
-var marker5 = `vercel.ai.error.${name5}`;
-var symbol5 = Symbol.for(marker5);
-var _a5;
-var InvalidArgumentError = class extends import_provider6.AISDKError {
-  constructor({
-    parameter,
-    value,
-    message
-  }) {
-    super({
-      name: name5,
-      message: `Invalid argument for parameter ${parameter}: ${message}`
-    });
-    this[_a5] = true;
-    this.parameter = parameter;
-    this.value = value;
-  }
-  static isInstance(error) {
-    return import_provider6.AISDKError.hasMarker(error, marker5);
-  }
-};
-_a5 = symbol5;
-
 // core/prompt/prepare-call-settings.ts
 function prepareCallSettings({
   maxTokens,
@@ -1037,8 +1065,7 @@ function prepareCallSettings({
   presencePenalty,
   frequencyPenalty,
   stopSequences,
-  seed
-  maxRetries
+  seed
 }) {
   if (maxTokens != null) {
     if (!Number.isInteger(maxTokens)) {
@@ -1110,22 +1137,6 @@ function prepareCallSettings({
       });
     }
   }
-  if (maxRetries != null) {
-    if (!Number.isInteger(maxRetries)) {
-      throw new InvalidArgumentError({
-        parameter: "maxRetries",
-        value: maxRetries,
-        message: "maxRetries must be an integer"
-      });
-    }
-    if (maxRetries < 0) {
-      throw new InvalidArgumentError({
-        parameter: "maxRetries",
-        value: maxRetries,
-        message: "maxRetries must be >= 0"
-      });
-    }
-  }
   return {
     maxTokens,
     temperature: temperature != null ? temperature : 0,
@@ -1134,8 +1145,7 @@ function prepareCallSettings({
     presencePenalty,
     frequencyPenalty,
     stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
-    seed
-    maxRetries: maxRetries != null ? maxRetries : 2
+    seed
   };
 }
 
@@ -1983,7 +1993,7 @@ async function generateObject({
   system,
   prompt,
   messages,
-  maxRetries,
+  maxRetries: maxRetriesArg,
   abortSignal,
   headers,
   experimental_telemetry: telemetry,
@@ -2002,6 +2012,7 @@ async function generateObject({
     schemaDescription,
     enumValues
   });
+  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
   const outputStrategy = getOutputStrategy({
     output,
     schema: inputSchema,
@@ -2041,7 +2052,6 @@ async function generateObject({
     tracer,
     fn: async (span) => {
       var _a11, _b;
-      const retry = retryWithExponentialBackoff({ maxRetries });
       if (mode === "auto" || mode == null) {
         mode = model.defaultObjectGenerationMode;
       }
@@ -2378,59 +2388,6 @@ var DelayedPromise = class {
   }
 };
 
-// core/util/now.ts
-function now() {
-  var _a11, _b;
-  return (_b = (_a11 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a11.now()) != null ? _b : Date.now();
-}
-
-// core/util/prepare-outgoing-http-headers.ts
-function prepareOutgoingHttpHeaders(headers, {
-  contentType,
-  dataStreamVersion
-}) {
-  const outgoingHeaders = {};
-  if (headers != null) {
-    for (const [key, value] of Object.entries(headers)) {
-      outgoingHeaders[key] = value;
-    }
-  }
-  if (outgoingHeaders["Content-Type"] == null) {
-    outgoingHeaders["Content-Type"] = contentType;
-  }
-  if (dataStreamVersion !== void 0) {
-    outgoingHeaders["X-Vercel-AI-Data-Stream"] = dataStreamVersion;
-  }
-  return outgoingHeaders;
-}
-
-// core/util/write-to-server-response.ts
-function writeToServerResponse({
-  response,
-  status,
-  statusText,
-  headers,
-  stream
-}) {
-  response.writeHead(status != null ? status : 200, statusText, headers);
-  const reader = stream.getReader();
-  const read = async () => {
-    try {
-      while (true) {
-        const { done, value } = await reader.read();
-        if (done)
-          break;
-        response.write(value);
-      }
-    } catch (error) {
-      throw error;
-    } finally {
-      response.end();
-    }
-  };
-  read();
-}
-
 // util/create-resolvable-promise.ts
 function createResolvablePromise() {
   let resolve;
@@ -2513,6 +2470,59 @@ function createStitchableStream() {
   };
 }
 
+// core/util/now.ts
+function now() {
+  var _a11, _b;
+  return (_b = (_a11 = globalThis == null ? void 0 : globalThis.performance) == null ? void 0 : _a11.now()) != null ? _b : Date.now();
+}
+
+// core/util/prepare-outgoing-http-headers.ts
+function prepareOutgoingHttpHeaders(headers, {
+  contentType,
+  dataStreamVersion
+}) {
+  const outgoingHeaders = {};
+  if (headers != null) {
+    for (const [key, value] of Object.entries(headers)) {
+      outgoingHeaders[key] = value;
+    }
+  }
+  if (outgoingHeaders["Content-Type"] == null) {
+    outgoingHeaders["Content-Type"] = contentType;
+  }
+  if (dataStreamVersion !== void 0) {
+    outgoingHeaders["X-Vercel-AI-Data-Stream"] = dataStreamVersion;
+  }
+  return outgoingHeaders;
+}
+
+// core/util/write-to-server-response.ts
+function writeToServerResponse({
+  response,
+  status,
+  statusText,
+  headers,
+  stream
+}) {
+  response.writeHead(status != null ? status : 200, statusText, headers);
+  const reader = stream.getReader();
+  const read = async () => {
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done)
+          break;
+        response.write(value);
+      }
+    } catch (error) {
+      throw error;
+    } finally {
+      response.end();
+    }
+  };
+  read();
+}
+
 // core/generate-object/stream-object.ts
 var originalGenerateId2 = (0, import_provider_utils6.createIdGenerator)({ prefix: "aiobj", size: 24 });
 function streamObject({
@@ -2576,7 +2586,7 @@ var DefaultStreamObjectResult = class {
     headers,
     telemetry,
     settings,
-    maxRetries,
+    maxRetries: maxRetriesArg,
     abortSignal,
     outputStrategy,
     system,
@@ -2598,6 +2608,9 @@ var DefaultStreamObjectResult = class {
     this.requestPromise = new DelayedPromise();
     this.responsePromise = new DelayedPromise();
     this.stitchableStream = createStitchableStream();
+    const { maxRetries, retry } = prepareRetries({
+      maxRetries: maxRetriesArg
+    });
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
       model,
       telemetry,
@@ -2605,7 +2618,6 @@ var DefaultStreamObjectResult = class {
       settings: { ...settings, maxRetries }
     });
     const tracer = getTracer(telemetry);
-    const retry = retryWithExponentialBackoff({ maxRetries });
     const self = this;
     recordSpan({
       name: "ai.streamObject",
@@ -2781,7 +2793,7 @@ var DefaultStreamObjectResult = class {
         let usage;
         let finishReason;
         let providerMetadata;
-        let
+        let object2;
         let error;
         let accumulatedText = "";
         let textDelta = "";
@@ -2865,8 +2877,8 @@ var DefaultStreamObjectResult = class {
             });
             const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
             if (validationResult.success) {
-
-              self.objectPromise.resolve(
+              object2 = validationResult.value;
+              self.objectPromise.resolve(object2);
             } else {
               error = validationResult.error;
               self.objectPromise.reject(error);
@@ -2893,7 +2905,7 @@ var DefaultStreamObjectResult = class {
             attributes: {
               "ai.response.finishReason": finishReason,
               "ai.response.object": {
-                output: () => JSON.stringify(
+                output: () => JSON.stringify(object2)
               },
               "ai.response.id": response.id,
               "ai.response.model": response.modelId,
@@ -2917,14 +2929,14 @@ var DefaultStreamObjectResult = class {
               "ai.usage.promptTokens": finalUsage.promptTokens,
               "ai.usage.completionTokens": finalUsage.completionTokens,
               "ai.response.object": {
-                output: () => JSON.stringify(
+                output: () => JSON.stringify(object2)
               }
             }
           })
         );
         await (onFinish == null ? void 0 : onFinish({
           usage: finalUsage,
-          object,
+          object: object2,
           error,
           response: {
             ...response,
@@ -3109,8 +3121,8 @@ _a9 = symbol9;
 var import_ui_utils3 = require("@ai-sdk/ui-utils");
 
 // core/util/is-non-empty-object.ts
-function isNonEmptyObject(
-  return
+function isNonEmptyObject(object2) {
+  return object2 != null && Object.keys(object2).length > 0;
 }
 
 // core/prompt/prepare-tools-and-tool-choice.ts
@@ -3159,15 +3171,15 @@ function prepareToolsAndToolChoice({
 
 // core/util/split-on-last-whitespace.ts
 var lastWhitespaceRegexp = /^([\s\S]*?)(\s+)(\S*)$/;
-function splitOnLastWhitespace(
-  const match =
+function splitOnLastWhitespace(text2) {
+  const match = text2.match(lastWhitespaceRegexp);
   return match ? { prefix: match[1], whitespace: match[2], suffix: match[3] } : void 0;
 }
 
 // core/util/remove-text-after-last-whitespace.ts
-function removeTextAfterLastWhitespace(
-  const match = splitOnLastWhitespace(
-  return match ? match.prefix + match.whitespace :
+function removeTextAfterLastWhitespace(text2) {
+  const match = splitOnLastWhitespace(text2);
+  return match ? match.prefix + match.whitespace : text2;
 }
 
 // core/generate-text/parse-tool-call.ts
@@ -3207,7 +3219,7 @@ function parseToolCall({
 
 // core/generate-text/to-response-messages.ts
 function toResponseMessages({
-  text = "",
+  text: text2 = "",
   tools,
   toolCalls,
   toolResults
@@ -3215,7 +3227,7 @@ function toResponseMessages({
   const responseMessages = [];
   responseMessages.push({
     role: "assistant",
-    content: [{ type: "text", text }, ...toolCalls]
+    content: [{ type: "text", text: text2 }, ...toolCalls]
   });
   if (toolResults.length > 0) {
     responseMessages.push({
@@ -3251,10 +3263,11 @@ async function generateText({
   system,
   prompt,
   messages,
-  maxRetries,
+  maxRetries: maxRetriesArg,
   abortSignal,
   headers,
   maxSteps = 1,
+  experimental_output: output,
   experimental_continueSteps: continueSteps = false,
   experimental_telemetry: telemetry,
   experimental_providerMetadata: providerMetadata,
@@ -3266,6 +3279,7 @@ async function generateText({
   onStepFinish,
   ...settings
 }) {
+  var _a11;
   if (maxSteps < 1) {
     throw new InvalidArgumentError({
       parameter: "maxSteps",
@@ -3273,6 +3287,7 @@ async function generateText({
       message: "maxSteps must be at least 1"
     });
   }
+  const { maxRetries, retry } = prepareRetries({ maxRetries: maxRetriesArg });
   const baseTelemetryAttributes = getBaseTelemetryAttributes({
     model,
     telemetry,
@@ -3280,7 +3295,11 @@ async function generateText({
     settings: { ...settings, maxRetries }
   });
   const initialPrompt = standardizePrompt({
-    prompt: {
+    prompt: {
+      system: (_a11 = output == null ? void 0 : output.injectIntoSystemPrompt({ system, model })) != null ? _a11 : system,
+      prompt,
+      messages
+    },
     tools
   });
   const tracer = getTracer(telemetry);
@@ -3303,8 +3322,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var
-      const retry = retryWithExponentialBackoff({ maxRetries });
+      var _a12, _b, _c, _d, _e, _f;
      const mode = {
        type: "regular",
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -3315,7 +3333,7 @@ async function generateText({
      let currentToolResults = [];
      let stepCount = 0;
      const responseMessages = [];
-      let
+      let text2 = "";
      const steps = [];
      const usage = {
        completionTokens: 0,
@@ -3356,8 +3374,8 @@ async function generateText({
            "ai.prompt.tools": {
              // convert the language model level tools:
              input: () => {
-                var
-                return (
+                var _a13;
+                return (_a13 = mode.tools) == null ? void 0 : _a13.map((tool2) => JSON.stringify(tool2));
              }
            },
            "ai.prompt.toolChoice": {
@@ -3377,18 +3395,19 @@ async function generateText({
          }),
          tracer,
          fn: async (span2) => {
-            var
+            var _a13, _b2, _c2, _d2, _e2, _f2;
            const result = await model.doGenerate({
              mode,
              ...callSettings,
              inputFormat: promptFormat,
+              responseFormat: output == null ? void 0 : output.responseFormat({ model }),
              prompt: promptMessages,
              providerMetadata,
              abortSignal,
              headers
            });
            const responseData = {
-              id: (_b2 = (
+              id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : model.modelId
            };
@@ -3421,7 +3440,7 @@ async function generateText({
          }
        })
      );
-      currentToolCalls = ((
+      currentToolCalls = ((_a12 = currentModelResponse.toolCalls) != null ? _a12 : []).map(
        (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
      );
      currentToolResults = tools == null ? [] : await executeTools({
@@ -3453,9 +3472,9 @@ async function generateText({
      }
      const originalText = (_b = currentModelResponse.text) != null ? _b : "";
      const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
-
+      text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
      const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
-
+      text2 = nextStepType === "continue" || stepType === "continue" ? text2 + stepText : stepText;
      if (stepType === "continue") {
        const lastMessage = responseMessages[responseMessages.length - 1];
        if (typeof lastMessage.content === "string") {
@@ -3469,7 +3488,7 @@ async function generateText({
      } else {
        responseMessages.push(
          ...toResponseMessages({
-            text,
+            text: text2,
            tools: tools != null ? tools : {},
            toolCalls: currentToolCalls,
            toolResults: currentToolResults
@@ -3516,7 +3535,8 @@ async function generateText({
        })
      );
      return new DefaultGenerateTextResult({
-        text,
+        text: text2,
+        output: output == null ? void 0 : output.parseOutput({ text: text2 }),
        toolCalls: currentToolCalls,
        toolResults: currentToolResults,
        finishReason: currentModelResponse.finishReason,
@@ -3612,6 +3632,7 @@ var DefaultGenerateTextResult = class {
     this.steps = options.steps;
     this.experimental_providerMetadata = options.providerMetadata;
     this.logprobs = options.logprobs;
+    this.experimental_output = options.output;
   }
 };
 
@@ -3963,7 +3984,7 @@ var DefaultStreamTextResult = class {
     telemetry,
     headers,
     settings,
-    maxRetries,
+    maxRetries: maxRetriesArg,
     abortSignal,
     system,
     prompt,
@@ -4000,6 +4021,9 @@ var DefaultStreamTextResult = class {
         message: "maxSteps must be at least 1"
       });
     }
+    const { maxRetries, retry } = prepareRetries({
+      maxRetries: maxRetriesArg
+    });
     const tracer = getTracer(telemetry);
     const baseTelemetryAttributes = getBaseTelemetryAttributes({
       model,
@@ -4029,7 +4053,6 @@ var DefaultStreamTextResult = class {
       tracer,
       endWhenDone: false,
       fn: async (rootSpan) => {
-        const retry = retryWithExponentialBackoff({ maxRetries });
         const stepResults = [];
         async function streamStep({
           currentStep,
@@ -4701,6 +4724,46 @@ var DefaultStreamTextResult = class {
   }
 };
 
+// core/generate-text/output.ts
+var output_exports = {};
+__export(output_exports, {
+  object: () => object,
+  text: () => text
+});
+var import_provider_utils10 = require("@ai-sdk/provider-utils");
+var import_ui_utils7 = require("@ai-sdk/ui-utils");
+var text = () => ({
+  type: "text",
+  responseFormat: () => ({ type: "text" }),
+  injectIntoSystemPrompt({ system }) {
+    return system;
+  },
+  parseOutput({ text: text2 }) {
+    return text2;
+  }
+});
+var object = ({
+  schema: inputSchema
+}) => {
+  const schema = (0, import_ui_utils7.asSchema)(inputSchema);
+  return {
+    type: "object",
+    responseFormat: ({ model }) => ({
+      type: "json",
+      schema: model.supportsStructuredOutputs ? schema.jsonSchema : void 0
+    }),
+    injectIntoSystemPrompt({ system, model }) {
+      return model.supportsStructuredOutputs ? system : injectJsonInstruction({
+        prompt: system,
+        schema: schema.jsonSchema
+      });
+    },
+    parseOutput({ text: text2 }) {
+      return (0, import_provider_utils10.parseJSON)({ text: text2, schema });
+    }
+  };
+};
+
 // core/middleware/wrap-language-model.ts
 var experimental_wrapLanguageModel = ({
   model,
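The `output_exports` block above is what the new `Output` export at the top of the bundle points to, and it backs the `experimental_output` option threaded through `generateText` in the earlier hunks. A usage sketch; the zod schema, prompt, and `model` wiring are illustrative assumptions, not taken from this diff:

    const { generateText, Output } = require("ai");
    const { z } = require("zod");

    async function getWeather(model) {
      const result = await generateText({
        model, // any language model instance; assumed configured elsewhere
        // Output.object sends responseFormat { type: "json", schema } to
        // models with supportsStructuredOutputs, and otherwise injects JSON
        // instructions into the system prompt (injectIntoSystemPrompt).
        experimental_output: Output.object({
          schema: z.object({ city: z.string(), temperature: z.number() })
        }),
        prompt: "What is the weather in Berlin?"
      });
      // parseOutput has validated the generated text against the schema:
      return result.experimental_output;
    }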
@@ -4883,7 +4946,7 @@ function magnitude(vector) {
 }
 
 // streams/assistant-response.ts
-var
+var import_ui_utils9 = require("@ai-sdk/ui-utils");
 function AssistantResponse({ threadId, messageId }, process2) {
   const stream = new ReadableStream({
     async start(controller) {
@@ -4892,20 +4955,20 @@ function AssistantResponse({ threadId, messageId }, process2) {
       const sendMessage = (message) => {
         controller.enqueue(
           textEncoder.encode(
-            (0,
+            (0, import_ui_utils9.formatAssistantStreamPart)("assistant_message", message)
           )
         );
       };
       const sendDataMessage = (message) => {
         controller.enqueue(
           textEncoder.encode(
-            (0,
+            (0, import_ui_utils9.formatAssistantStreamPart)("data_message", message)
           )
         );
       };
       const sendError = (errorMessage) => {
         controller.enqueue(
-          textEncoder.encode((0,
+          textEncoder.encode((0, import_ui_utils9.formatAssistantStreamPart)("error", errorMessage))
         );
       };
       const forwardStream = async (stream2) => {
@@ -4916,7 +4979,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
             case "thread.message.created": {
               controller.enqueue(
                 textEncoder.encode(
-                  (0,
+                  (0, import_ui_utils9.formatAssistantStreamPart)("assistant_message", {
                     id: value.data.id,
                     role: "assistant",
                     content: [{ type: "text", text: { value: "" } }]
@@ -4930,7 +4993,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
               if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
                 controller.enqueue(
                   textEncoder.encode(
-                    (0,
+                    (0, import_ui_utils9.formatAssistantStreamPart)("text", content.text.value)
                   )
                 );
               }
@@ -4947,7 +5010,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
       };
       controller.enqueue(
         textEncoder.encode(
-          (0,
+          (0, import_ui_utils9.formatAssistantStreamPart)("assistant_control_data", {
            threadId,
            messageId
          })
@@ -5015,7 +5078,7 @@ function createCallbacksTransformer(callbacks = {}) {
 }
 
 // streams/stream-data.ts
-var
+var import_ui_utils10 = require("@ai-sdk/ui-utils");
 
 // util/constants.ts
 var HANGING_STREAM_WARNING_TIME_MS = 15 * 1e3;
@@ -5067,7 +5130,7 @@ var StreamData = class {
       throw new Error("Stream controller is not initialized.");
    }
    this.controller.enqueue(
-      this.encoder.encode((0,
+      this.encoder.encode((0, import_ui_utils10.formatDataStreamPart)("data", [value]))
    );
  }
  appendMessageAnnotation(value) {
@@ -5078,7 +5141,7 @@ var StreamData = class {
      throw new Error("Stream controller is not initialized.");
    }
    this.controller.enqueue(
-      this.encoder.encode((0,
+      this.encoder.encode((0, import_ui_utils10.formatDataStreamPart)("message_annotations", [value]))
    );
  }
 };
@@ -5088,7 +5151,7 @@ function createStreamDataTransformer() {
   return new TransformStream({
     transform: async (chunk, controller) => {
       const message = decoder.decode(chunk);
-      controller.enqueue(encoder.encode((0,
+      controller.enqueue(encoder.encode((0, import_ui_utils10.formatDataStreamPart)("text", message)));
     }
   });
 }
@@ -5151,10 +5214,10 @@ __export(llamaindex_adapter_exports, {
   toDataStream: () => toDataStream2,
   toDataStreamResponse: () => toDataStreamResponse2
 });
-var
+var import_provider_utils11 = require("@ai-sdk/provider-utils");
 function toDataStream2(stream, callbacks) {
   const trimStart = trimStartOfStream();
-  return (0,
+  return (0, import_provider_utils11.convertAsyncIteratorToReadableStream)(stream[Symbol.asyncIterator]()).pipeThrough(
     new TransformStream({
       async transform(message, controller) {
         controller.enqueue(trimStart(message.delta));
@@ -5178,13 +5241,13 @@ function toDataStreamResponse2(stream, options = {}) {
 }
 function trimStartOfStream() {
   let isStreamStart = true;
-  return (
+  return (text2) => {
    if (isStreamStart) {
-
-      if (
+      text2 = text2.trimStart();
+      if (text2)
        isStreamStart = false;
    }
-    return
+    return text2;
  };
 }
 // Annotate the CommonJS export names for ESM import in node:
@@ -5210,6 +5273,7 @@ function trimStartOfStream() {
   NoSuchModelError,
   NoSuchProviderError,
   NoSuchToolError,
+  Output,
   RetryError,
   StreamData,
   TypeValidationError,