ai 3.3.17 → 3.3.19
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/index.d.mts +105 -139
- package/dist/index.d.ts +105 -139
- package/dist/index.js +209 -212
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +200 -203
- package/dist/index.mjs.map +1 -1
- package/package.json +8 -8
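Notable API changes visible in the diff below: a new experimental_customProvider factory and a MessageConversionError class are added, InvalidModelIdError is removed, the local NoSuchModelError is replaced by a re-export from @ai-sdk/provider, and NoSuchProviderError now extends NoSuchModelError. As a hedged sketch (not part of the diff; the stub model object below is an assumption for illustration), the new factory could be used like this:

const { experimental_customProvider } = require("ai");

// Stub standing in for a real LanguageModelV1 instance from a provider package.
const stubModel = { specificationVersion: "v1", provider: "stub", modelId: "stub-model" };

const myProvider = experimental_customProvider({
  languageModels: { "my-model": stubModel }
});

myProvider.languageModel("my-model"); // returns stubModel
myProvider.languageModel("other");    // throws NoSuchModelError (no fallbackProvider configured)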
package/dist/index.js
CHANGED
@@ -4,8 +4,8 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
- for (var
- __defProp(target,
+ for (var name11 in all)
+ __defProp(target, name11, { get: all[name11], enumerable: true });
 };
 var __copyProps = (to, from, except, desc) => {
 if (from && typeof from === "object" || typeof from === "function") {
@@ -20,9 +20,9 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // streams/index.ts
 var streams_exports = {};
 __export(streams_exports, {
- AISDKError: () =>
+ AISDKError: () => import_provider16.AISDKError,
 AIStream: () => AIStream,
- APICallError: () =>
+ APICallError: () => import_provider16.APICallError,
 AWSBedrockAnthropicMessagesStream: () => AWSBedrockAnthropicMessagesStream,
 AWSBedrockAnthropicStream: () => AWSBedrockAnthropicStream,
 AWSBedrockCohereStream: () => AWSBedrockCohereStream,
@@ -32,24 +32,25 @@ __export(streams_exports, {
 AssistantResponse: () => AssistantResponse,
 CohereStream: () => CohereStream,
 DownloadError: () => DownloadError,
- EmptyResponseBodyError: () =>
+ EmptyResponseBodyError: () => import_provider16.EmptyResponseBodyError,
 GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
 HuggingFaceStream: () => HuggingFaceStream,
 InkeepStream: () => InkeepStream,
 InvalidArgumentError: () => InvalidArgumentError,
 InvalidDataContentError: () => InvalidDataContentError,
 InvalidMessageRoleError: () => InvalidMessageRoleError,
-
-
- InvalidResponseDataError: () => import_provider15.InvalidResponseDataError,
+ InvalidPromptError: () => import_provider16.InvalidPromptError,
+ InvalidResponseDataError: () => import_provider16.InvalidResponseDataError,
 InvalidToolArgumentsError: () => InvalidToolArgumentsError,
- JSONParseError: () =>
+ JSONParseError: () => import_provider16.JSONParseError,
 LangChainAdapter: () => langchain_adapter_exports,
 LangChainStream: () => LangChainStream,
- LoadAPIKeyError: () =>
+ LoadAPIKeyError: () => import_provider16.LoadAPIKeyError,
+ MessageConversionError: () => MessageConversionError,
 MistralStream: () => MistralStream,
+ NoContentGeneratedError: () => import_provider16.NoContentGeneratedError,
 NoObjectGeneratedError: () => NoObjectGeneratedError,
- NoSuchModelError: () => NoSuchModelError,
+ NoSuchModelError: () => import_provider16.NoSuchModelError,
 NoSuchProviderError: () => NoSuchProviderError,
 NoSuchToolError: () => NoSuchToolError,
 OpenAIStream: () => OpenAIStream,
@@ -57,8 +58,8 @@ __export(streams_exports, {
 RetryError: () => RetryError,
 StreamData: () => StreamData2,
 StreamingTextResponse: () => StreamingTextResponse,
- TypeValidationError: () =>
- UnsupportedFunctionalityError: () =>
+ TypeValidationError: () => import_provider16.TypeValidationError,
+ UnsupportedFunctionalityError: () => import_provider16.UnsupportedFunctionalityError,
 convertToCoreMessages: () => convertToCoreMessages,
 cosineSimilarity: () => cosineSimilarity,
 createCallbacksTransformer: () => createCallbacksTransformer,
@@ -70,6 +71,7 @@ __export(streams_exports, {
 experimental_StreamData: () => experimental_StreamData,
 experimental_createModelRegistry: () => experimental_createModelRegistry,
 experimental_createProviderRegistry: () => experimental_createProviderRegistry,
+ experimental_customProvider: () => experimental_customProvider,
 experimental_generateObject: () => experimental_generateObject,
 experimental_generateText: () => experimental_generateText,
 experimental_streamObject: () => experimental_streamObject,
@@ -223,7 +225,7 @@ function getBaseTelemetryAttributes({
 telemetry,
 headers
 }) {
- var
+ var _a11;
 return {
 "ai.model.provider": model.provider,
 "ai.model.id": model.modelId,
@@ -233,7 +235,7 @@ function getBaseTelemetryAttributes({
 return attributes;
 }, {}),
 // add metadata as attributes:
- ...Object.entries((
+ ...Object.entries((_a11 = telemetry == null ? void 0 : telemetry.metadata) != null ? _a11 : {}).reduce(
 (attributes, [key, value]) => {
 attributes[`ai.telemetry.metadata.${key}`] = value;
 return attributes;
@@ -258,7 +260,7 @@ var noopTracer = {
 startSpan() {
 return noopSpan;
 },
- startActiveSpan(
+ startActiveSpan(name11, arg1, arg2, arg3) {
 if (typeof arg1 === "function") {
 return arg1(noopSpan);
 }
@@ -326,13 +328,13 @@ function getTracer({ isEnabled }) {
 // core/telemetry/record-span.ts
 var import_api2 = require("@opentelemetry/api");
 function recordSpan({
- name:
+ name: name11,
 tracer,
 attributes,
 fn,
 endWhenDone = true
 }) {
- return tracer.startActiveSpan(
+ return tracer.startActiveSpan(name11, { attributes }, async (span) => {
 try {
 const result = await fn(span);
 if (endWhenDone) {
@@ -398,14 +400,14 @@ async function embed({
 headers,
 experimental_telemetry: telemetry
 }) {
- var
+ var _a11;
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
 model,
 telemetry,
 headers,
 settings: { maxRetries }
 });
- const tracer = getTracer({ isEnabled: (
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
 return recordSpan({
 name: "ai.embed",
 attributes: selectTelemetryAttributes({
@@ -438,14 +440,14 @@ async function embed({
 }),
 tracer,
 fn: async (doEmbedSpan) => {
- var
+ var _a12;
 const modelResponse = await model.doEmbed({
 values: [value],
 abortSignal,
 headers
 });
 const embedding2 = modelResponse.embeddings[0];
- const usage2 = (
+ const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
 doEmbedSpan.setAttributes(
 selectTelemetryAttributes({
 telemetry,
@@ -511,14 +513,14 @@ async function embedMany({
 headers,
 experimental_telemetry: telemetry
 }) {
- var
+ var _a11;
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
 model,
 telemetry,
 headers,
 settings: { maxRetries }
 });
- const tracer = getTracer({ isEnabled: (
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
 return recordSpan({
 name: "ai.embedMany",
 attributes: selectTelemetryAttributes({
@@ -556,14 +558,14 @@ async function embedMany({
 }),
 tracer,
 fn: async (doEmbedSpan) => {
- var
+ var _a12;
 const modelResponse = await model.doEmbed({
 values,
 abortSignal,
 headers
 });
 const embeddings3 = modelResponse.embeddings;
- const usage2 = (
+ const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
 doEmbedSpan.setAttributes(
 selectTelemetryAttributes({
 telemetry,
@@ -615,14 +617,14 @@ async function embedMany({
 }),
 tracer,
 fn: async (doEmbedSpan) => {
- var
+ var _a12;
 const modelResponse = await model.doEmbed({
 values: chunk,
 abortSignal,
 headers
 });
 const embeddings2 = modelResponse.embeddings;
- const usage2 = (
+ const usage2 = (_a12 = modelResponse.usage) != null ? _a12 : { tokens: NaN };
 doEmbedSpan.setAttributes(
 selectTelemetryAttributes({
 telemetry,
@@ -724,7 +726,7 @@ async function download({
 url,
 fetchImplementation = fetch
 }) {
- var
+ var _a11;
 const urlText = url.toString();
 try {
 const response = await fetchImplementation(urlText);
@@ -737,7 +739,7 @@ async function download({
 }
 return {
 data: new Uint8Array(await response.arrayBuffer()),
- mimeType: (
+ mimeType: (_a11 = response.headers.get("content-type")) != null ? _a11 : void 0
 };
 } catch (error) {
 if (DownloadError.isInstance(error)) {
@@ -815,8 +817,8 @@ var dataContentSchema = import_zod.z.union([
 import_zod.z.custom(
 // Buffer might not be available in some environments such as CloudFlare:
 (value) => {
- var
- return (_b = (
+ var _a11, _b;
+ return (_b = (_a11 = globalThis.Buffer) == null ? void 0 : _a11.isBuffer(value)) != null ? _b : false;
 },
 { message: "Must be a Buffer" }
 )
@@ -944,7 +946,7 @@ function convertToLanguageModelMessage(message, downloadedImages) {
 role: "user",
 content: message.content.map(
 (part) => {
- var
+ var _a11, _b, _c;
 switch (part.type) {
 case "text": {
 return {
@@ -967,7 +969,7 @@ function convertToLanguageModelMessage(message, downloadedImages) {
 return {
 type: "image",
 image: downloadedImage.data,
- mimeType: (
+ mimeType: (_a11 = part.mimeType) != null ? _a11 : downloadedImage.mimeType,
 providerMetadata: part.experimental_providerMetadata
 };
 }
@@ -1399,8 +1401,8 @@ function prepareResponseHeaders(init, {
 contentType,
 dataStreamVersion
 }) {
- var
- const headers = new Headers((
+ var _a11;
+ const headers = new Headers((_a11 = init == null ? void 0 : init.headers) != null ? _a11 : {});
 if (!headers.has("Content-Type")) {
 headers.set("Content-Type", contentType);
 }
@@ -1709,7 +1711,7 @@ async function generateObject({
 experimental_telemetry: telemetry,
 ...settings
 }) {
- var
+ var _a11;
 validateObjectGenerationInput({
 output,
 mode,
@@ -1727,7 +1729,7 @@ async function generateObject({
 headers,
 settings: { ...settings, maxRetries }
 });
- const tracer = getTracer({ isEnabled: (
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
 return recordSpan({
 name: "ai.generateObject",
 attributes: selectTelemetryAttributes({
@@ -1888,7 +1890,7 @@ async function generateObject({
 }),
 tracer,
 fn: async (span2) => {
- var
+ var _a12, _b;
 const result2 = await model.doGenerate({
 mode: {
 type: "object-tool",
@@ -1905,7 +1907,7 @@ async function generateObject({
 abortSignal,
 headers
 });
- const objectText = (_b = (
+ const objectText = (_b = (_a12 = result2.toolCalls) == null ? void 0 : _a12[0]) == null ? void 0 : _b.args;
 if (objectText === void 0) {
 throw new NoObjectGeneratedError();
 }
@@ -1993,9 +1995,9 @@ var DefaultGenerateObjectResult = class {
 this.experimental_providerMetadata = options.providerMetadata;
 }
 toJsonResponse(init) {
- var
+ var _a11;
 return new Response(JSON.stringify(this.object), {
- status: (
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
 headers: prepareResponseHeaders(init, {
 contentType: "application/json; charset=utf-8"
 })
@@ -2045,17 +2047,17 @@ var DelayedPromise = class {
 return this.promise;
 }
 resolve(value) {
- var
+ var _a11;
 this.status = { type: "resolved", value };
 if (this.promise) {
- (
+ (_a11 = this._resolve) == null ? void 0 : _a11.call(this, value);
 }
 }
 reject(error) {
- var
+ var _a11;
 this.status = { type: "rejected", error };
 if (this.promise) {
- (
+ (_a11 = this._reject) == null ? void 0 : _a11.call(this, error);
 }
 }
 };
@@ -2078,7 +2080,7 @@ async function streamObject({
 onFinish,
 ...settings
 }) {
- var
+ var _a11;
 validateObjectGenerationInput({
 output,
 mode,
@@ -2096,7 +2098,7 @@ async function streamObject({
 headers,
 settings: { ...settings, maxRetries }
 });
- const tracer = getTracer({ isEnabled: (
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
 const retry = retryWithExponentialBackoff({ maxRetries });
 return recordSpan({
 name: "ai.streamObject",
@@ -2485,8 +2487,8 @@ var DefaultStreamObjectResult = class {
 });
 }
 pipeTextStreamToResponse(response, init) {
- var
- response.writeHead((
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
 "Content-Type": "text/plain; charset=utf-8",
 ...init == null ? void 0 : init.headers
 });
@@ -2508,9 +2510,9 @@ var DefaultStreamObjectResult = class {
 read();
 }
 toTextStreamResponse(init) {
- var
+ var _a11;
 return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
 headers: prepareResponseHeaders(init, {
 contentType: "text/plain; charset=utf-8"
 })
@@ -2539,9 +2541,9 @@ function prepareToolsAndToolChoice({
 };
 }
 return {
- tools: Object.entries(tools).map(([
+ tools: Object.entries(tools).map(([name11, tool2]) => ({
 type: "function",
- name:
+ name: name11,
 description: tool2.description,
 parameters: (0, import_ui_utils3.asSchema)(tool2.parameters).jsonSchema
 })),
@@ -2690,14 +2692,14 @@ async function generateText({
 experimental_telemetry: telemetry,
 ...settings
 }) {
- var
+ var _a11;
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
 model,
 telemetry,
 headers,
 settings: { ...settings, maxRetries }
 });
- const tracer = getTracer({ isEnabled: (
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
 return recordSpan({
 name: "ai.generateText",
 attributes: selectTelemetryAttributes({
@@ -2717,7 +2719,7 @@ async function generateText({
 }),
 tracer,
 fn: async (span) => {
- var
+ var _a12, _b, _c, _d;
 const retry = retryWithExponentialBackoff({ maxRetries });
 const validatedPrompt = validatePrompt({
 system,
@@ -2803,7 +2805,7 @@ async function generateText({
 }
 })
 );
- currentToolCalls = ((
+ currentToolCalls = ((_a12 = currentModelResponse.toolCalls) != null ? _a12 : []).map(
 (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
 );
 currentToolResults = tools == null ? [] : await executeTools({
@@ -3271,14 +3273,14 @@ async function streamText({
 onFinish,
 ...settings
 }) {
- var
+ var _a11;
 const baseTelemetryAttributes = getBaseTelemetryAttributes({
 model,
 telemetry,
 headers,
 settings: { ...settings, maxRetries }
 });
- const tracer = getTracer({ isEnabled: (
+ const tracer = getTracer({ isEnabled: (_a11 = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a11 : false });
 return recordSpan({
 name: "ai.streamText",
 attributes: selectTelemetryAttributes({
@@ -3655,8 +3657,8 @@ var DefaultStreamTextResult = class {
 return this.pipeDataStreamToResponse(response, init);
 }
 pipeDataStreamToResponse(response, init) {
- var
- response.writeHead((
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
 "Content-Type": "text/plain; charset=utf-8",
 ...init == null ? void 0 : init.headers
 });
@@ -3678,8 +3680,8 @@ var DefaultStreamTextResult = class {
 read();
 }
 pipeTextStreamToResponse(response, init) {
- var
- response.writeHead((
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
 "Content-Type": "text/plain; charset=utf-8",
 ...init == null ? void 0 : init.headers
 });
@@ -3704,7 +3706,7 @@ var DefaultStreamTextResult = class {
 return this.toDataStreamResponse(options);
 }
 toDataStreamResponse(options) {
- var
+ var _a11;
 const init = options == null ? void 0 : "init" in options ? options.init : {
 headers: "headers" in options ? options.headers : void 0,
 status: "status" in options ? options.status : void 0,
@@ -3714,7 +3716,7 @@ var DefaultStreamTextResult = class {
 const getErrorMessage4 = options == null ? void 0 : "getErrorMessage" in options ? options.getErrorMessage : void 0;
 const stream = data ? mergeStreams(data.stream, this.toDataStream({ getErrorMessage: getErrorMessage4 })) : this.toDataStream({ getErrorMessage: getErrorMessage4 });
 return new Response(stream, {
- status: (
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
 statusText: init == null ? void 0 : init.statusText,
 headers: prepareResponseHeaders(init, {
 contentType: "text/plain; charset=utf-8",
@@ -3723,9 +3725,9 @@ var DefaultStreamTextResult = class {
 });
 }
 toTextStreamResponse(init) {
- var
+ var _a11;
 return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
- status: (
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
 headers: prepareResponseHeaders(init, {
 contentType: "text/plain; charset=utf-8"
 })
@@ -3736,7 +3738,7 @@ var experimental_streamText = streamText;
 
 // core/prompt/attachments-to-parts.ts
 function attachmentsToParts(attachments) {
- var
+ var _a11, _b, _c;
 const parts = [];
 for (const attachment of attachments) {
 let url;
@@ -3748,7 +3750,7 @@ function attachmentsToParts(attachments) {
 switch (url.protocol) {
 case "http:":
 case "https:": {
- if ((
+ if ((_a11 = attachment.contentType) == null ? void 0 : _a11.startsWith("image/")) {
 parts.push({ type: "image", image: url });
 }
 break;
@@ -3789,15 +3791,32 @@ function attachmentsToParts(attachments) {
 return parts;
 }
 
+ // core/prompt/message-conversion-error.ts
+ var import_provider12 = require("@ai-sdk/provider");
+ var name9 = "AI_MessageConversionError";
+ var marker9 = `vercel.ai.error.${name9}`;
+ var symbol9 = Symbol.for(marker9);
+ var _a9;
+ var MessageConversionError = class extends import_provider12.AISDKError {
+ constructor({
+ originalMessage,
+ message
+ }) {
+ super({ name: name9, message });
+ this[_a9] = true;
+ this.originalMessage = originalMessage;
+ }
+ static isInstance(error) {
+ return import_provider12.AISDKError.hasMarker(error, marker9);
+ }
+ };
+ _a9 = symbol9;
+
 // core/prompt/convert-to-core-messages.ts
 function convertToCoreMessages(messages) {
 const coreMessages = [];
- for (const {
- role,
- content,
- toolInvocations,
- experimental_attachments
- } of messages) {
+ for (const message of messages) {
+ const { role, content, toolInvocations, experimental_attachments } = message;
 switch (role) {
 case "system": {
 coreMessages.push({
@@ -3835,131 +3854,98 @@ function convertToCoreMessages(messages) {
 });
 coreMessages.push({
 role: "tool",
- content: toolInvocations.map(
- (
+ content: toolInvocations.map((ToolInvocation) => {
+ if (!("result" in ToolInvocation)) {
+ throw new MessageConversionError({
+ originalMessage: message,
+ message: "ToolInvocation must have a result: " + JSON.stringify(ToolInvocation)
+ });
+ }
+ const { toolCallId, toolName, args, result } = ToolInvocation;
+ return {
 type: "tool-result",
 toolCallId,
 toolName,
 args,
 result
- }
- )
+ };
+ })
 });
 break;
 }
+ case "function":
+ case "data":
+ case "tool": {
+ break;
+ }
 default: {
 const _exhaustiveCheck = role;
- throw new
+ throw new MessageConversionError({
+ originalMessage: message,
+ message: `Unsupported role: ${_exhaustiveCheck}`
+ });
 }
 }
 }
 return coreMessages;
 }
 
- // core/registry/
- var import_provider12 = require("@ai-sdk/provider");
- var name9 = "AI_InvalidModelIdError";
- var marker9 = `vercel.ai.error.${name9}`;
- var symbol9 = Symbol.for(marker9);
- var _a9;
- var InvalidModelIdError = class extends import_provider12.AISDKError {
- constructor({
- id,
- message = `Invalid model id: ${id}`
- }) {
- super({ name: name9, message });
- this[_a9] = true;
- this.id = id;
- }
- static isInstance(error) {
- return import_provider12.AISDKError.hasMarker(error, marker9);
- }
- /**
- * @deprecated use `isInstance` instead
- */
- static isInvalidModelIdError(error) {
- return error instanceof Error && error.name === name9 && typeof error.id === "string";
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- id: this.id
- };
- }
- };
- _a9 = symbol9;
-
- // core/registry/no-such-model-error.ts
+ // core/registry/custom-provider.ts
 var import_provider13 = require("@ai-sdk/provider");
-
+ function experimental_customProvider({
+ languageModels,
+ textEmbeddingModels,
+ fallbackProvider
+ }) {
+ return {
+ languageModel(modelId) {
+ if (languageModels != null && modelId in languageModels) {
+ return languageModels[modelId];
+ }
+ if (fallbackProvider) {
+ return fallbackProvider.languageModel(modelId);
+ }
+ throw new import_provider13.NoSuchModelError({ modelId, modelType: "languageModel" });
+ },
+ textEmbeddingModel(modelId) {
+ if (textEmbeddingModels != null && modelId in textEmbeddingModels) {
+ return textEmbeddingModels[modelId];
+ }
+ if (fallbackProvider) {
+ return fallbackProvider.textEmbeddingModel(modelId);
+ }
+ throw new import_provider13.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ }
+ };
+ }
+
+ // core/registry/no-such-provider-error.ts
+ var import_provider14 = require("@ai-sdk/provider");
+ var name10 = "AI_NoSuchProviderError";
 var marker10 = `vercel.ai.error.${name10}`;
 var symbol10 = Symbol.for(marker10);
 var _a10;
- var
+ var NoSuchProviderError = class extends import_provider14.NoSuchModelError {
 constructor({
 modelId,
 modelType,
- message = `No such ${modelType}: ${modelId}`
- }) {
- super({ name: name10, message });
- this[_a10] = true;
- this.modelId = modelId;
- this.modelType = modelType;
- }
- static isInstance(error) {
- return import_provider13.AISDKError.hasMarker(error, marker10);
- }
- /**
- * @deprecated use `isInstance` instead
- */
- static isNoSuchModelError(error) {
- return error instanceof Error && error.name === name10 && typeof error.modelId === "string" && typeof error.modelType === "string";
- }
- /**
- * @deprecated Do not use this method. It will be removed in the next major version.
- */
- toJSON() {
- return {
- name: this.name,
- message: this.message,
- stack: this.stack,
- modelId: this.modelId,
- modelType: this.modelType
- };
- }
- };
- _a10 = symbol10;
-
- // core/registry/no-such-provider-error.ts
- var import_provider14 = require("@ai-sdk/provider");
- var name11 = "AI_NoSuchProviderError";
- var marker11 = `vercel.ai.error.${name11}`;
- var symbol11 = Symbol.for(marker11);
- var _a11;
- var NoSuchProviderError = class extends import_provider14.AISDKError {
- constructor({
 providerId,
 availableProviders,
 message = `No such provider: ${providerId} (available providers: ${availableProviders.join()})`
 }) {
- super({
- this[
+ super({ errorName: name10, modelId, modelType, message });
+ this[_a10] = true;
 this.providerId = providerId;
 this.availableProviders = availableProviders;
 }
 static isInstance(error) {
- return import_provider14.AISDKError.hasMarker(error,
+ return import_provider14.AISDKError.hasMarker(error, marker10);
 }
 /**
 * @deprecated use `isInstance` instead
 */
 static isNoSuchProviderError(error) {
- return error instanceof Error && error.name ===
+ return error instanceof Error && error.name === name10 && typeof error.providerId === "string" && Array.isArray(error.availableProviders);
 }
 /**
 * @deprecated Do not use this method. It will be removed in the next major version.
@@ -3969,14 +3955,17 @@ var NoSuchProviderError = class extends import_provider14.AISDKError {
 name: this.name,
 message: this.message,
 stack: this.stack,
+ modelId: this.modelId,
+ modelType: this.modelType,
 providerId: this.providerId,
 availableProviders: this.availableProviders
 };
 }
 };
-
+ _a10 = symbol10;
 
 // core/registry/provider-registry.ts
+ var import_provider15 = require("@ai-sdk/provider");
 function experimental_createProviderRegistry(providers) {
 const registry = new DefaultProviderRegistry();
 for (const [id, provider] of Object.entries(providers)) {
@@ -3999,35 +3988,41 @@ var DefaultProviderRegistry = class {
 const provider = this.providers[id];
 if (provider == null) {
 throw new NoSuchProviderError({
+ modelId: id,
+ modelType: "languageModel",
 providerId: id,
 availableProviders: Object.keys(this.providers)
 });
 }
 return provider;
 }
- splitId(id) {
+ splitId(id, modelType) {
 const index = id.indexOf(":");
 if (index === -1) {
- throw new
+ throw new import_provider15.NoSuchModelError({
+ modelId: id,
+ modelType,
+ message: `Invalid ${modelType} id for registry: ${id} (must be in the format "providerId:modelId")`
+ });
 }
 return [id.slice(0, index), id.slice(index + 1)];
 }
 languageModel(id) {
- var
- const [providerId, modelId] = this.splitId(id);
- const model = (_b = (
+ var _a11, _b;
+ const [providerId, modelId] = this.splitId(id, "languageModel");
+ const model = (_b = (_a11 = this.getProvider(providerId)).languageModel) == null ? void 0 : _b.call(_a11, modelId);
 if (model == null) {
- throw new NoSuchModelError({ modelId: id, modelType: "languageModel" });
+ throw new import_provider15.NoSuchModelError({ modelId: id, modelType: "languageModel" });
 }
 return model;
 }
 textEmbeddingModel(id) {
- var
- const [providerId, modelId] = this.splitId(id);
+ var _a11, _b, _c;
+ const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
 const provider = this.getProvider(providerId);
- const model = (_c = (
+ const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
 if (model == null) {
- throw new NoSuchModelError({
+ throw new import_provider15.NoSuchModelError({
 modelId: id,
 modelType: "textEmbeddingModel"
 });
@@ -4067,7 +4062,7 @@ function magnitude(vector) {
 }
 
 // errors/index.ts
- var
+ var import_provider16 = require("@ai-sdk/provider");
 
 // streams/ai-stream.ts
 var import_eventsource_parser = require("eventsource-parser");
@@ -4185,8 +4180,8 @@ function readableFromAsyncIterable(iterable) {
 controller.enqueue(value);
 },
 async cancel(reason) {
- var
- await ((
+ var _a11;
+ await ((_a11 = it.return) == null ? void 0 : _a11.call(it, reason));
 }
 });
 }
@@ -4323,7 +4318,7 @@ var import_ui_utils8 = require("@ai-sdk/ui-utils");
 function AssistantResponse({ threadId, messageId }, process2) {
 const stream = new ReadableStream({
 async start(controller) {
- var
+ var _a11;
 const textEncoder = new TextEncoder();
 const sendMessage = (message) => {
 controller.enqueue(
@@ -4341,7 +4336,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
 );
 };
 const forwardStream = async (stream2) => {
- var
+ var _a12, _b;
 let result = void 0;
 for await (const value of stream2) {
 switch (value.event) {
@@ -4358,7 +4353,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
 break;
 }
 case "thread.message.delta": {
- const content = (
+ const content = (_a12 = value.data.delta.content) == null ? void 0 : _a12[0];
 if ((content == null ? void 0 : content.type) === "text" && ((_b = content.text) == null ? void 0 : _b.value) != null) {
 controller.enqueue(
 textEncoder.encode(
@@ -4394,7 +4389,7 @@ function AssistantResponse({ threadId, messageId }, process2) {
 forwardStream
 });
 } catch (error) {
- sendError((
+ sendError((_a11 = error.message) != null ? _a11 : `${error}`);
 } finally {
 controller.close();
 }
@@ -4415,9 +4410,9 @@ var experimental_AssistantResponse = AssistantResponse;
 
 // streams/aws-bedrock-stream.ts
 async function* asDeltaIterable(response, extractTextDeltaFromChunk) {
- var
+ var _a11, _b;
 const decoder = new TextDecoder();
- for await (const chunk of (
+ for await (const chunk of (_a11 = response.body) != null ? _a11 : []) {
 const bytes = (_b = chunk.chunk) == null ? void 0 : _b.bytes;
 if (bytes != null) {
 const chunkText = decoder.decode(bytes);
@@ -4431,8 +4426,8 @@ async function* asDeltaIterable(response, extractTextDeltaFromChunk) {
 }
 function AWSBedrockAnthropicMessagesStream(response, callbacks) {
 return AWSBedrockStream(response, callbacks, (chunk) => {
- var
- return (
+ var _a11;
+ return (_a11 = chunk.delta) == null ? void 0 : _a11.text;
 });
 }
 function AWSBedrockAnthropicStream(response, callbacks) {
@@ -4479,8 +4474,8 @@ async function readAndProcessLines(reader, controller) {
 controller.close();
 }
 function createParser2(res) {
- var
- const reader = (
+ var _a11;
+ const reader = (_a11 = res.body) == null ? void 0 : _a11.getReader();
 return new ReadableStream({
 async start(controller) {
 if (!reader) {
@@ -4510,9 +4505,9 @@ function CohereStream(reader, callbacks) {
 
 // streams/google-generative-ai-stream.ts
 async function* streamable3(response) {
- var
+ var _a11, _b, _c;
 for await (const chunk of response.stream) {
- const parts = (_c = (_b = (
+ const parts = (_c = (_b = (_a11 = chunk.candidates) == null ? void 0 : _a11[0]) == null ? void 0 : _b.content) == null ? void 0 : _c.parts;
 if (parts === void 0) {
 continue;
 }
@@ -4531,13 +4526,13 @@ function createParser3(res) {
 const trimStartOfStream = trimStartOfStreamHelper();
 return new ReadableStream({
 async pull(controller) {
- var
+ var _a11, _b;
 const { value, done } = await res.next();
 if (done) {
 controller.close();
 return;
 }
- const text = trimStartOfStream((_b = (
+ const text = trimStartOfStream((_b = (_a11 = value.token) == null ? void 0 : _a11.text) != null ? _b : "");
 if (!text)
 return;
 if (value.generated_text != null && value.generated_text.length > 0) {
@@ -4562,11 +4557,11 @@ function InkeepStream(res, callbacks) {
 let chat_session_id = "";
 let records_cited;
 const inkeepEventParser = (data, options) => {
- var
+ var _a11, _b;
 const { event } = options;
 if (event === "records_cited") {
 records_cited = JSON.parse(data);
- (
+ (_a11 = callbacks == null ? void 0 : callbacks.onRecordsCited) == null ? void 0 : _a11.call(callbacks, records_cited);
 }
 if (event === "message_chunk") {
 const inkeepMessageChunk = JSON.parse(data);
@@ -4579,12 +4574,12 @@ function InkeepStream(res, callbacks) {
 passThroughCallbacks = {
 ...passThroughCallbacks,
 onFinal: (completion) => {
- var
+ var _a11;
 const inkeepOnFinalMetadata = {
 chat_session_id,
 records_cited
 };
- (
+ (_a11 = callbacks == null ? void 0 : callbacks.onFinal) == null ? void 0 : _a11.call(callbacks, completion, inkeepOnFinalMetadata);
 }
 };
 return AIStream(res, inkeepEventParser, passThroughCallbacks).pipeThrough(
@@ -4606,7 +4601,7 @@ function toDataStream(stream, callbacks) {
 return stream.pipeThrough(
 new TransformStream({
 transform: async (value, controller) => {
- var
+ var _a11;
 if (typeof value === "string") {
 controller.enqueue(value);
 return;
@@ -4614,7 +4609,7 @@ function toDataStream(stream, callbacks) {
 if ("event" in value) {
 if (value.event === "on_chat_model_stream") {
 forwardAIMessageChunk(
- (
+ (_a11 = value.data) == null ? void 0 : _a11.chunk,
 controller
 );
 }
@@ -4626,13 +4621,13 @@ function toDataStream(stream, callbacks) {
 ).pipeThrough(createCallbacksTransformer(callbacks)).pipeThrough(createStreamDataTransformer());
 }
 function toDataStreamResponse(stream, options) {
- var
+ var _a11;
 const dataStream = toDataStream(stream, options == null ? void 0 : options.callbacks);
 const data = options == null ? void 0 : options.data;
 const init = options == null ? void 0 : options.init;
 const responseStream = data ? mergeStreams(data.stream, dataStream) : dataStream;
 return new Response(responseStream, {
- status: (
+ status: (_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200,
 statusText: init == null ? void 0 : init.statusText,
 headers: prepareResponseHeaders(init, {
 contentType: "text/plain; charset=utf-8",
@@ -4714,9 +4709,9 @@ function LangChainStream(callbacks) {
 
 // streams/mistral-stream.ts
 async function* streamable4(stream) {
- var
+ var _a11, _b;
 for await (const chunk of stream) {
- const content = (_b = (
+ const content = (_b = (_a11 = chunk.choices[0]) == null ? void 0 : _a11.delta) == null ? void 0 : _b.content;
 if (content === void 0 || content === "") {
 continue;
 }
@@ -4746,10 +4741,10 @@ async function* streamable5(stream) {
 model: chunk.model,
 // not exposed by Azure API
 choices: chunk.choices.map((choice) => {
- var
+ var _a11, _b, _c, _d, _e, _f, _g;
 return {
 delta: {
- content: (
+ content: (_a11 = choice.delta) == null ? void 0 : _a11.content,
 function_call: (_b = choice.delta) == null ? void 0 : _b.functionCall,
 role: (_c = choice.delta) == null ? void 0 : _c.role,
 tool_calls: ((_e = (_d = choice.delta) == null ? void 0 : _d.toolCalls) == null ? void 0 : _e.length) ? (_g = (_f = choice.delta) == null ? void 0 : _f.toolCalls) == null ? void 0 : _g.map((toolCall, index) => ({
@@ -4774,9 +4769,9 @@ function chunkToText() {
 const trimStartOfStream = trimStartOfStreamHelper();
 let isFunctionStreamingIn;
 return (json) => {
- var
+ var _a11, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
 if (isChatCompletionChunk(json)) {
- const delta = (
+ const delta = (_a11 = json.choices[0]) == null ? void 0 : _a11.delta;
 if ((_b = delta.function_call) == null ? void 0 : _b.name) {
 isFunctionStreamingIn = true;
 return {
@@ -5049,8 +5044,8 @@ function createFunctionCallTransformer(callbacks) {
 
 // streams/replicate-stream.ts
 async function ReplicateStream(res, cb, options) {
- var
- const url = (
+ var _a11;
+ const url = (_a11 = res.urls) == null ? void 0 : _a11.stream;
 if (!url) {
 if (res.error)
 throw new Error(res.error);
@@ -5071,8 +5066,8 @@ async function ReplicateStream(res, cb, options) {
 
 // streams/stream-to-response.ts
 function streamToResponse(res, response, init, data) {
- var
- response.writeHead((
+ var _a11;
+ response.writeHead((_a11 = init == null ? void 0 : init.status) != null ? _a11 : 200, {
 "Content-Type": "text/plain; charset=utf-8",
 ...init == null ? void 0 : init.headers
 });
@@ -5135,7 +5130,6 @@ var nanoid = import_provider_utils8.generateId;
 InvalidArgumentError,
 InvalidDataContentError,
 InvalidMessageRoleError,
- InvalidModelIdError,
 InvalidPromptError,
 InvalidResponseDataError,
 InvalidToolArgumentsError,
@@ -5143,7 +5137,9 @@ var nanoid = import_provider_utils8.generateId;
 LangChainAdapter,
 LangChainStream,
 LoadAPIKeyError,
+ MessageConversionError,
 MistralStream,
+ NoContentGeneratedError,
 NoObjectGeneratedError,
 NoSuchModelError,
 NoSuchProviderError,
@@ -5166,6 +5162,7 @@ var nanoid = import_provider_utils8.generateId;
 experimental_StreamData,
 experimental_createModelRegistry,
 experimental_createProviderRegistry,
+ experimental_customProvider,
 experimental_generateObject,
 experimental_generateText,
 experimental_streamObject,