ai 3.1.13 → 3.1.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +60 -4
- package/dist/index.d.ts +60 -4
- package/dist/index.js +65 -4
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +65 -4
- package/dist/index.mjs.map +1 -1
- package/package.json +4 -4
- package/react/dist/index.d.mts +14 -1
- package/react/dist/index.d.ts +14 -1
- package/react/dist/index.js +28 -3
- package/react/dist/index.js.map +1 -1
- package/react/dist/index.mjs +28 -3
- package/react/dist/index.mjs.map +1 -1
package/dist/index.d.mts
CHANGED
@@ -922,10 +922,13 @@ If set and supported by the model, calls will generate deterministic results.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 
+@param onFinish - Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -934,6 +937,45 @@ The language model to use.
 The tools that the model can call. The model needs to support calling tools.
 */
 tools?: TOOLS;
+/**
+Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+*/
+onFinish?: (event: {
+/**
+The reason why the generation finished.
+*/
+finishReason: FinishReason;
+/**
+The token usage of the generated response.
+*/
+usage: TokenUsage;
+/**
+The full text that has been generated.
+*/
+text: string;
+/**
+The tool calls that have been executed.
+*/
+toolCalls?: ToToolCall<TOOLS>[];
+/**
+The tool results that have been generated.
+*/
+toolResults?: ToToolResult<TOOLS>[];
+/**
+Optional raw response data.
+*/
+rawResponse?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+};
+/**
+Warnings from the model provider (e.g. unsupported settings).
+*/
+warnings?: CallWarning[];
+}) => Promise<void> | void;
 }): Promise<StreamTextResult<TOOLS>>;
 type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 type: 'text-delta';
@@ -960,12 +1002,13 @@ A result object for accessing different stream types and additional information.
 */
 declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 private originalStream;
+private onFinish?;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-The token usage of the generated
+The token usage of the generated response. Resolved when the response is finished.
 */
 readonly usage: Promise<TokenUsage>;
 /**
@@ -973,20 +1016,33 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 */
 readonly finishReason: Promise<FinishReason>;
 /**
+The full text that has been generated. Resolved when the response is finished.
+*/
+readonly text: Promise<string>;
+/**
+The tool calls that have been executed. Resolved when the response is finished.
+*/
+readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
+/**
+The tool results that have been generated. Resolved when the all tool executions are finished.
+*/
+readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
+/**
 Optional raw response data.
 */
-rawResponse?: {
+readonly rawResponse?: {
 /**
 Response headers.
 */
 headers?: Record<string, string>;
 };
-constructor({ stream, warnings, rawResponse, }: {
+constructor({ stream, warnings, rawResponse, onFinish, }: {
 stream: ReadableStream<TextStreamPart<TOOLS>>;
 warnings: CallWarning[] | undefined;
 rawResponse?: {
 headers?: Record<string, string>;
 };
+onFinish?: Parameters<typeof streamText>[0]['onFinish'];
 });
 /**
 Split out a new stream from the original stream.
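For orientation, a minimal usage sketch of the onFinish option introduced by this change (not part of the package diff itself; myModel is a placeholder for any language model accepted by streamText):

import { streamText } from 'ai';

const result = await streamText({
  model: myModel, // assumption: a LanguageModel instance from a provider package
  prompt: 'Invent a new holiday and describe its traditions.',
  onFinish({ finishReason, usage, text, toolCalls, toolResults, warnings }) {
    // Called once the LLM response and all tool executions that have an
    // `execute` function are finished, as documented in the added JSDoc.
    console.log(finishReason, usage.totalTokens, text.length);
  },
});

// Consuming the stream drives generation; onFinish fires when it closes.
for await (const textDelta of result.textStream) {
  process.stdout.write(textDelta);
}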
package/dist/index.d.ts
CHANGED
@@ -922,10 +922,13 @@ If set and supported by the model, calls will generate deterministic results.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 
+@param onFinish - Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -934,6 +937,45 @@ The language model to use.
 The tools that the model can call. The model needs to support calling tools.
 */
 tools?: TOOLS;
+/**
+Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+*/
+onFinish?: (event: {
+/**
+The reason why the generation finished.
+*/
+finishReason: FinishReason;
+/**
+The token usage of the generated response.
+*/
+usage: TokenUsage;
+/**
+The full text that has been generated.
+*/
+text: string;
+/**
+The tool calls that have been executed.
+*/
+toolCalls?: ToToolCall<TOOLS>[];
+/**
+The tool results that have been generated.
+*/
+toolResults?: ToToolResult<TOOLS>[];
+/**
+Optional raw response data.
+*/
+rawResponse?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+};
+/**
+Warnings from the model provider (e.g. unsupported settings).
+*/
+warnings?: CallWarning[];
+}) => Promise<void> | void;
 }): Promise<StreamTextResult<TOOLS>>;
 type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 type: 'text-delta';
@@ -960,12 +1002,13 @@ A result object for accessing different stream types and additional information.
 */
 declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 private originalStream;
+private onFinish?;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-The token usage of the generated
+The token usage of the generated response. Resolved when the response is finished.
 */
 readonly usage: Promise<TokenUsage>;
 /**
@@ -973,20 +1016,33 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 */
 readonly finishReason: Promise<FinishReason>;
 /**
+The full text that has been generated. Resolved when the response is finished.
+*/
+readonly text: Promise<string>;
+/**
+The tool calls that have been executed. Resolved when the response is finished.
+*/
+readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
+/**
+The tool results that have been generated. Resolved when the all tool executions are finished.
+*/
+readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
+/**
 Optional raw response data.
 */
-rawResponse?: {
+readonly rawResponse?: {
 /**
 Response headers.
 */
 headers?: Record<string, string>;
 };
-constructor({ stream, warnings, rawResponse, }: {
+constructor({ stream, warnings, rawResponse, onFinish, }: {
 stream: ReadableStream<TextStreamPart<TOOLS>>;
 warnings: CallWarning[] | undefined;
 rawResponse?: {
 headers?: Record<string, string>;
 };
+onFinish?: Parameters<typeof streamText>[0]['onFinish'];
 });
 /**
 Split out a new stream from the original stream.
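The index.d.ts changes mirror the index.d.mts ones above. A short, hedged sketch of how the new promise-valued accessors on StreamTextResult could be consumed (myModel is a placeholder); note that, per the implementation below, these promises resolve while the stream is being read, so the stream should be consumed first:

import { streamText } from 'ai';

const result = await streamText({
  model: myModel, // assumption: any supported LanguageModel
  prompt: 'List three prime numbers.',
});

for await (const part of result.textStream) {
  // Consume the stream; the TransformStream added in this release records
  // text deltas, tool calls, and tool results as chunks pass through.
}

console.log(await result.text);        // added in this release
console.log(await result.toolCalls);   // added in this release
console.log(await result.toolResults); // added in this release
console.log(await result.usage);       // resolved when the response is finished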
package/dist/index.js
CHANGED
@@ -1543,6 +1543,7 @@ async function streamText({
 messages,
 maxRetries,
 abortSignal,
+onFinish,
 ...settings
 }) {
 const retry = retryWithExponentialBackoff({ maxRetries });
@@ -1570,17 +1571,20 @@ async function streamText({
 generatorStream: stream
 }),
 warnings,
-rawResponse
+rawResponse,
+onFinish
 });
 }
 var StreamTextResult = class {
 constructor({
 stream,
 warnings,
-rawResponse
+rawResponse,
+onFinish
 }) {
 this.warnings = warnings;
 this.rawResponse = rawResponse;
+this.onFinish = onFinish;
 let resolveUsage;
 this.usage = new Promise((resolve) => {
 resolveUsage = resolve;
@@ -1589,13 +1593,70 @@ var StreamTextResult = class {
 this.finishReason = new Promise((resolve) => {
 resolveFinishReason = resolve;
 });
+let resolveText;
+this.text = new Promise((resolve) => {
+resolveText = resolve;
+});
+let resolveToolCalls;
+this.toolCalls = new Promise((resolve) => {
+resolveToolCalls = resolve;
+});
+let resolveToolResults;
+this.toolResults = new Promise((resolve) => {
+resolveToolResults = resolve;
+});
+let finishReason;
+let usage;
+let text = "";
+const toolCalls = [];
+const toolResults = [];
+const self = this;
 this.originalStream = stream.pipeThrough(
 new TransformStream({
 async transform(chunk, controller) {
 controller.enqueue(chunk);
+if (chunk.type === "text-delta") {
+text += chunk.textDelta;
+}
+if (chunk.type === "tool-call") {
+toolCalls.push(chunk);
+}
+if (chunk.type === "tool-result") {
+toolResults.push(chunk);
+}
 if (chunk.type === "finish") {
-
-
+usage = chunk.usage;
+finishReason = chunk.finishReason;
+resolveUsage(usage);
+resolveFinishReason(finishReason);
+resolveText(text);
+resolveToolCalls(toolCalls);
+}
+},
+// invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+async flush(controller) {
+var _a;
+try {
+resolveToolResults(toolResults);
+await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
+finishReason: finishReason != null ? finishReason : "unknown",
+usage: usage != null ? usage : {
+promptTokens: NaN,
+completionTokens: NaN,
+totalTokens: NaN
+},
+text,
+toolCalls,
+// The tool results are inferred as a never[] type, because they are
+// optional and the execute method with an inferred result type is
+// optional as well. Therefore we need to cast the toolResults to any.
+// The type exposed to the users will be correctly inferred.
+toolResults,
+rawResponse,
+warnings
+}));
+} catch (error) {
+controller.error(error);
 }
 }
 })
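The runtime change follows a deferred-promise pattern: transform() accumulates text, tool calls, and tool results and resolves the corresponding promises when the "finish" chunk arrives, while flush() resolves the tool results and invokes onFinish just before the stream closes. A stripped-down, self-contained sketch of that pattern (illustrative names, not the package's internals):

type Part = { type: 'text-delta'; textDelta: string } | { type: 'finish' };

function collect(
  stream: ReadableStream<Part>,
  onFinish?: (text: string) => void | Promise<void>,
) {
  // Promise whose resolver is captured so the stream can settle it later.
  let resolveText!: (text: string) => void;
  const text = new Promise<string>((resolve) => (resolveText = resolve));
  let buffer = '';

  const output = stream.pipeThrough(
    new TransformStream<Part, Part>({
      transform(chunk, controller) {
        controller.enqueue(chunk); // pass every chunk through unchanged
        if (chunk.type === 'text-delta') buffer += chunk.textDelta;
        if (chunk.type === 'finish') resolveText(buffer);
      },
      // flush runs when the stream is about to close, mirroring where the
      // diff above resolves toolResults and calls onFinish:
      async flush(controller) {
        try {
          await onFinish?.(buffer);
        } catch (error) {
          controller.error(error);
        }
      },
    }),
  );

  return { output, text };
}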