ai 3.1.14 → 3.1.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +82 -6
- package/dist/index.d.ts +82 -6
- package/dist/index.js +96 -16
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +96 -16
- package/dist/index.mjs.map +1 -1
- package/package.json +4 -4
- package/rsc/dist/index.d.ts +18 -1
- package/rsc/dist/rsc-server.d.mts +18 -1
- package/rsc/dist/rsc-server.mjs +29 -6
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.d.mts
CHANGED
@@ -39,6 +39,18 @@ Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
 type CallWarning = LanguageModelV1CallWarning;
+/**
+Tool choice for the generation. It supports the following settings:
+
+- `auto` (default): the model can choose whether and which tools to call.
+- `required`: the model must call a tool. It can choose which tool to call.
+- `none`: the model must not call tools
+- `{ type: 'tool', tooName: string (typed) }`: the model must call the specified tool
+*/
+type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+type: 'tool';
+toolName: keyof TOOLS;
+};
 
 /**
 Embed a value using an embedding model. The type of the value is defined by the embedding model.
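
For context, the new `CoreToolChoice` union accepts the four shapes described in the added doc comment. A minimal sketch (the tool map and tool names are hypothetical, used only to parameterize the type):

import type { CoreToolChoice } from 'ai';

// Hypothetical tool map, used only to parameterize the type.
type MyTools = { weather: unknown; cityAttractions: unknown };

const auto: CoreToolChoice<MyTools> = 'auto';          // model decides (default)
const required: CoreToolChoice<MyTools> = 'required';  // model must call some tool
const none: CoreToolChoice<MyTools> = 'none';          // model must not call tools
const specific: CoreToolChoice<MyTools> = { type: 'tool', toolName: 'weather' };
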
@@ -820,7 +832,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -829,6 +841,10 @@ The language model to use.
 The tools that the model can call. The model needs to support calling tools.
 */
 tools?: TOOLS;
+/**
+The tool choice strategy. Default: 'auto'.
+*/
+toolChoice?: CoreToolChoice<TOOLS>;
 }): Promise<GenerateTextResult<TOOLS>>;
 /**
 The result of a `generateText` call.
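
A hedged usage sketch of the new `toolChoice` option on `generateText`; the model instance and the `weather` tool are illustrative assumptions, not part of this diff:

import { generateText, tool } from 'ai';
import type { LanguageModel } from 'ai';
import { z } from 'zod';

async function askWeather(model: LanguageModel) {
  return generateText({
    model,
    // New in this release; defaults to 'auto' when omitted.
    toolChoice: 'required',
    tools: {
      weather: tool({
        description: 'Get the weather for a city',
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, temperatureC: 21 }),
      }),
    },
    prompt: 'What is the weather in Berlin?',
  });
}
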
@@ -922,10 +938,13 @@ If set and supported by the model, calls will generate deterministic results.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 
+@param onFinish - Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -934,6 +953,49 @@ The language model to use.
 The tools that the model can call. The model needs to support calling tools.
 */
 tools?: TOOLS;
+/**
+The tool choice strategy. Default: 'auto'.
+*/
+toolChoice?: CoreToolChoice<TOOLS>;
+/**
+Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+*/
+onFinish?: (event: {
+/**
+The reason why the generation finished.
+*/
+finishReason: FinishReason;
+/**
+The token usage of the generated response.
+*/
+usage: TokenUsage;
+/**
+The full text that has been generated.
+*/
+text: string;
+/**
+The tool calls that have been executed.
+*/
+toolCalls?: ToToolCall<TOOLS>[];
+/**
+The tool results that have been generated.
+*/
+toolResults?: ToToolResult<TOOLS>[];
+/**
+Optional raw response data.
+*/
+rawResponse?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+};
+/**
+Warnings from the model provider (e.g. unsupported settings).
+*/
+warnings?: CallWarning[];
+}) => Promise<void> | void;
 }): Promise<StreamTextResult<TOOLS>>;
 type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 type: 'text-delta';
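
A hedged sketch of the new `onFinish` callback on `streamText`; the model instance is an assumption, and consuming `textStream` (an existing `StreamTextResult` property not shown in this diff) is what drives the stream to completion:

import { streamText } from 'ai';
import type { LanguageModel } from 'ai';

async function streamAnswer(model: LanguageModel) {
  const result = await streamText({
    model,
    prompt: 'Write a haiku about diffs.',
    // New in this release: runs once the LLM response and all tool
    // executions (for tools with an `execute` function) have finished.
    onFinish({ finishReason, usage, warnings }) {
      console.log('finished:', finishReason, usage.totalTokens, warnings);
    },
  });

  // Draining the stream is what eventually triggers onFinish.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}
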
@@ -960,12 +1022,13 @@ A result object for accessing different stream types and additional information.
 */
 declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 private originalStream;
+private onFinish?;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-The token usage of the generated
+The token usage of the generated response. Resolved when the response is finished.
 */
 readonly usage: Promise<TokenUsage>;
 /**
@@ -973,20 +1036,33 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 */
 readonly finishReason: Promise<FinishReason>;
 /**
+The full text that has been generated. Resolved when the response is finished.
+*/
+readonly text: Promise<string>;
+/**
+The tool calls that have been executed. Resolved when the response is finished.
+*/
+readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
+/**
+The tool results that have been generated. Resolved when the all tool executions are finished.
+*/
+readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
+/**
 Optional raw response data.
 */
-rawResponse?: {
+readonly rawResponse?: {
 /**
 Response headers.
 */
 headers?: Record<string, string>;
 };
-constructor({ stream, warnings, rawResponse, }: {
+constructor({ stream, warnings, rawResponse, onFinish, }: {
 stream: ReadableStream<TextStreamPart<TOOLS>>;
 warnings: CallWarning[] | undefined;
 rawResponse?: {
 headers?: Record<string, string>;
 };
+onFinish?: Parameters<typeof streamText>[0]['onFinish'];
 });
 /**
 Split out a new stream from the original stream.
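
The new promise-valued fields on `StreamTextResult` settle once the stream has been fully consumed. A sketch, again assuming the pre-existing `textStream` property:

import type { CoreTool, StreamTextResult } from 'ai';

async function collect<TOOLS extends Record<string, CoreTool>>(
  result: StreamTextResult<TOOLS>,
) {
  // Drain the stream first; the promises below resolve when the response finishes.
  for await (const _delta of result.textStream) {
    // consume
  }

  const [text, toolCalls, toolResults, usage, finishReason] = await Promise.all([
    result.text,        // new
    result.toolCalls,   // new
    result.toolResults, // new
    result.usage,
    result.finishReason,
  ]);

  return { text, toolCalls, toolResults, usage, finishReason };
}
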
@@ -2268,4 +2344,4 @@ declare class StreamingTextResponse extends Response {
 constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
 }
 
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, AssistantStatus, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseAssistantOptions, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, AssistantStatus, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseAssistantOptions, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.d.ts
CHANGED
@@ -39,6 +39,18 @@ Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
 type CallWarning = LanguageModelV1CallWarning;
+/**
+Tool choice for the generation. It supports the following settings:
+
+- `auto` (default): the model can choose whether and which tools to call.
+- `required`: the model must call a tool. It can choose which tool to call.
+- `none`: the model must not call tools
+- `{ type: 'tool', tooName: string (typed) }`: the model must call the specified tool
+*/
+type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+type: 'tool';
+toolName: keyof TOOLS;
+};
 
 /**
 Embed a value using an embedding model. The type of the value is defined by the embedding model.
@@ -820,7 +832,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -829,6 +841,10 @@ The language model to use.
 The tools that the model can call. The model needs to support calling tools.
 */
 tools?: TOOLS;
+/**
+The tool choice strategy. Default: 'auto'.
+*/
+toolChoice?: CoreToolChoice<TOOLS>;
 }): Promise<GenerateTextResult<TOOLS>>;
 /**
 The result of a `generateText` call.
@@ -922,10 +938,13 @@ If set and supported by the model, calls will generate deterministic results.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 
+@param onFinish - Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, onFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -934,6 +953,49 @@ The language model to use.
 The tools that the model can call. The model needs to support calling tools.
 */
 tools?: TOOLS;
+/**
+The tool choice strategy. Default: 'auto'.
+*/
+toolChoice?: CoreToolChoice<TOOLS>;
+/**
+Callback that is called when the LLM response and all request tool executions
+(for tools that have an `execute` function) are finished.
+*/
+onFinish?: (event: {
+/**
+The reason why the generation finished.
+*/
+finishReason: FinishReason;
+/**
+The token usage of the generated response.
+*/
+usage: TokenUsage;
+/**
+The full text that has been generated.
+*/
+text: string;
+/**
+The tool calls that have been executed.
+*/
+toolCalls?: ToToolCall<TOOLS>[];
+/**
+The tool results that have been generated.
+*/
+toolResults?: ToToolResult<TOOLS>[];
+/**
+Optional raw response data.
+*/
+rawResponse?: {
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+};
+/**
+Warnings from the model provider (e.g. unsupported settings).
+*/
+warnings?: CallWarning[];
+}) => Promise<void> | void;
 }): Promise<StreamTextResult<TOOLS>>;
 type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 type: 'text-delta';
@@ -960,12 +1022,13 @@ A result object for accessing different stream types and additional information.
 */
 declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 private originalStream;
+private onFinish?;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-The token usage of the generated
+The token usage of the generated response. Resolved when the response is finished.
 */
 readonly usage: Promise<TokenUsage>;
 /**
@@ -973,20 +1036,33 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 */
 readonly finishReason: Promise<FinishReason>;
 /**
+The full text that has been generated. Resolved when the response is finished.
+*/
+readonly text: Promise<string>;
+/**
+The tool calls that have been executed. Resolved when the response is finished.
+*/
+readonly toolCalls: Promise<ToToolCall<TOOLS>[]>;
+/**
+The tool results that have been generated. Resolved when the all tool executions are finished.
+*/
+readonly toolResults: Promise<ToToolResult<TOOLS>[]>;
+/**
 Optional raw response data.
 */
-rawResponse?: {
+readonly rawResponse?: {
 /**
 Response headers.
 */
 headers?: Record<string, string>;
 };
-constructor({ stream, warnings, rawResponse, }: {
+constructor({ stream, warnings, rawResponse, onFinish, }: {
 stream: ReadableStream<TextStreamPart<TOOLS>>;
 warnings: CallWarning[] | undefined;
 rawResponse?: {
 headers?: Record<string, string>;
 };
+onFinish?: Parameters<typeof streamText>[0]['onFinish'];
 });
 /**
 Split out a new stream from the original stream.
@@ -2268,4 +2344,4 @@ declare class StreamingTextResponse extends Response {
 constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
 }
 
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, AssistantStatus, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseAssistantOptions, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, AssistantStatus, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseAssistantOptions, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.js
CHANGED
@@ -1265,6 +1265,33 @@ var StreamObjectResult = class {
 };
 var experimental_streamObject = streamObject;
 
+// core/util/is-non-empty-object.ts
+function isNonEmptyObject(object) {
+return object != null && Object.keys(object).length > 0;
+}
+
+// core/prompt/prepare-tools-and-tool-choice.ts
+function prepareToolsAndToolChoice({
+tools,
+toolChoice
+}) {
+if (!isNonEmptyObject(tools)) {
+return {
+tools: void 0,
+toolChoice: void 0
+};
+}
+return {
+tools: Object.entries(tools).map(([name, tool2]) => ({
+type: "function",
+name,
+description: tool2.description,
+parameters: convertZodToJSONSchema(tool2.parameters)
+})),
+toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
+};
+}
+
 // core/generate-text/tool-call.ts
 var import_provider6 = require("@ai-sdk/provider");
 var import_provider_utils4 = require("@ai-sdk/provider-utils");
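
The `toolChoice` branch of the new helper reduces to the following normalization (a standalone restatement of the logic above, not an exported API):

type ProviderToolChoice =
  | { type: 'auto' | 'none' | 'required' }
  | { type: 'tool'; toolName: string };

// Mirrors prepareToolsAndToolChoice: undefined -> 'auto', string values pass
// through as { type }, and object values keep only the toolName.
function normalizeToolChoice(
  toolChoice: 'auto' | 'none' | 'required' | { type: 'tool'; toolName: string } | undefined,
): ProviderToolChoice {
  if (toolChoice == null) return { type: 'auto' };
  if (typeof toolChoice === 'string') return { type: toolChoice };
  return { type: 'tool', toolName: toolChoice.toolName };
}
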
@@ -1306,6 +1333,7 @@ function parseToolCall({
 async function generateText({
 model,
 tools,
+toolChoice,
 system,
 prompt,
 messages,
@@ -1320,12 +1348,7 @@ async function generateText({
 return model.doGenerate({
 mode: {
 type: "regular",
-
-type: "function",
-name,
-description: tool2.description,
-parameters: convertZodToJSONSchema(tool2.parameters)
-}))
+...prepareToolsAndToolChoice({ tools, toolChoice })
 },
 ...prepareCallSettings(settings),
 inputFormat: validatedPrompt.type,
@@ -1538,11 +1561,13 @@ function runToolsTransformation({
 async function streamText({
 model,
 tools,
+toolChoice,
 system,
 prompt,
 messages,
 maxRetries,
 abortSignal,
+onFinish,
 ...settings
 }) {
 const retry = retryWithExponentialBackoff({ maxRetries });
@@ -1551,12 +1576,7 @@ async function streamText({
 () => model.doStream({
 mode: {
 type: "regular",
-
-type: "function",
-name,
-description: tool2.description,
-parameters: convertZodToJSONSchema(tool2.parameters)
-}))
+...prepareToolsAndToolChoice({ tools, toolChoice })
 },
 ...prepareCallSettings(settings),
 inputFormat: validatedPrompt.type,
@@ -1570,17 +1590,20 @@ async function streamText({
 generatorStream: stream
 }),
 warnings,
-rawResponse
+rawResponse,
+onFinish
 });
 }
 var StreamTextResult = class {
 constructor({
 stream,
 warnings,
-rawResponse
+rawResponse,
+onFinish
 }) {
 this.warnings = warnings;
 this.rawResponse = rawResponse;
+this.onFinish = onFinish;
 let resolveUsage;
 this.usage = new Promise((resolve) => {
 resolveUsage = resolve;
@@ -1589,13 +1612,70 @@ var StreamTextResult = class {
 this.finishReason = new Promise((resolve) => {
 resolveFinishReason = resolve;
 });
+let resolveText;
+this.text = new Promise((resolve) => {
+resolveText = resolve;
+});
+let resolveToolCalls;
+this.toolCalls = new Promise((resolve) => {
+resolveToolCalls = resolve;
+});
+let resolveToolResults;
+this.toolResults = new Promise((resolve) => {
+resolveToolResults = resolve;
+});
+let finishReason;
+let usage;
+let text = "";
+const toolCalls = [];
+const toolResults = [];
+const self = this;
 this.originalStream = stream.pipeThrough(
 new TransformStream({
 async transform(chunk, controller) {
 controller.enqueue(chunk);
+if (chunk.type === "text-delta") {
+text += chunk.textDelta;
+}
+if (chunk.type === "tool-call") {
+toolCalls.push(chunk);
+}
+if (chunk.type === "tool-result") {
+toolResults.push(chunk);
+}
 if (chunk.type === "finish") {
-
-
+usage = chunk.usage;
+finishReason = chunk.finishReason;
+resolveUsage(usage);
+resolveFinishReason(finishReason);
+resolveText(text);
+resolveToolCalls(toolCalls);
+}
+},
+// invoke onFinish callback and resolve toolResults promise when the stream is about to close:
+async flush(controller) {
+var _a;
+try {
+resolveToolResults(toolResults);
+await ((_a = self.onFinish) == null ? void 0 : _a.call(self, {
+finishReason: finishReason != null ? finishReason : "unknown",
+usage: usage != null ? usage : {
+promptTokens: NaN,
+completionTokens: NaN,
+totalTokens: NaN
+},
+text,
+toolCalls,
+// The tool results are inferred as a never[] type, because they are
+// optional and the execute method with an inferred result type is
+// optional as well. Therefore we need to cast the toolResults to any.
+// The type exposed to the users will be correctly inferred.
+toolResults,
+rawResponse,
+warnings
+}));
+} catch (error) {
+controller.error(error);
 }
 }
 })