ai 3.1.6 → 3.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -1,10 +1,107 @@
- import { z } from 'zod';
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
+ import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';

+ /**
+ Embedding model that is used by the AI SDK Core functions.
+ */
+ type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
+ /**
+ Embedding.
+ */
+ type Embedding = EmbeddingModelV1Embedding;
+
+ /**
+ Language model that is used by the AI SDK Core functions.
+ */
+ type LanguageModel = LanguageModelV1;
+ /**
+ Reason why a language model finished generating a response.
+
+ Can be one of the following:
+ - `stop`: model generated stop sequence
+ - `length`: model generated maximum number of tokens
+ - `content-filter`: content filter violation stopped the model
+ - `tool-calls`: model triggered tool calls
+ - `error`: model stopped because of an error
+ - `other`: model stopped for other reasons
+ */
+ type FinishReason = LanguageModelV1FinishReason;
+ /**
+ Log probabilities for each token and its top log probabilities.
+ */
+ type LogProbs = LanguageModelV1LogProbs;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type CallWarning = LanguageModelV1CallWarning;
+
+ /**
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
+
+ @param model - The embedding model to use.
+ @param value - The value that should be embedded.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns A result object that contains the embedding, the value, and additional information.
+ */
+ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, }: {
+ /**
+ The embedding model to use.
+ */
+ model: EmbeddingModel<VALUE>;
+ /**
+ The value that should be embedded.
+ */
+ value: VALUE;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ }): Promise<EmbedResult<VALUE>>;
+ /**
+ The result of a `embed` call.
+ It contains the embedding, the value, and additional information.
+ */
+ declare class EmbedResult<VALUE> {
+ /**
+ The value that was embedded.
+ */
+ readonly value: VALUE;
+ /**
+ The embedding of the value.
+ */
+ readonly embedding: Embedding;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor(options: {
+ value: VALUE;
+ embedding: Embedding;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ });
+ }
+
  type TokenUsage = {
  promptTokens: number;
  completionTokens: number;
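
The `embed` function and `EmbedResult` class above are the headline addition in this release. A minimal usage sketch against the declaration shown here, assuming a provider package that supplies an `EmbeddingModel<string>` (the `myEmbeddingModel` import is a placeholder, not part of this diff):

```ts
import { embed } from 'ai';
// Placeholder: any model implementing EmbeddingModelV1<string> from a provider package.
import { myEmbeddingModel } from './my-provider';

async function main() {
  // maxRetries defaults to 2; pass 0 to disable retries, or an AbortSignal to cancel.
  const { value, embedding, rawResponse } = await embed({
    model: myEmbeddingModel,
    value: 'sunny day at the beach',
  });

  console.log(value);                // the original input value
  console.log(embedding.length);     // embedding vector length (assuming a numeric array)
  console.log(rawResponse?.headers); // optional raw response headers
}

main().catch(console.error);
```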
@@ -242,32 +339,6 @@ type Prompt = {
  messages?: Array<CoreMessage>;
  };

- /**
- Language model that is used by the AI SDK Core functions.
- */
- type LanguageModel = LanguageModelV1;
- /**
- Reason why a language model finished generating a response.
-
- Can be one of the following:
- - `stop`: model generated stop sequence
- - `length`: model generated maximum number of tokens
- - `content-filter`: content filter violation stopped the model
- - `tool-calls`: model triggered tool calls
- - `error`: model stopped because of an error
- - `other`: model stopped for other reasons
- */
- type FinishReason = LanguageModelV1FinishReason;
- /**
- Log probabilities for each token and its top log probabilities.
- */
- type LogProbs = LanguageModelV1LogProbs;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type CallWarning = LanguageModelV1CallWarning;
-
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.

@@ -591,6 +662,24 @@ onlyBar('bar');
  */
  type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];

+ /**
+ Typed tool call that is returned by generateText and streamText.
+ It contains the tool call ID, the tool name, and the tool arguments.
+ */
+ interface ToolCall$1<NAME extends string, ARGS> {
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that is being called.
+ */
+ toolName: NAME;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: ARGS;
+ }
  type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  [NAME in keyof TOOLS]: {
  type: 'tool-call';
@@ -601,6 +690,28 @@ type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  }>;
  type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;

+ /**
+ Typed tool result that is returned by generateText and streamText.
+ It contains the tool call ID, the tool name, the tool arguments, and the tool result.
+ */
+ interface ToolResult<NAME extends string, ARGS, RESULT> {
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that was called.
+ */
+ toolName: NAME;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: ARGS;
+ /**
+ Result of the tool call. This is the result of the tool's execution.
+ */
+ result: RESULT;
+ }
  type ToToolsWithExecute<TOOLS extends Record<string, CoreTool>> = {
  [K in keyof TOOLS as TOOLS[K] extends {
  execute: any;
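
The `ToolCall` and `ToolResult` interfaces above are the element types behind the `toolCalls` and `toolResults` fields on `generateText` results. A sketch of how they surface in practice, assuming the `generateText`, `tool`, and zod APIs exported by this package (`myChatModel` and the `weather` tool are placeholders):

```ts
import { generateText, tool } from 'ai';
import { z } from 'zod';
// Placeholder: any LanguageModelV1 from a provider package.
import { myChatModel } from './my-provider';

async function main() {
  const result = await generateText({
    model: myChatModel,
    tools: {
      // Hypothetical tool; `parameters` defines the args schema, `execute` produces the result.
      weather: tool({
        description: 'Get the weather for a city',
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, temperatureCelsius: 21 }),
      }),
    },
    prompt: 'How warm is it in Berlin?',
  });

  // Each tool call carries toolCallId, toolName, and typed args (see ToolCall above);
  // each tool result additionally carries the typed `result` (see ToolResult above).
  for (const call of result.toolCalls) {
    console.log(call.toolCallId, call.toolName, call.args);
  }
  for (const toolResult of result.toolResults) {
    console.log(toolResult.toolCallId, toolResult.result);
  }
}

main().catch(console.error);
```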
@@ -854,7 +965,7 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {

  @returns an `AIStream` object.
  */
- toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+ toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
  /**
  Writes stream data output to a Node.js response-like object.
  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
@@ -902,6 +1013,16 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  */
  declare const experimental_streamText: typeof streamText;

+ /**
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
+ with the AI core functions (e.g. `streamText`).
+ */
+ declare function convertToCoreMessages(messages: Array<{
+ role: 'user' | 'assistant';
+ content: string;
+ toolInvocations?: Array<ToolResult<string, unknown, unknown>>;
+ }>): CoreMessage[];
+
  interface FunctionCall$1 {
  /**
  * The arguments to call the function with, as generated by the model in JSON
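
`convertToCoreMessages` bridges `useChat`-style UI messages to the core message format. A sketch of a typical route handler, assuming a Fetch-style `Request` (e.g. a Next.js App Router route) and a placeholder provider model:

```ts
import { convertToCoreMessages, streamText, StreamingTextResponse } from 'ai';
// Placeholder: any LanguageModelV1 from a provider package.
import { myChatModel } from './my-provider';

export async function POST(req: Request) {
  // useChat sends { role, content, toolInvocations? } messages from the client.
  const { messages } = await req.json();

  const result = await streamText({
    model: myChatModel,
    messages: convertToCoreMessages(messages),
  });

  // toAIStream() now returns ReadableStream<Uint8Array> (see the StreamTextResult change above),
  // which StreamingTextResponse accepts directly.
  return new StreamingTextResponse(result.toAIStream());
}
```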
@@ -970,6 +1091,12 @@ interface Function {
  description?: string;
  }
  type IdGenerator = () => string;
+ /**
+ Tool invocations are either tool calls or tool results. For each assistant tool call,
+ there is one tool invocation. While the call is in progress, the invocation is a tool call.
+ Once the call is complete, the invocation is a tool result.
+ */
+ type ToolInvocation = ToolCall$1<string, any> | ToolResult<string, any, any>;
  /**
  * Shared types between the API and UI packages.
  */
@@ -1005,6 +1132,11 @@ interface Message$1 {
  * Additional message-specific information added on the server via StreamData
  */
  annotations?: JSONValue[] | undefined;
+ /**
+ Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
+ that the assistant made as part of this message.
+ */
+ toolInvocations?: Array<ToolInvocation>;
  }
  type CreateMessage = Omit<Message$1, 'id'> & {
  id?: Message$1['id'];
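
`Message` now carries an optional `toolInvocations` array, and the newly exported `ToolInvocation` union can be narrowed by checking for the `result` field that only finished tool results have. A small sketch based purely on the declarations above (the rendered strings are illustrative only):

```ts
import type { Message, ToolInvocation } from 'ai';

// A tool result has a `result` field; a still-running tool call does not.
function describeInvocation(invocation: ToolInvocation): string {
  return 'result' in invocation
    ? `${invocation.toolName} finished: ${JSON.stringify(invocation.result)}`
    : `${invocation.toolName} is still running`;
}

function describeToolActivity(message: Message): string[] {
  return (message.toolInvocations ?? []).map(describeInvocation);
}
```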
@@ -1210,18 +1342,20 @@ declare const assistantControlDataStreamPart: StreamPart<'5', 'assistant_control
  messageId: string;
  }>;
  declare const dataMessageStreamPart: StreamPart<'6', 'data_message', DataMessage>;
- declare const toolCallStreamPart: StreamPart<'7', 'tool_calls', {
+ declare const toolCallsStreamPart: StreamPart<'7', 'tool_calls', {
  tool_calls: ToolCall[];
  }>;
  declare const messageAnnotationsStreamPart: StreamPart<'8', 'message_annotations', Array<JSONValue>>;
- type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallStreamPart | typeof messageAnnotationsStreamPart;
+ declare const toolCallStreamPart: StreamPart<'9', 'tool_call', ToolCall$1<string, any>>;
+ declare const toolResultStreamPart: StreamPart<'a', 'tool_result', ToolResult<string, any, any>>;
+ type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallsStreamPart | typeof messageAnnotationsStreamPart | typeof toolCallStreamPart | typeof toolResultStreamPart;
  /**
  * Maps the type of a stream part to its value type.
  */
  type StreamPartValueType = {
  [P in StreamParts as P['name']]: ReturnType<P['parse']>['value'];
  };
- type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse>;
+ type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallsStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof toolResultStreamPart.parse>;
  /**
  * The map of prefixes for data in the stream
  *
@@ -1254,6 +1388,8 @@ declare const StreamStringPrefixes: {
  readonly data_message: "6";
  readonly tool_calls: "7";
  readonly message_annotations: "8";
+ readonly tool_call: "9";
+ readonly tool_result: "a";
  };
  /**
  Parses a stream part from a string.
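
The new `tool_call` and `tool_result` stream parts use the `9:` and `a:` prefixes added to `StreamStringPrefixes` above. A sketch of the wire format using the exported `formatStreamPart`/`parseStreamPart` helpers; the tool name and payload are made up, and the exact JSON layout shown in the comments is an assumption based on the declared value types:

```ts
import { formatStreamPart, parseStreamPart } from 'ai';

// A single typed tool call is framed with the '9:' prefix.
const callLine = formatStreamPart('tool_call', {
  toolCallId: 'call_123',
  toolName: 'weather',
  args: { city: 'Berlin' },
});
// likely '9:{"toolCallId":"call_123","toolName":"weather","args":{"city":"Berlin"}}\n'

// The matching tool result is framed with the 'a:' prefix.
const resultLine = formatStreamPart('tool_result', {
  toolCallId: 'call_123',
  toolName: 'weather',
  args: { city: 'Berlin' },
  result: { temperatureCelsius: 21 },
});

// parseStreamPart round-trips both new part types.
const part = parseStreamPart(callLine); // { type: 'tool_call', value: { toolCallId, toolName, args } }
console.log(part.type, resultLine.startsWith('a:'));
```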
@@ -1296,7 +1432,7 @@ declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefi
  declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
  declare function createChunkDecoder(complex?: boolean): (chunk: Uint8Array | undefined) => StreamPartType[] | string;

- declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n`;
+ declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n` | `9:${string}\n` | `a:${string}\n`;
  type StreamString = `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`;

  declare interface AzureChatCompletions {
@@ -2048,4 +2184,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.d.ts CHANGED
@@ -1,10 +1,107 @@
- import { z } from 'zod';
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
+ import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';

+ /**
+ Embedding model that is used by the AI SDK Core functions.
+ */
+ type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
+ /**
+ Embedding.
+ */
+ type Embedding = EmbeddingModelV1Embedding;
+
+ /**
+ Language model that is used by the AI SDK Core functions.
+ */
+ type LanguageModel = LanguageModelV1;
+ /**
+ Reason why a language model finished generating a response.
+
+ Can be one of the following:
+ - `stop`: model generated stop sequence
+ - `length`: model generated maximum number of tokens
+ - `content-filter`: content filter violation stopped the model
+ - `tool-calls`: model triggered tool calls
+ - `error`: model stopped because of an error
+ - `other`: model stopped for other reasons
+ */
+ type FinishReason = LanguageModelV1FinishReason;
+ /**
+ Log probabilities for each token and its top log probabilities.
+ */
+ type LogProbs = LanguageModelV1LogProbs;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type CallWarning = LanguageModelV1CallWarning;
+
+ /**
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
+
+ @param model - The embedding model to use.
+ @param value - The value that should be embedded.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns A result object that contains the embedding, the value, and additional information.
+ */
+ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, }: {
+ /**
+ The embedding model to use.
+ */
+ model: EmbeddingModel<VALUE>;
+ /**
+ The value that should be embedded.
+ */
+ value: VALUE;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ }): Promise<EmbedResult<VALUE>>;
+ /**
+ The result of a `embed` call.
+ It contains the embedding, the value, and additional information.
+ */
+ declare class EmbedResult<VALUE> {
+ /**
+ The value that was embedded.
+ */
+ readonly value: VALUE;
+ /**
+ The embedding of the value.
+ */
+ readonly embedding: Embedding;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor(options: {
+ value: VALUE;
+ embedding: Embedding;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ });
+ }
+
  type TokenUsage = {
  promptTokens: number;
  completionTokens: number;
@@ -242,32 +339,6 @@ type Prompt = {
  messages?: Array<CoreMessage>;
  };

- /**
- Language model that is used by the AI SDK Core functions.
- */
- type LanguageModel = LanguageModelV1;
- /**
- Reason why a language model finished generating a response.
-
- Can be one of the following:
- - `stop`: model generated stop sequence
- - `length`: model generated maximum number of tokens
- - `content-filter`: content filter violation stopped the model
- - `tool-calls`: model triggered tool calls
- - `error`: model stopped because of an error
- - `other`: model stopped for other reasons
- */
- type FinishReason = LanguageModelV1FinishReason;
- /**
- Log probabilities for each token and its top log probabilities.
- */
- type LogProbs = LanguageModelV1LogProbs;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type CallWarning = LanguageModelV1CallWarning;
-
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.

@@ -591,6 +662,24 @@ onlyBar('bar');
  */
  type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];

+ /**
+ Typed tool call that is returned by generateText and streamText.
+ It contains the tool call ID, the tool name, and the tool arguments.
+ */
+ interface ToolCall$1<NAME extends string, ARGS> {
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that is being called.
+ */
+ toolName: NAME;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: ARGS;
+ }
  type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  [NAME in keyof TOOLS]: {
  type: 'tool-call';
@@ -601,6 +690,28 @@ type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
  }>;
  type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;

+ /**
+ Typed tool result that is returned by generateText and streamText.
+ It contains the tool call ID, the tool name, the tool arguments, and the tool result.
+ */
+ interface ToolResult<NAME extends string, ARGS, RESULT> {
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that was called.
+ */
+ toolName: NAME;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: ARGS;
+ /**
+ Result of the tool call. This is the result of the tool's execution.
+ */
+ result: RESULT;
+ }
  type ToToolsWithExecute<TOOLS extends Record<string, CoreTool>> = {
  [K in keyof TOOLS as TOOLS[K] extends {
  execute: any;
@@ -854,7 +965,7 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {

  @returns an `AIStream` object.
  */
- toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+ toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
  /**
  Writes stream data output to a Node.js response-like object.
  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
@@ -902,6 +1013,16 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  */
  declare const experimental_streamText: typeof streamText;

+ /**
+ Converts an array of messages from useChat into an array of CoreMessages that can be used
+ with the AI core functions (e.g. `streamText`).
+ */
+ declare function convertToCoreMessages(messages: Array<{
+ role: 'user' | 'assistant';
+ content: string;
+ toolInvocations?: Array<ToolResult<string, unknown, unknown>>;
+ }>): CoreMessage[];
+
  interface FunctionCall$1 {
  /**
  * The arguments to call the function with, as generated by the model in JSON
@@ -970,6 +1091,12 @@ interface Function {
  description?: string;
  }
  type IdGenerator = () => string;
+ /**
+ Tool invocations are either tool calls or tool results. For each assistant tool call,
+ there is one tool invocation. While the call is in progress, the invocation is a tool call.
+ Once the call is complete, the invocation is a tool result.
+ */
+ type ToolInvocation = ToolCall$1<string, any> | ToolResult<string, any, any>;
  /**
  * Shared types between the API and UI packages.
  */
@@ -1005,6 +1132,11 @@ interface Message$1 {
  * Additional message-specific information added on the server via StreamData
  */
  annotations?: JSONValue[] | undefined;
+ /**
+ Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
+ that the assistant made as part of this message.
+ */
+ toolInvocations?: Array<ToolInvocation>;
  }
  type CreateMessage = Omit<Message$1, 'id'> & {
  id?: Message$1['id'];
@@ -1210,18 +1342,20 @@ declare const assistantControlDataStreamPart: StreamPart<'5', 'assistant_control
  messageId: string;
  }>;
  declare const dataMessageStreamPart: StreamPart<'6', 'data_message', DataMessage>;
- declare const toolCallStreamPart: StreamPart<'7', 'tool_calls', {
+ declare const toolCallsStreamPart: StreamPart<'7', 'tool_calls', {
  tool_calls: ToolCall[];
  }>;
  declare const messageAnnotationsStreamPart: StreamPart<'8', 'message_annotations', Array<JSONValue>>;
- type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallStreamPart | typeof messageAnnotationsStreamPart;
+ declare const toolCallStreamPart: StreamPart<'9', 'tool_call', ToolCall$1<string, any>>;
+ declare const toolResultStreamPart: StreamPart<'a', 'tool_result', ToolResult<string, any, any>>;
+ type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallsStreamPart | typeof messageAnnotationsStreamPart | typeof toolCallStreamPart | typeof toolResultStreamPart;
  /**
  * Maps the type of a stream part to its value type.
  */
  type StreamPartValueType = {
  [P in StreamParts as P['name']]: ReturnType<P['parse']>['value'];
  };
- type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse>;
+ type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallsStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof toolResultStreamPart.parse>;
  /**
  * The map of prefixes for data in the stream
  *
@@ -1254,6 +1388,8 @@ declare const StreamStringPrefixes: {
  readonly data_message: "6";
  readonly tool_calls: "7";
  readonly message_annotations: "8";
+ readonly tool_call: "9";
+ readonly tool_result: "a";
  };
  /**
  Parses a stream part from a string.
@@ -1296,7 +1432,7 @@ declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefi
  declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
  declare function createChunkDecoder(complex?: boolean): (chunk: Uint8Array | undefined) => StreamPartType[] | string;

- declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n`;
+ declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n` | `9:${string}\n` | `a:${string}\n`;
  type StreamString = `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`;

  declare interface AzureChatCompletions {
@@ -2048,4 +2184,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };