ai 3.2.18 → 3.2.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -4,10 +4,38 @@ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageM
4
4
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
5
5
  import { z } from 'zod';
6
6
  import { ServerResponse } from 'http';
7
+ import { AttributeValue, Span } from '@opentelemetry/api';
7
8
  import { ServerResponse as ServerResponse$1 } from 'node:http';
8
9
  import { AssistantStream } from 'openai/lib/AssistantStream';
9
10
  import { Run } from 'openai/resources/beta/threads/runs/runs';
10
11
 
12
+ /**
13
+ Represents the number of tokens used in a prompt and completion.
14
+ */
15
+ type CompletionTokenUsage$1 = {
16
+ /**
17
+ The number of tokens used in the prompt
18
+ */
19
+ promptTokens: number;
20
+ /**
21
+ The number of tokens used in the completion.
22
+ */
23
+ completionTokens: number;
24
+ /**
25
+ The total number of tokens used (promptTokens + completionTokens).
26
+ */
27
+ totalTokens: number;
28
+ };
29
+ /**
30
+ Represents the number of tokens used in an embedding.
31
+ */
32
+ type EmbeddingTokenUsage = {
33
+ /**
34
+ The number of tokens used in the embedding.
35
+ */
36
+ tokens: number;
37
+ };
38
+
11
39
  /**
12
40
  Embedding model that is used by the AI SDK Core functions.
13
41
  */
@@ -55,6 +83,12 @@ type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | '
55
83
  toolName: keyof TOOLS;
56
84
  };
57
85
 
86
+ /**
87
+ * @deprecated Use CompletionTokenUsage instead.
88
+ */
89
+ type TokenUsage = CompletionTokenUsage$1;
90
+ type CompletionTokenUsage = CompletionTokenUsage$1;
91
+
58
92
  /**
59
93
  Embed a value using an embedding model. The type of the value is defined by the embedding model.
60
94
 
@@ -106,6 +140,10 @@ declare class EmbedResult<VALUE> {
106
140
  */
107
141
  readonly embedding: Embedding;
108
142
  /**
143
+ The embedding token usage.
144
+ */
145
+ readonly usage: EmbeddingTokenUsage;
146
+ /**
109
147
  Optional raw response data.
110
148
  */
111
149
  readonly rawResponse?: {
@@ -117,6 +155,7 @@ declare class EmbedResult<VALUE> {
117
155
  constructor(options: {
118
156
  value: VALUE;
119
157
  embedding: Embedding;
158
+ usage: EmbeddingTokenUsage;
120
159
  rawResponse?: {
121
160
  headers?: Record<string, string>;
122
161
  };
@@ -177,30 +216,17 @@ declare class EmbedManyResult<VALUE> {
177
216
  The embeddings. They are in the same order as the values.
178
217
  */
179
218
  readonly embeddings: Array<Embedding>;
219
+ /**
220
+ The embedding token usage.
221
+ */
222
+ readonly usage: EmbeddingTokenUsage;
180
223
  constructor(options: {
181
224
  values: Array<VALUE>;
182
225
  embeddings: Array<Embedding>;
226
+ usage: EmbeddingTokenUsage;
183
227
  });
184
228
  }
185
229
 
186
- /**
187
- Represents the number of tokens used in a prompt and completion.
188
- */
189
- type TokenUsage = {
190
- /**
191
- The number of tokens used in the prompt
192
- */
193
- promptTokens: number;
194
- /**
195
- The number of tokens used in the completion.
196
- */
197
- completionTokens: number;
198
- /**
199
- The total number of tokens used (promptTokens + completionTokens).
200
- */
201
- totalTokens: number;
202
- };
203
-
204
230
  type CallSettings = {
205
231
  /**
206
232
  Maximum number of tokens to generate.
@@ -514,7 +540,7 @@ declare class GenerateObjectResult<T> {
514
540
  /**
515
541
  The token usage of the generated text.
516
542
  */
517
- readonly usage: TokenUsage;
543
+ readonly usage: CompletionTokenUsage$1;
518
544
  /**
519
545
  Warnings from the model provider (e.g. unsupported settings)
520
546
  */
@@ -536,7 +562,7 @@ declare class GenerateObjectResult<T> {
536
562
  constructor(options: {
537
563
  object: T;
538
564
  finishReason: FinishReason;
539
- usage: TokenUsage;
565
+ usage: CompletionTokenUsage$1;
540
566
  warnings: CallWarning[] | undefined;
541
567
  rawResponse?: {
542
568
  headers?: Record<string, string>;
@@ -624,7 +650,7 @@ Callback that is called when the LLM response and the final object validation ar
624
650
  /**
625
651
  The token usage of the generated response.
626
652
  */
627
- usage: TokenUsage;
653
+ usage: CompletionTokenUsage$1;
628
654
  /**
629
655
  The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
630
656
  */
@@ -684,7 +710,7 @@ declare class StreamObjectResult<T> {
684
710
  /**
685
711
  The token usage of the generated response. Resolved when the response is finished.
686
712
  */
687
- readonly usage: Promise<TokenUsage>;
713
+ readonly usage: Promise<CompletionTokenUsage$1>;
688
714
  /**
689
715
  Optional raw response data.
690
716
  */
@@ -896,6 +922,24 @@ declare class InvalidMessageRoleError extends Error {
896
922
  };
897
923
  }
898
924
 
925
+ /**
926
+ * Telemetry configuration.
927
+ */
928
+ type TelemetrySettings = {
929
+ /**
930
+ * Enable or disable telemetry. Disabled by default while experimental.
931
+ */
932
+ isEnabled?: boolean;
933
+ /**
934
+ * Identifier for this function. Used to group telemetry data by function.
935
+ */
936
+ functionId?: string;
937
+ /**
938
+ * Additional information to include in the telemetry data.
939
+ */
940
+ metadata?: Record<string, AttributeValue>;
941
+ };
942
+
899
943
  type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
900
944
  [NAME in keyof TOOLS]: {
901
945
  type: 'tool-call';
@@ -945,7 +989,7 @@ If set and supported by the model, calls will generate deterministic results.
945
989
  @returns
946
990
  A result object that contains the generated text, the results of the tool calls, and additional information.
947
991
  */
948
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, ...settings }: CallSettings & Prompt & {
992
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, experimental_telemetry: telemetry, ...settings }: CallSettings & Prompt & {
949
993
  /**
950
994
  The language model to use.
951
995
  */
@@ -975,6 +1019,10 @@ case of misconfigured tools.
975
1019
  By default, it's set to 0, which will disable the feature.
976
1020
  */
977
1021
  maxToolRoundtrips?: number;
1022
+ /**
1023
+ * Optional telemetry configuration (experimental).
1024
+ */
1025
+ experimental_telemetry?: TelemetrySettings;
978
1026
  }): Promise<GenerateTextResult<TOOLS>>;
979
1027
  /**
980
1028
  The result of a `generateText` call.
@@ -1000,7 +1048,7 @@ declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
1000
1048
  /**
1001
1049
  The token usage of the generated text.
1002
1050
  */
1003
- readonly usage: TokenUsage;
1051
+ readonly usage: CompletionTokenUsage$1;
1004
1052
  /**
1005
1053
  Warnings from the model provider (e.g. unsupported settings)
1006
1054
  */
@@ -1032,7 +1080,7 @@ declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
1032
1080
  toolCalls: ToToolCallArray<TOOLS>;
1033
1081
  toolResults: ToToolResultArray<TOOLS>;
1034
1082
  finishReason: FinishReason;
1035
- usage: TokenUsage;
1083
+ usage: CompletionTokenUsage$1;
1036
1084
  warnings: CallWarning[] | undefined;
1037
1085
  rawResponse?: {
1038
1086
  headers?: Record<string, string>;
@@ -1084,7 +1132,7 @@ If set and supported by the model, calls will generate deterministic results.
1084
1132
  @return
1085
1133
  A result object for accessing different stream types and additional information.
1086
1134
  */
1087
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: CallSettings & Prompt & {
1135
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, onFinish, ...settings }: CallSettings & Prompt & {
1088
1136
  /**
1089
1137
  The language model to use.
1090
1138
  */
@@ -1098,6 +1146,10 @@ The tool choice strategy. Default: 'auto'.
1098
1146
  */
1099
1147
  toolChoice?: CoreToolChoice<TOOLS>;
1100
1148
  /**
1149
+ * Optional telemetry configuration (experimental).
1150
+ */
1151
+ experimental_telemetry?: TelemetrySettings;
1152
+ /**
1101
1153
  Callback that is called when the LLM response and all request tool executions
1102
1154
  (for tools that have an `execute` function) are finished.
1103
1155
  */
@@ -1109,7 +1161,7 @@ Callback that is called when the LLM response and all request tool executions
1109
1161
  /**
1110
1162
  The token usage of the generated response.
1111
1163
  */
1112
- usage: TokenUsage;
1164
+ usage: CompletionTokenUsage$1;
1113
1165
  /**
1114
1166
  The full text that has been generated.
1115
1167
  */
@@ -1170,7 +1222,7 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
1170
1222
  /**
1171
1223
  The token usage of the generated response. Resolved when the response is finished.
1172
1224
  */
1173
- readonly usage: Promise<TokenUsage>;
1225
+ readonly usage: Promise<CompletionTokenUsage$1>;
1174
1226
  /**
1175
1227
  The reason why the generation finished. Resolved when the response is finished.
1176
1228
  */
@@ -1196,13 +1248,15 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
1196
1248
  */
1197
1249
  headers?: Record<string, string>;
1198
1250
  };
1199
- constructor({ stream, warnings, rawResponse, onFinish, }: {
1251
+ constructor({ stream, warnings, rawResponse, onFinish, rootSpan, doStreamSpan, }: {
1200
1252
  stream: ReadableStream<TextStreamPart<TOOLS>>;
1201
1253
  warnings: CallWarning[] | undefined;
1202
1254
  rawResponse?: {
1203
1255
  headers?: Record<string, string>;
1204
1256
  };
1205
1257
  onFinish?: Parameters<typeof streamText>[0]['onFinish'];
1258
+ rootSpan: Span;
1259
+ doStreamSpan: Span;
1206
1260
  });
1207
1261
  /**
1208
1262
  Split out a new stream from the original stream.
@@ -2130,4 +2184,4 @@ declare const generateId: (size?: number | undefined) => string;
2130
2184
  */
2131
2185
  declare const nanoid: (size?: number | undefined) => string;
2132
2186
 
2133
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
2187
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
package/dist/index.d.ts CHANGED
@@ -4,10 +4,38 @@ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageM
4
4
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
5
5
  import { z } from 'zod';
6
6
  import { ServerResponse } from 'http';
7
+ import { AttributeValue, Span } from '@opentelemetry/api';
7
8
  import { ServerResponse as ServerResponse$1 } from 'node:http';
8
9
  import { AssistantStream } from 'openai/lib/AssistantStream';
9
10
  import { Run } from 'openai/resources/beta/threads/runs/runs';
10
11
 
12
+ /**
13
+ Represents the number of tokens used in a prompt and completion.
14
+ */
15
+ type CompletionTokenUsage$1 = {
16
+ /**
17
+ The number of tokens used in the prompt
18
+ */
19
+ promptTokens: number;
20
+ /**
21
+ The number of tokens used in the completion.
22
+ */
23
+ completionTokens: number;
24
+ /**
25
+ The total number of tokens used (promptTokens + completionTokens).
26
+ */
27
+ totalTokens: number;
28
+ };
29
+ /**
30
+ Represents the number of tokens used in an embedding.
31
+ */
32
+ type EmbeddingTokenUsage = {
33
+ /**
34
+ The number of tokens used in the embedding.
35
+ */
36
+ tokens: number;
37
+ };
38
+
11
39
  /**
12
40
  Embedding model that is used by the AI SDK Core functions.
13
41
  */
@@ -55,6 +83,12 @@ type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | '
55
83
  toolName: keyof TOOLS;
56
84
  };
57
85
 
86
+ /**
87
+ * @deprecated Use CompletionTokenUsage instead.
88
+ */
89
+ type TokenUsage = CompletionTokenUsage$1;
90
+ type CompletionTokenUsage = CompletionTokenUsage$1;
91
+
58
92
  /**
59
93
  Embed a value using an embedding model. The type of the value is defined by the embedding model.
60
94
 
@@ -106,6 +140,10 @@ declare class EmbedResult<VALUE> {
106
140
  */
107
141
  readonly embedding: Embedding;
108
142
  /**
143
+ The embedding token usage.
144
+ */
145
+ readonly usage: EmbeddingTokenUsage;
146
+ /**
109
147
  Optional raw response data.
110
148
  */
111
149
  readonly rawResponse?: {
@@ -117,6 +155,7 @@ declare class EmbedResult<VALUE> {
117
155
  constructor(options: {
118
156
  value: VALUE;
119
157
  embedding: Embedding;
158
+ usage: EmbeddingTokenUsage;
120
159
  rawResponse?: {
121
160
  headers?: Record<string, string>;
122
161
  };
@@ -177,30 +216,17 @@ declare class EmbedManyResult<VALUE> {
177
216
  The embeddings. They are in the same order as the values.
178
217
  */
179
218
  readonly embeddings: Array<Embedding>;
219
+ /**
220
+ The embedding token usage.
221
+ */
222
+ readonly usage: EmbeddingTokenUsage;
180
223
  constructor(options: {
181
224
  values: Array<VALUE>;
182
225
  embeddings: Array<Embedding>;
226
+ usage: EmbeddingTokenUsage;
183
227
  });
184
228
  }
185
229
 
186
- /**
187
- Represents the number of tokens used in a prompt and completion.
188
- */
189
- type TokenUsage = {
190
- /**
191
- The number of tokens used in the prompt
192
- */
193
- promptTokens: number;
194
- /**
195
- The number of tokens used in the completion.
196
- */
197
- completionTokens: number;
198
- /**
199
- The total number of tokens used (promptTokens + completionTokens).
200
- */
201
- totalTokens: number;
202
- };
203
-
204
230
  type CallSettings = {
205
231
  /**
206
232
  Maximum number of tokens to generate.
@@ -514,7 +540,7 @@ declare class GenerateObjectResult<T> {
514
540
  /**
515
541
  The token usage of the generated text.
516
542
  */
517
- readonly usage: TokenUsage;
543
+ readonly usage: CompletionTokenUsage$1;
518
544
  /**
519
545
  Warnings from the model provider (e.g. unsupported settings)
520
546
  */
@@ -536,7 +562,7 @@ declare class GenerateObjectResult<T> {
536
562
  constructor(options: {
537
563
  object: T;
538
564
  finishReason: FinishReason;
539
- usage: TokenUsage;
565
+ usage: CompletionTokenUsage$1;
540
566
  warnings: CallWarning[] | undefined;
541
567
  rawResponse?: {
542
568
  headers?: Record<string, string>;
@@ -624,7 +650,7 @@ Callback that is called when the LLM response and the final object validation ar
624
650
  /**
625
651
  The token usage of the generated response.
626
652
  */
627
- usage: TokenUsage;
653
+ usage: CompletionTokenUsage$1;
628
654
  /**
629
655
  The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
630
656
  */
@@ -684,7 +710,7 @@ declare class StreamObjectResult<T> {
684
710
  /**
685
711
  The token usage of the generated response. Resolved when the response is finished.
686
712
  */
687
- readonly usage: Promise<TokenUsage>;
713
+ readonly usage: Promise<CompletionTokenUsage$1>;
688
714
  /**
689
715
  Optional raw response data.
690
716
  */
@@ -896,6 +922,24 @@ declare class InvalidMessageRoleError extends Error {
896
922
  };
897
923
  }
898
924
 
925
+ /**
926
+ * Telemetry configuration.
927
+ */
928
+ type TelemetrySettings = {
929
+ /**
930
+ * Enable or disable telemetry. Disabled by default while experimental.
931
+ */
932
+ isEnabled?: boolean;
933
+ /**
934
+ * Identifier for this function. Used to group telemetry data by function.
935
+ */
936
+ functionId?: string;
937
+ /**
938
+ * Additional information to include in the telemetry data.
939
+ */
940
+ metadata?: Record<string, AttributeValue>;
941
+ };
942
+
899
943
  type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
900
944
  [NAME in keyof TOOLS]: {
901
945
  type: 'tool-call';
@@ -945,7 +989,7 @@ If set and supported by the model, calls will generate deterministic results.
945
989
  @returns
946
990
  A result object that contains the generated text, the results of the tool calls, and additional information.
947
991
  */
948
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, ...settings }: CallSettings & Prompt & {
992
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxAutomaticRoundtrips, maxToolRoundtrips, experimental_telemetry: telemetry, ...settings }: CallSettings & Prompt & {
949
993
  /**
950
994
  The language model to use.
951
995
  */
@@ -975,6 +1019,10 @@ case of misconfigured tools.
975
1019
  By default, it's set to 0, which will disable the feature.
976
1020
  */
977
1021
  maxToolRoundtrips?: number;
1022
+ /**
1023
+ * Optional telemetry configuration (experimental).
1024
+ */
1025
+ experimental_telemetry?: TelemetrySettings;
978
1026
  }): Promise<GenerateTextResult<TOOLS>>;
979
1027
  /**
980
1028
  The result of a `generateText` call.
@@ -1000,7 +1048,7 @@ declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
1000
1048
  /**
1001
1049
  The token usage of the generated text.
1002
1050
  */
1003
- readonly usage: TokenUsage;
1051
+ readonly usage: CompletionTokenUsage$1;
1004
1052
  /**
1005
1053
  Warnings from the model provider (e.g. unsupported settings)
1006
1054
  */
@@ -1032,7 +1080,7 @@ declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
1032
1080
  toolCalls: ToToolCallArray<TOOLS>;
1033
1081
  toolResults: ToToolResultArray<TOOLS>;
1034
1082
  finishReason: FinishReason;
1035
- usage: TokenUsage;
1083
+ usage: CompletionTokenUsage$1;
1036
1084
  warnings: CallWarning[] | undefined;
1037
1085
  rawResponse?: {
1038
1086
  headers?: Record<string, string>;
@@ -1084,7 +1132,7 @@ If set and supported by the model, calls will generate deterministic results.
1084
1132
  @return
1085
1133
  A result object for accessing different stream types and additional information.
1086
1134
  */
1087
- declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, onFinish, ...settings }: CallSettings & Prompt & {
1135
+ declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, onFinish, ...settings }: CallSettings & Prompt & {
1088
1136
  /**
1089
1137
  The language model to use.
1090
1138
  */
@@ -1098,6 +1146,10 @@ The tool choice strategy. Default: 'auto'.
1098
1146
  */
1099
1147
  toolChoice?: CoreToolChoice<TOOLS>;
1100
1148
  /**
1149
+ * Optional telemetry configuration (experimental).
1150
+ */
1151
+ experimental_telemetry?: TelemetrySettings;
1152
+ /**
1101
1153
  Callback that is called when the LLM response and all request tool executions
1102
1154
  (for tools that have an `execute` function) are finished.
1103
1155
  */
@@ -1109,7 +1161,7 @@ Callback that is called when the LLM response and all request tool executions
1109
1161
  /**
1110
1162
  The token usage of the generated response.
1111
1163
  */
1112
- usage: TokenUsage;
1164
+ usage: CompletionTokenUsage$1;
1113
1165
  /**
1114
1166
  The full text that has been generated.
1115
1167
  */
@@ -1170,7 +1222,7 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
1170
1222
  /**
1171
1223
  The token usage of the generated response. Resolved when the response is finished.
1172
1224
  */
1173
- readonly usage: Promise<TokenUsage>;
1225
+ readonly usage: Promise<CompletionTokenUsage$1>;
1174
1226
  /**
1175
1227
  The reason why the generation finished. Resolved when the response is finished.
1176
1228
  */
@@ -1196,13 +1248,15 @@ declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
1196
1248
  */
1197
1249
  headers?: Record<string, string>;
1198
1250
  };
1199
- constructor({ stream, warnings, rawResponse, onFinish, }: {
1251
+ constructor({ stream, warnings, rawResponse, onFinish, rootSpan, doStreamSpan, }: {
1200
1252
  stream: ReadableStream<TextStreamPart<TOOLS>>;
1201
1253
  warnings: CallWarning[] | undefined;
1202
1254
  rawResponse?: {
1203
1255
  headers?: Record<string, string>;
1204
1256
  };
1205
1257
  onFinish?: Parameters<typeof streamText>[0]['onFinish'];
1258
+ rootSpan: Span;
1259
+ doStreamSpan: Span;
1206
1260
  });
1207
1261
  /**
1208
1262
  Split out a new stream from the original stream.
@@ -2130,4 +2184,4 @@ declare const generateId: (size?: number | undefined) => string;
2130
2184
  */
2131
2185
  declare const nanoid: (size?: number | undefined) => string;
2132
2186
 
2133
- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
2187
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantResponse, CallWarning, CohereStream, CompletionTokenUsage, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolChoice, CoreToolMessage, CoreUserMessage, DataContent, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, InvalidMessageRoleError, InvalidModelIdError, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, MistralStream, NoSuchModelError, NoSuchProviderError, ObjectStreamInputPart, ObjectStreamPart, OpenAIStream, OpenAIStreamCallbacks, ReplicateStream, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, generateId, generateObject, generateText, nanoid, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };