ai 6.0.0-beta.163 → 6.0.0-beta.165

This diff compares the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,25 @@
  # ai

+ ## 6.0.0-beta.165
+
+ ### Patch Changes
+
+ - 2625a04: feat(openai): update spec for mcp approval
+ - Updated dependencies [2625a04]
+   - @ai-sdk/provider@3.0.0-beta.31
+   - @ai-sdk/gateway@2.0.0-beta.91
+   - @ai-sdk/provider-utils@4.0.0-beta.58
+
+ ## 6.0.0-beta.164
+
+ ### Patch Changes
+
+ - cbf52cd: feat: expose raw finish reason
+ - Updated dependencies [cbf52cd]
+   - @ai-sdk/provider@3.0.0-beta.30
+   - @ai-sdk/gateway@2.0.0-beta.90
+   - @ai-sdk/provider-utils@4.0.0-beta.57
+
  ## 6.0.0-beta.163

  ### Patch Changes
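
The cbf52cd entry ("expose raw finish reason") drives most of the type changes below: results now expose the provider's verbatim finish reason alongside the SDK's unified FinishReason union. A minimal sketch of reading both from generateText, assuming an @ai-sdk/openai provider at a compatible beta (model id and prompt are illustrative):

// Sketch only: reads the new field added in 6.0.0-beta.164.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const result = await generateText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Summarize the latest changelog entry in one line.',
});

console.log(result.finishReason);    // unified: 'stop' | 'length' | ... | 'other'
console.log(result.rawFinishReason); // provider's own value, or undefined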
package/dist/index.d.mts CHANGED
@@ -4,7 +4,7 @@ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
  export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
- import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { ServerResponse } from 'node:http';
@@ -103,7 +103,7 @@ Can be one of the following:
  - `error`: model stopped because of an error
  - `other`: model stopped for other reasons
  */
- type FinishReason = LanguageModelV3FinishReason;
+ type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
  /**
  Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
@@ -774,10 +774,14 @@ type StepResult<TOOLS extends ToolSet> = {
  */
  readonly dynamicToolResults: Array<DynamicToolResult>;
  /**
- The reason why the generation finished.
- */
+ * The unified reason why the generation finished.
+ */
  readonly finishReason: FinishReason;
  /**
+ * The raw reason why the generation finished (from the provider).
+ */
+ readonly rawFinishReason: string | undefined;
+ /**
  The token usage of the generated text.
  */
  readonly usage: LanguageModelUsage;
@@ -868,10 +872,14 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
  */
  readonly dynamicToolResults: Array<DynamicToolResult>;
  /**
- The reason why the generation finished.
+ * The unified reason why the generation finished.
  */
  readonly finishReason: FinishReason;
  /**
+ * The raw reason why the generation finished (from the provider).
+ */
+ readonly rawFinishReason: string | undefined;
+ /**
  The token usage of the last step.
  */
  readonly usage: LanguageModelUsage;
@@ -1974,7 +1982,7 @@ declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
  messageMetadata?: unknown;
  } | {
  type: "finish";
- finishReason?: "unknown" | "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
+ finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
  messageMetadata?: unknown;
  } | {
  type: "abort";
@@ -2211,98 +2219,104 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {

  Automatically consumes the stream.
  */
- readonly content: Promise<Array<ContentPart<TOOLS>>>;
+ readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
  /**
  The full text that has been generated by the last step.

  Automatically consumes the stream.
  */
- readonly text: Promise<string>;
+ readonly text: PromiseLike<string>;
  /**
  The full reasoning that the model has generated.

  Automatically consumes the stream.
  */
- readonly reasoning: Promise<Array<ReasoningOutput>>;
+ readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
  /**
  The reasoning that has been generated by the last step.

  Automatically consumes the stream.
  */
- readonly reasoningText: Promise<string | undefined>;
+ readonly reasoningText: PromiseLike<string | undefined>;
  /**
  Files that have been generated by the model in the last step.

  Automatically consumes the stream.
  */
- readonly files: Promise<GeneratedFile[]>;
+ readonly files: PromiseLike<GeneratedFile[]>;
  /**
  Sources that have been used as references in the last step.

  Automatically consumes the stream.
  */
- readonly sources: Promise<Source[]>;
+ readonly sources: PromiseLike<Source[]>;
  /**
  The tool calls that have been executed in the last step.

  Automatically consumes the stream.
  */
- readonly toolCalls: Promise<TypedToolCall<TOOLS>[]>;
+ readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
  /**
  The static tool calls that have been executed in the last step.

  Automatically consumes the stream.
  */
- readonly staticToolCalls: Promise<StaticToolCall<TOOLS>[]>;
+ readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
  /**
  The dynamic tool calls that have been executed in the last step.

  Automatically consumes the stream.
  */
- readonly dynamicToolCalls: Promise<DynamicToolCall[]>;
+ readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
  /**
  The static tool results that have been generated in the last step.

  Automatically consumes the stream.
  */
- readonly staticToolResults: Promise<StaticToolResult<TOOLS>[]>;
+ readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
  /**
  The dynamic tool results that have been generated in the last step.

  Automatically consumes the stream.
  */
- readonly dynamicToolResults: Promise<DynamicToolResult[]>;
+ readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
  /**
  The tool results that have been generated in the last step.

  Automatically consumes the stream.
  */
- readonly toolResults: Promise<TypedToolResult<TOOLS>[]>;
+ readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
  /**
- The reason why the generation finished. Taken from the last step.
-
- Automatically consumes the stream.
- */
- readonly finishReason: Promise<FinishReason>;
+ * The unified finish reason why the generation finished. Taken from the last step.
+ *
+ * Automatically consumes the stream.
+ */
+ readonly finishReason: PromiseLike<FinishReason>;
+ /**
+ * The raw reason why the generation finished (from the provider). Taken from the last step.
+ *
+ * Automatically consumes the stream.
+ */
+ readonly rawFinishReason: PromiseLike<string | undefined>;
  /**
  The token usage of the last step.

  Automatically consumes the stream.
  */
- readonly usage: Promise<LanguageModelUsage>;
+ readonly usage: PromiseLike<LanguageModelUsage>;
  /**
  The total token usage of the generated response.
  When there are multiple steps, the usage is the sum of all step usages.

  Automatically consumes the stream.
  */
- readonly totalUsage: Promise<LanguageModelUsage>;
+ readonly totalUsage: PromiseLike<LanguageModelUsage>;
  /**
  Warnings from the model provider (e.g. unsupported settings) for the first step.

  Automatically consumes the stream.
  */
- readonly warnings: Promise<CallWarning[] | undefined>;
+ readonly warnings: PromiseLike<CallWarning[] | undefined>;
  /**
  Details for all steps.
  You can use this to get information about intermediate steps,
@@ -2310,19 +2324,19 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {

  Automatically consumes the stream.
  */
- readonly steps: Promise<Array<StepResult<TOOLS>>>;
+ readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
  /**
  Additional request information from the last step.

  Automatically consumes the stream.
  */
- readonly request: Promise<LanguageModelRequestMetadata>;
+ readonly request: PromiseLike<LanguageModelRequestMetadata>;
  /**
  Additional response information from the last step.

  Automatically consumes the stream.
  */
- readonly response: Promise<LanguageModelResponseMetadata & {
+ readonly response: PromiseLike<LanguageModelResponseMetadata & {
  /**
  The response messages that were generated during the call. It consists of an assistant message,
  potentially containing tool calls.
@@ -2338,7 +2352,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
  Metadata is passed through from the provider to the AI SDK and
  enables provider-specific results that can be fully encapsulated in the provider.
  */
- readonly providerMetadata: Promise<ProviderMetadata | undefined>;
+ readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
  /**
  A text stream that returns only the generated text deltas. You can use it
  as either an AsyncIterable or a ReadableStream. When an error occurs, the
@@ -2365,7 +2379,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
  /**
  * The complete parsed output. It uses the `output` specification.
  */
- readonly output: Promise<InferCompleteOutput<OUTPUT>>;
+ readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
  /**
  Consumes the stream without processing the parts.
  This is useful to force the stream to finish.
@@ -2374,7 +2388,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {

  If an error occurs, it is passed to the optional `onError` callback.
  */
- consumeStream(options?: ConsumeStreamOptions): Promise<void>;
+ consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
  /**
  Converts the result to a UI message stream.

@@ -2473,12 +2487,14 @@ type TextStreamPart<TOOLS extends ToolSet> = {
  response: LanguageModelResponseMetadata;
  usage: LanguageModelUsage;
  finishReason: FinishReason;
+ rawFinishReason: string | undefined;
  providerMetadata: ProviderMetadata | undefined;
  } | {
  type: 'start';
  } | {
  type: 'finish';
  finishReason: FinishReason;
+ rawFinishReason: string | undefined;
  totalUsage: LanguageModelUsage;
  } | {
  type: 'abort';
@@ -3948,6 +3964,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
  } | {
  type: 'finish';
  finishReason: FinishReason;
+ rawFinishReason: string | undefined;
  usage: LanguageModelUsage;
  providerMetadata?: ProviderMetadata;
  } | {
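
Two API notes on the declaration changes above (they repeat verbatim in index.d.ts below). First, FinishReason is now an inlined union rather than an alias of LanguageModelV3FinishReason, and the uiMessageChunkSchema finish chunk no longer accepts "unknown". Second, the StreamTextResult accessors are loosened from Promise to PromiseLike: `await` accepts any thenable, so typical callers are unaffected, but Promise-only helpers such as `.catch()` or `.finally()` are no longer guaranteed by the types. A hedged sketch, with the same illustrative provider as above:

// Sketch only: consuming StreamTextResult after the PromiseLike change.
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const result = streamText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Stream a short answer.',
});

const text = await result.text;                       // await works on any thenable
const finishReason = await result.finishReason;       // unified union value
const rawFinishReason = await result.rawFinishReason; // provider string | undefined

// If you need Promise methods on a field, wrap the thenable first:
const usage = await Promise.resolve(result.usage);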
package/dist/index.d.ts CHANGED
@@ -4,7 +4,7 @@ import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
  import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
  export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
- import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelCallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { ServerResponse } from 'node:http';
@@ -103,7 +103,7 @@ Can be one of the following:
  - `error`: model stopped because of an error
  - `other`: model stopped for other reasons
  */
- type FinishReason = LanguageModelV3FinishReason;
+ type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
  /**
  Warning from the model provider for this call. The call will proceed, but e.g.
  some settings might not be supported, which can lead to suboptimal results.
@@ -774,10 +774,14 @@ type StepResult<TOOLS extends ToolSet> = {
  */
  readonly dynamicToolResults: Array<DynamicToolResult>;
  /**
- The reason why the generation finished.
- */
+ * The unified reason why the generation finished.
+ */
  readonly finishReason: FinishReason;
  /**
+ * The raw reason why the generation finished (from the provider).
+ */
+ readonly rawFinishReason: string | undefined;
+ /**
  The token usage of the generated text.
  */
  readonly usage: LanguageModelUsage;
@@ -868,10 +872,14 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
  */
  readonly dynamicToolResults: Array<DynamicToolResult>;
  /**
- The reason why the generation finished.
+ * The unified reason why the generation finished.
  */
  readonly finishReason: FinishReason;
  /**
+ * The raw reason why the generation finished (from the provider).
+ */
+ readonly rawFinishReason: string | undefined;
+ /**
  The token usage of the last step.
  */
  readonly usage: LanguageModelUsage;
@@ -1974,7 +1982,7 @@ declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
  messageMetadata?: unknown;
  } | {
  type: "finish";
- finishReason?: "unknown" | "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
+ finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
  messageMetadata?: unknown;
  } | {
  type: "abort";
@@ -2211,98 +2219,104 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {

  Automatically consumes the stream.
  */
- readonly content: Promise<Array<ContentPart<TOOLS>>>;
+ readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
  /**
  The full text that has been generated by the last step.

  Automatically consumes the stream.
  */
- readonly text: Promise<string>;
+ readonly text: PromiseLike<string>;
  /**
  The full reasoning that the model has generated.

  Automatically consumes the stream.
  */
- readonly reasoning: Promise<Array<ReasoningOutput>>;
+ readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
  /**
  The reasoning that has been generated by the last step.

  Automatically consumes the stream.
  */
- readonly reasoningText: Promise<string | undefined>;
+ readonly reasoningText: PromiseLike<string | undefined>;
  /**
  Files that have been generated by the model in the last step.

  Automatically consumes the stream.
  */
- readonly files: Promise<GeneratedFile[]>;
+ readonly files: PromiseLike<GeneratedFile[]>;
  /**
  Sources that have been used as references in the last step.

  Automatically consumes the stream.
  */
- readonly sources: Promise<Source[]>;
+ readonly sources: PromiseLike<Source[]>;
  /**
  The tool calls that have been executed in the last step.

  Automatically consumes the stream.
  */
- readonly toolCalls: Promise<TypedToolCall<TOOLS>[]>;
+ readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
  /**
  The static tool calls that have been executed in the last step.

  Automatically consumes the stream.
  */
- readonly staticToolCalls: Promise<StaticToolCall<TOOLS>[]>;
+ readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
  /**
  The dynamic tool calls that have been executed in the last step.

  Automatically consumes the stream.
  */
- readonly dynamicToolCalls: Promise<DynamicToolCall[]>;
+ readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
  /**
  The static tool results that have been generated in the last step.

  Automatically consumes the stream.
  */
- readonly staticToolResults: Promise<StaticToolResult<TOOLS>[]>;
+ readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
  /**
  The dynamic tool results that have been generated in the last step.

  Automatically consumes the stream.
  */
- readonly dynamicToolResults: Promise<DynamicToolResult[]>;
+ readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
  /**
  The tool results that have been generated in the last step.

  Automatically consumes the stream.
  */
- readonly toolResults: Promise<TypedToolResult<TOOLS>[]>;
+ readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
  /**
- The reason why the generation finished. Taken from the last step.
-
- Automatically consumes the stream.
- */
- readonly finishReason: Promise<FinishReason>;
+ * The unified finish reason why the generation finished. Taken from the last step.
+ *
+ * Automatically consumes the stream.
+ */
+ readonly finishReason: PromiseLike<FinishReason>;
+ /**
+ * The raw reason why the generation finished (from the provider). Taken from the last step.
+ *
+ * Automatically consumes the stream.
+ */
+ readonly rawFinishReason: PromiseLike<string | undefined>;
  /**
  The token usage of the last step.

  Automatically consumes the stream.
  */
- readonly usage: Promise<LanguageModelUsage>;
+ readonly usage: PromiseLike<LanguageModelUsage>;
  /**
  The total token usage of the generated response.
  When there are multiple steps, the usage is the sum of all step usages.

  Automatically consumes the stream.
  */
- readonly totalUsage: Promise<LanguageModelUsage>;
+ readonly totalUsage: PromiseLike<LanguageModelUsage>;
  /**
  Warnings from the model provider (e.g. unsupported settings) for the first step.

  Automatically consumes the stream.
  */
- readonly warnings: Promise<CallWarning[] | undefined>;
+ readonly warnings: PromiseLike<CallWarning[] | undefined>;
  /**
  Details for all steps.
  You can use this to get information about intermediate steps,
@@ -2310,19 +2324,19 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {

  Automatically consumes the stream.
  */
- readonly steps: Promise<Array<StepResult<TOOLS>>>;
+ readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
  /**
  Additional request information from the last step.

  Automatically consumes the stream.
  */
- readonly request: Promise<LanguageModelRequestMetadata>;
+ readonly request: PromiseLike<LanguageModelRequestMetadata>;
  /**
  Additional response information from the last step.

  Automatically consumes the stream.
  */
- readonly response: Promise<LanguageModelResponseMetadata & {
+ readonly response: PromiseLike<LanguageModelResponseMetadata & {
  /**
  The response messages that were generated during the call. It consists of an assistant message,
  potentially containing tool calls.
@@ -2338,7 +2352,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
  Metadata is passed through from the provider to the AI SDK and
  enables provider-specific results that can be fully encapsulated in the provider.
  */
- readonly providerMetadata: Promise<ProviderMetadata | undefined>;
+ readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
  /**
  A text stream that returns only the generated text deltas. You can use it
  as either an AsyncIterable or a ReadableStream. When an error occurs, the
@@ -2365,7 +2379,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
  /**
  * The complete parsed output. It uses the `output` specification.
  */
- readonly output: Promise<InferCompleteOutput<OUTPUT>>;
+ readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
  /**
  Consumes the stream without processing the parts.
  This is useful to force the stream to finish.
@@ -2374,7 +2388,7 @@ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {

  If an error occurs, it is passed to the optional `onError` callback.
  */
- consumeStream(options?: ConsumeStreamOptions): Promise<void>;
+ consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
  /**
  Converts the result to a UI message stream.

@@ -2473,12 +2487,14 @@ type TextStreamPart<TOOLS extends ToolSet> = {
  response: LanguageModelResponseMetadata;
  usage: LanguageModelUsage;
  finishReason: FinishReason;
+ rawFinishReason: string | undefined;
  providerMetadata: ProviderMetadata | undefined;
  } | {
  type: 'start';
  } | {
  type: 'finish';
  finishReason: FinishReason;
+ rawFinishReason: string | undefined;
  totalUsage: LanguageModelUsage;
  } | {
  type: 'abort';
@@ -3948,6 +3964,7 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
  } | {
  type: 'finish';
  finishReason: FinishReason;
+ rawFinishReason: string | undefined;
  usage: LanguageModelUsage;
  providerMetadata?: ProviderMetadata;
  } | {
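
The stream parts gain the same field: the step-level part and the final 'finish' part of TextStreamPart, as well as the 'finish' part of SingleRequestTextStreamPart, now carry rawFinishReason next to finishReason. A sketch of observing it on fullStream, under the same illustrative assumptions as the earlier examples:

// Sketch only: the 'finish' part shape follows the TextStreamPart union in this diff.
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package

const result = streamText({
  model: openai('gpt-4o'), // illustrative model id
  prompt: 'Why did the stream stop?',
});

for await (const part of result.fullStream) {
  if (part.type === 'finish') {
    // Unified reason, provider's verbatim reason (may be undefined), and totals.
    console.log(part.finishReason, part.rawFinishReason, part.totalUsage);
  }
}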