ai 6.0.23 → 6.0.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,10 +1,10 @@
  import { GatewayModelId } from '@ai-sdk/gateway';
  export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
+ import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, SystemModelMessage, ModelMessage, AssistantModelMessage, ToolModelMessage, ReasoningPart, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
  export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
- import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, ImageModelV3Middleware, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, ImageModelV3Middleware, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, AISDKError, LanguageModelV3ToolCall, JSONSchema7, LanguageModelV3CallOptions, JSONParseError, TypeValidationError, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { ServerResponse } from 'node:http';
@@ -569,191 +569,231 @@ type PartialObject<ObjectType extends object> = {
  [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
  };

- interface Output<OUTPUT = any, PARTIAL = any> {
+ /**
+ Timeout configuration for API calls. Can be specified as:
+ - A number representing milliseconds
+ - An object with `totalMs` property for the total timeout in milliseconds
+ - An object with `stepMs` property for the timeout of each step in milliseconds
+ - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
+ */
+ type TimeoutConfiguration = number | {
+ totalMs?: number;
+ stepMs?: number;
+ chunkMs?: number;
+ };
+ type CallSettings = {
  /**
- * The response format to use for the model.
+ Maximum number of tokens to generate.
  */
- responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
+ maxOutputTokens?: number;
  /**
- * Parses the complete output of the model.
+ Temperature setting. The range depends on the provider and model.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
- parseCompleteOutput(options: {
- text: string;
- }, context: {
- response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage;
- finishReason: FinishReason;
- }): Promise<OUTPUT>;
+ temperature?: number;
  /**
- * Parses the partial output of the model.
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
- parsePartialOutput(options: {
- text: string;
- }): Promise<{
- partial: PARTIAL;
- } | undefined>;
- }
- /**
- * Output specification for text generation.
- * This is the default output mode that generates plain text.
- *
- * @returns An output specification for generating text.
- */
- declare const text: () => Output<string, string>;
- /**
- * Output specification for typed object generation using schemas.
- * When the model generates a text response, it will return an object that matches the schema.
- *
- * @param schema - The schema of the object to generate.
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating objects with the specified schema.
- */
- declare const object: <OBJECT>({ schema: inputSchema, name, description, }: {
- schema: FlexibleSchema<OBJECT>;
+ topP?: number;
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
  */
- name?: string;
+ topK?: number;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
- description?: string;
- }) => Output<OBJECT, DeepPartial<OBJECT>>;
- /**
- * Output specification for array generation.
- * When the model generates a text response, it will return an array of elements.
- *
- * @param element - The schema of the array elements to generate.
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating an array of elements.
- */
- declare const array: <ELEMENT>({ element: inputElementSchema, name, description, }: {
- element: FlexibleSchema<ELEMENT>;
+ presencePenalty?: number;
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
- name?: string;
+ frequencyPenalty?: number;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
  */
- description?: string;
- }) => Output<Array<ELEMENT>, Array<ELEMENT>>;
- /**
- * Output specification for choice generation.
- * When the model generates a text response, it will return a one of the choice options.
- *
- * @param options - The available choices.
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating a choice.
- */
- declare const choice: <CHOICE extends string>({ options: choiceOptions, name, description, }: {
- options: Array<CHOICE>;
+ stopSequences?: string[];
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
  */
- name?: string;
+ seed?: number;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
  */
- description?: string;
- }) => Output<CHOICE, CHOICE>;
- /**
- * Output specification for unstructured JSON generation.
- * When the model generates a text response, it will return a JSON object.
- *
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating JSON.
- */
- declare const json: ({ name, description, }?: {
+ maxRetries?: number;
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ Abort signal.
  */
- name?: string;
+ abortSignal?: AbortSignal;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Timeout in milliseconds. The call will be aborted if it takes longer
+ than the specified timeout. Can be used alongside abortSignal.
+
+ Can be specified as a number (milliseconds) or as an object with `totalMs`.
  */
- description?: string;
- }) => Output<JSONValue$1, JSONValue$1>;
-
- type output_Output<OUTPUT = any, PARTIAL = any> = Output<OUTPUT, PARTIAL>;
- declare const output_array: typeof array;
- declare const output_choice: typeof choice;
- declare const output_json: typeof json;
- declare const output_object: typeof object;
- declare const output_text: typeof text;
- declare namespace output {
- export {
- output_Output as Output,
- output_array as array,
- output_choice as choice,
- output_json as json,
- output_object as object,
- output_text as text,
- };
- }
+ timeout?: TimeoutConfiguration;
+ /**
+ Additional HTTP headers to be sent with the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string | undefined>;
+ };
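
The `chunkMs` option above is new in this release. A minimal sketch of the new `timeout` setting in use, assuming only what the declarations above state (the model instance is a placeholder):

    import { generateText } from 'ai';
    import type { LanguageModel } from 'ai';

    declare const model: LanguageModel; // placeholder: any language model instance

    // Abort the whole call after 60s total and each step after 20s.
    // `chunkMs` applies only to streaming calls.
    const { text } = await generateText({
      model,
      prompt: 'Summarize the release notes.',
      timeout: { totalMs: 60_000, stepMs: 20_000 },
    });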

  /**
- * Infers the complete output type from the output specification.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
- type InferCompleteOutput<OUTPUT extends Output> = OUTPUT extends Output<infer COMPLETE_OUTPUT, any> ? COMPLETE_OUTPUT : never;
- /**
- * Infers the partial output type from the output specification.
+ type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
+ system?: string | SystemModelMessage | Array<SystemModelMessage>;
+ } & ({
+ /**
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt: string | Array<ModelMessage>;
+ /**
+ A list of messages.
+
+ You can either use `prompt` or `messages` but not both.
  */
- type InferPartialOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer PARTIAL_OUTPUT> ? PARTIAL_OUTPUT : never;
+ messages?: never;
+ } | {
+ /**
+ A list of messages.

- /**
- A message that was generated during the generation process.
- It can be either an assistant message or a tool message.
+ You can either use `prompt` or `messages` but not both.
  */
- type ResponseMessage = AssistantModelMessage | ToolModelMessage;
+ messages: Array<ModelMessage>;
+ /**
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: never;
+ });
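
The `Prompt` union makes `prompt` and `messages` mutually exclusive at the type level; a sketch of the two valid shapes (the model is a placeholder):

    import { generateText } from 'ai';
    import type { LanguageModel } from 'ai';

    declare const model: LanguageModel; // placeholder

    // Valid: a plain text prompt, optionally with a system message.
    await generateText({ model, system: 'You are terse.', prompt: 'Say hi.' });

    // Also valid: a message list. Supplying both `prompt` and `messages`
    // fails type checking (`messages?: never` in the first branch).
    await generateText({
      model,
      messages: [{ role: 'user', content: 'Say hi.' }],
    });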

  /**
- * The result of a single step in the generation process.
+ * Telemetry configuration.
  */
- type StepResult<TOOLS extends ToolSet> = {
+ type TelemetrySettings = {
  /**
- The content that was generated in the last step.
+ * Enable or disable telemetry. Disabled by default while experimental.
  */
- readonly content: Array<ContentPart<TOOLS>>;
- /**
- The generated text.
- */
- readonly text: string;
+ isEnabled?: boolean;
  /**
- The reasoning that was generated during the generation.
- */
- readonly reasoning: Array<ReasoningPart>;
+ * Enable or disable input recording. Enabled by default.
+ *
+ * You might want to disable input recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+ recordInputs?: boolean;
  /**
- The reasoning text that was generated during the generation.
- */
- readonly reasoningText: string | undefined;
+ * Enable or disable output recording. Enabled by default.
+ *
+ * You might want to disable output recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+ recordOutputs?: boolean;
  /**
- The files that were generated during the generation.
- */
- readonly files: Array<GeneratedFile>;
+ * Identifier for this function. Used to group telemetry data by function.
+ */
+ functionId?: string;
  /**
- The sources that were used to generate the text.
- */
- readonly sources: Array<Source>;
+ * Additional information to include in the telemetry data.
+ */
+ metadata?: Record<string, AttributeValue>;
  /**
- The tool calls that were made during the generation.
- */
+ * A custom tracer to use for the telemetry data.
+ */
+ tracer?: Tracer;
+ };
+
+ /**
+ * Experimental. Can change in patch versions without warning.
+ *
+ * Download function. Called with the array of URLs and a boolean indicating
+ * whether the URL is supported by the model.
+ *
+ * The download function can decide for each URL:
+ * - to return null (which means that the URL should be passed to the model)
+ * - to download the asset and return the data (incl. retries, authentication, etc.)
+ *
+ * Should throw DownloadError if the download fails.
+ *
+ * Should return an array of objects sorted by the order of the requested downloads.
+ * For each object, the data should be a Uint8Array if the URL was downloaded.
+ * For each object, the mediaType should be the media type of the downloaded asset.
+ * For each object, the data should be null if the URL should be passed through as is.
+ */
+ type DownloadFunction = (options: Array<{
+ url: URL;
+ isUrlSupportedByModel: boolean;
+ }>) => PromiseLike<Array<{
+ data: Uint8Array;
+ mediaType: string | undefined;
+ } | null>>;
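
A sketch of a custom `DownloadFunction` following the contract documented above; the fetch-based download logic is illustrative, not part of this package:

    import type { DownloadFunction } from 'ai'; // assumes the type is exported

    const download: DownloadFunction = (requests) =>
      Promise.all(
        requests.map(async ({ url, isUrlSupportedByModel }) => {
          // null = pass the URL through to the model unchanged.
          if (isUrlSupportedByModel) return null;
          const response = await fetch(url); // should throw DownloadError on failure
          return {
            data: new Uint8Array(await response.arrayBuffer()),
            mediaType: response.headers.get('content-type') ?? undefined,
          };
        }),
      );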
+
+ /**
+ A message that was generated during the generation process.
+ It can be either an assistant message or a tool message.
+ */
+ type ResponseMessage = AssistantModelMessage | ToolModelMessage;
+
+ /**
+ * The result of a single step in the generation process.
+ */
+ type StepResult<TOOLS extends ToolSet> = {
+ /**
+ The content that was generated in the last step.
+ */
+ readonly content: Array<ContentPart<TOOLS>>;
+ /**
+ The generated text.
+ */
+ readonly text: string;
+ /**
+ The reasoning that was generated during the generation.
+ */
+ readonly reasoning: Array<ReasoningPart>;
+ /**
+ The reasoning text that was generated during the generation.
+ */
+ readonly reasoningText: string | undefined;
+ /**
+ The files that were generated during the generation.
+ */
+ readonly files: Array<GeneratedFile>;
+ /**
+ The sources that were used to generate the text.
+ */
+ readonly sources: Array<Source>;
+ /**
+ The tool calls that were made during the generation.
+ */
  readonly toolCalls: Array<TypedToolCall<TOOLS>>;
  /**
  The static tool calls that were made in the last step.
@@ -819,1966 +859,1935 @@ type StepResult<TOOLS extends ToolSet> = {
  };

  /**
- The result of a `generateText` call.
- It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ * Function that you can use to provide different settings for a step.
+ *
+ * @param options - The options for the step.
+ * @param options.steps - The steps that have been executed so far.
+ * @param options.stepNumber - The number of the step that is being executed.
+ * @param options.model - The model that is being used.
+ * @param options.messages - The messages that will be sent to the model for the current step.
+ * @param options.experimental_context - The context passed via the experimental_context setting (experimental).
+ *
+ * @returns An object that contains the settings for the step.
+ * If you return undefined (or for undefined settings), the settings from the outer level will be used.
  */
- interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
+ type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
  /**
- The content that was generated in the last step.
+ * The steps that have been executed so far.
  */
- readonly content: Array<ContentPart<TOOLS>>;
- /**
- The text that was generated in the last step.
- */
- readonly text: string;
+ steps: Array<StepResult<NoInfer<TOOLS>>>;
  /**
- The full reasoning that the model has generated in the last step.
+ * The number of the step that is being executed.
  */
- readonly reasoning: Array<ReasoningOutput>;
+ stepNumber: number;
  /**
- The reasoning text that the model has generated in the last step. Can be undefined if the model
- has only generated text.
+ * The model instance that is being used for this step.
  */
- readonly reasoningText: string | undefined;
- /**
- The files that were generated in the last step.
- Empty array if no files were generated.
- */
- readonly files: Array<GeneratedFile>;
+ model: LanguageModel;
  /**
- Sources that have been used as references in the last step.
+ * The messages that will be sent to the model for the current step.
  */
- readonly sources: Array<Source>;
+ messages: Array<ModelMessage>;
  /**
- The tool calls that were made in the last step.
+ * The context passed via the experimental_context setting (experimental).
  */
- readonly toolCalls: Array<TypedToolCall<TOOLS>>;
+ experimental_context: unknown;
+ }) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
+ /**
+ * The result type returned by a {@link PrepareStepFunction},
+ * allowing per-step overrides of model, tools, or messages.
+ */
+ type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
  /**
- The static tool calls that were made in the last step.
+ * Optionally override which LanguageModel instance is used for this step.
  */
- readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
+ model?: LanguageModel;
  /**
- The dynamic tool calls that were made in the last step.
+ * Optionally set which tool the model must call, or provide tool call configuration
+ * for this step.
  */
- readonly dynamicToolCalls: Array<DynamicToolCall>;
+ toolChoice?: ToolChoice<NoInfer<TOOLS>>;
  /**
- The results of the tool calls from the last step.
+ * If provided, only these tools are enabled/available for this step.
  */
- readonly toolResults: Array<TypedToolResult<TOOLS>>;
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
- The static tool results that were made in the last step.
+ * Optionally override the system message(s) sent to the model for this step.
  */
- readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
+ system?: string | SystemModelMessage | Array<SystemModelMessage>;
  /**
- The dynamic tool results that were made in the last step.
+ * Optionally override the full set of messages sent to the model
+ * for this step.
  */
- readonly dynamicToolResults: Array<DynamicToolResult>;
+ messages?: Array<ModelMessage>;
  /**
- * The unified reason why the generation finished.
+ * Context that is passed into tool execution. Experimental.
+ *
+ * Changing the context will affect the context in this step
+ * and all subsequent steps.
  */
- readonly finishReason: FinishReason;
+ experimental_context?: unknown;
  /**
- * The raw reason why the generation finished (from the provider).
+ * Additional provider-specific options for this step.
+ *
+ * Can be used to pass provider-specific configuration such as
+ * container IDs for Anthropic's code execution.
  */
- readonly rawFinishReason: string | undefined;
+ providerOptions?: ProviderOptions;
+ } | undefined;
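
A sketch of a `PrepareStepFunction` that overrides settings for the first step only; the 'search' tool name is hypothetical:

    import type { PrepareStepFunction } from 'ai'; // assumes the type is exported

    const prepareStep: PrepareStepFunction = async ({ stepNumber }) => {
      // Force the hypothetical 'search' tool on the first step.
      if (stepNumber === 0) {
        return { toolChoice: { type: 'tool', toolName: 'search' } };
      }
      return undefined; // keep the outer-level settings for later steps
    };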
+
+ type StopCondition<TOOLS extends ToolSet> = (options: {
+ steps: Array<StepResult<TOOLS>>;
+ }) => PromiseLike<boolean> | boolean;
+ declare function stepCountIs(stepCount: number): StopCondition<any>;
+ declare function hasToolCall(toolName: string): StopCondition<any>;
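
`stopWhen` (declared on `generateText` below) accepts one or more of these conditions; with an array, whichever condition is met first ends the loop. A sketch, where the 'finalAnswer' tool name is hypothetical:

    import { stepCountIs, hasToolCall } from 'ai';

    // Stop after 5 steps, or as soon as the model calls 'finalAnswer'.
    const stopWhen = [stepCountIs(5), hasToolCall('finalAnswer')];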
+
+ declare const symbol$e: unique symbol;
+ declare class InvalidToolInputError extends AISDKError {
+ private readonly [symbol$e];
+ readonly toolName: string;
+ readonly toolInput: string;
+ constructor({ toolInput, toolName, cause, message, }: {
+ message?: string;
+ toolInput: string;
+ toolName: string;
+ cause: unknown;
+ });
+ static isInstance(error: unknown): error is InvalidToolInputError;
+ }
+
+ declare const symbol$d: unique symbol;
+ declare class NoSuchToolError extends AISDKError {
+ private readonly [symbol$d];
+ readonly toolName: string;
+ readonly availableTools: string[] | undefined;
+ constructor({ toolName, availableTools, message, }: {
+ toolName: string;
+ availableTools?: string[] | undefined;
+ message?: string;
+ });
+ static isInstance(error: unknown): error is NoSuchToolError;
+ }
+
+ declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
+ declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
+ declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
+ declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
+ declare const modelMessageSchema: z.ZodType<ModelMessage>;
+
+ /**
+ * A function that attempts to repair a tool call that failed to parse.
+ *
+ * It receives the error and the context as arguments and returns the repair
+ * tool call JSON as text.
+ *
+ * @param options.system - The system prompt.
+ * @param options.messages - The messages in the current generation step.
+ * @param options.toolCall - The tool call that failed to parse.
+ * @param options.tools - The tools that are available.
+ * @param options.inputSchema - A function that returns the JSON Schema for a tool.
+ * @param options.error - The error that occurred while parsing the tool call.
+ */
+ type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
+ system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
+ messages: ModelMessage[];
+ toolCall: LanguageModelV3ToolCall;
+ tools: TOOLS;
+ inputSchema: (options: {
+ toolName: string;
+ }) => PromiseLike<JSONSchema7>;
+ error: NoSuchToolError | InvalidToolInputError;
+ }) => Promise<LanguageModelV3ToolCall | null>;
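
A sketch of a `ToolCallRepairFunction` handling the two error cases the type admits; the repair strategy itself is left illustrative:

    import { NoSuchToolError } from 'ai';
    import type { ToolCallRepairFunction, ToolSet } from 'ai'; // assumes these exports

    const repairToolCall: ToolCallRepairFunction<ToolSet> = async ({
      toolCall,
      inputSchema,
      error,
    }) => {
      // An unknown tool name cannot be repaired here; null skips the repair.
      if (NoSuchToolError.isInstance(error)) return null;
      // For InvalidToolInputError, a real implementation could fetch the schema
      // via `await inputSchema({ toolName: toolCall.toolName })` and re-generate
      // the arguments with a model, returning the repaired tool call.
      return null;
    };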
+
+ /**
+ Callback that is set using the `onStepFinish` option.
+
+ @param stepResult - The result of the step.
+ */
+ type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
+ /**
+ Callback that is set using the `onFinish` option.
+
+ @param event - The event that is passed to the callback.
+ */
+ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
  /**
- The token usage of the last step.
+ * Details for all steps.
  */
- readonly usage: LanguageModelUsage;
+ readonly steps: StepResult<TOOLS>[];
  /**
- The total token usage of all steps.
- When there are multiple steps, the usage is the sum of all step usages.
+ * Total usage for all steps. This is the sum of the usage of all steps.
  */
  readonly totalUsage: LanguageModelUsage;
  /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: CallWarning[] | undefined;
- /**
- Additional request information.
- */
- readonly request: LanguageModelRequestMetadata;
- /**
- Additional response information.
+ * Context that is passed into tool execution.
+ *
+ * Experimental (can break in patch releases).
+ *
+ * @default undefined
  */
- readonly response: LanguageModelResponseMetadata & {
- /**
- The response messages that were generated during the call. It consists of an assistant message,
- potentially containing tool calls.
-
- When there are tool results, there is an additional tool message with the tool results that are available.
- If there are tools that do not have execute functions, they are not included in the tool results and
- need to be added separately.
- */
- messages: Array<ResponseMessage>;
- /**
- Response body (available only for providers that use HTTP requests).
- */
- body?: unknown;
- };
+ experimental_context: unknown;
+ }) => PromiseLike<void> | void;
+ /**
+ Generate a text and call tools for a given prompt using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `streamText` instead.
+
+ @param model - The language model to use.
+
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
+ @param toolChoice - The tool choice strategy. Default: 'auto'.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxOutputTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ The value is passed through to the provider. The range depends on the provider and model.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The value is passed through to the provider. The range depends on the provider and model.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The value is passed through to the provider. The range depends on the provider and model.
+ @param stopSequences - Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+ @param experimental_generateMessageId - Generate a unique ID for each message.
+
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ @param onFinish - Callback that is called when all steps are finished and the response is complete.
+
+ @returns
+ A result object that contains the generated text, the results of the tool calls, and additional information.
+ */
+ declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
  /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
+ The language model to use.
  */
- readonly providerMetadata: ProviderMetadata | undefined;
+ model: LanguageModel;
  /**
- Details for all steps.
- You can use this to get information about intermediate steps,
- such as the tool calls or the response headers.
+ The tools that the model can call. The model needs to support calling tools.
+ */
+ tools?: TOOLS;
+ /**
+ The tool choice strategy. Default: 'auto'.
  */
- readonly steps: Array<StepResult<TOOLS>>;
+ toolChoice?: ToolChoice<NoInfer<TOOLS>>;
  /**
- The generated structured output. It uses the `output` specification.
-
- @deprecated Use `output` instead.
+ Condition for stopping the generation when there are tool results in the last step.
+ When the condition is an array, any of the conditions can be met to stop the generation.
+
+ @default stepCountIs(1)
  */
- readonly experimental_output: InferCompleteOutput<OUTPUT>;
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
  /**
- The generated structured output. It uses the `output` specification.
-
+ Optional telemetry configuration (experimental).
  */
- readonly output: InferCompleteOutput<OUTPUT>;
- }
-
- /**
- Timeout configuration for API calls. Can be specified as:
- - A number representing milliseconds
- - An object with `totalMs` property for the total timeout in milliseconds
- - An object with `stepMs` property for the timeout of each step in milliseconds
- */
- type TimeoutConfiguration = number | {
- totalMs?: number;
- stepMs?: number;
- };
- /**
- Extracts the total timeout value in milliseconds from a TimeoutConfiguration.
-
- @param timeout - The timeout configuration.
- @returns The total timeout in milliseconds, or undefined if no timeout is configured.
- */
- declare function getTotalTimeoutMs(timeout: TimeoutConfiguration | undefined): number | undefined;
- /**
- Extracts the step timeout value in milliseconds from a TimeoutConfiguration.
-
- @param timeout - The timeout configuration.
- @returns The step timeout in milliseconds, or undefined if no step timeout is configured.
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific options. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
  */
- declare function getStepTimeoutMs(timeout: TimeoutConfiguration | undefined): number | undefined;
- type CallSettings = {
+ providerOptions?: ProviderOptions;
  /**
- Maximum number of tokens to generate.
+ * @deprecated Use `activeTools` instead.
  */
- maxOutputTokens?: number;
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
- Temperature setting. The range depends on the provider and model.
-
- It is recommended to set either `temperature` or `topP`, but not both.
+ Limits the tools that are available for the model to call without
+ changing the tool call and result types in the result.
  */
- temperature?: number;
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
- Nucleus sampling. This is a number between 0 and 1.
-
- E.g. 0.1 would mean that only tokens with the top 10% probability mass
- are considered.
-
- It is recommended to set either `temperature` or `topP`, but not both.
+ Optional specification for parsing structured outputs from the LLM response.
  */
- topP?: number;
+ output?: OUTPUT;
  /**
- Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
+ Optional specification for parsing structured outputs from the LLM response.
+
+ @deprecated Use `output` instead.
  */
- topK?: number;
+ experimental_output?: OUTPUT;
  /**
- Presence penalty setting. It affects the likelihood of the model to
- repeat information that is already in the prompt.
-
- The presence penalty is a number between -1 (increase repetition)
- and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ Custom download function to use for URLs.
+
+ By default, files are downloaded if the model does not support the URL for the given media type.
  */
- presencePenalty?: number;
+ experimental_download?: DownloadFunction | undefined;
  /**
- Frequency penalty setting. It affects the likelihood of the model
- to repeatedly use the same words or phrases.
-
- The frequency penalty is a number between -1 (increase repetition)
- and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ * @deprecated Use `prepareStep` instead.
  */
- frequencyPenalty?: number;
+ experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /**
- Stop sequences.
- If set, the model will stop generating text when one of the stop sequences is generated.
- Providers may have limits on the number of stop sequences.
- */
- stopSequences?: string[];
+ Optional function that you can use to provide different settings for a step.
+ */
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /**
- The seed (integer) to use for random sampling. If set and supported
- by the model, calls will generate deterministic results.
+ A function that attempts to repair a tool call that failed to parse.
  */
- seed?: number;
+ experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
  /**
- Maximum number of retries. Set to 0 to disable retries.
-
- @default 2
+ * Callback that is called when each step (LLM call) is finished, including intermediate steps.
  */
- maxRetries?: number;
+ onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
  /**
- Abort signal.
+ * Callback that is called when all steps are finished and the response is complete.
  */
- abortSignal?: AbortSignal;
+ onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
  /**
- Timeout in milliseconds. The call will be aborted if it takes longer
- than the specified timeout. Can be used alongside abortSignal.
-
- Can be specified as a number (milliseconds) or as an object with `totalMs`.
+ * Context that is passed into tool execution.
+ *
+ * Experimental (can break in patch releases).
+ *
+ * @default undefined
  */
- timeout?: TimeoutConfiguration;
+ experimental_context?: unknown;
  /**
- Additional HTTP headers to be sent with the request.
- Only applicable for HTTP-based providers.
+ * Internal. For test use only. May change without notice.
  */
- headers?: Record<string, string | undefined>;
- };
+ _internal?: {
+ generateId?: IdGenerator;
+ };
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
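
A sketch of a multi-step `generateText` call wiring the options declared above together; the weather tool, its stubbed result, and the model are placeholders (the `tool` helper is re-exported at the top of this file, zod is external):

    import { generateText, stepCountIs, tool } from 'ai';
    import type { LanguageModel } from 'ai';
    import { z } from 'zod';

    declare const model: LanguageModel; // placeholder

    const result = await generateText({
      model,
      tools: {
        weather: tool({
          description: 'Get the weather for a city.',
          inputSchema: z.object({ city: z.string() }),
          execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result
        }),
      },
      stopWhen: stepCountIs(3), // allow up to 3 LLM steps
      onStepFinish: (step) => console.log(step.toolCalls.length, 'tool calls'),
      prompt: 'What is the weather in Berlin?',
    });

    console.log(result.text);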

  /**
- Prompt part of the AI function options.
- It contains a system message, a simple text prompt, or a list of messages.
+ * Infers the complete output type from the output specification.
  */
- type Prompt = {
- /**
- System message to include in the prompt. Can be used with `prompt` or `messages`.
- */
- system?: string | SystemModelMessage | Array<SystemModelMessage>;
- } & ({
- /**
- A prompt. It can be either a text prompt or a list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- prompt: string | Array<ModelMessage>;
- /**
- A list of messages.
-
- You can either use `prompt` or `messages` but not both.
+ type InferCompleteOutput<OUTPUT extends Output> = OUTPUT extends Output<infer COMPLETE_OUTPUT, any, any> ? COMPLETE_OUTPUT : never;
+ /**
+ * Infers the partial output type from the output specification.
  */
- messages?: never;
- } | {
- /**
- A list of messages.
-
- You can either use `prompt` or `messages` but not both.
+ type InferPartialOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer PARTIAL_OUTPUT, any> ? PARTIAL_OUTPUT : never;
+ /**
+ * Infers the element type from an array output specification.
  */
- messages: Array<ModelMessage>;
- /**
- A prompt. It can be either a text prompt or a list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- prompt?: never;
- });
+ type InferElementOutput<OUTPUT extends Output> = OUTPUT extends Output<any, any, infer ELEMENT> ? ELEMENT : never;

  /**
- * Telemetry configuration.
+ * Prunes model messages from a list of model messages.
+ *
+ * @param messages - The list of model messages to prune.
+ * @param reasoning - How to remove reasoning content from assistant messages. Default is `'none'`.
+ * @param toolCalls - How to prune tool call/results/approval content. Default is `[]`.
+ * @param emptyMessages - Whether to keep or remove messages whose content is empty after pruning. Default is `'remove'`.
+ *
+ * @returns The pruned list of model messages.
  */
- type TelemetrySettings = {
- /**
- * Enable or disable telemetry. Disabled by default while experimental.
- */
- isEnabled?: boolean;
- /**
- * Enable or disable input recording. Enabled by default.
- *
- * You might want to disable input recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
- recordInputs?: boolean;
- /**
- * Enable or disable output recording. Enabled by default.
- *
- * You might want to disable output recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
- recordOutputs?: boolean;
- /**
- * Identifier for this function. Used to group telemetry data by function.
- */
- functionId?: string;
- /**
- * Additional information to include in the telemetry data.
- */
- metadata?: Record<string, AttributeValue>;
- /**
- * A custom tracer to use for the telemetry data.
- */
- tracer?: Tracer;
- };
+ declare function pruneMessages({ messages, reasoning, toolCalls, emptyMessages, }: {
+ messages: ModelMessage[];
+ reasoning?: 'all' | 'before-last-message' | 'none';
+ toolCalls?: 'all' | 'before-last-message' | `before-last-${number}-messages` | 'none' | Array<{
+ type: 'all' | 'before-last-message' | `before-last-${number}-messages`;
+ tools?: string[];
+ }>;
+ emptyMessages?: 'keep' | 'remove';
+ }): ModelMessage[];
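
A sketch of the new `pruneMessages` helper trimming reasoning and stale tool traffic from a conversation before it is sent again (the history variable is a placeholder):

    import { pruneMessages } from 'ai';
    import type { ModelMessage } from 'ai';

    declare const history: ModelMessage[]; // placeholder: accumulated conversation

    const pruned = pruneMessages({
      messages: history,
      reasoning: 'before-last-message', // keep reasoning only on the last message
      toolCalls: 'before-last-4-messages', // drop tool content older than 4 messages
      emptyMessages: 'remove',
    });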

  /**
- * Experimental. Can change in patch versions without warning.
- *
- * Download function. Called with the array of URLs and a boolean indicating
- * whether the URL is supported by the model.
- *
- * The download function can decide for each URL:
- * - to return null (which means that the URL should be passed to the model)
- * - to download the asset and return the data (incl. retries, authentication, etc.)
+ * Detects the first chunk in a buffer.
  *
- * Should throw DownloadError if the download fails.
+ * @param buffer - The buffer to detect the first chunk in.
  *
- * Should return an array of objects sorted by the order of the requested downloads.
- * For each object, the data should be a Uint8Array if the URL was downloaded.
- * For each object, the mediaType should be the media type of the downloaded asset.
- * For each object, the data should be null if the URL should be passed through as is.
+ * @returns The first detected chunk, or `undefined` if no chunk was detected.
  */
- type DownloadFunction = (options: Array<{
- url: URL;
- isUrlSupportedByModel: boolean;
- }>) => PromiseLike<Array<{
- data: Uint8Array;
- mediaType: string | undefined;
- } | null>>;
-
+ type ChunkDetector = (buffer: string) => string | undefined | null;
  /**
- * Function that you can use to provide different settings for a step.
+ * Smooths text and reasoning streaming output.
  *
- * @param options - The options for the step.
- * @param options.steps - The steps that have been executed so far.
- * @param options.stepNumber - The number of the step that is being executed.
- * @param options.model - The model that is being used.
- * @param options.messages - The messages that will be sent to the model for the current step.
- * @param options.experimental_context - The context passed via the experimental_context setting (experimental).
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, provide a custom RegExp pattern for custom chunking, provide an Intl.Segmenter for locale-aware word segmentation (recommended for CJK languages), or provide a custom ChunkDetector function.
  *
- * @returns An object that contains the settings for the step.
- * If you return undefined (or for undefined settings), the settings from the outer level will be used.
+ * @returns A transform stream that smooths text streaming output.
  */
- type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
+ declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
+ delayInMs?: number | null;
+ chunking?: 'word' | 'line' | RegExp | ChunkDetector | Intl.Segmenter;
  /**
- * The steps that have been executed so far.
+ * Internal. For test use only. May change without notice.
  */
- steps: Array<StepResult<NoInfer<TOOLS>>>;
+ _internal?: {
+ delay?: (delayInMs: number | null) => Promise<void>;
+ };
+ }): (options: {
+ tools: TOOLS;
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
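
A sketch of `smoothStream` with locale-aware chunking. It is applied to a streaming call; `streamText` and its `experimental_transform` option are assumptions from the wider SDK, not shown in this diff:

    import { smoothStream, streamText } from 'ai';
    import type { LanguageModel } from 'ai';

    declare const model: LanguageModel; // placeholder

    const result = streamText({
      model,
      prompt: 'Write a short story.',
      // Word-level segmentation that also handles CJK text, 15 ms between chunks.
      experimental_transform: smoothStream({
        delayInMs: 15,
        chunking: new Intl.Segmenter('ja', { granularity: 'word' }),
      }),
    });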
1231
+
1232
+ /**
1233
+ * Tool output when the tool execution has been denied (for static tools).
1234
+ */
1235
+ type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
1236
+ [NAME in keyof TOOLS]: {
1237
+ type: 'tool-output-denied';
1238
+ toolCallId: string;
1239
+ toolName: NAME & string;
1240
+ providerExecuted?: boolean;
1241
+ dynamic?: false | undefined;
1242
+ };
1243
+ }>;
1244
+ /**
1245
+ * Tool output when the tool execution has been denied.
1246
+ */
1247
+ type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
1248
+
1249
+ /**
1250
+ The data types that can be used in the UI message for the UI message data parts.
1251
+ */
1252
+ type UIDataTypes = Record<string, unknown>;
1253
+ type UITool = {
1254
+ input: unknown;
1255
+ output: unknown | undefined;
1256
+ };
1257
+ /**
1258
+ * Infer the input and output types of a tool so it can be used as a UI tool.
1259
+ */
1260
+ type InferUITool<TOOL extends Tool> = {
1261
+ input: InferToolInput<TOOL>;
1262
+ output: InferToolOutput<TOOL>;
1263
+ };
1264
+ /**
1265
+ * Infer the input and output types of a tool set so it can be used as a UI tool set.
1266
+ */
1267
+ type InferUITools<TOOLS extends ToolSet> = {
1268
+ [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
1269
+ };
1270
+ type UITools = Record<string, UITool>;
1271
+ /**
1272
+ AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
1273
+ */
1274
+ interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
1162
1275
  /**
1163
- * The number of the step that is being executed.
1276
+ A unique identifier for the message.
1164
1277
  */
1165
- stepNumber: number;
1278
+ id: string;
1166
1279
  /**
1167
- * The model instance that is being used for this step.
1280
+ The role of the message.
1168
1281
  */
1169
- model: LanguageModel;
1282
+ role: 'system' | 'user' | 'assistant';
1170
1283
  /**
1171
- * The messages that will be sent to the model for the current step.
1284
+ The metadata of the message.
1172
1285
  */
1173
- messages: Array<ModelMessage>;
1286
+ metadata?: METADATA;
1174
1287
  /**
1175
- * The context passed via the experimental_context setting (experimental).
1288
+ The parts of the message. Use this for rendering the message in the UI.
1289
+
1290
+ System messages should be avoided (set the system prompt on the server instead).
1291
+ They can have text parts.
1292
+
1293
+ User messages can have text parts and file parts.
1294
+
1295
+ Assistant messages can have text, reasoning, tool invocation, and file parts.
1176
1296
  */
1177
- experimental_context: unknown;
1178
- }) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
1297
+ parts: Array<UIMessagePart<DATA_PARTS, TOOLS>>;
1298
+ }
1299
+ type UIMessagePart<DATA_TYPES extends UIDataTypes, TOOLS extends UITools> = TextUIPart | ReasoningUIPart | ToolUIPart<TOOLS> | DynamicToolUIPart | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
1179
1300
  /**
1180
- * The result type returned by a {@link PrepareStepFunction},
1181
- * allowing per-step overrides of model, tools, or messages.
1301
+ * A text part of a message.
1182
1302
  */
1183
- type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
1303
+ type TextUIPart = {
1304
+ type: 'text';
1184
1305
  /**
1185
- * Optionally override which LanguageModel instance is used for this step.
1306
+ * The text content.
1186
1307
  */
1187
- model?: LanguageModel;
1308
+ text: string;
1188
1309
  /**
1189
- * Optionally set which tool the model must call, or provide tool call configuration
1190
- * for this step.
1310
+ * The state of the text part.
1191
1311
  */
1192
- toolChoice?: ToolChoice<NoInfer<TOOLS>>;
1312
+ state?: 'streaming' | 'done';
1193
1313
  /**
1194
- * If provided, only these tools are enabled/available for this step.
1314
+ * The provider metadata.
1195
1315
  */
1196
- activeTools?: Array<keyof NoInfer<TOOLS>>;
1316
+ providerMetadata?: ProviderMetadata;
1317
+ };
1318
+ /**
1319
+ * A reasoning part of a message.
1320
+ */
1321
+ type ReasoningUIPart = {
1322
+ type: 'reasoning';
1197
1323
  /**
1198
- * Optionally override the system message(s) sent to the model for this step.
1324
+ * The reasoning text.
1199
1325
  */
1200
- system?: string | SystemModelMessage | Array<SystemModelMessage>;
1326
+ text: string;
1201
1327
  /**
1202
- * Optionally override the full set of messages sent to the model
1203
- * for this step.
1328
+ * The state of the reasoning part.
1204
1329
  */
1205
- messages?: Array<ModelMessage>;
1330
+ state?: 'streaming' | 'done';
1206
1331
  /**
1207
- * Context that is passed into tool execution. Experimental.
1208
- *
1209
- * Changing the context will affect the context in this step
1210
- * and all subsequent steps.
1332
+ * The provider metadata.
1211
1333
  */
1212
- experimental_context?: unknown;
1334
+ providerMetadata?: ProviderMetadata;
1335
+ };
1336
+ /**
1337
+ * A source part of a message.
1338
+ */
1339
+ type SourceUrlUIPart = {
1340
+ type: 'source-url';
1341
+ sourceId: string;
1342
+ url: string;
1343
+ title?: string;
1344
+ providerMetadata?: ProviderMetadata;
1345
+ };
1346
+ /**
1347
+ * A document source part of a message.
1348
+ */
1349
+ type SourceDocumentUIPart = {
1350
+ type: 'source-document';
1351
+ sourceId: string;
1352
+ mediaType: string;
1353
+ title: string;
1354
+ filename?: string;
1355
+ providerMetadata?: ProviderMetadata;
1356
+ };
1357
+ /**
1358
+ * A file part of a message.
1359
+ */
1360
+ type FileUIPart = {
1361
+ type: 'file';
1213
1362
  /**
1214
- * Additional provider-specific options for this step.
1363
+ * IANA media type of the file.
1215
1364
  *
1216
- * Can be used to pass provider-specific configuration such as
1217
- * container IDs for Anthropic's code execution.
1365
+ * @see https://www.iana.org/assignments/media-types/media-types.xhtml
1218
1366
  */
1219
- providerOptions?: ProviderOptions;
1220
- } | undefined;
1221
-
1222
- type StopCondition<TOOLS extends ToolSet> = (options: {
1223
- steps: Array<StepResult<TOOLS>>;
1224
- }) => PromiseLike<boolean> | boolean;
1225
- declare function stepCountIs(stepCount: number): StopCondition<any>;
1226
- declare function hasToolCall(toolName: string): StopCondition<any>;
1227
-
1228
- declare const symbol$e: unique symbol;
1229
- declare class InvalidToolInputError extends AISDKError {
1230
- private readonly [symbol$e];
1231
- readonly toolName: string;
1232
- readonly toolInput: string;
1233
- constructor({ toolInput, toolName, cause, message, }: {
1234
- message?: string;
1235
- toolInput: string;
1236
- toolName: string;
1237
- cause: unknown;
1238
- });
1239
- static isInstance(error: unknown): error is InvalidToolInputError;
1240
- }
1241
-
1242
- declare const symbol$d: unique symbol;
1243
- declare class NoSuchToolError extends AISDKError {
1244
- private readonly [symbol$d];
1245
- readonly toolName: string;
1246
- readonly availableTools: string[] | undefined;
1247
- constructor({ toolName, availableTools, message, }: {
1248
- toolName: string;
1249
- availableTools?: string[] | undefined;
1250
- message?: string;
1251
- });
1252
- static isInstance(error: unknown): error is NoSuchToolError;
1253
- }
1254
-
1255
- declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
1256
- declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
1257
- declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
1258
- declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
1259
- declare const modelMessageSchema: z.ZodType<ModelMessage>;
1260
-
1261
- /**
1262
- * A function that attempts to repair a tool call that failed to parse.
1263
- *
1264
- * It receives the error and the context as arguments and returns the repair
1265
- * tool call JSON as text.
1266
- *
1267
- * @param options.system - The system prompt.
1268
- * @param options.messages - The messages in the current generation step.
1269
- * @param options.toolCall - The tool call that failed to parse.
1270
- * @param options.tools - The tools that are available.
1271
- * @param options.inputSchema - A function that returns the JSON Schema for a tool.
1272
- * @param options.error - The error that occurred while parsing the tool call.
1273
- */
1274
- type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
1275
- system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
1276
- messages: ModelMessage[];
1277
- toolCall: LanguageModelV3ToolCall;
1278
- tools: TOOLS;
1279
- inputSchema: (options: {
1280
- toolName: string;
1281
- }) => PromiseLike<JSONSchema7>;
1282
- error: NoSuchToolError | InvalidToolInputError;
1283
- }) => Promise<LanguageModelV3ToolCall | null>;
1284
-
1285
- /**
1286
- Callback that is set using the `onStepFinish` option.
1287
-
1288
- @param stepResult - The result of the step.
1289
- */
1290
- type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
1291
- /**
1292
- Callback that is set using the `onFinish` option.
1293
-
1294
- @param event - The event that is passed to the callback.
1295
- */
1296
- type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
1367
+ mediaType: string;
1297
1368
  /**
1298
- * Details for all steps.
1369
+ * Optional filename of the file.
1299
1370
  */
1300
- readonly steps: StepResult<TOOLS>[];
1371
+ filename?: string;
1301
1372
  /**
1302
- * Total usage for all steps. This is the sum of the usage of all steps.
1373
+ * The URL of the file.
1374
+ * It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
1303
1375
  */
1304
- readonly totalUsage: LanguageModelUsage;
1376
+ url: string;
1305
1377
  /**
1306
- * Context that is passed into tool execution.
1307
- *
1308
- * Experimental (can break in patch releases).
1309
- *
1310
- * @default undefined
1378
+ * The provider metadata.
1311
1379
  */
1312
- experimental_context: unknown;
1313
- }) => PromiseLike<void> | void;
1380
+ providerMetadata?: ProviderMetadata;
1381
+ };
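
The source and file part shapes above are plain discriminated objects, so they can be constructed directly. A minimal sketch (assuming `FileUIPart` and `SourceUrlUIPart` are exported from the package root, as this declaration file suggests; the ids and URLs are hypothetical):

    import type { FileUIPart, SourceUrlUIPart } from 'ai';

    // A file part referencing an inline Data URL (base64 for "hello").
    const attachment: FileUIPart = {
      type: 'file',
      mediaType: 'text/plain',
      filename: 'notes.txt',
      url: 'data:text/plain;base64,aGVsbG8=',
    };

    // A source-url part pointing at a cited page.
    const citation: SourceUrlUIPart = {
      type: 'source-url',
      sourceId: 'source-1',
      url: 'https://example.com/article',
      title: 'Example article',
    };
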
1314
1382
  /**
1315
- Generate a text and call tools for a given prompt using a language model.
1316
-
1317
- This function does not stream the output. If you want to stream the output, use `streamText` instead.
1318
-
1319
- @param model - The language model to use.
1320
-
1321
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
1322
- @param toolChoice - The tool choice strategy. Default: 'auto'.
1323
-
1324
- @param system - A system message that will be part of the prompt.
1325
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
1326
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
1327
-
1328
- @param maxOutputTokens - Maximum number of tokens to generate.
1329
- @param temperature - Temperature setting.
1330
- The value is passed through to the provider. The range depends on the provider and model.
1331
- It is recommended to set either `temperature` or `topP`, but not both.
1332
- @param topP - Nucleus sampling.
1333
- The value is passed through to the provider. The range depends on the provider and model.
1334
- It is recommended to set either `temperature` or `topP`, but not both.
1335
- @param topK - Only sample from the top K options for each subsequent token.
1336
- Used to remove "long tail" low probability responses.
1337
- Recommended for advanced use cases only. You usually only need to use temperature.
1338
- @param presencePenalty - Presence penalty setting.
1339
- It affects the likelihood of the model to repeat information that is already in the prompt.
1340
- The value is passed through to the provider. The range depends on the provider and model.
1341
- @param frequencyPenalty - Frequency penalty setting.
1342
- It affects the likelihood of the model to repeatedly use the same words or phrases.
1343
- The value is passed through to the provider. The range depends on the provider and model.
1344
- @param stopSequences - Stop sequences.
1345
- If set, the model will stop generating text when one of the stop sequences is generated.
1346
- @param seed - The seed (integer) to use for random sampling.
1347
- If set and supported by the model, calls will generate deterministic results.
1348
-
1349
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
1350
- @param abortSignal - An optional abort signal that can be used to cancel the call.
1351
- @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
1352
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
1353
-
1354
- @param experimental_generateMessageId - Generate a unique ID for each message.
1355
-
1356
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
1357
- @param onFinish - Callback that is called when all steps are finished and the response is complete.
1358
-
1359
- @returns
1360
- A result object that contains the generated text, the results of the tool calls, and additional information.
1383
+ * A step boundary part of a message.
1361
1384
  */
1362
- declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
1363
- /**
1364
- The language model to use.
1365
- */
1366
- model: LanguageModel;
1367
- /**
1368
- The tools that the model can call. The model needs to support calling tools.
1369
- */
1370
- tools?: TOOLS;
1385
+ type StepStartUIPart = {
1386
+ type: 'step-start';
1387
+ };
1388
+ type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
1389
+ [NAME in keyof DATA_TYPES & string]: {
1390
+ type: `data-${NAME}`;
1391
+ id?: string;
1392
+ data: DATA_TYPES[NAME];
1393
+ };
1394
+ }>;
1395
+ type asUITool<TOOL extends UITool | Tool> = TOOL extends Tool ? InferUITool<TOOL> : TOOL;
1396
+ /**
1397
+ * Check if a message part is a data part.
1398
+ */
1399
+ declare function isDataUIPart<DATA_TYPES extends UIDataTypes>(part: UIMessagePart<DATA_TYPES, UITools>): part is DataUIPart<DATA_TYPES>;
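
`DataUIPart` keys each application-defined data type by a `data-${NAME}` discriminator, and `isDataUIPart` narrows a generic message part to it. A short sketch, assuming both names are exported from the package root and using a hypothetical `weather` data type:

    import { isDataUIPart, type UIMessagePart, type UITools } from 'ai';

    // Hypothetical application data types, keyed by name.
    type MyData = { weather: { city: string; tempC: number } };

    function describePart(part: UIMessagePart<MyData, UITools>): string {
      if (isDataUIPart(part) && part.type === 'data-weather') {
        // part.data is narrowed to MyData['weather'] here.
        return `${part.data.city}: ${part.data.tempC} °C`;
      }
      return part.type;
    }
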
1400
+ /**
1401
+ * A UI tool invocation contains all the information needed to render a tool invocation in the UI.
1402
+ * It can be derived from a tool without knowing the tool name, and can be used to define
1403
+ * UI components for the tool.
1404
+ */
1405
+ type UIToolInvocation<TOOL extends UITool | Tool> = {
1371
1406
  /**
1372
- The tool choice strategy. Default: 'auto'.
1407
+ * ID of the tool call.
1373
1408
  */
1374
- toolChoice?: ToolChoice<NoInfer<TOOLS>>;
1409
+ toolCallId: string;
1410
+ title?: string;
1375
1411
  /**
1376
- Condition for stopping the generation when there are tool results in the last step.
1377
- When the condition is an array, any of the conditions can be met to stop the generation.
1378
-
1379
- @default stepCountIs(1)
1412
+ * Whether the tool call was executed by the provider.
1380
1413
  */
1381
- stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
1414
+ providerExecuted?: boolean;
1415
+ } & ({
1416
+ state: 'input-streaming';
1417
+ input: DeepPartial<asUITool<TOOL>['input']> | undefined;
1418
+ output?: never;
1419
+ errorText?: never;
1420
+ approval?: never;
1421
+ } | {
1422
+ state: 'input-available';
1423
+ input: asUITool<TOOL>['input'];
1424
+ output?: never;
1425
+ errorText?: never;
1426
+ callProviderMetadata?: ProviderMetadata;
1427
+ approval?: never;
1428
+ } | {
1429
+ state: 'approval-requested';
1430
+ input: asUITool<TOOL>['input'];
1431
+ output?: never;
1432
+ errorText?: never;
1433
+ callProviderMetadata?: ProviderMetadata;
1434
+ approval: {
1435
+ id: string;
1436
+ approved?: never;
1437
+ reason?: never;
1438
+ };
1439
+ } | {
1440
+ state: 'approval-responded';
1441
+ input: asUITool<TOOL>['input'];
1442
+ output?: never;
1443
+ errorText?: never;
1444
+ callProviderMetadata?: ProviderMetadata;
1445
+ approval: {
1446
+ id: string;
1447
+ approved: boolean;
1448
+ reason?: string;
1449
+ };
1450
+ } | {
1451
+ state: 'output-available';
1452
+ input: asUITool<TOOL>['input'];
1453
+ output: asUITool<TOOL>['output'];
1454
+ errorText?: never;
1455
+ callProviderMetadata?: ProviderMetadata;
1456
+ preliminary?: boolean;
1457
+ approval?: {
1458
+ id: string;
1459
+ approved: true;
1460
+ reason?: string;
1461
+ };
1462
+ } | {
1463
+ state: 'output-error';
1464
+ input: asUITool<TOOL>['input'] | undefined;
1465
+ rawInput?: unknown;
1466
+ output?: never;
1467
+ errorText: string;
1468
+ callProviderMetadata?: ProviderMetadata;
1469
+ approval?: {
1470
+ id: string;
1471
+ approved: true;
1472
+ reason?: string;
1473
+ };
1474
+ } | {
1475
+ state: 'output-denied';
1476
+ input: asUITool<TOOL>['input'];
1477
+ output?: never;
1478
+ errorText?: never;
1479
+ callProviderMetadata?: ProviderMetadata;
1480
+ approval: {
1481
+ id: string;
1482
+ approved: false;
1483
+ reason?: string;
1484
+ };
1485
+ });
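
Because `UIToolInvocation` is a tagged union on `state`, rendering a tool invocation reduces to an exhaustive switch. A sketch under the assumption that `UIToolInvocation` is exported from the package root, with a hypothetical weather tool in UI-tool form:

    import type { UIToolInvocation } from 'ai';

    // Hypothetical tool shape ({ input, output }) satisfying UITool.
    type WeatherTool = { input: { city: string }; output: { tempC: number } };

    function label(invocation: UIToolInvocation<WeatherTool>): string {
      switch (invocation.state) {
        case 'input-streaming':
          return 'preparing call...'; // input may still be partial here
        case 'input-available':
        case 'approval-requested':
        case 'approval-responded':
          return `getWeather(${invocation.input.city})`;
        case 'output-available':
          return `${invocation.output.tempC} °C${invocation.preliminary ? ' (preliminary)' : ''}`;
        case 'output-error':
          return `failed: ${invocation.errorText}`;
        case 'output-denied':
          return 'denied by the user';
      }
    }
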
1486
+ type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
1487
+ [NAME in keyof TOOLS & string]: {
1488
+ type: `tool-${NAME}`;
1489
+ } & UIToolInvocation<TOOLS[NAME]>;
1490
+ }>;
1491
+ type DynamicToolUIPart = {
1492
+ type: 'dynamic-tool';
1382
1493
  /**
1383
- Optional telemetry configuration (experimental).
1494
+ * Name of the tool that is being called.
1384
1495
  */
1385
- experimental_telemetry?: TelemetrySettings;
1386
- /**
1387
- Additional provider-specific options. They are passed through
1388
- to the provider from the AI SDK and enable provider-specific
1389
- functionality that can be fully encapsulated in the provider.
1390
- */
1391
- providerOptions?: ProviderOptions;
1496
+ toolName: string;
1392
1497
  /**
1393
- * @deprecated Use `activeTools` instead.
1498
+ * ID of the tool call.
1394
1499
  */
1395
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
1500
+ toolCallId: string;
1501
+ title?: string;
1396
1502
  /**
1397
- Limits the tools that are available for the model to call without
1398
- changing the tool call and result types in the result.
1503
+ * Whether the tool call was executed by the provider.
1399
1504
  */
1400
- activeTools?: Array<keyof NoInfer<TOOLS>>;
1401
- /**
1402
- Optional specification for parsing structured outputs from the LLM response.
1403
- */
1404
- output?: OUTPUT;
1405
- /**
1406
- Optional specification for parsing structured outputs from the LLM response.
1407
-
1408
- @deprecated Use `output` instead.
1409
- */
1410
- experimental_output?: OUTPUT;
1411
- /**
1412
- Custom download function to use for URLs.
1413
-
1414
- By default, files are downloaded if the model does not support the URL for the given media type.
1415
- */
1416
- experimental_download?: DownloadFunction | undefined;
1417
- /**
1418
- * @deprecated Use `prepareStep` instead.
1419
- */
1420
- experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
1421
- /**
1422
- Optional function that you can use to provide different settings for a step.
1423
- */
1424
- prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
1425
- /**
1426
- A function that attempts to repair a tool call that failed to parse.
1427
- */
1428
- experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
1429
- /**
1430
- * Callback that is called when each step (LLM call) is finished, including intermediate steps.
1431
- */
1432
- onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
1433
- /**
1434
- * Callback that is called when all steps are finished and the response is complete.
1435
- */
1436
- onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
1437
- /**
1438
- * Context that is passed into tool execution.
1439
- *
1440
- * Experimental (can break in patch releases).
1441
- *
1442
- * @default undefined
1443
- */
1444
- experimental_context?: unknown;
1445
- /**
1446
- * Internal. For test use only. May change without notice.
1447
- */
1448
- _internal?: {
1449
- generateId?: IdGenerator;
1505
+ providerExecuted?: boolean;
1506
+ } & ({
1507
+ state: 'input-streaming';
1508
+ input: unknown | undefined;
1509
+ output?: never;
1510
+ errorText?: never;
1511
+ approval?: never;
1512
+ } | {
1513
+ state: 'input-available';
1514
+ input: unknown;
1515
+ output?: never;
1516
+ errorText?: never;
1517
+ callProviderMetadata?: ProviderMetadata;
1518
+ approval?: never;
1519
+ } | {
1520
+ state: 'approval-requested';
1521
+ input: unknown;
1522
+ output?: never;
1523
+ errorText?: never;
1524
+ callProviderMetadata?: ProviderMetadata;
1525
+ approval: {
1526
+ id: string;
1527
+ approved?: never;
1528
+ reason?: never;
1450
1529
  };
1451
- }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
1452
-
1530
+ } | {
1531
+ state: 'approval-responded';
1532
+ input: unknown;
1533
+ output?: never;
1534
+ errorText?: never;
1535
+ callProviderMetadata?: ProviderMetadata;
1536
+ approval: {
1537
+ id: string;
1538
+ approved: boolean;
1539
+ reason?: string;
1540
+ };
1541
+ } | {
1542
+ state: 'output-available';
1543
+ input: unknown;
1544
+ output: unknown;
1545
+ errorText?: never;
1546
+ callProviderMetadata?: ProviderMetadata;
1547
+ preliminary?: boolean;
1548
+ approval?: {
1549
+ id: string;
1550
+ approved: true;
1551
+ reason?: string;
1552
+ };
1553
+ } | {
1554
+ state: 'output-error';
1555
+ input: unknown;
1556
+ output?: never;
1557
+ errorText: string;
1558
+ callProviderMetadata?: ProviderMetadata;
1559
+ approval?: {
1560
+ id: string;
1561
+ approved: true;
1562
+ reason?: string;
1563
+ };
1564
+ } | {
1565
+ state: 'output-denied';
1566
+ input: unknown;
1567
+ output?: never;
1568
+ errorText?: never;
1569
+ callProviderMetadata?: ProviderMetadata;
1570
+ approval: {
1571
+ id: string;
1572
+ approved: false;
1573
+ reason?: string;
1574
+ };
1575
+ });
1453
1576
  /**
1454
- * Prunes model messages from a list of model messages.
1455
- *
1456
- * @param messages - The list of model messages to prune.
1457
- * @param reasoning - How to remove reasoning content from assistant messages. Default is `'none'`.
1458
- * @param toolCalls - How to prune tool call/results/approval content. Default is `[]`.
1459
- * @param emptyMessages - Whether to keep or remove messages whose content is empty after pruning. Default is `'remove'`.
1460
- *
1461
- * @returns The pruned list of model messages.
1577
+ * Type guard to check if a message part is a text part.
1462
1578
  */
1463
- declare function pruneMessages({ messages, reasoning, toolCalls, emptyMessages, }: {
1464
- messages: ModelMessage[];
1465
- reasoning?: 'all' | 'before-last-message' | 'none';
1466
- toolCalls?: 'all' | 'before-last-message' | `before-last-${number}-messages` | 'none' | Array<{
1467
- type: 'all' | 'before-last-message' | `before-last-${number}-messages`;
1468
- tools?: string[];
1469
- }>;
1470
- emptyMessages?: 'keep' | 'remove';
1471
- }): ModelMessage[];
1472
-
1579
+ declare function isTextUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is TextUIPart;
1473
1580
  /**
1474
- * Detects the first chunk in a buffer.
1475
- *
1476
- * @param buffer - The buffer to detect the first chunk in.
1477
- *
1478
- * @returns The first detected chunk, or `undefined` if no chunk was detected.
1581
+ * Type guard to check if a message part is a file part.
1479
1582
  */
1480
- type ChunkDetector = (buffer: string) => string | undefined | null;
1583
+ declare function isFileUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is FileUIPart;
1481
1584
  /**
1482
- * Smooths text and reasoning streaming output.
1483
- *
1484
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
1485
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, provide a custom RegExp pattern for custom chunking, provide an Intl.Segmenter for locale-aware word segmentation (recommended for CJK languages), or provide a custom ChunkDetector function.
1486
- *
1487
- * @returns A transform stream that smooths text streaming output.
1585
+ * Type guard to check if a message part is a reasoning part.
1488
1586
  */
1489
- declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
1490
- delayInMs?: number | null;
1491
- chunking?: 'word' | 'line' | RegExp | ChunkDetector | Intl.Segmenter;
1492
- /**
1493
- * Internal. For test use only. May change without notice.
1494
- */
1495
- _internal?: {
1496
- delay?: (delayInMs: number | null) => Promise<void>;
1497
- };
1498
- }): (options: {
1499
- tools: TOOLS;
1500
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
1501
-
1587
+ declare function isReasoningUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is ReasoningUIPart;
1502
1588
  /**
1503
- * Tool output when the tool execution has been denied (for static tools).
1589
+ * Check if a message part is a static tool part.
1590
+ *
1591
+ * Static tools are tools for which the types are known at development time.
1504
1592
  */
1505
- type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
1506
- [NAME in keyof TOOLS]: {
1507
- type: 'tool-output-denied';
1508
- toolCallId: string;
1509
- toolName: NAME & string;
1510
- providerExecuted?: boolean;
1511
- dynamic?: false | undefined;
1512
- };
1513
- }>;
1593
+ declare function isStaticToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
1514
1594
  /**
1515
- * Tool output when the tool execution has been denied.
1595
+ * Check if a message part is a tool part.
1596
+ *
1597
+ * Tool parts are either static or dynamic tools.
1598
+ *
1599
+ * Use `isStaticToolUIPart` or `isDynamicToolUIPart` to check the type of the tool.
1516
1600
  */
1517
- type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
1518
-
1601
+ declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
1519
1602
  /**
1520
- The data types that can be used in the UI message for the UI message data parts.
1603
+ * @deprecated Use isToolUIPart instead.
1521
1604
  */
1522
- type UIDataTypes = Record<string, unknown>;
1523
- type UITool = {
1524
- input: unknown;
1525
- output: unknown | undefined;
1526
- };
1605
+ declare const isToolOrDynamicToolUIPart: typeof isToolUIPart;
1527
1606
  /**
1528
- * Infer the input and output types of a tool so it can be used as a UI tool.
1607
+ * Returns the name of the static tool.
1608
+ *
1609
+ * The possible values are the keys of the tool set.
1529
1610
  */
1530
- type InferUITool<TOOL extends Tool> = {
1531
- input: InferToolInput<TOOL>;
1532
- output: InferToolOutput<TOOL>;
1533
- };
1611
+ declare function getStaticToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
1534
1612
  /**
1535
- * Infer the input and output types of a tool set so it can be used as a UI tool set.
1613
+ * Returns the name of the tool (static or dynamic).
1614
+ *
1615
+ * This function will not restrict the name to the keys of the tool set.
1616
+ * If you need to restrict the name to the keys of the tool set, use `getStaticToolName` instead.
1536
1617
  */
1537
- type InferUITools<TOOLS extends ToolSet> = {
1538
- [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
1539
- };
1540
- type UITools = Record<string, UITool>;
1618
+ declare function getToolName(part: ToolUIPart<UITools> | DynamicToolUIPart): string;
1541
1619
  /**
1542
- AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
1620
+ * @deprecated Use getToolName instead.
1543
1621
  */
1544
- interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
1545
- /**
1546
- A unique identifier for the message.
1547
- */
1622
+ declare const getToolOrDynamicToolName: typeof getToolName;
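
Together, the type guards and name helpers above support walking `message.parts` without manual type assertions. A minimal sketch, assuming the guards are exported from the package root:

    import { getToolName, isTextUIPart, isToolUIPart, type UIMessage } from 'ai';

    // Build a one-line summary: the text content plus any tools that were invoked.
    function summarize(message: UIMessage): string {
      const text = message.parts.filter(isTextUIPart).map(part => part.text).join('');
      const tools = message.parts.filter(isToolUIPart).map(getToolName);
      return tools.length > 0 ? `${text} [tools: ${tools.join(', ')}]` : text;
    }
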
1623
+ type InferUIMessageMetadata<T extends UIMessage> = T extends UIMessage<infer METADATA> ? METADATA : unknown;
1624
+ type InferUIMessageData<T extends UIMessage> = T extends UIMessage<unknown, infer DATA_TYPES> ? DATA_TYPES : UIDataTypes;
1625
+ type InferUIMessageTools<T extends UIMessage> = T extends UIMessage<unknown, UIDataTypes, infer TOOLS> ? TOOLS : UITools;
1626
+ type InferUIMessageToolCall<UI_MESSAGE extends UIMessage> = ValueOf<{
1627
+ [NAME in keyof InferUIMessageTools<UI_MESSAGE>]: ToolCall<NAME & string, InferUIMessageTools<UI_MESSAGE>[NAME] extends {
1628
+ input: infer INPUT;
1629
+ } ? INPUT : never> & {
1630
+ dynamic?: false;
1631
+ };
1632
+ }> | (ToolCall<string, unknown> & {
1633
+ dynamic: true;
1634
+ });
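
The `InferUIMessage*` helpers recover the generic arguments from a concrete `UIMessage` alias, which is useful when an app-wide message type is defined once and reused. A purely type-level sketch (all application types here are hypothetical):

    import type {
      InferUIMessageData,
      InferUIMessageMetadata,
      InferUIMessageTools,
      UIMessage,
    } from 'ai';

    // A hypothetical app-wide message type: metadata, data parts, and one tool.
    type MyMessage = UIMessage<
      { createdAt: number },
      { status: { text: string } },
      { getWeather: { input: { city: string }; output: { tempC: number } } }
    >;

    // Each helper recovers one generic argument from the alias:
    type MyMetadata = InferUIMessageMetadata<MyMessage>; // { createdAt: number }
    type MyDataTypes = InferUIMessageData<MyMessage>;    // { status: { text: string } }
    type MyTools = InferUIMessageTools<MyMessage>;       // { getWeather: ... }
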
1635
+
1636
+ declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
1637
+ type: "text-start";
1548
1638
  id: string;
1549
- /**
1550
- The role of the message.
1551
- */
1552
- role: 'system' | 'user' | 'assistant';
1553
- /**
1554
- The metadata of the message.
1555
- */
1556
- metadata?: METADATA;
1557
- /**
1558
- The parts of the message. Use this for rendering the message in the UI.
1559
-
1560
- System messages should be avoided (set the system prompt on the server instead).
1561
- They can have text parts.
1562
-
1563
- User messages can have text parts and file parts.
1564
-
1565
- Assistant messages can have text, reasoning, tool invocation, and file parts.
1566
- */
1567
- parts: Array<UIMessagePart<DATA_PARTS, TOOLS>>;
1568
- }
1569
- type UIMessagePart<DATA_TYPES extends UIDataTypes, TOOLS extends UITools> = TextUIPart | ReasoningUIPart | ToolUIPart<TOOLS> | DynamicToolUIPart | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
1570
- /**
1571
- * A text part of a message.
1572
- */
1573
- type TextUIPart = {
1574
- type: 'text';
1575
- /**
1576
- * The text content.
1577
- */
1578
- text: string;
1579
- /**
1580
- * The state of the text part.
1581
- */
1582
- state?: 'streaming' | 'done';
1583
- /**
1584
- * The provider metadata.
1585
- */
1586
- providerMetadata?: ProviderMetadata;
1587
- };
1588
- /**
1589
- * A reasoning part of a message.
1590
- */
1591
- type ReasoningUIPart = {
1592
- type: 'reasoning';
1593
- /**
1594
- * The reasoning text.
1595
- */
1596
- text: string;
1597
- /**
1598
- * The state of the reasoning part.
1599
- */
1600
- state?: 'streaming' | 'done';
1601
- /**
1602
- * The provider metadata.
1603
- */
1604
- providerMetadata?: ProviderMetadata;
1605
- };
1606
- /**
1607
- * A source part of a message.
1608
- */
1609
- type SourceUrlUIPart = {
1610
- type: 'source-url';
1639
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1640
+ } | {
1641
+ type: "text-delta";
1642
+ id: string;
1643
+ delta: string;
1644
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1645
+ } | {
1646
+ type: "text-end";
1647
+ id: string;
1648
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1649
+ } | {
1650
+ type: "error";
1651
+ errorText: string;
1652
+ } | {
1653
+ type: "tool-input-start";
1654
+ toolCallId: string;
1655
+ toolName: string;
1656
+ providerExecuted?: boolean | undefined;
1657
+ dynamic?: boolean | undefined;
1658
+ title?: string | undefined;
1659
+ } | {
1660
+ type: "tool-input-delta";
1661
+ toolCallId: string;
1662
+ inputTextDelta: string;
1663
+ } | {
1664
+ type: "tool-input-available";
1665
+ toolCallId: string;
1666
+ toolName: string;
1667
+ input: unknown;
1668
+ providerExecuted?: boolean | undefined;
1669
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1670
+ dynamic?: boolean | undefined;
1671
+ title?: string | undefined;
1672
+ } | {
1673
+ type: "tool-input-error";
1674
+ toolCallId: string;
1675
+ toolName: string;
1676
+ input: unknown;
1677
+ errorText: string;
1678
+ providerExecuted?: boolean | undefined;
1679
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1680
+ dynamic?: boolean | undefined;
1681
+ title?: string | undefined;
1682
+ } | {
1683
+ type: "tool-approval-request";
1684
+ approvalId: string;
1685
+ toolCallId: string;
1686
+ } | {
1687
+ type: "tool-output-available";
1688
+ toolCallId: string;
1689
+ output: unknown;
1690
+ providerExecuted?: boolean | undefined;
1691
+ dynamic?: boolean | undefined;
1692
+ preliminary?: boolean | undefined;
1693
+ } | {
1694
+ type: "tool-output-error";
1695
+ toolCallId: string;
1696
+ errorText: string;
1697
+ providerExecuted?: boolean | undefined;
1698
+ dynamic?: boolean | undefined;
1699
+ } | {
1700
+ type: "tool-output-denied";
1701
+ toolCallId: string;
1702
+ } | {
1703
+ type: "reasoning-start";
1704
+ id: string;
1705
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1706
+ } | {
1707
+ type: "reasoning-delta";
1708
+ id: string;
1709
+ delta: string;
1710
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1711
+ } | {
1712
+ type: "reasoning-end";
1713
+ id: string;
1714
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1715
+ } | {
1716
+ type: "source-url";
1611
1717
  sourceId: string;
1612
1718
  url: string;
1613
- title?: string;
1614
- providerMetadata?: ProviderMetadata;
1615
- };
1616
- /**
1617
- * A document source part of a message.
1618
- */
1619
- type SourceDocumentUIPart = {
1620
- type: 'source-document';
1719
+ title?: string | undefined;
1720
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1721
+ } | {
1722
+ type: "source-document";
1621
1723
  sourceId: string;
1622
1724
  mediaType: string;
1623
1725
  title: string;
1624
- filename?: string;
1625
- providerMetadata?: ProviderMetadata;
1626
- };
1627
- /**
1628
- * A file part of a message.
1629
- */
1630
- type FileUIPart = {
1631
- type: 'file';
1632
- /**
1633
- * IANA media type of the file.
1634
- *
1635
- * @see https://www.iana.org/assignments/media-types/media-types.xhtml
1636
- */
1637
- mediaType: string;
1638
- /**
1639
- * Optional filename of the file.
1640
- */
1641
- filename?: string;
1642
- /**
1643
- * The URL of the file.
1644
- * It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
1645
- */
1726
+ filename?: string | undefined;
1727
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1728
+ } | {
1729
+ type: "file";
1646
1730
  url: string;
1647
- /**
1648
- * The provider metadata.
1649
- */
1650
- providerMetadata?: ProviderMetadata;
1651
- };
1652
- /**
1653
- * A step boundary part of a message.
1654
- */
1655
- type StepStartUIPart = {
1656
- type: 'step-start';
1657
- };
1658
- type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
1731
+ mediaType: string;
1732
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1733
+ } | {
1734
+ type: `data-${string}`;
1735
+ data: unknown;
1736
+ id?: string | undefined;
1737
+ transient?: boolean | undefined;
1738
+ } | {
1739
+ type: "start-step";
1740
+ } | {
1741
+ type: "finish-step";
1742
+ } | {
1743
+ type: "start";
1744
+ messageId?: string | undefined;
1745
+ messageMetadata?: unknown;
1746
+ } | {
1747
+ type: "finish";
1748
+ finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
1749
+ messageMetadata?: unknown;
1750
+ } | {
1751
+ type: "abort";
1752
+ reason?: string | undefined;
1753
+ } | {
1754
+ type: "message-metadata";
1755
+ messageMetadata: unknown;
1756
+ }>;
1757
+ type DataUIMessageChunk<DATA_TYPES extends UIDataTypes> = ValueOf<{
1659
1758
  [NAME in keyof DATA_TYPES & string]: {
1660
1759
  type: `data-${NAME}`;
1661
1760
  id?: string;
1662
1761
  data: DATA_TYPES[NAME];
1762
+ transient?: boolean;
1663
1763
  };
1664
1764
  }>;
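
`DataUIMessageChunk` mirrors `DataUIPart` but adds a `transient` flag; judging by the name, a transient chunk is delivered to the client without being persisted into the message's parts (an assumption, since the consuming code is not shown in this excerpt). A sketch with a hypothetical `status` data type:

    import type { DataUIMessageChunk } from 'ai';

    type MyData = { status: { text: string } };

    // A transient status update: streamed to the client, not kept on the message.
    const chunk: DataUIMessageChunk<MyData> = {
      type: 'data-status',
      data: { text: 'Searching sources...' },
      transient: true,
    };
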
1665
- type asUITool<TOOL extends UITool | Tool> = TOOL extends Tool ? InferUITool<TOOL> : TOOL;
1666
- /**
1667
- * Check if a message part is a data part.
1668
- */
1669
- declare function isDataUIPart<DATA_TYPES extends UIDataTypes>(part: UIMessagePart<DATA_TYPES, UITools>): part is DataUIPart<DATA_TYPES>;
1670
- /**
1671
- * A UI tool invocation contains all the information needed to render a tool invocation in the UI.
1672
- * It can be derived from a tool without knowing the tool name, and can be used to define
1673
- * UI components for the tool.
1674
- */
1675
- type UIToolInvocation<TOOL extends UITool | Tool> = {
1676
- /**
1677
- * ID of the tool call.
1678
- */
1679
- toolCallId: string;
1680
- title?: string;
1681
- /**
1682
- * Whether the tool call was executed by the provider.
1683
- */
1684
- providerExecuted?: boolean;
1685
- } & ({
1686
- state: 'input-streaming';
1687
- input: DeepPartial<asUITool<TOOL>['input']> | undefined;
1688
- output?: never;
1689
- errorText?: never;
1690
- approval?: never;
1691
- } | {
1692
- state: 'input-available';
1693
- input: asUITool<TOOL>['input'];
1694
- output?: never;
1695
- errorText?: never;
1696
- callProviderMetadata?: ProviderMetadata;
1697
- approval?: never;
1765
+ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = {
1766
+ type: 'text-start';
1767
+ id: string;
1768
+ providerMetadata?: ProviderMetadata;
1698
1769
  } | {
1699
- state: 'approval-requested';
1700
- input: asUITool<TOOL>['input'];
1701
- output?: never;
1702
- errorText?: never;
1703
- callProviderMetadata?: ProviderMetadata;
1704
- approval: {
1705
- id: string;
1706
- approved?: never;
1707
- reason?: never;
1708
- };
1770
+ type: 'text-delta';
1771
+ delta: string;
1772
+ id: string;
1773
+ providerMetadata?: ProviderMetadata;
1709
1774
  } | {
1710
- state: 'approval-responded';
1711
- input: asUITool<TOOL>['input'];
1712
- output?: never;
1713
- errorText?: never;
1714
- callProviderMetadata?: ProviderMetadata;
1715
- approval: {
1716
- id: string;
1717
- approved: boolean;
1718
- reason?: string;
1719
- };
1720
- } | {
1721
- state: 'output-available';
1722
- input: asUITool<TOOL>['input'];
1723
- output: asUITool<TOOL>['output'];
1724
- errorText?: never;
1725
- callProviderMetadata?: ProviderMetadata;
1726
- preliminary?: boolean;
1727
- approval?: {
1728
- id: string;
1729
- approved: true;
1730
- reason?: string;
1731
- };
1732
- } | {
1733
- state: 'output-error';
1734
- input: asUITool<TOOL>['input'] | undefined;
1735
- rawInput?: unknown;
1736
- output?: never;
1737
- errorText: string;
1738
- callProviderMetadata?: ProviderMetadata;
1739
- approval?: {
1740
- id: string;
1741
- approved: true;
1742
- reason?: string;
1743
- };
1744
- } | {
1745
- state: 'output-denied';
1746
- input: asUITool<TOOL>['input'];
1747
- output?: never;
1748
- errorText?: never;
1749
- callProviderMetadata?: ProviderMetadata;
1750
- approval: {
1751
- id: string;
1752
- approved: false;
1753
- reason?: string;
1754
- };
1755
- });
1756
- type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
1757
- [NAME in keyof TOOLS & string]: {
1758
- type: `tool-${NAME}`;
1759
- } & UIToolInvocation<TOOLS[NAME]>;
1760
- }>;
1761
- type DynamicToolUIPart = {
1762
- type: 'dynamic-tool';
1763
- /**
1764
- * Name of the tool that is being called.
1765
- */
1766
- toolName: string;
1767
- /**
1768
- * ID of the tool call.
1769
- */
1770
- toolCallId: string;
1771
- title?: string;
1772
- /**
1773
- * Whether the tool call was executed by the provider.
1774
- */
1775
- providerExecuted?: boolean;
1776
- } & ({
1777
- state: 'input-streaming';
1778
- input: unknown | undefined;
1779
- output?: never;
1780
- errorText?: never;
1781
- approval?: never;
1782
- } | {
1783
- state: 'input-available';
1784
- input: unknown;
1785
- output?: never;
1786
- errorText?: never;
1787
- callProviderMetadata?: ProviderMetadata;
1788
- approval?: never;
1789
- } | {
1790
- state: 'approval-requested';
1791
- input: unknown;
1792
- output?: never;
1793
- errorText?: never;
1794
- callProviderMetadata?: ProviderMetadata;
1795
- approval: {
1796
- id: string;
1797
- approved?: never;
1798
- reason?: never;
1799
- };
1800
- } | {
1801
- state: 'approval-responded';
1802
- input: unknown;
1803
- output?: never;
1804
- errorText?: never;
1805
- callProviderMetadata?: ProviderMetadata;
1806
- approval: {
1807
- id: string;
1808
- approved: boolean;
1809
- reason?: string;
1810
- };
1811
- } | {
1812
- state: 'output-available';
1813
- input: unknown;
1814
- output: unknown;
1815
- errorText?: never;
1816
- callProviderMetadata?: ProviderMetadata;
1817
- preliminary?: boolean;
1818
- approval?: {
1819
- id: string;
1820
- approved: true;
1821
- reason?: string;
1822
- };
1823
- } | {
1824
- state: 'output-error';
1825
- input: unknown;
1826
- output?: never;
1827
- errorText: string;
1828
- callProviderMetadata?: ProviderMetadata;
1829
- approval?: {
1830
- id: string;
1831
- approved: true;
1832
- reason?: string;
1833
- };
1775
+ type: 'text-end';
1776
+ id: string;
1777
+ providerMetadata?: ProviderMetadata;
1834
1778
  } | {
1835
- state: 'output-denied';
1836
- input: unknown;
1837
- output?: never;
1838
- errorText?: never;
1839
- callProviderMetadata?: ProviderMetadata;
1840
- approval: {
1841
- id: string;
1842
- approved: false;
1843
- reason?: string;
1844
- };
1845
- });
1846
- /**
1847
- * Type guard to check if a message part is a text part.
1848
- */
1849
- declare function isTextUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is TextUIPart;
1850
- /**
1851
- * Type guard to check if a message part is a file part.
1852
- */
1853
- declare function isFileUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is FileUIPart;
1854
- /**
1855
- * Type guard to check if a message part is a reasoning part.
1856
- */
1857
- declare function isReasoningUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is ReasoningUIPart;
1858
- /**
1859
- * Check if a message part is a static tool part.
1860
- *
1861
- * Static tools are tools for which the types are known at development time.
1862
- */
1863
- declare function isStaticToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
1864
- /**
1865
- * Check if a message part is a tool part.
1866
- *
1867
- * Tool parts are either static or dynamic tools.
1868
- *
1869
- * Use `isStaticToolUIPart` or `isDynamicToolUIPart` to check the type of the tool.
1870
- */
1871
- declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
1872
- /**
1873
- * @deprecated Use isToolUIPart instead.
1874
- */
1875
- declare const isToolOrDynamicToolUIPart: typeof isToolUIPart;
1876
- /**
1877
- * Returns the name of the static tool.
1878
- *
1879
- * The possible values are the keys of the tool set.
1880
- */
1881
- declare function getStaticToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
1882
- /**
1883
- * Returns the name of the tool (static or dynamic).
1884
- *
1885
- * This function will not restrict the name to the keys of the tool set.
1886
- * If you need to restrict the name to the keys of the tool set, use `getStaticToolName` instead.
1887
- */
1888
- declare function getToolName(part: ToolUIPart<UITools> | DynamicToolUIPart): string;
1889
- /**
1890
- * @deprecated Use getToolName instead.
1891
- */
1892
- declare const getToolOrDynamicToolName: typeof getToolName;
1893
- type InferUIMessageMetadata<T extends UIMessage> = T extends UIMessage<infer METADATA> ? METADATA : unknown;
1894
- type InferUIMessageData<T extends UIMessage> = T extends UIMessage<unknown, infer DATA_TYPES> ? DATA_TYPES : UIDataTypes;
1895
- type InferUIMessageTools<T extends UIMessage> = T extends UIMessage<unknown, UIDataTypes, infer TOOLS> ? TOOLS : UITools;
1896
- type InferUIMessageToolCall<UI_MESSAGE extends UIMessage> = ValueOf<{
1897
- [NAME in keyof InferUIMessageTools<UI_MESSAGE>]: ToolCall<NAME & string, InferUIMessageTools<UI_MESSAGE>[NAME] extends {
1898
- input: infer INPUT;
1899
- } ? INPUT : never> & {
1900
- dynamic?: false;
1901
- };
1902
- }> | (ToolCall<string, unknown> & {
1903
- dynamic: true;
1904
- });
1905
-
1906
- declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
1907
- type: "text-start";
1779
+ type: 'reasoning-start';
1908
1780
  id: string;
1909
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1781
+ providerMetadata?: ProviderMetadata;
1910
1782
  } | {
1911
- type: "text-delta";
1783
+ type: 'reasoning-delta';
1912
1784
  id: string;
1913
1785
  delta: string;
1914
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1786
+ providerMetadata?: ProviderMetadata;
1915
1787
  } | {
1916
- type: "text-end";
1788
+ type: 'reasoning-end';
1917
1789
  id: string;
1918
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1790
+ providerMetadata?: ProviderMetadata;
1919
1791
  } | {
1920
- type: "error";
1792
+ type: 'error';
1921
1793
  errorText: string;
1922
1794
  } | {
1923
- type: "tool-input-start";
1924
- toolCallId: string;
1925
- toolName: string;
1926
- providerExecuted?: boolean | undefined;
1927
- dynamic?: boolean | undefined;
1928
- title?: string | undefined;
1929
- } | {
1930
- type: "tool-input-delta";
1931
- toolCallId: string;
1932
- inputTextDelta: string;
1933
- } | {
1934
- type: "tool-input-available";
1795
+ type: 'tool-input-available';
1935
1796
  toolCallId: string;
1936
1797
  toolName: string;
1937
1798
  input: unknown;
1938
- providerExecuted?: boolean | undefined;
1939
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1940
- dynamic?: boolean | undefined;
1941
- title?: string | undefined;
1799
+ providerExecuted?: boolean;
1800
+ providerMetadata?: ProviderMetadata;
1801
+ dynamic?: boolean;
1802
+ title?: string;
1942
1803
  } | {
1943
- type: "tool-input-error";
1804
+ type: 'tool-input-error';
1944
1805
  toolCallId: string;
1945
1806
  toolName: string;
1946
1807
  input: unknown;
1808
+ providerExecuted?: boolean;
1809
+ providerMetadata?: ProviderMetadata;
1810
+ dynamic?: boolean;
1947
1811
  errorText: string;
1948
- providerExecuted?: boolean | undefined;
1949
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1950
- dynamic?: boolean | undefined;
1951
- title?: string | undefined;
1812
+ title?: string;
1952
1813
  } | {
1953
- type: "tool-approval-request";
1814
+ type: 'tool-approval-request';
1954
1815
  approvalId: string;
1955
1816
  toolCallId: string;
1956
1817
  } | {
1957
- type: "tool-output-available";
1818
+ type: 'tool-output-available';
1958
1819
  toolCallId: string;
1959
1820
  output: unknown;
1960
- providerExecuted?: boolean | undefined;
1961
- dynamic?: boolean | undefined;
1962
- preliminary?: boolean | undefined;
1821
+ providerExecuted?: boolean;
1822
+ dynamic?: boolean;
1823
+ preliminary?: boolean;
1963
1824
  } | {
1964
- type: "tool-output-error";
1825
+ type: 'tool-output-error';
1965
1826
  toolCallId: string;
1966
1827
  errorText: string;
1967
- providerExecuted?: boolean | undefined;
1968
- dynamic?: boolean | undefined;
1828
+ providerExecuted?: boolean;
1829
+ dynamic?: boolean;
1969
1830
  } | {
1970
- type: "tool-output-denied";
1831
+ type: 'tool-output-denied';
1971
1832
  toolCallId: string;
1972
1833
  } | {
1973
- type: "reasoning-start";
1974
- id: string;
1975
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1976
- } | {
1977
- type: "reasoning-delta";
1978
- id: string;
1979
- delta: string;
1980
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1834
+ type: 'tool-input-start';
1835
+ toolCallId: string;
1836
+ toolName: string;
1837
+ providerExecuted?: boolean;
1838
+ dynamic?: boolean;
1839
+ title?: string;
1981
1840
  } | {
1982
- type: "reasoning-end";
1983
- id: string;
1984
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1841
+ type: 'tool-input-delta';
1842
+ toolCallId: string;
1843
+ inputTextDelta: string;
1985
1844
  } | {
1986
- type: "source-url";
1845
+ type: 'source-url';
1987
1846
  sourceId: string;
1988
1847
  url: string;
1989
- title?: string | undefined;
1990
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1848
+ title?: string;
1849
+ providerMetadata?: ProviderMetadata;
1991
1850
  } | {
1992
- type: "source-document";
1851
+ type: 'source-document';
1993
1852
  sourceId: string;
1994
1853
  mediaType: string;
1995
1854
  title: string;
1996
- filename?: string | undefined;
1997
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
1855
+ filename?: string;
1856
+ providerMetadata?: ProviderMetadata;
1998
1857
  } | {
1999
- type: "file";
1858
+ type: 'file';
2000
1859
  url: string;
2001
1860
  mediaType: string;
2002
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
2003
- } | {
2004
- type: `data-${string}`;
2005
- data: unknown;
2006
- id?: string | undefined;
2007
- transient?: boolean | undefined;
2008
- } | {
2009
- type: "start-step";
1861
+ providerMetadata?: ProviderMetadata;
1862
+ } | DataUIMessageChunk<DATA_TYPES> | {
1863
+ type: 'start-step';
2010
1864
  } | {
2011
- type: "finish-step";
1865
+ type: 'finish-step';
2012
1866
  } | {
2013
- type: "start";
2014
- messageId?: string | undefined;
2015
- messageMetadata?: unknown;
1867
+ type: 'start';
1868
+ messageId?: string;
1869
+ messageMetadata?: METADATA;
2016
1870
  } | {
2017
- type: "finish";
2018
- finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
2019
- messageMetadata?: unknown;
1871
+ type: 'finish';
1872
+ finishReason?: FinishReason;
1873
+ messageMetadata?: METADATA;
2020
1874
  } | {
2021
- type: "abort";
2022
- reason?: string | undefined;
1875
+ type: 'abort';
1876
+ reason?: string;
2023
1877
  } | {
2024
- type: "message-metadata";
2025
- messageMetadata: unknown;
2026
- }>;
2027
- type DataUIMessageChunk<DATA_TYPES extends UIDataTypes> = ValueOf<{
2028
- [NAME in keyof DATA_TYPES & string]: {
2029
- type: `data-${NAME}`;
2030
- id?: string;
2031
- data: DATA_TYPES[NAME];
2032
- transient?: boolean;
2033
- };
2034
- }>;
2035
- type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = {
1878
+ type: 'message-metadata';
1879
+ messageMetadata: METADATA;
1880
+ };
1881
+ type InferUIMessageChunk<T extends UIMessage> = UIMessageChunk<InferUIMessageMetadata<T>, InferUIMessageData<T>>;
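
`UIMessageChunk` is the wire-level union for UI message streams, so client-side consumption is a fold over tagged chunks. A minimal sketch that accumulates only the text deltas of a finished chunk sequence:

    import type { UIMessageChunk } from 'ai';

    // Fold a chunk sequence into the plain text of the streamed message.
    function collectText(chunks: Iterable<UIMessageChunk>): string {
      const textById = new Map<string, string>();
      for (const chunk of chunks) {
        if (chunk.type === 'text-start') {
          textById.set(chunk.id, '');
        } else if (chunk.type === 'text-delta') {
          textById.set(chunk.id, (textById.get(chunk.id) ?? '') + chunk.delta);
        }
      }
      return [...textById.values()].join('');
    }
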
1882
+
1883
+ type UIMessageStreamOnFinishCallback<UI_MESSAGE extends UIMessage> = (event: {
1884
+ /**
1885
+ * The updated list of UI messages.
1886
+ */
1887
+ messages: UI_MESSAGE[];
1888
+ /**
1889
+ * Indicates whether the response message is a continuation of the last original message,
1890
+ * or if a new message was created.
1891
+ */
1892
+ isContinuation: boolean;
1893
+ /**
1894
+ * Indicates whether the stream was aborted.
1895
+ */
1896
+ isAborted: boolean;
1897
+ /**
1898
+ * The message that was sent to the client as a response
1899
+ * (including the original message if it was extended).
1900
+ */
1901
+ responseMessage: UI_MESSAGE;
1902
+ /**
1903
+ * The reason why the generation finished.
1904
+ */
1905
+ finishReason?: FinishReason;
1906
+ }) => PromiseLike<void> | void;
1907
+
1908
+ type UIMessageStreamResponseInit = ResponseInit & {
1909
+ consumeSseStream?: (options: {
1910
+ stream: ReadableStream<string>;
1911
+ }) => PromiseLike<void> | void;
1912
+ };
1913
+
1914
+ /**
1915
+ * A type that combines AsyncIterable and ReadableStream.
1916
+ * This allows a ReadableStream to be consumed using for-await-of syntax.
1917
+ */
1918
+ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
1919
+
1920
+ type ErrorHandler = (error: unknown) => void;
1921
+
1922
+ type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
1923
+ /**
1924
+ * The original messages. If they are provided, persistence mode is assumed,
1925
+ * and a message ID is provided for the response message.
1926
+ */
1927
+ originalMessages?: UI_MESSAGE[];
1928
+ /**
1929
+ * Generate a message ID for the response message.
1930
+ *
1931
+ * If not provided, no message ID will be set for the response message (unless
1932
+ * the original messages are provided and the last message is an assistant message).
1933
+ */
1934
+ generateMessageId?: IdGenerator;
1935
+ onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
1936
+ /**
1937
+ * Extracts message metadata that will be sent to the client.
1938
+ *
1939
+ * Called on `start` and `finish` events.
1940
+ */
1941
+ messageMetadata?: (options: {
1942
+ part: TextStreamPart<ToolSet>;
1943
+ }) => InferUIMessageMetadata<UI_MESSAGE> | undefined;
1944
+ /**
1945
+ * Send reasoning parts to the client.
1946
+ * Defaults to true.
1947
+ */
1948
+ sendReasoning?: boolean;
1949
+ /**
1950
+ * Send source parts to the client.
1951
+ * Defaults to false.
1952
+ */
1953
+ sendSources?: boolean;
1954
+ /**
1955
+ * Send the finish event to the client.
1956
+ * Set to false if you are using additional streamText calls
1957
+ * that send additional data.
1958
+ * Defaults to true.
1959
+ */
1960
+ sendFinish?: boolean;
1961
+ /**
1962
+ * Send the message start event to the client.
1963
+ * Set to false if you are using additional streamText calls
1964
+ * and the message start event has already been sent.
1965
+ * Defaults to true.
1966
+ */
1967
+ sendStart?: boolean;
1968
+ /**
1969
+ * Process an error, e.g. to log it. Defaults to `() => 'An error occurred.'`.
1970
+ *
1971
+ * @return error message to include in the data stream.
1972
+ */
1973
+ onError?: (error: unknown) => string;
1974
+ };
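
These options are consumed by `toUIMessageStream` and `toUIMessageStreamResponse` on a `streamText` result. A sketch of a server route handler (the model id is hypothetical, and `convertToModelMessages` is assumed to be this package's UIMessage-to-ModelMessage helper):

    import { convertToModelMessages, streamText, type UIMessage } from 'ai';

    // Hypothetical route handler: stream a reply and persist the final messages.
    export async function POST(req: Request): Promise<Response> {
      const { messages }: { messages: UIMessage[] } = await req.json();

      const result = streamText({
        model: 'openai/gpt-5', // hypothetical gateway model id
        messages: convertToModelMessages(messages),
      });

      return result.toUIMessageStreamResponse({
        originalMessages: messages, // enables persistence mode (see above)
        sendSources: true,
        onFinish: ({ responseMessage, isAborted }) => {
          if (!isAborted) {
            // persist responseMessage to your message store here
          }
        },
      });
    }
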
1975
+ type ConsumeStreamOptions = {
1976
+ onError?: ErrorHandler;
1977
+ };
1978
+ /**
1979
+ A result object for accessing different stream types and additional information.
1980
+ */
1981
+ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
1982
+ /**
1983
+ The content that was generated in the last step.
1984
+
1985
+ Automatically consumes the stream.
1986
+ */
1987
+ readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
1988
+ /**
1989
+ The full text that has been generated by the last step.
1990
+
1991
+ Automatically consumes the stream.
1992
+ */
1993
+ readonly text: PromiseLike<string>;
1994
+ /**
1995
+ The full reasoning that the model has generated.
1996
+
1997
+ Automatically consumes the stream.
1998
+ */
1999
+ readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
2000
+ /**
2001
+ The reasoning that has been generated by the last step.
2002
+
2003
+ Automatically consumes the stream.
2004
+ */
2005
+ readonly reasoningText: PromiseLike<string | undefined>;
2006
+ /**
2007
+ Files that have been generated by the model in the last step.
2008
+
2009
+ Automatically consumes the stream.
2010
+ */
2011
+ readonly files: PromiseLike<GeneratedFile[]>;
2012
+ /**
2013
+ Sources that have been used as references in the last step.
2014
+
2015
+ Automatically consumes the stream.
2016
+ */
2017
+ readonly sources: PromiseLike<Source[]>;
2018
+ /**
2019
+ The tool calls that have been executed in the last step.
2020
+
2021
+ Automatically consumes the stream.
2022
+ */
2023
+ readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
2024
+ /**
2025
+ The static tool calls that have been executed in the last step.
2026
+
2027
+ Automatically consumes the stream.
2028
+ */
2029
+ readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
2030
+ /**
2031
+ The dynamic tool calls that have been executed in the last step.
2032
+
2033
+ Automatically consumes the stream.
2034
+ */
2035
+ readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
2036
+ /**
2037
+ The static tool results that have been generated in the last step.
2038
+
2039
+ Automatically consumes the stream.
2040
+ */
2041
+ readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
2042
+ /**
2043
+ The dynamic tool results that have been generated in the last step.
2044
+
2045
+ Automatically consumes the stream.
2046
+ */
2047
+ readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
2048
+ /**
2049
+ The tool results that have been generated in the last step.
2050
+
2051
+ Automatically consumes the stream.
2052
+ */
2053
+ readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
2054
+ /**
2055
+ * The unified finish reason why the generation finished. Taken from the last step.
2056
+ *
2057
+ * Automatically consumes the stream.
2058
+ */
2059
+ readonly finishReason: PromiseLike<FinishReason>;
2060
+ /**
2061
+ * The raw reason why the generation finished (from the provider). Taken from the last step.
2062
+ *
2063
+ * Automatically consumes the stream.
2064
+ */
2065
+ readonly rawFinishReason: PromiseLike<string | undefined>;
2066
+ /**
2067
+ The token usage of the last step.
2068
+
2069
+ Automatically consumes the stream.
2070
+ */
2071
+ readonly usage: PromiseLike<LanguageModelUsage>;
2072
+ /**
2073
+ The total token usage of the generated response.
2074
+ When there are multiple steps, the usage is the sum of all step usages.
2075
+
2076
+ Automatically consumes the stream.
2077
+ */
2078
+ readonly totalUsage: PromiseLike<LanguageModelUsage>;
2079
+ /**
2080
+ Warnings from the model provider (e.g. unsupported settings) for the first step.
2081
+
2082
+ Automatically consumes the stream.
2083
+ */
2084
+ readonly warnings: PromiseLike<CallWarning[] | undefined>;
2085
+ /**
2086
+ Details for all steps.
2087
+ You can use this to get information about intermediate steps,
2088
+ such as the tool calls or the response headers.
2089
+
2090
+ Automatically consumes the stream.
2091
+ */
2092
+ readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
2093
+ /**
2094
+ Additional request information from the last step.
2095
+
2096
+ Automatically consumes the stream.
2097
+ */
2098
+ readonly request: PromiseLike<LanguageModelRequestMetadata>;
2099
+ /**
2100
+ Additional response information from the last step.
2101
+
2102
+ Automatically consumes the stream.
2103
+ */
2104
+ readonly response: PromiseLike<LanguageModelResponseMetadata & {
2105
+ /**
2106
+ The response messages that were generated during the call. It consists of an assistant message,
2107
+ potentially containing tool calls.
2108
+
2109
+ When there are tool results, there is an additional tool message with the tool results that are available.
2110
+ If there are tools that do not have execute functions, they are not included in the tool results and
2111
+ need to be added separately.
2112
+ */
2113
+ messages: Array<ResponseMessage>;
2114
+ }>;
2115
+ /**
2116
+ Additional provider-specific metadata from the last step.
2117
+ Metadata is passed through from the provider to the AI SDK and
2118
+ enables provider-specific results that can be fully encapsulated in the provider.
2119
+ */
2120
+ readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
2121
+ /**
2122
+ A text stream that returns only the generated text deltas. You can use it
2123
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
2124
+ stream will throw the error.
2125
+ */
2126
+ readonly textStream: AsyncIterableStream<string>;
2127
+ /**
2128
+ A stream with all events, including text deltas, tool calls, tool results, and
2129
+ errors.
2130
+ You can use it as either an AsyncIterable or a ReadableStream.
2131
+ Only errors that stop the stream, such as network errors, are thrown.
2132
+ */
2133
+ readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
2134
+ /**
2135
+ * A stream of partial outputs. It uses the `output` specification.
2136
+ *
2137
+ * @deprecated Use `partialOutputStream` instead.
2138
+ */
2139
+ readonly experimental_partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
2140
+ /**
2141
+ * A stream of partial parsed outputs. It uses the `output` specification.
2142
+ */
2143
+ readonly partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
2144
+ /**
2145
+ * A stream of individual array elements as they complete.
2146
+ * Only available when using `output: Output.array()`.
2147
+ */
2148
+ readonly elementStream: AsyncIterableStream<InferElementOutput<OUTPUT>>;
2149
+ /**
2150
+ * The complete parsed output. It uses the `output` specification.
2151
+ */
2152
+ readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
2153
+ /**
2154
+ Consumes the stream without processing the parts.
2155
+ This is useful to force the stream to finish.
2156
+ It effectively removes the backpressure and allows the stream to finish,
2157
+ triggering the `onFinish` callback and the promise resolution.
2158
+
2159
+ If an error occurs, it is passed to the optional `onError` callback.
2160
+ */
2161
+ consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
2162
+ /**
2163
+ Converts the result to a UI message stream.
2164
+
2165
+ @return A UI message stream.
2166
+ */
2167
+ toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
2168
+ /**
2169
+ * Writes UI message stream output to a Node.js response-like object.
2170
+ */
2171
+ pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
2172
+ /**
2173
+ Writes text delta output to a Node.js response-like object.
2174
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2175
+ writes each text delta as a separate chunk.
2176
+
2177
+ @param response A Node.js response-like object (ServerResponse).
2178
+ @param init Optional headers, status code, and status text.
2179
+ */
2180
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
2181
+ /**
2182
+ Converts the result to a streamed response object with a stream data part stream.
2183
+
2184
+ @return A response object.
2185
+ */
2186
+ toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
2187
+ /**
2188
+ Creates a simple text stream response.
2189
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
2190
+ Non-text-delta events are ignored.
2191
+ @param init Optional headers, status code, and status text.
2192
+ */
2193
+ toTextStreamResponse(init?: ResponseInit): Response;
2194
+ }
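
Most of the promised accessors on `StreamTextResult` resolve only once the stream has been consumed, e.g. by iterating `textStream`. A usage sketch (hypothetical model id; `process.stdout` assumes a Node.js environment):

    import { streamText } from 'ai';

    async function main() {
      const result = streamText({
        model: 'openai/gpt-5', // hypothetical gateway model id
        prompt: 'Write a haiku about type declarations.',
      });

      // textStream is both an AsyncIterable and a ReadableStream.
      for await (const delta of result.textStream) {
        process.stdout.write(delta);
      }

      // These accessors resolve once the stream has been fully consumed.
      console.log('\nfinish reason:', await result.finishReason);
      console.log('total usage:', await result.totalUsage);
    }
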
2195
+ type TextStreamPart<TOOLS extends ToolSet> = {
2036
2196
  type: 'text-start';
2037
2197
  id: string;
2038
2198
  providerMetadata?: ProviderMetadata;
2039
- } | {
2040
- type: 'text-delta';
2041
- delta: string;
2042
- id: string;
2043
- providerMetadata?: ProviderMetadata;
2044
2199
  } | {
2045
2200
  type: 'text-end';
2046
2201
  id: string;
2047
2202
  providerMetadata?: ProviderMetadata;
2048
2203
  } | {
2049
- type: 'reasoning-start';
2204
+ type: 'text-delta';
2050
2205
  id: string;
2051
2206
  providerMetadata?: ProviderMetadata;
2207
+ text: string;
2052
2208
  } | {
2053
- type: 'reasoning-delta';
2209
+ type: 'reasoning-start';
2054
2210
  id: string;
2055
- delta: string;
2056
2211
  providerMetadata?: ProviderMetadata;
2057
2212
  } | {
2058
2213
  type: 'reasoning-end';
2059
2214
  id: string;
2060
2215
  providerMetadata?: ProviderMetadata;
2061
2216
  } | {
2062
- type: 'error';
2063
- errorText: string;
2064
- } | {
2065
- type: 'tool-input-available';
2066
- toolCallId: string;
2067
- toolName: string;
2068
- input: unknown;
2069
- providerExecuted?: boolean;
2070
- providerMetadata?: ProviderMetadata;
2071
- dynamic?: boolean;
2072
- title?: string;
2073
- } | {
2074
- type: 'tool-input-error';
2075
- toolCallId: string;
2076
- toolName: string;
2077
- input: unknown;
2078
- providerExecuted?: boolean;
2217
+ type: 'reasoning-delta';
2079
2218
  providerMetadata?: ProviderMetadata;
2080
- dynamic?: boolean;
2081
- errorText: string;
2082
- title?: string;
2083
- } | {
2084
- type: 'tool-approval-request';
2085
- approvalId: string;
2086
- toolCallId: string;
2087
- } | {
2088
- type: 'tool-output-available';
2089
- toolCallId: string;
2090
- output: unknown;
2091
- providerExecuted?: boolean;
2092
- dynamic?: boolean;
2093
- preliminary?: boolean;
2094
- } | {
2095
- type: 'tool-output-error';
2096
- toolCallId: string;
2097
- errorText: string;
2098
- providerExecuted?: boolean;
2099
- dynamic?: boolean;
2100
- } | {
2101
- type: 'tool-output-denied';
2102
- toolCallId: string;
2219
+ id: string;
2220
+ text: string;
2103
2221
  } | {
2104
2222
  type: 'tool-input-start';
2105
- toolCallId: string;
2223
+ id: string;
2106
2224
  toolName: string;
2225
+ providerMetadata?: ProviderMetadata;
2107
2226
  providerExecuted?: boolean;
2108
2227
  dynamic?: boolean;
2109
2228
  title?: string;
2110
2229
  } | {
2111
- type: 'tool-input-delta';
2112
- toolCallId: string;
2113
- inputTextDelta: string;
2114
- } | {
2115
- type: 'source-url';
2116
- sourceId: string;
2117
- url: string;
2118
- title?: string;
2230
+ type: 'tool-input-end';
2231
+ id: string;
2119
2232
  providerMetadata?: ProviderMetadata;
2120
2233
  } | {
2121
- type: 'source-document';
2122
- sourceId: string;
2123
- mediaType: string;
2124
- title: string;
2125
- filename?: string;
2234
+ type: 'tool-input-delta';
2235
+ id: string;
2236
+ delta: string;
2126
2237
  providerMetadata?: ProviderMetadata;
2127
- } | {
2238
+ } | ({
2239
+ type: 'source';
2240
+ } & Source) | {
2128
2241
  type: 'file';
2129
- url: string;
2130
- mediaType: string;
2131
- providerMetadata?: ProviderMetadata;
2132
- } | DataUIMessageChunk<DATA_TYPES> | {
2242
+ file: GeneratedFile;
2243
+ } | ({
2244
+ type: 'tool-call';
2245
+ } & TypedToolCall<TOOLS>) | ({
2246
+ type: 'tool-result';
2247
+ } & TypedToolResult<TOOLS>) | ({
2248
+ type: 'tool-error';
2249
+ } & TypedToolError<TOOLS>) | ({
2250
+ type: 'tool-output-denied';
2251
+ } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
2133
2252
  type: 'start-step';
2253
+ request: LanguageModelRequestMetadata;
2254
+ warnings: CallWarning[];
2134
2255
  } | {
2135
2256
  type: 'finish-step';
2257
+ response: LanguageModelResponseMetadata;
2258
+ usage: LanguageModelUsage;
2259
+ finishReason: FinishReason;
2260
+ rawFinishReason: string | undefined;
2261
+ providerMetadata: ProviderMetadata | undefined;
2136
2262
  } | {
2137
2263
  type: 'start';
2138
- messageId?: string;
2139
- messageMetadata?: METADATA;
2140
2264
  } | {
2141
2265
  type: 'finish';
2142
- finishReason?: FinishReason;
2143
- messageMetadata?: METADATA;
2266
+ finishReason: FinishReason;
2267
+ rawFinishReason: string | undefined;
2268
+ totalUsage: LanguageModelUsage;
2144
2269
  } | {
2145
2270
  type: 'abort';
2146
2271
  reason?: string;
2147
2272
  } | {
2148
- type: 'message-metadata';
2149
- messageMetadata: METADATA;
2273
+ type: 'error';
2274
+ error: unknown;
2275
+ } | {
2276
+ type: 'raw';
2277
+ rawValue: unknown;
2150
2278
  };
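
In this reworked TextStreamPart union, text-delta and reasoning-delta parts now carry a `text` field (formerly `delta`), while tool-input-delta keeps `delta` and is keyed by `id`. A minimal consumption sketch (illustration only; gateway string model id assumed):

// Sketch: iterating the full stream with the new part shapes.
import { streamText } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  prompt: 'Explain streaming in one paragraph.',
});

for await (const part of result.fullStream) {
  switch (part.type) {
    case 'text-delta':
      process.stdout.write(part.text); // `text` replaces the former `delta`
      break;
    case 'tool-input-delta':
      console.log(part.id, part.delta); // tool input deltas still use `delta`
      break;
    case 'finish':
      console.log(part.finishReason, part.totalUsage);
      break;
    case 'error':
      console.error(part.error);
      break;
  }
}
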
2151
- type InferUIMessageChunk<T extends UIMessage> = UIMessageChunk<InferUIMessageMetadata<T>, InferUIMessageData<T>>;
2152
2279
 
2153
- type UIMessageStreamOnFinishCallback<UI_MESSAGE extends UIMessage> = (event: {
2154
- /**
2155
- * The updated list of UI messages.
2156
- */
2157
- messages: UI_MESSAGE[];
2280
+ /**
2281
+ A transformation that is applied to the stream.
2282
+
2283
+ @param stopStream - A function that stops the source stream.
2284
+ @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
2285
+ */
2286
+ type StreamTextTransform<TOOLS extends ToolSet> = (options: {
2287
+ tools: TOOLS;
2288
+ stopStream: () => void;
2289
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
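
A minimal sketch of this transform contract (illustration only; the exported `smoothStream` is the typical ready-made transform): a pass-through that upper-cases text deltas and forwards every other part unchanged.

// Sketch: a custom transform for `experimental_transform`.
import { streamText, type StreamTextTransform, type ToolSet } from 'ai';

const upperCase: StreamTextTransform<ToolSet> = () =>
  new TransformStream({
    transform(part, controller) {
      controller.enqueue(
        part.type === 'text-delta'
          ? { ...part, text: part.text.toUpperCase() }
          : part,
      );
    },
  });

const result = streamText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  prompt: 'Say hello.',
  experimental_transform: upperCase, // or e.g. smoothStream()
});
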
2290
+ /**
2291
+ Callback that is set using the `onError` option.
2292
+
2293
+ @param event - The event that is passed to the callback.
2294
+ */
2295
+ type StreamTextOnErrorCallback = (event: {
2296
+ error: unknown;
2297
+ }) => PromiseLike<void> | void;
2298
+ /**
2299
+ Callback that is set using the `onStepFinish` option.
2300
+
2301
+ @param stepResult - The result of the step.
2302
+ */
2303
+ type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
2304
+ /**
2305
+ Callback that is set using the `onChunk` option.
2306
+
2307
+ @param event - The event that is passed to the callback.
2308
+ */
2309
+ type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2310
+ chunk: Extract<TextStreamPart<TOOLS>, {
2311
+ type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
2312
+ }>;
2313
+ }) => PromiseLike<void> | void;
2314
+ /**
2315
+ Callback that is set using the `onFinish` option.
2316
+
2317
+ @param event - The event that is passed to the callback.
2318
+ */
2319
+ type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
2158
2320
  /**
2159
- * Indicates whether the response message is a continuation of the last original message,
2160
- * or if a new message was created.
2321
+ * Details for all steps.
2161
2322
  */
2162
- isContinuation: boolean;
2323
+ readonly steps: StepResult<TOOLS>[];
2163
2324
  /**
2164
- * Indicates whether the stream was aborted.
2325
+ * Total usage for all steps. This is the sum of the usage of all steps.
2165
2326
  */
2166
- isAborted: boolean;
2327
+ readonly totalUsage: LanguageModelUsage;
2167
2328
  /**
2168
- * The message that was sent to the client as a response
2169
- * (including the original message if it was extended).
2329
+ * Context that is passed into tool execution.
2330
+ *
2331
+ * Experimental (can break in patch releases).
2332
+ *
2333
+ * @default undefined
2170
2334
  */
2171
- responseMessage: UI_MESSAGE;
2335
+ experimental_context: unknown;
2336
+ }) => PromiseLike<void> | void;
2337
+ /**
2338
+ Callback that is set using the `onAbort` option.
2339
+
2340
+ @param event - The event that is passed to the callback.
2341
+ */
2342
+ type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
2172
2343
  /**
2173
- * The reason why the generation finished.
2344
+ Details for all previously finished steps.
2174
2345
  */
2175
- finishReason?: FinishReason;
2346
+ readonly steps: StepResult<TOOLS>[];
2176
2347
  }) => PromiseLike<void> | void;
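
A minimal sketch wiring these callbacks together (illustration only; it assumes `consumeStream` remains available on the result, as in the previous interface, to drive the stream so that onFinish fires):

// Sketch: streaming callbacks on streamText.
import { streamText } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  prompt: 'Summarize the changelog.',
  onChunk({ chunk }) {
    if (chunk.type === 'text-delta') process.stdout.write(chunk.text);
  },
  onError({ error }) {
    console.error('stream error:', error);
  },
  onAbort({ steps }) {
    console.warn(`aborted after ${steps.length} step(s)`);
  },
  onFinish({ totalUsage, steps }) {
    console.log(`finished after ${steps.length} step(s)`, totalUsage);
  },
});

await result.consumeStream(); // force the stream to finish
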
2348
+ /**
2349
+ Generate a text and call tools for a given prompt using a language model.
2177
2350
 
2178
- type UIMessageStreamResponseInit = ResponseInit & {
2179
- consumeSseStream?: (options: {
2180
- stream: ReadableStream<string>;
2181
- }) => PromiseLike<void> | void;
2182
- };
2351
+ This function streams the output. If you do not want to stream the output, use `generateText` instead.
2183
2352
 
2184
- /**
2185
- * A type that combines AsyncIterable and ReadableStream.
2186
- * This allows a ReadableStream to be consumed using for-await-of syntax.
2187
- */
2188
- type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
2353
+ @param model - The language model to use.
2354
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2189
2355
 
2190
- type ErrorHandler = (error: unknown) => void;
2356
+ @param system - A system message that will be part of the prompt.
2357
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2358
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2191
2359
 
2192
- type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
2193
- /**
2194
- * The original messages. If they are provided, persistence mode is assumed,
2195
- * and a message ID is provided for the response message.
2196
- */
2197
- originalMessages?: UI_MESSAGE[];
2198
- /**
2199
- * Generate a message ID for the response message.
2200
- *
2201
- * If not provided, no message ID will be set for the response message (unless
2202
- * the original messages are provided and the last message is an assistant message).
2203
- */
2204
- generateMessageId?: IdGenerator;
2205
- onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
2360
+ @param maxOutputTokens - Maximum number of tokens to generate.
2361
+ @param temperature - Temperature setting.
2362
+ The value is passed through to the provider. The range depends on the provider and model.
2363
+ It is recommended to set either `temperature` or `topP`, but not both.
2364
+ @param topP - Nucleus sampling.
2365
+ The value is passed through to the provider. The range depends on the provider and model.
2366
+ It is recommended to set either `temperature` or `topP`, but not both.
2367
+ @param topK - Only sample from the top K options for each subsequent token.
2368
+ Used to remove "long tail" low probability responses.
2369
+ Recommended for advanced use cases only. You usually only need to use temperature.
2370
+ @param presencePenalty - Presence penalty setting.
2371
+ It affects the likelihood of the model repeating information that is already in the prompt.
2372
+ The value is passed through to the provider. The range depends on the provider and model.
2373
+ @param frequencyPenalty - Frequency penalty setting.
2374
+ It affects the likelihood of the model repeatedly using the same words or phrases.
2375
+ The value is passed through to the provider. The range depends on the provider and model.
2376
+ @param stopSequences - Stop sequences.
2377
+ If set, the model will stop generating text when one of the stop sequences is generated.
2378
+ @param seed - The seed (integer) to use for random sampling.
2379
+ If set and supported by the model, calls will generate deterministic results.
2380
+
2381
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2382
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2383
+ @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
2384
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2385
+
2386
+ @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2387
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
2388
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2389
+ @param onFinish - Callback that is called when all steps are finished and the response is complete.
2390
+
2391
+ @return
2392
+ A result object for accessing different stream types and additional information.
2393
+ */
2394
+ declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string, never>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
2206
2395
  /**
2207
- * Extracts message metadata that will be send to the client.
2208
- *
2209
- * Called on `start` and `finish` events.
2396
+ The language model to use.
2210
2397
  */
2211
- messageMetadata?: (options: {
2212
- part: TextStreamPart<ToolSet>;
2213
- }) => InferUIMessageMetadata<UI_MESSAGE> | undefined;
2398
+ model: LanguageModel;
2214
2399
  /**
2215
- * Send reasoning parts to the client.
2216
- * Default to true.
2217
- */
2218
- sendReasoning?: boolean;
2400
+ The tools that the model can call. The model needs to support calling tools.
2401
+ */
2402
+ tools?: TOOLS;
2219
2403
  /**
2220
- * Send source parts to the client.
2221
- * Default to false.
2404
+ The tool choice strategy. Default: 'auto'.
2222
2405
  */
2223
- sendSources?: boolean;
2406
+ toolChoice?: ToolChoice<TOOLS>;
2224
2407
  /**
2225
- * Send the finish event to the client.
2226
- * Set to false if you are using additional streamText calls
2227
- * that send additional data.
2228
- * Default to true.
2408
+ Condition for stopping the generation when there are tool results in the last step.
2409
+ When the condition is an array, any of the conditions can be met to stop the generation.
2410
+
2411
+ @default stepCountIs(1)
2229
2412
  */
2230
- sendFinish?: boolean;
2413
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
2231
2414
  /**
2232
- * Send the message start event to the client.
2233
- * Set to false if you are using additional streamText calls
2234
- * and the message start event has already been sent.
2235
- * Default to true.
2415
+ Optional telemetry configuration (experimental).
2236
2416
  */
2237
- sendStart?: boolean;
2417
+ experimental_telemetry?: TelemetrySettings;
2238
2418
  /**
2239
- * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
2240
- *
2241
- * @return error message to include in the data stream.
2242
- */
2243
- onError?: (error: unknown) => string;
2244
- };
2245
- type ConsumeStreamOptions = {
2246
- onError?: ErrorHandler;
2247
- };
2248
- /**
2249
- A result object for accessing different stream types and additional information.
2419
+ Additional provider-specific options. They are passed through
2420
+ to the provider from the AI SDK and enable provider-specific
2421
+ functionality that can be fully encapsulated in the provider.
2250
2422
  */
2251
- interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2423
+ providerOptions?: ProviderOptions;
2252
2424
  /**
2253
- The content that was generated in the last step.
2254
-
2255
- Automatically consumes the stream.
2425
+ * @deprecated Use `activeTools` instead.
2256
2426
  */
2257
- readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
2427
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2258
2428
  /**
2259
- The full text that has been generated by the last step.
2260
-
2261
- Automatically consumes the stream.
2262
- */
2263
- readonly text: PromiseLike<string>;
2429
+ Limits the tools that are available for the model to call without
2430
+ changing the tool call and result types in the result.
2431
+ */
2432
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
2264
2433
  /**
2265
- The full reasoning that the model has generated.
2266
-
2267
- Automatically consumes the stream.
2434
+ Optional specification for parsing structured outputs from the LLM response.
2268
2435
  */
2269
- readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
2436
+ output?: OUTPUT;
2270
2437
  /**
2271
- The reasoning that has been generated by the last step.
2272
-
2273
- Automatically consumes the stream.
2274
- */
2275
- readonly reasoningText: PromiseLike<string | undefined>;
2438
+ Optional specification for parsing structured outputs from the LLM response.
2439
+
2440
+ @deprecated Use `output` instead.
2441
+ */
2442
+ experimental_output?: OUTPUT;
2276
2443
  /**
2277
- Files that have been generated by the model in the last step.
2278
-
2279
- Automatically consumes the stream.
2280
- */
2281
- readonly files: PromiseLike<GeneratedFile[]>;
2444
+ Optional function that you can use to provide different settings for a step.
2445
+
2446
+ @param options - The options for the step.
2447
+ @param options.steps - The steps that have been executed so far.
2448
+ @param options.stepNumber - The number of the step that is being executed.
2449
+ @param options.model - The model that is being used.
2450
+
2451
+ @returns An object that contains the settings for the step.
2452
+ If you return undefined (or for undefined settings), the settings from the outer level will be used.
2453
+ */
2454
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2282
2455
  /**
2283
- Sources that have been used as references in the last step.
2284
-
2285
- Automatically consumes the stream.
2456
+ A function that attempts to repair a tool call that failed to parse.
2286
2457
  */
2287
- readonly sources: PromiseLike<Source[]>;
2288
- /**
2289
- The tool calls that have been executed in the last step.
2290
-
2291
- Automatically consumes the stream.
2292
- */
2293
- readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
2294
- /**
2295
- The static tool calls that have been executed in the last step.
2296
-
2297
- Automatically consumes the stream.
2298
- */
2299
- readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
2300
- /**
2301
- The dynamic tool calls that have been executed in the last step.
2302
-
2303
- Automatically consumes the stream.
2304
- */
2305
- readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
2306
- /**
2307
- The static tool results that have been generated in the last step.
2308
-
2309
- Automatically consumes the stream.
2310
- */
2311
- readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
2312
- /**
2313
- The dynamic tool results that have been generated in the last step.
2314
-
2315
- Automatically consumes the stream.
2316
- */
2317
- readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
2458
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2318
2459
  /**
2319
- The tool results that have been generated in the last step.
2320
-
2321
- Automatically consumes the stream.
2460
+ Optional stream transformations.
2461
+ They are applied in the order they are provided.
2462
+ The stream transformations must maintain the stream structure for streamText to work correctly.
2322
2463
  */
2323
- readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
2464
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2324
2465
  /**
2325
- * The unified finish reason why the generation finished. Taken from the last step.
2326
- *
2327
- * Automatically consumes the stream.
2466
+ Custom download function to use for URLs.
2467
+
2468
+ By default, files are downloaded if the model does not support the URL for the given media type.
2328
2469
  */
2329
- readonly finishReason: PromiseLike<FinishReason>;
2470
+ experimental_download?: DownloadFunction | undefined;
2330
2471
  /**
2331
- * The raw reason why the generation finished (from the provider). Taken from the last step.
2332
- *
2333
- * Automatically consumes the stream.
2472
+ Whether to include raw chunks from the provider in the stream.
2473
+ When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
2474
+ This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
2475
+ Defaults to false.
2334
2476
  */
2335
- readonly rawFinishReason: PromiseLike<string | undefined>;
2477
+ includeRawChunks?: boolean;
2336
2478
  /**
2337
- The token usage of the last step.
2338
-
2339
- Automatically consumes the stream.
2479
+ Callback that is called for each chunk of the stream.
2480
+ The stream processing will pause until the callback promise is resolved.
2340
2481
  */
2341
- readonly usage: PromiseLike<LanguageModelUsage>;
2342
- /**
2343
- The total token usage of the generated response.
2344
- When there are multiple steps, the usage is the sum of all step usages.
2345
-
2346
- Automatically consumes the stream.
2347
- */
2348
- readonly totalUsage: PromiseLike<LanguageModelUsage>;
2349
- /**
2350
- Warnings from the model provider (e.g. unsupported settings) for the first step.
2351
-
2352
- Automatically consumes the stream.
2353
- */
2354
- readonly warnings: PromiseLike<CallWarning[] | undefined>;
2482
+ onChunk?: StreamTextOnChunkCallback<TOOLS>;
2355
2483
  /**
2356
- Details for all steps.
2357
- You can use this to get information about intermediate steps,
2358
- such as the tool calls or the response headers.
2359
-
2360
- Automatically consumes the stream.
2484
+ Callback that is invoked when an error occurs during streaming.
2485
+ You can use it to log errors.
2486
+ The stream processing will pause until the callback promise is resolved.
2361
2487
  */
2362
- readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
2363
- /**
2364
- Additional request information from the last step.
2365
-
2366
- Automatically consumes the stream.
2367
- */
2368
- readonly request: PromiseLike<LanguageModelRequestMetadata>;
2369
- /**
2370
- Additional response information from the last step.
2371
-
2372
- Automatically consumes the stream.
2373
- */
2374
- readonly response: PromiseLike<LanguageModelResponseMetadata & {
2375
- /**
2376
- The response messages that were generated during the call. It consists of an assistant message,
2377
- potentially containing tool calls.
2378
-
2379
- When there are tool results, there is an additional tool message with the tool results that are available.
2380
- If there are tools that do not have execute functions, they are not included in the tool results and
2381
- need to be added separately.
2382
- */
2383
- messages: Array<ResponseMessage>;
2384
- }>;
2488
+ onError?: StreamTextOnErrorCallback;
2385
2489
  /**
2386
- Additional provider-specific metadata from the last step.
2387
- Metadata is passed through from the provider to the AI SDK and
2388
- enables provider-specific results that can be fully encapsulated in the provider.
2490
+ Callback that is called when the LLM response and all request tool executions
2491
+ (for tools that have an `execute` function) are finished.
2492
+
2493
+ The usage is the combined usage of all steps.
2389
2494
  */
2390
- readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
2391
- /**
2392
- A text stream that returns only the generated text deltas. You can use it
2393
- as either an AsyncIterable or a ReadableStream. When an error occurs, the
2394
- stream will throw the error.
2395
- */
2396
- readonly textStream: AsyncIterableStream<string>;
2495
+ onFinish?: StreamTextOnFinishCallback<TOOLS>;
2496
+ onAbort?: StreamTextOnAbortCallback<TOOLS>;
2397
2497
  /**
2398
- A stream with all events, including text deltas, tool calls, tool results, and
2399
- errors.
2400
- You can use it as either an AsyncIterable or a ReadableStream.
2401
- Only errors that stop the stream, such as network errors, are thrown.
2402
- */
2403
- readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
2498
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
2499
+ */
2500
+ onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
2404
2501
  /**
2405
- * A stream of partial outputs. It uses the `output` specification.
2502
+ * Context that is passed into tool execution.
2406
2503
  *
2407
- * @deprecated Use `partialOutputStream` instead.
2504
+ * Experimental (can break in patch releases).
2505
+ *
2506
+ * @default undefined
2408
2507
  */
2409
- readonly experimental_partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
2508
+ experimental_context?: unknown;
2410
2509
  /**
2411
- * A stream of partial parsed outputs. It uses the `output` specification.
2510
+ Internal. For test use only. May change without notice.
2412
2511
  */
2413
- readonly partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
2512
+ _internal?: {
2513
+ now?: () => number;
2514
+ generateId?: IdGenerator;
2515
+ };
2516
+ }): StreamTextResult<TOOLS, OUTPUT>;
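
A minimal sketch of a multi-step tool loop using this signature (illustration only; the weather tool is hypothetical and zod is an assumed dependency for the input schema):

// Sketch: tools plus stopWhen for a bounded tool loop.
import { streamText, tool, stepCountIs } from 'ai';
import { z } from 'zod'; // assumed dependency

const result = streamText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  tools: {
    weather: tool({
      description: 'Get the current weather for a city',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result
    }),
  },
  stopWhen: stepCountIs(5), // stop after at most 5 LLM steps
  prompt: 'What is the weather in Berlin?',
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}
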
2517
+ type EnrichedStreamPart<TOOLS extends ToolSet, PARTIAL_OUTPUT> = {
2518
+ part: TextStreamPart<TOOLS>;
2519
+ partialOutput: PARTIAL_OUTPUT | undefined;
2520
+ };
2521
+
2522
+ interface Output<OUTPUT = any, PARTIAL = any, ELEMENT = any> {
2414
2523
  /**
2415
- * The complete parsed output. It uses the `output` specification.
2524
+ * The name of the output mode.
2416
2525
  */
2417
- readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
2418
- /**
2419
- Consumes the stream without processing the parts.
2420
- This is useful to force the stream to finish.
2421
- It effectively removes the backpressure and allows the stream to finish,
2422
- triggering the `onFinish` callback and the promise resolution.
2423
-
2424
- If an error occurs, it is passed to the optional `onError` callback.
2425
- */
2426
- consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
2526
+ name: string;
2427
2527
  /**
2428
- Converts the result to a UI message stream.
2429
-
2430
- @return A UI message stream.
2431
- */
2432
- toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
2433
- /**
2434
- *Writes UI message stream output to a Node.js response-like object.
2528
+ * The response format to use for the model.
2435
2529
  */
2436
- pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
2530
+ responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
2437
2531
  /**
2438
- Writes text delta output to a Node.js response-like object.
2439
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2440
- writes each text delta as a separate chunk.
2441
-
2442
- @param response A Node.js response-like object (ServerResponse).
2443
- @param init Optional headers, status code, and status text.
2444
- */
2445
- pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
2532
+ * Parses the complete output of the model.
2533
+ */
2534
+ parseCompleteOutput(options: {
2535
+ text: string;
2536
+ }, context: {
2537
+ response: LanguageModelResponseMetadata;
2538
+ usage: LanguageModelUsage;
2539
+ finishReason: FinishReason;
2540
+ }): Promise<OUTPUT>;
2446
2541
  /**
2447
- Converts the result to a streamed response object with a stream data part stream.
2448
-
2449
- @return A response object.
2450
- */
2451
- toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
2542
+ * Parses the partial output of the model.
2543
+ */
2544
+ parsePartialOutput(options: {
2545
+ text: string;
2546
+ }): Promise<{
2547
+ partial: PARTIAL;
2548
+ } | undefined>;
2452
2549
  /**
2453
- Creates a simple text stream response.
2454
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
2455
- Non-text-delta events are ignored.
2456
- @param init Optional headers, status code, and status text.
2457
- */
2458
- toTextStreamResponse(init?: ResponseInit): Response;
2550
+ * Creates a stream transform that emits individual elements as they complete.
2551
+ */
2552
+ createElementStreamTransform(): TransformStream<EnrichedStreamPart<any, PARTIAL>, ELEMENT> | undefined;
2459
2553
  }
2460
- type TextStreamPart<TOOLS extends ToolSet> = {
2461
- type: 'text-start';
2462
- id: string;
2463
- providerMetadata?: ProviderMetadata;
2464
- } | {
2465
- type: 'text-end';
2466
- id: string;
2467
- providerMetadata?: ProviderMetadata;
2468
- } | {
2469
- type: 'text-delta';
2470
- id: string;
2471
- providerMetadata?: ProviderMetadata;
2472
- text: string;
2473
- } | {
2474
- type: 'reasoning-start';
2475
- id: string;
2476
- providerMetadata?: ProviderMetadata;
2477
- } | {
2478
- type: 'reasoning-end';
2479
- id: string;
2480
- providerMetadata?: ProviderMetadata;
2481
- } | {
2482
- type: 'reasoning-delta';
2483
- providerMetadata?: ProviderMetadata;
2484
- id: string;
2485
- text: string;
2486
- } | {
2487
- type: 'tool-input-start';
2488
- id: string;
2489
- toolName: string;
2490
- providerMetadata?: ProviderMetadata;
2491
- providerExecuted?: boolean;
2492
- dynamic?: boolean;
2493
- title?: string;
2494
- } | {
2495
- type: 'tool-input-end';
2496
- id: string;
2497
- providerMetadata?: ProviderMetadata;
2498
- } | {
2499
- type: 'tool-input-delta';
2500
- id: string;
2501
- delta: string;
2502
- providerMetadata?: ProviderMetadata;
2503
- } | ({
2504
- type: 'source';
2505
- } & Source) | {
2506
- type: 'file';
2507
- file: GeneratedFile;
2508
- } | ({
2509
- type: 'tool-call';
2510
- } & TypedToolCall<TOOLS>) | ({
2511
- type: 'tool-result';
2512
- } & TypedToolResult<TOOLS>) | ({
2513
- type: 'tool-error';
2514
- } & TypedToolError<TOOLS>) | ({
2515
- type: 'tool-output-denied';
2516
- } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
2517
- type: 'start-step';
2518
- request: LanguageModelRequestMetadata;
2519
- warnings: CallWarning[];
2520
- } | {
2521
- type: 'finish-step';
2522
- response: LanguageModelResponseMetadata;
2523
- usage: LanguageModelUsage;
2524
- finishReason: FinishReason;
2525
- rawFinishReason: string | undefined;
2526
- providerMetadata: ProviderMetadata | undefined;
2527
- } | {
2528
- type: 'start';
2529
- } | {
2530
- type: 'finish';
2531
- finishReason: FinishReason;
2532
- rawFinishReason: string | undefined;
2533
- totalUsage: LanguageModelUsage;
2534
- } | {
2535
- type: 'abort';
2536
- reason?: string;
2537
- } | {
2538
- type: 'error';
2539
- error: unknown;
2540
- } | {
2541
- type: 'raw';
2542
- rawValue: unknown;
2543
- };
2544
-
2545
- /**
2546
- A transformation that is applied to the stream.
2547
-
2548
- @param stopStream - A function that stops the source stream.
2549
- @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
2550
- */
2551
- type StreamTextTransform<TOOLS extends ToolSet> = (options: {
2552
- tools: TOOLS;
2553
- stopStream: () => void;
2554
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
2555
- /**
2556
- Callback that is set using the `onError` option.
2557
-
2558
- @param event - The event that is passed to the callback.
2559
- */
2560
- type StreamTextOnErrorCallback = (event: {
2561
- error: unknown;
2562
- }) => PromiseLike<void> | void;
2563
2554
  /**
2564
- Callback that is set using the `onStepFinish` option.
2565
-
2566
- @param stepResult - The result of the step.
2555
+ * Output specification for text generation.
2556
+ * This is the default output mode that generates plain text.
2557
+ *
2558
+ * @returns An output specification for generating text.
2567
2559
  */
2568
- type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
2560
+ declare const text: () => Output<string, string, never>;
2569
2561
  /**
2570
- Callback that is set using the `onChunk` option.
2571
-
2572
- @param event - The event that is passed to the callback.
2562
+ * Output specification for typed object generation using schemas.
2563
+ * When the model generates a text response, it will return an object that matches the schema.
2564
+ *
2565
+ * @param schema - The schema of the object to generate.
2566
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2567
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2568
+ *
2569
+ * @returns An output specification for generating objects with the specified schema.
2573
2570
  */
2574
- type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2575
- chunk: Extract<TextStreamPart<TOOLS>, {
2576
- type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
2577
- }>;
2578
- }) => PromiseLike<void> | void;
2571
+ declare const object: <OBJECT>({ schema: inputSchema, name, description, }: {
2572
+ schema: FlexibleSchema<OBJECT>;
2573
+ /**
2574
+ * Optional name of the output that should be generated.
2575
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2576
+ */
2577
+ name?: string;
2578
+ /**
2579
+ * Optional description of the output that should be generated.
2580
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2581
+ */
2582
+ description?: string;
2583
+ }) => Output<OBJECT, DeepPartial<OBJECT>, never>;
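
A minimal sketch of typed object output with streamed partials (illustration only; zod is an assumed dependency, and `Output` refers to the output namespace exported further below):

// Sketch: schema-typed output with partial streaming.
import { streamText, Output } from 'ai';
import { z } from 'zod'; // assumed dependency

const result = streamText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  prompt: 'Generate a fictional contact.',
  output: Output.object({
    name: 'contact',
    schema: z.object({ name: z.string(), email: z.string() }),
  }),
});

for await (const partial of result.partialOutputStream) {
  console.log(partial); // DeepPartial<{ name: string; email: string }>
}

console.log(await result.output); // the complete parsed object
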
2579
2584
  /**
2580
- Callback that is set using the `onFinish` option.
2581
-
2582
- @param event - The event that is passed to the callback.
2585
+ * Output specification for array generation.
2586
+ * When the model generates a text response, it will return an array of elements.
2587
+ *
2588
+ * @param element - The schema of the array elements to generate.
2589
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2590
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2591
+ *
2592
+ * @returns An output specification for generating an array of elements.
2583
2593
  */
2584
- type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
2594
+ declare const array: <ELEMENT>({ element: inputElementSchema, name, description, }: {
2595
+ element: FlexibleSchema<ELEMENT>;
2585
2596
  /**
2586
- * Details for all steps.
2597
+ * Optional name of the output that should be generated.
2598
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2587
2599
  */
2588
- readonly steps: StepResult<TOOLS>[];
2600
+ name?: string;
2589
2601
  /**
2590
- * Total usage for all steps. This is the sum of the usage of all steps.
2602
+ * Optional description of the output that should be generated.
2603
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2591
2604
  */
2592
- readonly totalUsage: LanguageModelUsage;
2605
+ description?: string;
2606
+ }) => Output<Array<ELEMENT>, Array<ELEMENT>, ELEMENT>;
2607
+ /**
2608
+ * Output specification for choice generation.
2609
+ * When the model generates a text response, it will return one of the choice options.
2610
+ *
2611
+ * @param options - The available choices.
2612
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2613
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2614
+ *
2615
+ * @returns An output specification for generating a choice.
2616
+ */
2617
+ declare const choice: <CHOICE extends string>({ options: choiceOptions, name, description, }: {
2618
+ options: Array<CHOICE>;
2593
2619
  /**
2594
- * Context that is passed into tool execution.
2595
- *
2596
- * Experimental (can break in patch releases).
2597
- *
2598
- * @default undefined
2620
+ * Optional name of the output that should be generated.
2621
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2622
+ */
2623
+ name?: string;
2624
+ /**
2625
+ * Optional description of the output that should be generated.
2626
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2599
2627
  */
2600
- experimental_context: unknown;
2601
- }) => PromiseLike<void> | void;
2628
+ description?: string;
2629
+ }) => Output<CHOICE, CHOICE, never>;
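
A minimal sketch of the choice mode (illustration only; generateText is assumed to accept the same `output` option, as the GenerateTextResult interface below suggests):

// Sketch: constraining the model to fixed labels.
import { generateText, Output } from 'ai';

const { output } = await generateText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  prompt: 'Classify the sentiment: "This release is fantastic!"',
  output: Output.choice<'positive' | 'neutral' | 'negative'>({
    options: ['positive', 'neutral', 'negative'],
  }),
});

console.log(output); // typed as 'positive' | 'neutral' | 'negative'
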
2602
2630
  /**
2603
- Callback that is set using the `onAbort` option.
2604
-
2605
- @param event - The event that is passed to the callback.
2631
+ * Output specification for unstructured JSON generation.
2632
+ * When the model generates a text response, it will return a JSON object.
2633
+ *
2634
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2635
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2636
+ *
2637
+ * @returns An output specification for generating JSON.
2606
2638
  */
2607
- type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
2639
+ declare const json: ({ name, description, }?: {
2608
2640
  /**
2609
- Details for all previously finished steps.
2641
+ * Optional name of the output that should be generated.
2642
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2610
2643
  */
2611
- readonly steps: StepResult<TOOLS>[];
2612
- }) => PromiseLike<void> | void;
2613
- /**
2614
- Generate a text and call tools for a given prompt using a language model.
2615
-
2616
- This function streams the output. If you do not want to stream the output, use `generateText` instead.
2617
-
2618
- @param model - The language model to use.
2619
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2620
-
2621
- @param system - A system message that will be part of the prompt.
2622
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2623
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2624
-
2625
- @param maxOutputTokens - Maximum number of tokens to generate.
2626
- @param temperature - Temperature setting.
2627
- The value is passed through to the provider. The range depends on the provider and model.
2628
- It is recommended to set either `temperature` or `topP`, but not both.
2629
- @param topP - Nucleus sampling.
2630
- The value is passed through to the provider. The range depends on the provider and model.
2631
- It is recommended to set either `temperature` or `topP`, but not both.
2632
- @param topK - Only sample from the top K options for each subsequent token.
2633
- Used to remove "long tail" low probability responses.
2634
- Recommended for advanced use cases only. You usually only need to use temperature.
2635
- @param presencePenalty - Presence penalty setting.
2636
- It affects the likelihood of the model to repeat information that is already in the prompt.
2637
- The value is passed through to the provider. The range depends on the provider and model.
2638
- @param frequencyPenalty - Frequency penalty setting.
2639
- It affects the likelihood of the model to repeatedly use the same words or phrases.
2640
- The value is passed through to the provider. The range depends on the provider and model.
2641
- @param stopSequences - Stop sequences.
2642
- If set, the model will stop generating text when one of the stop sequences is generated.
2643
- @param seed - The seed (integer) to use for random sampling.
2644
- If set and supported by the model, calls will generate deterministic results.
2645
-
2646
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2647
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2648
- @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
2649
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2644
+ name?: string;
2645
+ /**
2646
+ * Optional description of the output that should be generated.
2647
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2648
+ */
2649
+ description?: string;
2650
+ }) => Output<JSONValue$1, JSONValue$1, never>;
2650
2651
 
2651
- @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2652
- @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
2653
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2654
- @param onFinish - Callback that is called when all steps are finished and the response is complete.
2652
+ type output_Output<OUTPUT = any, PARTIAL = any, ELEMENT = any> = Output<OUTPUT, PARTIAL, ELEMENT>;
2653
+ declare const output_array: typeof array;
2654
+ declare const output_choice: typeof choice;
2655
+ declare const output_json: typeof json;
2656
+ declare const output_object: typeof object;
2657
+ declare const output_text: typeof text;
2658
+ declare namespace output {
2659
+ export {
2660
+ output_Output as Output,
2661
+ output_array as array,
2662
+ output_choice as choice,
2663
+ output_json as json,
2664
+ output_object as object,
2665
+ output_text as text,
2666
+ };
2667
+ }
2655
2668
 
2656
- @return
2657
- A result object for accessing different stream types and additional information.
2669
+ /**
2670
+ The result of a `generateText` call.
2671
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
2658
2672
  */
2659
- declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
2673
+ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2660
2674
  /**
2661
- The language model to use.
2675
+ The content that was generated in the last step.
2662
2676
  */
2663
- model: LanguageModel;
2677
+ readonly content: Array<ContentPart<TOOLS>>;
2664
2678
  /**
2665
- The tools that the model can call. The model needs to support calling tools.
2666
- */
2667
- tools?: TOOLS;
2679
+ The text that was generated in the last step.
2680
+ */
2681
+ readonly text: string;
2668
2682
  /**
2669
- The tool choice strategy. Default: 'auto'.
2683
+ The full reasoning that the model has generated in the last step.
2670
2684
  */
2671
- toolChoice?: ToolChoice<TOOLS>;
2685
+ readonly reasoning: Array<ReasoningOutput>;
2672
2686
  /**
2673
- Condition for stopping the generation when there are tool results in the last step.
2674
- When the condition is an array, any of the conditions can be met to stop the generation.
2675
-
2676
- @default stepCountIs(1)
2687
+ The reasoning text that the model has generated in the last step. Can be undefined if the model
2688
+ has only generated text.
2677
2689
  */
2678
- stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
2690
+ readonly reasoningText: string | undefined;
2679
2691
  /**
2680
- Optional telemetry configuration (experimental).
2692
+ The files that were generated in the last step.
2693
+ Empty array if no files were generated.
2694
+ */
2695
+ readonly files: Array<GeneratedFile>;
2696
+ /**
2697
+ Sources that have been used as references in the last step.
2681
2698
  */
2682
- experimental_telemetry?: TelemetrySettings;
2699
+ readonly sources: Array<Source>;
2683
2700
  /**
2684
- Additional provider-specific options. They are passed through
2685
- to the provider from the AI SDK and enable provider-specific
2686
- functionality that can be fully encapsulated in the provider.
2687
- */
2688
- providerOptions?: ProviderOptions;
2701
+ The tool calls that were made in the last step.
2702
+ */
2703
+ readonly toolCalls: Array<TypedToolCall<TOOLS>>;
2689
2704
  /**
2690
- * @deprecated Use `activeTools` instead.
2705
+ The static tool calls that were made in the last step.
2691
2706
  */
2692
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2707
+ readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
2693
2708
  /**
2694
- Limits the tools that are available for the model to call without
2695
- changing the tool call and result types in the result.
2696
- */
2697
- activeTools?: Array<keyof NoInfer<TOOLS>>;
2709
+ The dynamic tool calls that were made in the last step.
2710
+ */
2711
+ readonly dynamicToolCalls: Array<DynamicToolCall>;
2698
2712
  /**
2699
- Optional specification for parsing structured outputs from the LLM response.
2713
+ The results of the tool calls from the last step.
2700
2714
  */
2701
- output?: OUTPUT;
2715
+ readonly toolResults: Array<TypedToolResult<TOOLS>>;
2702
2716
  /**
2703
- Optional specification for parsing structured outputs from the LLM response.
2704
-
2705
- @deprecated Use `output` instead.
2706
- */
2707
- experimental_output?: OUTPUT;
2717
+ The static tool results that were generated in the last step.
2718
+ */
2719
+ readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
2708
2720
  /**
2709
- Optional function that you can use to provide different settings for a step.
2710
-
2711
- @param options - The options for the step.
2712
- @param options.steps - The steps that have been executed so far.
2713
- @param options.stepNumber - The number of the step that is being executed.
2714
- @param options.model - The model that is being used.
2715
-
2716
- @returns An object that contains the settings for the step.
2717
- If you return undefined (or for undefined settings), the settings from the outer level will be used.
2718
- */
2719
- prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2721
+ The dynamic tool results that were made in the last step.
2722
+ */
2723
+ readonly dynamicToolResults: Array<DynamicToolResult>;
2720
2724
  /**
2721
- A function that attempts to repair a tool call that failed to parse.
2725
+ * The unified reason why the generation finished.
2722
2726
  */
2723
- experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2727
+ readonly finishReason: FinishReason;
2724
2728
  /**
2725
- Optional stream transformations.
2726
- They are applied in the order they are provided.
2727
- The stream transformations must maintain the stream structure for streamText to work correctly.
2729
+ * The raw reason why the generation finished (from the provider).
2728
2730
  */
2729
- experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2731
+ readonly rawFinishReason: string | undefined;
2730
2732
  /**
2731
- Custom download function to use for URLs.
2732
-
2733
- By default, files are downloaded if the model does not support the URL for the given media type.
2733
+ The token usage of the last step.
2734
2734
  */
2735
- experimental_download?: DownloadFunction | undefined;
2735
+ readonly usage: LanguageModelUsage;
2736
2736
  /**
2737
- Whether to include raw chunks from the provider in the stream.
2738
- When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
2739
- This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
2740
- Defaults to false.
2737
+ The total token usage of all steps.
2738
+ When there are multiple steps, the usage is the sum of all step usages.
2741
2739
  */
2742
- includeRawChunks?: boolean;
2740
+ readonly totalUsage: LanguageModelUsage;
2743
2741
  /**
2744
- Callback that is called for each chunk of the stream.
2745
- The stream processing will pause until the callback promise is resolved.
2742
+ Warnings from the model provider (e.g. unsupported settings)
2746
2743
  */
2747
- onChunk?: StreamTextOnChunkCallback<TOOLS>;
2744
+ readonly warnings: CallWarning[] | undefined;
2748
2745
  /**
2749
- Callback that is invoked when an error occurs during streaming.
2750
- You can use it to log errors.
2751
- The stream processing will pause until the callback promise is resolved.
2746
+ Additional request information.
2752
2747
  */
2753
- onError?: StreamTextOnErrorCallback;
2748
+ readonly request: LanguageModelRequestMetadata;
2754
2749
  /**
2755
- Callback that is called when the LLM response and all request tool executions
2756
- (for tools that have an `execute` function) are finished.
2757
-
2758
- The usage is the combined usage of all steps.
2750
+ Additional response information.
2759
2751
  */
2760
- onFinish?: StreamTextOnFinishCallback<TOOLS>;
2761
- onAbort?: StreamTextOnAbortCallback<TOOLS>;
2752
+ readonly response: LanguageModelResponseMetadata & {
2753
+ /**
2754
+ The response messages that were generated during the call. It consists of an assistant message,
2755
+ potentially containing tool calls.
2756
+
2757
+ When there are tool results, there is an additional tool message with the tool results that are available.
2758
+ If there are tools that do not have execute functions, they are not included in the tool results and
2759
+ need to be added separately.
2760
+ */
2761
+ messages: Array<ResponseMessage>;
2762
+ /**
2763
+ Response body (available only for providers that use HTTP requests).
2764
+ */
2765
+ body?: unknown;
2766
+ };
2762
2767
  /**
2763
- Callback that is called when each step (LLM call) is finished, including intermediate steps.
2764
- */
2765
- onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
2768
+ Additional provider-specific metadata. It is passed through
2769
+ from the provider to the AI SDK and enables provider-specific
2770
+ results that can be fully encapsulated in the provider.
2771
+ */
2772
+ readonly providerMetadata: ProviderMetadata | undefined;
2766
2773
  /**
2767
- * Context that is passed into tool execution.
2768
- *
2769
- * Experimental (can break in patch releases).
2770
- *
2771
- * @default undefined
2774
+ Details for all steps.
2775
+ You can use this to get information about intermediate steps,
2776
+ such as the tool calls or the response headers.
2772
2777
  */
2773
- experimental_context?: unknown;
2778
+ readonly steps: Array<StepResult<TOOLS>>;
2774
2779
  /**
2775
- Internal. For test use only. May change without notice.
2780
+ The generated structured output. It uses the `output` specification.
2781
+
2782
+ @deprecated Use `output` instead.
2776
2783
  */
2777
- _internal?: {
2778
- now?: () => number;
2779
- generateId?: IdGenerator;
2780
- };
2781
- }): StreamTextResult<TOOLS, OUTPUT>;
2784
+ readonly experimental_output: InferCompleteOutput<OUTPUT>;
2785
+ /**
2786
+ The generated structured output. It uses the `output` specification.
2787
+
2788
+ */
2789
+ readonly output: InferCompleteOutput<OUTPUT>;
2790
+ }
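
A minimal sketch reading the result fields described above (illustration only; gateway string model id assumed):

// Sketch: inspecting a GenerateTextResult.
import { generateText } from 'ai';

const result = await generateText({
  model: 'openai/gpt-4o', // hypothetical gateway model id
  prompt: 'Write a haiku about diffs.',
});

console.log(result.text);            // text of the last step
console.log(result.finishReason);    // unified finish reason
console.log(result.rawFinishReason); // provider-reported reason, if any
console.log(result.totalUsage);      // usage summed across all steps
console.log(result.steps.length);    // number of LLM steps executed
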
2782
2791
 
2783
2792
  /**
2784
2793
  * Parameters for calling an agent.
@@ -5560,4 +5569,4 @@ declare global {
5560
5569
  var AI_SDK_LOG_WARNINGS: LogWarningsFunction | undefined | false;
5561
5570
  }
5562
5571
 
5563
- export { AbstractChat, Agent, AgentCallParameters, AgentStreamParameters, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, ContentPart, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DirectChatTransport, DirectChatTransportOptions, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelMiddleware, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateImageResult, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageModelMiddleware, ImageModelProviderMetadata, ImageModelResponseMetadata, ImageModelUsage, InferAgentUIMessage, InferCompleteOutput as InferGenerateOutput, InferPartialOutput as InferStreamOutput, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolApprovalError, InvalidToolInputError, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LogWarningsFunction, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RerankResult, RerankingModel, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, TimeoutConfiguration, ToolApprovalRequestOutput, ToolCallNotFoundForApprovalError, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, 
UnsupportedModelVersionError, UseCompletionOptions, Warning, addToolInputExamplesMiddleware, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToModelMessages, cosineSimilarity, createAgentUIStream, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultEmbeddingSettingsMiddleware, defaultSettingsMiddleware, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateImage, generateObject, generateText, getStaticToolName, getStepTimeoutMs, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, getTotalTimeoutMs, hasToolCall, isDataUIPart, isDeepEqualData, isFileUIPart, isReasoningUIPart, isStaticToolUIPart, isTextUIPart, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeAgentUIStreamToResponse, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, rerank, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapEmbeddingModel, wrapImageModel, wrapLanguageModel, wrapProvider };
5572
+ export { AbstractChat, Agent, AgentCallParameters, AgentStreamParameters, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, ContentPart, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DirectChatTransport, DirectChatTransportOptions, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelMiddleware, EmbeddingModelUsage, ErrorHandler, ToolLoopAgent as Experimental_Agent, ToolLoopAgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FileUIPart, FinishReason, GenerateImageResult, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageModelMiddleware, ImageModelProviderMetadata, ImageModelResponseMetadata, ImageModelUsage, InferAgentUIMessage, InferCompleteOutput as InferGenerateOutput, InferPartialOutput as InferStreamOutput, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolApprovalError, InvalidToolInputError, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, LogWarningsFunction, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RerankResult, RerankingModel, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, TimeoutConfiguration, ToolApprovalRequestOutput, ToolCallNotFoundForApprovalError, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolLoopAgent, ToolLoopAgentOnFinishCallback, ToolLoopAgentOnStepFinishCallback, ToolLoopAgentSettings, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, 
UnsupportedModelVersionError, UseCompletionOptions, Warning, addToolInputExamplesMiddleware, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToModelMessages, cosineSimilarity, createAgentUIStream, createAgentUIStreamResponse, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultEmbeddingSettingsMiddleware, defaultSettingsMiddleware, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateImage, generateObject, generateText, getStaticToolName, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDataUIPart, isDeepEqualData, isFileUIPart, isReasoningUIPart, isStaticToolUIPart, isTextUIPart, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeAgentUIStreamToResponse, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, pruneMessages, readUIMessageStream, rerank, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapEmbeddingModel, wrapImageModel, wrapLanguageModel, wrapProvider };