ai 6.0.24 → 6.0.25

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -1,10 +1,10 @@
  import { GatewayModelId } from '@ai-sdk/gateway';
  export { GatewayModelId, createGateway, gateway } from '@ai-sdk/gateway';
  import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
- import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, AssistantModelMessage, ToolModelMessage, ReasoningPart, SystemModelMessage, ModelMessage, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
+ import { Tool, InferToolInput, InferToolOutput, FlexibleSchema, InferSchema, SystemModelMessage, ModelMessage, AssistantModelMessage, ToolModelMessage, ReasoningPart, ProviderOptions, UserModelMessage, IdGenerator, ToolCall, MaybePromiseLike, TextPart, FilePart, Resolvable, FetchFunction, DataContent } from '@ai-sdk/provider-utils';
  export { AssistantContent, AssistantModelMessage, DataContent, DownloadError, FilePart, FlexibleSchema, IdGenerator, ImagePart, InferSchema, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolExecutionOptions, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
  import * as _ai_sdk_provider from '@ai-sdk/provider';
- import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, ImageModelV3Middleware, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+ import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, EmbeddingModelV3Middleware, ImageModelV3, ImageModelV2, ImageModelV3ProviderMetadata, ImageModelV2ProviderMetadata, ImageModelV3Middleware, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, SharedV3Warning, LanguageModelV3Source, LanguageModelV3Middleware, RerankingModelV3, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV2, TranscriptionModelV3, TranscriptionModelV2, JSONObject, ImageModelV3Usage, AISDKError, LanguageModelV3ToolCall, JSONSchema7, LanguageModelV3CallOptions, JSONParseError, TypeValidationError, EmbeddingModelV3CallOptions, ProviderV3, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, LoadSettingError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { AttributeValue, Tracer } from '@opentelemetry/api';
  import { ServerResponse } from 'node:http';
@@ -569,188 +569,228 @@ type PartialObject<ObjectType extends object> = {
  [KeyType in keyof ObjectType]?: DeepPartialInternal<ObjectType[KeyType]>;
  };

- interface Output<OUTPUT = any, PARTIAL = any> {
+ /**
+ Timeout configuration for API calls. Can be specified as:
+ - A number representing milliseconds
+ - An object with `totalMs` property for the total timeout in milliseconds
+ - An object with `stepMs` property for the timeout of each step in milliseconds
+ - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
+ */
+ type TimeoutConfiguration = number | {
+ totalMs?: number;
+ stepMs?: number;
+ chunkMs?: number;
+ };
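The `TimeoutConfiguration` type and the `timeout` call setting are new in 6.0.25. A minimal usage sketch, not part of the diff; the model id is a placeholder:

    import { generateText } from 'ai';

    // Whole-call timeout as a plain number of milliseconds.
    const quick = await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Hello!',
      timeout: 30_000,
    });

    // Or as an object: a total budget plus a per-step limit.
    const budgeted = await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Hello!',
      timeout: { totalMs: 60_000, stepMs: 20_000 }, // chunkMs applies to streaming only
    });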
+ type CallSettings = {
  /**
- * The response format to use for the model.
+ Maximum number of tokens to generate.
  */
- responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
+ maxOutputTokens?: number;
  /**
- * Parses the complete output of the model.
+ Temperature setting. The range depends on the provider and model.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
- parseCompleteOutput(options: {
- text: string;
- }, context: {
- response: LanguageModelResponseMetadata;
- usage: LanguageModelUsage;
- finishReason: FinishReason;
- }): Promise<OUTPUT>;
+ temperature?: number;
  /**
- * Parses the partial output of the model.
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
  */
- parsePartialOutput(options: {
- text: string;
- }): Promise<{
- partial: PARTIAL;
- } | undefined>;
- }
- /**
- * Output specification for text generation.
- * This is the default output mode that generates plain text.
- *
- * @returns An output specification for generating text.
- */
- declare const text: () => Output<string, string>;
- /**
- * Output specification for typed object generation using schemas.
- * When the model generates a text response, it will return an object that matches the schema.
- *
- * @param schema - The schema of the object to generate.
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating objects with the specified schema.
- */
- declare const object: <OBJECT>({ schema: inputSchema, name, description, }: {
- schema: FlexibleSchema<OBJECT>;
+ topP?: number;
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
  */
- name?: string;
+ topK?: number;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
- description?: string;
- }) => Output<OBJECT, DeepPartial<OBJECT>>;
- /**
- * Output specification for array generation.
- * When the model generates a text response, it will return an array of elements.
- *
- * @param element - The schema of the array elements to generate.
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating an array of elements.
- */
- declare const array: <ELEMENT>({ element: inputElementSchema, name, description, }: {
- element: FlexibleSchema<ELEMENT>;
+ presencePenalty?: number;
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
  */
- name?: string;
+ frequencyPenalty?: number;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
  */
- description?: string;
- }) => Output<Array<ELEMENT>, Array<ELEMENT>>;
- /**
- * Output specification for choice generation.
- * When the model generates a text response, it will return a one of the choice options.
- *
- * @param options - The available choices.
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating a choice.
- */
- declare const choice: <CHOICE extends string>({ options: choiceOptions, name, description, }: {
- options: Array<CHOICE>;
+ stopSequences?: string[];
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
  */
- name?: string;
+ seed?: number;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
  */
- description?: string;
- }) => Output<CHOICE, CHOICE>;
- /**
- * Output specification for unstructured JSON generation.
- * When the model generates a text response, it will return a JSON object.
- *
- * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- *
- * @returns An output specification for generating JSON.
- */
- declare const json: ({ name, description, }?: {
+ maxRetries?: number;
  /**
- * Optional name of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
+ Abort signal.
  */
- name?: string;
+ abortSignal?: AbortSignal;
  /**
- * Optional description of the output that should be generated.
- * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
+ Timeout in milliseconds. The call will be aborted if it takes longer
+ than the specified timeout. Can be used alongside abortSignal.
+
+ Can be specified as a number (milliseconds) or as an object with `totalMs`.
  */
- description?: string;
- }) => Output<JSONValue$1, JSONValue$1>;
-
- type output_Output<OUTPUT = any, PARTIAL = any> = Output<OUTPUT, PARTIAL>;
- declare const output_array: typeof array;
- declare const output_choice: typeof choice;
- declare const output_json: typeof json;
- declare const output_object: typeof object;
- declare const output_text: typeof text;
- declare namespace output {
- export {
- output_Output as Output,
- output_array as array,
- output_choice as choice,
- output_json as json,
- output_object as object,
- output_text as text,
- };
- }
+ timeout?: TimeoutConfiguration;
+ /**
+ Additional HTTP headers to be sent with the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string | undefined>;
+ };
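`CallSettings` collects the per-call knobs shared by the generation functions. A hedged sketch of typical values, not part of the diff; the model id is a placeholder:

    import { generateText } from 'ai';

    const result = await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Summarize the plot of Hamlet in two sentences.',
      maxOutputTokens: 256,
      temperature: 0.3,        // set temperature or topP, not both
      stopSequences: ['\n\n'],
      seed: 42,                // deterministic only if the model supports it
      maxRetries: 0,           // disable retries
      headers: { 'x-request-source': 'docs-example' }, // HTTP providers only
    });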

  /**
- * Infers the complete output type from the output specification.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
- type InferCompleteOutput<OUTPUT extends Output> = OUTPUT extends Output<infer COMPLETE_OUTPUT, any> ? COMPLETE_OUTPUT : never;
- /**
- * Infers the partial output type from the output specification.
+ type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
+ system?: string | SystemModelMessage | Array<SystemModelMessage>;
+ } & ({
+ /**
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt: string | Array<ModelMessage>;
+ /**
+ A list of messages.
+
+ You can either use `prompt` or `messages` but not both.
  */
- type InferPartialOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer PARTIAL_OUTPUT> ? PARTIAL_OUTPUT : never;
+ messages?: never;
+ } | {
+ /**
+ A list of messages.

- /**
- A message that was generated during the generation process.
- It can be either an assistant message or a tool message.
+ You can either use `prompt` or `messages` but not both.
  */
- type ResponseMessage = AssistantModelMessage | ToolModelMessage;
+ messages: Array<ModelMessage>;
+ /**
+ A prompt. It can be either a text prompt or a list of messages.
+
+ You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: never;
+ });
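`Prompt` makes `prompt` and `messages` mutually exclusive at the type level; passing both is a compile error. A sketch (placeholder model id):

    import { generateText, type ModelMessage } from 'ai';

    // Either a plain text prompt...
    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      system: 'Be terse.',
      prompt: 'Name three prime numbers.',
    });

    // ...or a message list, but not both (the union above rejects it).
    const messages: ModelMessage[] = [
      { role: 'user', content: 'Name three prime numbers.' },
    ];
    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      system: 'Be terse.',
      messages,
    });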

  /**
- * The result of a single step in the generation process.
+ * Telemetry configuration.
  */
- type StepResult<TOOLS extends ToolSet> = {
+ type TelemetrySettings = {
  /**
- The content that was generated in the last step.
+ * Enable or disable telemetry. Disabled by default while experimental.
  */
- readonly content: Array<ContentPart<TOOLS>>;
- /**
- The generated text.
- */
- readonly text: string;
+ isEnabled?: boolean;
  /**
- The reasoning that was generated during the generation.
- */
- readonly reasoning: Array<ReasoningPart>;
+ * Enable or disable input recording. Enabled by default.
+ *
+ * You might want to disable input recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+ recordInputs?: boolean;
  /**
- The reasoning text that was generated during the generation.
- */
- readonly reasoningText: string | undefined;
+ * Enable or disable output recording. Enabled by default.
+ *
+ * You might want to disable output recording to avoid recording sensitive
+ * information, to reduce data transfers, or to increase performance.
+ */
+ recordOutputs?: boolean;
  /**
- The files that were generated during the generation.
- */
- readonly files: Array<GeneratedFile>;
+ * Identifier for this function. Used to group telemetry data by function.
+ */
+ functionId?: string;
  /**
- The sources that were used to generate the text.
- */
- readonly sources: Array<Source>;
+ * Additional information to include in the telemetry data.
+ */
+ metadata?: Record<string, AttributeValue>;
+ /**
+ * A custom tracer to use for the telemetry data.
+ */
+ tracer?: Tracer;
+ };
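A sketch of `TelemetrySettings` passed via the `experimental_telemetry` option documented later in this file (values are illustrative):

    import { generateText } from 'ai';

    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Hello!',
      experimental_telemetry: {
        isEnabled: true,          // telemetry is off by default while experimental
        recordInputs: false,      // avoid recording sensitive prompt data
        functionId: 'greeting',   // groups telemetry data by function
        metadata: { tenant: 'acme' }, // hypothetical attribute
      },
    });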
+
+ /**
+ * Experimental. Can change in patch versions without warning.
+ *
+ * Download function. Called with the array of URLs and a boolean indicating
+ * whether the URL is supported by the model.
+ *
+ * The download function can decide for each URL:
+ * - to return null (which means that the URL should be passed to the model)
+ * - to download the asset and return the data (incl. retries, authentication, etc.)
+ *
+ * Should throw DownloadError if the download fails.
+ *
+ * Should return an array of objects sorted by the order of the requested downloads.
+ * For each object, the data should be a Uint8Array if the URL was downloaded.
+ * For each object, the mediaType should be the media type of the downloaded asset.
+ * For each object, the data should be null if the URL should be passed through as is.
+ */
+ type DownloadFunction = (options: Array<{
+ url: URL;
+ isUrlSupportedByModel: boolean;
+ }>) => PromiseLike<Array<{
+ data: Uint8Array;
+ mediaType: string | undefined;
+ } | null>>;
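A sketch of a custom download function matching the shape above; returning `null` passes the URL through to the model. Illustrative only, wired up through the `experimental_download` option documented later in this file:

    import { generateText } from 'ai';

    // Matches the DownloadFunction shape declared above.
    const download = async (
      requests: Array<{ url: URL; isUrlSupportedByModel: boolean }>,
    ) =>
      Promise.all(
        requests.map(async ({ url, isUrlSupportedByModel }) => {
          if (isUrlSupportedByModel) return null; // model consumes the URL directly
          const response = await fetch(url);
          return {
            data: new Uint8Array(await response.arrayBuffer()),
            mediaType: response.headers.get('content-type') ?? undefined,
          };
        }),
      );

    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Describe the image at https://example.com/cat.png', // placeholder URL
      experimental_download: download,
    });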
+
+ /**
+ A message that was generated during the generation process.
+ It can be either an assistant message or a tool message.
+ */
+ type ResponseMessage = AssistantModelMessage | ToolModelMessage;
+
+ /**
+ * The result of a single step in the generation process.
+ */
+ type StepResult<TOOLS extends ToolSet> = {
+ /**
+ The content that was generated in the last step.
+ */
+ readonly content: Array<ContentPart<TOOLS>>;
+ /**
+ The generated text.
+ */
+ readonly text: string;
+ /**
+ The reasoning that was generated during the generation.
+ */
+ readonly reasoning: Array<ReasoningPart>;
+ /**
+ The reasoning text that was generated during the generation.
+ */
+ readonly reasoningText: string | undefined;
+ /**
+ The files that were generated during the generation.
+ */
+ readonly files: Array<GeneratedFile>;
+ /**
+ The sources that were used to generate the text.
+ */
+ readonly sources: Array<Source>;
  /**
  The tool calls that were made during the generation.
  */
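`StepResult` continues in the next hunk. A sketch of consuming it through the `onStepFinish` callback documented later in this file (placeholder model id):

    import { generateText } from 'ai';

    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'What is the weather in Berlin?',
      onStepFinish: (step) => {
        // Each step exposes the generated content, text, reasoning, files, and sources.
        console.log('text:', step.text);
        console.log('tool calls:', step.toolCalls.length);
      },
    });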
@@ -819,1954 +859,1935 @@ type StepResult<TOOLS extends ToolSet> = {
  };

  /**
- The result of a `generateText` call.
- It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ * Function that you can use to provide different settings for a step.
+ *
+ * @param options - The options for the step.
+ * @param options.steps - The steps that have been executed so far.
+ * @param options.stepNumber - The number of the step that is being executed.
+ * @param options.model - The model that is being used.
+ * @param options.messages - The messages that will be sent to the model for the current step.
+ * @param options.experimental_context - The context passed via the experimental_context setting (experimental).
+ *
+ * @returns An object that contains the settings for the step.
+ * If you return undefined (or for undefined settings), the settings from the outer level will be used.
  */
- interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
+ type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
  /**
- The content that was generated in the last step.
+ * The steps that have been executed so far.
  */
- readonly content: Array<ContentPart<TOOLS>>;
- /**
- The text that was generated in the last step.
- */
- readonly text: string;
+ steps: Array<StepResult<NoInfer<TOOLS>>>;
  /**
- The full reasoning that the model has generated in the last step.
+ * The number of the step that is being executed.
  */
- readonly reasoning: Array<ReasoningOutput>;
+ stepNumber: number;
  /**
- The reasoning text that the model has generated in the last step. Can be undefined if the model
- has only generated text.
+ * The model instance that is being used for this step.
  */
- readonly reasoningText: string | undefined;
- /**
- The files that were generated in the last step.
- Empty array if no files were generated.
- */
- readonly files: Array<GeneratedFile>;
+ model: LanguageModel;
  /**
- Sources that have been used as references in the last step.
+ * The messages that will be sent to the model for the current step.
  */
- readonly sources: Array<Source>;
+ messages: Array<ModelMessage>;
  /**
- The tool calls that were made in the last step.
+ * The context passed via the experimental_context setting (experimental).
  */
- readonly toolCalls: Array<TypedToolCall<TOOLS>>;
+ experimental_context: unknown;
+ }) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
+ /**
+ * The result type returned by a {@link PrepareStepFunction},
+ * allowing per-step overrides of model, tools, or messages.
+ */
+ type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
  /**
- The static tool calls that were made in the last step.
+ * Optionally override which LanguageModel instance is used for this step.
  */
- readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
+ model?: LanguageModel;
  /**
- The dynamic tool calls that were made in the last step.
+ * Optionally set which tool the model must call, or provide tool call configuration
+ * for this step.
  */
- readonly dynamicToolCalls: Array<DynamicToolCall>;
+ toolChoice?: ToolChoice<NoInfer<TOOLS>>;
  /**
- The results of the tool calls from the last step.
+ * If provided, only these tools are enabled/available for this step.
  */
- readonly toolResults: Array<TypedToolResult<TOOLS>>;
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
- The static tool results that were made in the last step.
+ * Optionally override the system message(s) sent to the model for this step.
  */
- readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
+ system?: string | SystemModelMessage | Array<SystemModelMessage>;
  /**
- The dynamic tool results that were made in the last step.
+ * Optionally override the full set of messages sent to the model
+ * for this step.
  */
- readonly dynamicToolResults: Array<DynamicToolResult>;
+ messages?: Array<ModelMessage>;
  /**
- * The unified reason why the generation finished.
+ * Context that is passed into tool execution. Experimental.
+ *
+ * Changing the context will affect the context in this step
+ * and all subsequent steps.
  */
- readonly finishReason: FinishReason;
+ experimental_context?: unknown;
  /**
- * The raw reason why the generation finished (from the provider).
+ * Additional provider-specific options for this step.
+ *
+ * Can be used to pass provider-specific configuration such as
+ * container IDs for Anthropic's code execution.
  */
- readonly rawFinishReason: string | undefined;
+ providerOptions?: ProviderOptions;
+ } | undefined;
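A sketch of a `PrepareStepFunction` wired up through the `prepareStep` option; undefined fields fall back to the outer-level settings (the slicing policy is illustrative):

    import { generateText } from 'ai';

    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Research the topic, then summarize it.',
      prepareStep: ({ stepNumber, messages }) => {
        // Force a tool call on the first step.
        if (stepNumber === 0) {
          return { toolChoice: 'required' as const };
        }
        // Example: trim long histories for later steps.
        return { messages: messages.slice(-10) };
      },
    });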
+
+ type StopCondition<TOOLS extends ToolSet> = (options: {
+ steps: Array<StepResult<TOOLS>>;
+ }) => PromiseLike<boolean> | boolean;
+ declare function stepCountIs(stepCount: number): StopCondition<any>;
+ declare function hasToolCall(toolName: string): StopCondition<any>;
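A sketch combining the stop conditions above via the `stopWhen` option; any condition in the array may end the loop (`answer` is a hypothetical tool name):

    import { generateText, stepCountIs, hasToolCall } from 'ai';

    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Solve the task, calling tools as needed.',
      // Stop after 5 steps, or as soon as the model calls the `answer` tool.
      stopWhen: [stepCountIs(5), hasToolCall('answer')],
    });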
+
+ declare const symbol$e: unique symbol;
+ declare class InvalidToolInputError extends AISDKError {
+ private readonly [symbol$e];
+ readonly toolName: string;
+ readonly toolInput: string;
+ constructor({ toolInput, toolName, cause, message, }: {
+ message?: string;
+ toolInput: string;
+ toolName: string;
+ cause: unknown;
+ });
+ static isInstance(error: unknown): error is InvalidToolInputError;
+ }
+
+ declare const symbol$d: unique symbol;
+ declare class NoSuchToolError extends AISDKError {
+ private readonly [symbol$d];
+ readonly toolName: string;
+ readonly availableTools: string[] | undefined;
+ constructor({ toolName, availableTools, message, }: {
+ toolName: string;
+ availableTools?: string[] | undefined;
+ message?: string;
+ });
+ static isInstance(error: unknown): error is NoSuchToolError;
+ }
+
+ declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
+ declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
+ declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
+ declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
+ declare const modelMessageSchema: z.ZodType<ModelMessage>;
+
+ /**
+ * A function that attempts to repair a tool call that failed to parse.
+ *
+ * It receives the error and the context as arguments and returns the repair
+ * tool call JSON as text.
+ *
+ * @param options.system - The system prompt.
+ * @param options.messages - The messages in the current generation step.
+ * @param options.toolCall - The tool call that failed to parse.
+ * @param options.tools - The tools that are available.
+ * @param options.inputSchema - A function that returns the JSON Schema for a tool.
+ * @param options.error - The error that occurred while parsing the tool call.
+ */
+ type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
+ system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
+ messages: ModelMessage[];
+ toolCall: LanguageModelV3ToolCall;
+ tools: TOOLS;
+ inputSchema: (options: {
+ toolName: string;
+ }) => PromiseLike<JSONSchema7>;
+ error: NoSuchToolError | InvalidToolInputError;
+ }) => Promise<LanguageModelV3ToolCall | null>;
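A sketch of a repair function wired up through the `experimental_repairToolCall` option; it assumes, per the comment above, that the tool call's `input` is JSON text:

    import { generateText, NoSuchToolError } from 'ai';

    await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Use the calculator tool.',
      experimental_repairToolCall: async ({ toolCall, error }) => {
        if (NoSuchToolError.isInstance(error)) {
          return null; // unknown tool: give up instead of repairing
        }
        // Naive repair attempt for unparsable input: wrap the raw text
        // as a JSON string argument (illustrative strategy only).
        return { ...toolCall, input: JSON.stringify({ raw: toolCall.input }) };
      },
    });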
+
+ /**
+ Callback that is set using the `onStepFinish` option.
+
+ @param stepResult - The result of the step.
+ */
+ type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
+ /**
+ Callback that is set using the `onFinish` option.
+
+ @param event - The event that is passed to the callback.
+ */
+ type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
  /**
- The token usage of the last step.
+ * Details for all steps.
  */
- readonly usage: LanguageModelUsage;
+ readonly steps: StepResult<TOOLS>[];
  /**
- The total token usage of all steps.
- When there are multiple steps, the usage is the sum of all step usages.
+ * Total usage for all steps. This is the sum of the usage of all steps.
  */
  readonly totalUsage: LanguageModelUsage;
  /**
- Warnings from the model provider (e.g. unsupported settings)
- */
- readonly warnings: CallWarning[] | undefined;
- /**
- Additional request information.
- */
- readonly request: LanguageModelRequestMetadata;
- /**
- Additional response information.
+ * Context that is passed into tool execution.
+ *
+ * Experimental (can break in patch releases).
+ *
+ * @default undefined
  */
- readonly response: LanguageModelResponseMetadata & {
- /**
- The response messages that were generated during the call. It consists of an assistant message,
- potentially containing tool calls.
-
- When there are tool results, there is an additional tool message with the tool results that are available.
- If there are tools that do not have execute functions, they are not included in the tool results and
- need to be added separately.
- */
- messages: Array<ResponseMessage>;
- /**
- Response body (available only for providers that use HTTP requests).
- */
- body?: unknown;
- };
+ experimental_context: unknown;
+ }) => PromiseLike<void> | void;
+ /**
+ Generate a text and call tools for a given prompt using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `streamText` instead.
+
+ @param model - The language model to use.
+
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
+ @param toolChoice - The tool choice strategy. Default: 'auto'.
+
+ @param system - A system message that will be part of the prompt.
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+ @param maxOutputTokens - Maximum number of tokens to generate.
+ @param temperature - Temperature setting.
+ The value is passed through to the provider. The range depends on the provider and model.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topP - Nucleus sampling.
+ The value is passed through to the provider. The range depends on the provider and model.
+ It is recommended to set either `temperature` or `topP`, but not both.
+ @param topK - Only sample from the top K options for each subsequent token.
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+ @param presencePenalty - Presence penalty setting.
+ It affects the likelihood of the model to repeat information that is already in the prompt.
+ The value is passed through to the provider. The range depends on the provider and model.
+ @param frequencyPenalty - Frequency penalty setting.
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
+ The value is passed through to the provider. The range depends on the provider and model.
+ @param stopSequences - Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ @param seed - The seed (integer) to use for random sampling.
+ If set and supported by the model, calls will generate deterministic results.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+ @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+ @param experimental_generateMessageId - Generate a unique ID for each message.
+
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+ @param onFinish - Callback that is called when all steps are finished and the response is complete.
+
+ @returns
+ A result object that contains the generated text, the results of the tool calls, and additional information.
+ */
+ declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
  /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
+ The language model to use.
  */
- readonly providerMetadata: ProviderMetadata | undefined;
+ model: LanguageModel;
  /**
- Details for all steps.
- You can use this to get information about intermediate steps,
- such as the tool calls or the response headers.
+ The tools that the model can call. The model needs to support calling tools.
+ */
+ tools?: TOOLS;
+ /**
+ The tool choice strategy. Default: 'auto'.
  */
- readonly steps: Array<StepResult<TOOLS>>;
+ toolChoice?: ToolChoice<NoInfer<TOOLS>>;
  /**
- The generated structured output. It uses the `output` specification.
-
- @deprecated Use `output` instead.
+ Condition for stopping the generation when there are tool results in the last step.
+ When the condition is an array, any of the conditions can be met to stop the generation.
+
+ @default stepCountIs(1)
  */
- readonly experimental_output: InferCompleteOutput<OUTPUT>;
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
  /**
- The generated structured output. It uses the `output` specification.
-
+ Optional telemetry configuration (experimental).
  */
- readonly output: InferCompleteOutput<OUTPUT>;
- }
-
- /**
- Timeout configuration for API calls. Can be specified as:
- - A number representing milliseconds
- - An object with `totalMs` property for the total timeout in milliseconds
- - An object with `stepMs` property for the timeout of each step in milliseconds
- - An object with `chunkMs` property for the timeout between stream chunks (streaming only)
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Additional provider-specific options. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
  */
- type TimeoutConfiguration = number | {
- totalMs?: number;
- stepMs?: number;
- chunkMs?: number;
- };
- type CallSettings = {
+ providerOptions?: ProviderOptions;
  /**
- Maximum number of tokens to generate.
+ * @deprecated Use `activeTools` instead.
  */
- maxOutputTokens?: number;
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
- Temperature setting. The range depends on the provider and model.
-
- It is recommended to set either `temperature` or `topP`, but not both.
+ Limits the tools that are available for the model to call without
+ changing the tool call and result types in the result.
  */
- temperature?: number;
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
  /**
- Nucleus sampling. This is a number between 0 and 1.
-
- E.g. 0.1 would mean that only tokens with the top 10% probability mass
- are considered.
-
- It is recommended to set either `temperature` or `topP`, but not both.
+ Optional specification for parsing structured outputs from the LLM response.
  */
- topP?: number;
+ output?: OUTPUT;
  /**
- Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
+ Optional specification for parsing structured outputs from the LLM response.
+
+ @deprecated Use `output` instead.
  */
- topK?: number;
+ experimental_output?: OUTPUT;
  /**
- Presence penalty setting. It affects the likelihood of the model to
- repeat information that is already in the prompt.
-
- The presence penalty is a number between -1 (increase repetition)
- and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ Custom download function to use for URLs.
+
+ By default, files are downloaded if the model does not support the URL for the given media type.
  */
- presencePenalty?: number;
+ experimental_download?: DownloadFunction | undefined;
  /**
- Frequency penalty setting. It affects the likelihood of the model
- to repeatedly use the same words or phrases.
-
- The frequency penalty is a number between -1 (increase repetition)
- and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+ * @deprecated Use `prepareStep` instead.
  */
- frequencyPenalty?: number;
+ experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /**
- Stop sequences.
- If set, the model will stop generating text when one of the stop sequences is generated.
- Providers may have limits on the number of stop sequences.
- */
- stopSequences?: string[];
+ Optional function that you can use to provide different settings for a step.
+ */
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
  /**
- The seed (integer) to use for random sampling. If set and supported
- by the model, calls will generate deterministic results.
+ A function that attempts to repair a tool call that failed to parse.
  */
- seed?: number;
+ experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
  /**
- Maximum number of retries. Set to 0 to disable retries.
-
- @default 2
+ * Callback that is called when each step (LLM call) is finished, including intermediate steps.
  */
- maxRetries?: number;
+ onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
  /**
- Abort signal.
+ * Callback that is called when all steps are finished and the response is complete.
  */
- abortSignal?: AbortSignal;
+ onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
  /**
- Timeout in milliseconds. The call will be aborted if it takes longer
- than the specified timeout. Can be used alongside abortSignal.
-
- Can be specified as a number (milliseconds) or as an object with `totalMs`.
+ * Context that is passed into tool execution.
+ *
+ * Experimental (can break in patch releases).
+ *
+ * @default undefined
  */
- timeout?: TimeoutConfiguration;
+ experimental_context?: unknown;
  /**
- Additional HTTP headers to be sent with the request.
- Only applicable for HTTP-based providers.
+ * Internal. For test use only. May change without notice.
  */
- headers?: Record<string, string | undefined>;
- };
+ _internal?: {
+ generateId?: IdGenerator;
+ };
+ }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
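A minimal end-to-end sketch of `generateText` with a tool and a stop condition; the model id and the `weather` tool are placeholders, not part of the diff:

    import { generateText, tool, stepCountIs } from 'ai';
    import { z } from 'zod';

    const result = await generateText({
      model: 'openai/gpt-4o', // placeholder model id
      system: 'You are a helpful assistant.',
      prompt: 'What is the weather in Berlin?',
      tools: {
        weather: tool({
          description: 'Get the weather for a city.',
          inputSchema: z.object({ city: z.string() }),
          execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result
        }),
      },
      stopWhen: stepCountIs(3), // allow up to three LLM steps
    });

    console.log(result.text);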

  /**
- Prompt part of the AI function options.
- It contains a system message, a simple text prompt, or a list of messages.
+ * Infers the complete output type from the output specification.
  */
- type Prompt = {
- /**
- System message to include in the prompt. Can be used with `prompt` or `messages`.
- */
- system?: string | SystemModelMessage | Array<SystemModelMessage>;
- } & ({
- /**
- A prompt. It can be either a text prompt or a list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- prompt: string | Array<ModelMessage>;
- /**
- A list of messages.
-
- You can either use `prompt` or `messages` but not both.
+ type InferCompleteOutput<OUTPUT extends Output> = OUTPUT extends Output<infer COMPLETE_OUTPUT, any, any> ? COMPLETE_OUTPUT : never;
+ /**
+ * Infers the partial output type from the output specification.
  */
- messages?: never;
- } | {
- /**
- A list of messages.
-
- You can either use `prompt` or `messages` but not both.
+ type InferPartialOutput<OUTPUT extends Output> = OUTPUT extends Output<any, infer PARTIAL_OUTPUT, any> ? PARTIAL_OUTPUT : never;
+ /**
+ * Infers the element type from an array output specification.
  */
- messages: Array<ModelMessage>;
- /**
- A prompt. It can be either a text prompt or a list of messages.
-
- You can either use `prompt` or `messages` but not both.
- */
- prompt?: never;
- });
+ type InferElementOutput<OUTPUT extends Output> = OUTPUT extends Output<any, any, infer ELEMENT> ? ELEMENT : never;

  /**
- * Telemetry configuration.
+ * Prunes model messages from a list of model messages.
+ *
+ * @param messages - The list of model messages to prune.
+ * @param reasoning - How to remove reasoning content from assistant messages. Default is `'none'`.
+ * @param toolCalls - How to prune tool call/results/approval content. Default is `[]`.
+ * @param emptyMessages - Whether to keep or remove messages whose content is empty after pruning. Default is `'remove'`.
+ *
+ * @returns The pruned list of model messages.
  */
- type TelemetrySettings = {
- /**
- * Enable or disable telemetry. Disabled by default while experimental.
- */
- isEnabled?: boolean;
- /**
- * Enable or disable input recording. Enabled by default.
- *
- * You might want to disable input recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
- recordInputs?: boolean;
- /**
- * Enable or disable output recording. Enabled by default.
- *
- * You might want to disable output recording to avoid recording sensitive
- * information, to reduce data transfers, or to increase performance.
- */
- recordOutputs?: boolean;
- /**
- * Identifier for this function. Used to group telemetry data by function.
- */
- functionId?: string;
- /**
- * Additional information to include in the telemetry data.
- */
- metadata?: Record<string, AttributeValue>;
- /**
- * A custom tracer to use for the telemetry data.
- */
- tracer?: Tracer;
- };
+ declare function pruneMessages({ messages, reasoning, toolCalls, emptyMessages, }: {
+ messages: ModelMessage[];
+ reasoning?: 'all' | 'before-last-message' | 'none';
+ toolCalls?: 'all' | 'before-last-message' | `before-last-${number}-messages` | 'none' | Array<{
+ type: 'all' | 'before-last-message' | `before-last-${number}-messages`;
+ tools?: string[];
+ }>;
+ emptyMessages?: 'keep' | 'remove';
+ }): ModelMessage[];
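A sketch of the new `pruneMessages` helper, assuming it is exported from 'ai'; the `search` tool name and the history are hypothetical:

    import { pruneMessages, type ModelMessage } from 'ai';

    declare const history: ModelMessage[]; // some existing conversation

    // Drop reasoning everywhere except the last message, and drop tool-call
    // content for the `search` tool that is older than the last 4 messages.
    const pruned = pruneMessages({
      messages: history,
      reasoning: 'before-last-message',
      toolCalls: [{ type: 'before-last-4-messages', tools: ['search'] }],
      emptyMessages: 'remove',
    });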

  /**
- * Experimental. Can change in patch versions without warning.
- *
- * Download function. Called with the array of URLs and a boolean indicating
- * whether the URL is supported by the model.
- *
- * The download function can decide for each URL:
- * - to return null (which means that the URL should be passed to the model)
- * - to download the asset and return the data (incl. retries, authentication, etc.)
+ * Detects the first chunk in a buffer.
  *
- * Should throw DownloadError if the download fails.
+ * @param buffer - The buffer to detect the first chunk in.
  *
- * Should return an array of objects sorted by the order of the requested downloads.
- * For each object, the data should be a Uint8Array if the URL was downloaded.
- * For each object, the mediaType should be the media type of the downloaded asset.
- * For each object, the data should be null if the URL should be passed through as is.
+ * @returns The first detected chunk, or `undefined` if no chunk was detected.
  */
- type DownloadFunction = (options: Array<{
- url: URL;
- isUrlSupportedByModel: boolean;
- }>) => PromiseLike<Array<{
- data: Uint8Array;
- mediaType: string | undefined;
- } | null>>;
-
+ type ChunkDetector = (buffer: string) => string | undefined | null;
  /**
- * Function that you can use to provide different settings for a step.
+ * Smooths text and reasoning streaming output.
  *
- * @param options - The options for the step.
- * @param options.steps - The steps that have been executed so far.
- * @param options.stepNumber - The number of the step that is being executed.
- * @param options.model - The model that is being used.
- * @param options.messages - The messages that will be sent to the model for the current step.
- * @param options.experimental_context - The context passed via the experimental_context setting (experimental).
+ * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
+ * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, provide a custom RegExp pattern for custom chunking, provide an Intl.Segmenter for locale-aware word segmentation (recommended for CJK languages), or provide a custom ChunkDetector function.
  *
- * @returns An object that contains the settings for the step.
- * If you return undefined (or for undefined settings), the settings from the outer level will be used.
+ * @returns A transform stream that smooths text streaming output.
  */
- type PrepareStepFunction<TOOLS extends Record<string, Tool> = Record<string, Tool>> = (options: {
+ declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
+ delayInMs?: number | null;
+ chunking?: 'word' | 'line' | RegExp | ChunkDetector | Intl.Segmenter;
  /**
- * The steps that have been executed so far.
+ * Internal. For test use only. May change without notice.
  */
- steps: Array<StepResult<NoInfer<TOOLS>>>;
+ _internal?: {
+ delay?: (delayInMs: number | null) => Promise<void>;
+ };
+ }): (options: {
+ tools: TOOLS;
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
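A sketch of `smoothStream` applied to `streamText` via its `experimental_transform` option (placeholder model id):

    import { streamText, smoothStream } from 'ai';

    const result = streamText({
      model: 'openai/gpt-4o', // placeholder model id
      prompt: 'Write a short poem.',
      // Emit roughly word-sized chunks with a 20ms delay between them.
      experimental_transform: smoothStream({ delayInMs: 20, chunking: 'word' }),
    });

    for await (const textPart of result.textStream) {
      process.stdout.write(textPart);
    }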
+
+ /**
+ * Tool output when the tool execution has been denied (for static tools).
+ */
+ type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
+ [NAME in keyof TOOLS]: {
+ type: 'tool-output-denied';
+ toolCallId: string;
+ toolName: NAME & string;
+ providerExecuted?: boolean;
+ dynamic?: false | undefined;
+ };
+ }>;
+ /**
+ * Tool output when the tool execution has been denied.
+ */
+ type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
+
+ /**
+ The data types that can be used in the UI message for the UI message data parts.
+ */
+ type UIDataTypes = Record<string, unknown>;
+ type UITool = {
+ input: unknown;
+ output: unknown | undefined;
+ };
+ /**
+ * Infer the input and output types of a tool so it can be used as a UI tool.
+ */
+ type InferUITool<TOOL extends Tool> = {
+ input: InferToolInput<TOOL>;
+ output: InferToolOutput<TOOL>;
+ };
+ /**
+ * Infer the input and output types of a tool set so it can be used as a UI tool set.
+ */
+ type InferUITools<TOOLS extends ToolSet> = {
+ [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
+ };
+ type UITools = Record<string, UITool>;
+ /**
+ AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
+ */
+ interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
  /**
- * The number of the step that is being executed.
+ A unique identifier for the message.
  */
- stepNumber: number;
+ id: string;
  /**
- * The model instance that is being used for this step.
+ The role of the message.
  */
- model: LanguageModel;
+ role: 'system' | 'user' | 'assistant';
  /**
- * The messages that will be sent to the model for the current step.
+ The metadata of the message.
  */
- messages: Array<ModelMessage>;
+ metadata?: METADATA;
  /**
- * The context passed via the experimental_context setting (experimental).
+ The parts of the message. Use this for rendering the message in the UI.
+
+ System messages should be avoided (set the system prompt on the server instead).
+ They can have text parts.
+
+ User messages can have text parts and file parts.
+
+ Assistant messages can have text, reasoning, tool invocation, and file parts.
  */
- experimental_context: unknown;
- }) => PromiseLike<PrepareStepResult<TOOLS>> | PrepareStepResult<TOOLS>;
+ parts: Array<UIMessagePart<DATA_PARTS, TOOLS>>;
+ }
+ type UIMessagePart<DATA_TYPES extends UIDataTypes, TOOLS extends UITools> = TextUIPart | ReasoningUIPart | ToolUIPart<TOOLS> | DynamicToolUIPart | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
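A sketch of typing a `UIMessage` with `InferUITools` from a concrete tool set; the `weather` tool and metadata shape are illustrative:

    import { tool, type InferUITools, type UIMessage } from 'ai';
    import { z } from 'zod';

    const tools = {
      weather: tool({
        description: 'Get the weather for a city.',
        inputSchema: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result
      }),
    };

    // A UIMessage typed with custom metadata, data parts, and the tool set above.
    type MyUIMessage = UIMessage<
      { createdAt: number },       // METADATA
      { status: string },          // DATA_PARTS
      InferUITools<typeof tools>   // TOOLS
    >;

    declare const message: MyUIMessage;
    for (const part of message.parts) {
      if (part.type === 'text') console.log(part.text);
    }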
1167
1300
  /**
1168
- * The result type returned by a {@link PrepareStepFunction},
1169
- * allowing per-step overrides of model, tools, or messages.
1301
+ * A text part of a message.
1170
1302
  */
1171
- type PrepareStepResult<TOOLS extends Record<string, Tool> = Record<string, Tool>> = {
1303
+ type TextUIPart = {
1304
+ type: 'text';
1172
1305
  /**
1173
- * Optionally override which LanguageModel instance is used for this step.
1306
+ * The text content.
1174
1307
  */
1175
- model?: LanguageModel;
1308
+ text: string;
1176
1309
  /**
1177
- * Optionally set which tool the model must call, or provide tool call configuration
1178
- * for this step.
1310
+ * The state of the text part.
1179
1311
  */
1180
- toolChoice?: ToolChoice<NoInfer<TOOLS>>;
1312
+ state?: 'streaming' | 'done';
1181
1313
  /**
1182
- * If provided, only these tools are enabled/available for this step.
1314
+ * The provider metadata.
1183
1315
  */
1184
- activeTools?: Array<keyof NoInfer<TOOLS>>;
1316
+ providerMetadata?: ProviderMetadata;
1317
+ };
1318
+ /**
1319
+ * A reasoning part of a message.
1320
+ */
1321
+ type ReasoningUIPart = {
1322
+ type: 'reasoning';
1185
1323
  /**
1186
- * Optionally override the system message(s) sent to the model for this step.
1324
+ * The reasoning text.
1187
1325
  */
1188
- system?: string | SystemModelMessage | Array<SystemModelMessage>;
1326
+ text: string;
1189
1327
  /**
1190
- * Optionally override the full set of messages sent to the model
1191
- * for this step.
1328
+ * The state of the reasoning part.
1192
1329
  */
1193
- messages?: Array<ModelMessage>;
1330
+ state?: 'streaming' | 'done';
1194
1331
  /**
1195
- * Context that is passed into tool execution. Experimental.
1196
- *
1197
- * Changing the context will affect the context in this step
1198
- * and all subsequent steps.
1332
+ * The provider metadata.
1199
1333
  */
1200
- experimental_context?: unknown;
1334
+ providerMetadata?: ProviderMetadata;
1335
+ };
1336
+ /**
1337
+ * A source part of a message.
1338
+ */
1339
+ type SourceUrlUIPart = {
1340
+ type: 'source-url';
1341
+ sourceId: string;
1342
+ url: string;
1343
+ title?: string;
1344
+ providerMetadata?: ProviderMetadata;
1345
+ };
1346
+ /**
1347
+ * A document source part of a message.
1348
+ */
1349
+ type SourceDocumentUIPart = {
1350
+ type: 'source-document';
1351
+ sourceId: string;
1352
+ mediaType: string;
1353
+ title: string;
1354
+ filename?: string;
1355
+ providerMetadata?: ProviderMetadata;
1356
+ };
1357
+ /**
1358
+ * A file part of a message.
+ */
+ type FileUIPart = {
+ type: 'file';
  /**
- * Additional provider-specific options for this step.
+ * IANA media type of the file.
  *
- * Can be used to pass provider-specific configuration such as
- * container IDs for Anthropic's code execution.
+ * @see https://www.iana.org/assignments/media-types/media-types.xhtml
  */
- providerOptions?: ProviderOptions;
- } | undefined;
-
- type StopCondition<TOOLS extends ToolSet> = (options: {
- steps: Array<StepResult<TOOLS>>;
- }) => PromiseLike<boolean> | boolean;
- declare function stepCountIs(stepCount: number): StopCondition<any>;
- declare function hasToolCall(toolName: string): StopCondition<any>;
-
- declare const symbol$e: unique symbol;
- declare class InvalidToolInputError extends AISDKError {
- private readonly [symbol$e];
- readonly toolName: string;
- readonly toolInput: string;
- constructor({ toolInput, toolName, cause, message, }: {
- message?: string;
- toolInput: string;
- toolName: string;
- cause: unknown;
- });
- static isInstance(error: unknown): error is InvalidToolInputError;
- }
-
- declare const symbol$d: unique symbol;
- declare class NoSuchToolError extends AISDKError {
- private readonly [symbol$d];
- readonly toolName: string;
- readonly availableTools: string[] | undefined;
- constructor({ toolName, availableTools, message, }: {
- toolName: string;
- availableTools?: string[] | undefined;
- message?: string;
- });
- static isInstance(error: unknown): error is NoSuchToolError;
- }
-
- declare const systemModelMessageSchema: z.ZodType<SystemModelMessage>;
- declare const userModelMessageSchema: z.ZodType<UserModelMessage>;
- declare const assistantModelMessageSchema: z.ZodType<AssistantModelMessage>;
- declare const toolModelMessageSchema: z.ZodType<ToolModelMessage>;
- declare const modelMessageSchema: z.ZodType<ModelMessage>;
-
- /**
- * A function that attempts to repair a tool call that failed to parse.
- *
- * It receives the error and the context as arguments and returns the repair
- * tool call JSON as text.
- *
- * @param options.system - The system prompt.
- * @param options.messages - The messages in the current generation step.
- * @param options.toolCall - The tool call that failed to parse.
- * @param options.tools - The tools that are available.
- * @param options.inputSchema - A function that returns the JSON Schema for a tool.
- * @param options.error - The error that occurred while parsing the tool call.
- */
- type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
- system: string | SystemModelMessage | Array<SystemModelMessage> | undefined;
- messages: ModelMessage[];
- toolCall: LanguageModelV3ToolCall;
- tools: TOOLS;
- inputSchema: (options: {
- toolName: string;
- }) => PromiseLike<JSONSchema7>;
- error: NoSuchToolError | InvalidToolInputError;
- }) => Promise<LanguageModelV3ToolCall | null>;
-
- /**
- Callback that is set using the `onStepFinish` option.
-
- @param stepResult - The result of the step.
- */
- type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
- /**
- Callback that is set using the `onFinish` option.
-
- @param event - The event that is passed to the callback.
- */
- type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
+ mediaType: string;
  /**
- * Details for all steps.
+ * Optional filename of the file.
  */
- readonly steps: StepResult<TOOLS>[];
+ filename?: string;
  /**
- * Total usage for all steps. This is the sum of the usage of all steps.
+ * The URL of the file.
+ * It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
  */
- readonly totalUsage: LanguageModelUsage;
+ url: string;
  /**
- * Context that is passed into tool execution.
- *
- * Experimental (can break in patch releases).
- *
- * @default undefined
+ * The provider metadata.
  */
- experimental_context: unknown;
- }) => PromiseLike<void> | void;
+ providerMetadata?: ProviderMetadata;
+ };
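
For illustration, a minimal sketch of a file part under the shape added above, assuming `FileUIPart` is exported from this package's entry point (all values are hypothetical):

    import type { FileUIPart } from 'ai';

    // A file part whose URL points at a hosted file; a data URL would work as well.
    const filePart: FileUIPart = {
      type: 'file',
      mediaType: 'image/png', // IANA media type
      filename: 'chart.png',  // optional
      url: 'https://example.com/chart.png',
    };
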
  /**
- Generate a text and call tools for a given prompt using a language model.
-
- This function does not stream the output. If you want to stream the output, use `streamText` instead.
-
- @param model - The language model to use.
-
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
- @param toolChoice - The tool choice strategy. Default: 'auto'.
-
- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
- @param maxOutputTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topK - Only sample from the top K options for each subsequent token.
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param stopSequences - Stop sequences.
- If set, the model will stop generating text when one of the stop sequences is generated.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
-
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
- @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
- @param experimental_generateMessageId - Generate a unique ID for each message.
-
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
- @param onFinish - Callback that is called when all steps are finished and the response is complete.
-
- @returns
- A result object that contains the generated text, the results of the tool calls, and additional information.
+ * A step boundary part of a message.
  */
- declare function generateText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
- /**
- The language model to use.
- */
- model: LanguageModel;
- /**
- The tools that the model can call. The model needs to support calling tools.
- */
- tools?: TOOLS;
- /**
- The tool choice strategy. Default: 'auto'.
- */
- toolChoice?: ToolChoice<NoInfer<TOOLS>>;
+ type StepStartUIPart = {
+ type: 'step-start';
+ };
+ type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
+ [NAME in keyof DATA_TYPES & string]: {
+ type: `data-${NAME}`;
+ id?: string;
+ data: DATA_TYPES[NAME];
+ };
+ }>;
+ type asUITool<TOOL extends UITool | Tool> = TOOL extends Tool ? InferUITool<TOOL> : TOOL;
+ /**
+ * Check if a message part is a data part.
+ */
+ declare function isDataUIPart<DATA_TYPES extends UIDataTypes>(part: UIMessagePart<DATA_TYPES, UITools>): part is DataUIPart<DATA_TYPES>;
+ /**
+ * A UI tool invocation contains all the information needed to render a tool invocation in the UI.
+ * It can be derived from a tool without knowing the tool name, and can be used to define
+ * UI components for the tool.
+ */
+ type UIToolInvocation<TOOL extends UITool | Tool> = {
  /**
- Condition for stopping the generation when there are tool results in the last step.
- When the condition is an array, any of the conditions can be met to stop the generation.
-
- @default stepCountIs(1)
+ * ID of the tool call.
  */
- stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
+ toolCallId: string;
+ title?: string;
  /**
- Optional telemetry configuration (experimental).
+ * Whether the tool call was executed by the provider.
  */
- experimental_telemetry?: TelemetrySettings;
- /**
- Additional provider-specific options. They are passed through
- to the provider from the AI SDK and enable provider-specific
- functionality that can be fully encapsulated in the provider.
- */
- providerOptions?: ProviderOptions;
+ providerExecuted?: boolean;
+ } & ({
+ state: 'input-streaming';
+ input: DeepPartial<asUITool<TOOL>['input']> | undefined;
+ output?: never;
+ errorText?: never;
+ approval?: never;
+ } | {
+ state: 'input-available';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval?: never;
+ } | {
+ state: 'approval-requested';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved?: never;
+ reason?: never;
+ };
+ } | {
+ state: 'approval-responded';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: boolean;
+ reason?: string;
+ };
+ } | {
+ state: 'output-available';
+ input: asUITool<TOOL>['input'];
+ output: asUITool<TOOL>['output'];
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ preliminary?: boolean;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
+ } | {
+ state: 'output-error';
+ input: asUITool<TOOL>['input'] | undefined;
+ rawInput?: unknown;
+ output?: never;
+ errorText: string;
+ callProviderMetadata?: ProviderMetadata;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
+ } | {
+ state: 'output-denied';
+ input: asUITool<TOOL>['input'];
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: false;
+ reason?: string;
+ };
+ });
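
A sketch of how the `state` discriminator above can drive rendering; the labels are illustrative and `UIToolInvocation`/`UITool` are assumed to be exported:

    import type { UIToolInvocation, UITool } from 'ai';

    function describeInvocation(inv: UIToolInvocation<UITool>): string {
      switch (inv.state) {
        case 'input-streaming':
          return 'Collecting tool input...';
        case 'input-available':
          return `Ready to run with input ${JSON.stringify(inv.input)}`;
        case 'approval-requested':
          return `Waiting for approval ${inv.approval.id}`;
        case 'approval-responded':
          return inv.approval.approved ? 'Approved' : 'Denied';
        case 'output-available':
          return `Done: ${JSON.stringify(inv.output)}`;
        case 'output-error':
          return `Failed: ${inv.errorText}`;
        case 'output-denied':
          return 'Execution denied';
      }
    }
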
+ type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
+ [NAME in keyof TOOLS & string]: {
+ type: `tool-${NAME}`;
+ } & UIToolInvocation<TOOLS[NAME]>;
+ }>;
+ type DynamicToolUIPart = {
+ type: 'dynamic-tool';
  /**
- * @deprecated Use `activeTools` instead.
+ * Name of the tool that is being called.
  */
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
+ toolName: string;
  /**
- Limits the tools that are available for the model to call without
- changing the tool call and result types in the result.
+ * ID of the tool call.
  */
- activeTools?: Array<keyof NoInfer<TOOLS>>;
+ toolCallId: string;
+ title?: string;
  /**
- Optional specification for parsing structured outputs from the LLM response.
+ * Whether the tool call was executed by the provider.
  */
- output?: OUTPUT;
- /**
- Optional specification for parsing structured outputs from the LLM response.
-
- @deprecated Use `output` instead.
- */
- experimental_output?: OUTPUT;
- /**
- Custom download function to use for URLs.
-
- By default, files are downloaded if the model does not support the URL for the given media type.
- */
- experimental_download?: DownloadFunction | undefined;
- /**
- * @deprecated Use `prepareStep` instead.
- */
- experimental_prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
- /**
- Optional function that you can use to provide different settings for a step.
- */
- prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
- /**
- A function that attempts to repair a tool call that failed to parse.
- */
- experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
- /**
- * Callback that is called when each step (LLM call) is finished, including intermediate steps.
- */
- onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
- /**
- * Callback that is called when all steps are finished and the response is complete.
- */
- onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
- /**
- * Context that is passed into tool execution.
- *
- * Experimental (can break in patch releases).
- *
- * @default undefined
- */
- experimental_context?: unknown;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- generateId?: IdGenerator;
+ providerExecuted?: boolean;
+ } & ({
+ state: 'input-streaming';
+ input: unknown | undefined;
+ output?: never;
+ errorText?: never;
+ approval?: never;
+ } | {
+ state: 'input-available';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval?: never;
+ } | {
+ state: 'approval-requested';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved?: never;
+ reason?: never;
  };
- }): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
-
+ } | {
+ state: 'approval-responded';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: boolean;
+ reason?: string;
+ };
+ } | {
+ state: 'output-available';
+ input: unknown;
+ output: unknown;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ preliminary?: boolean;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
+ } | {
+ state: 'output-error';
+ input: unknown;
+ output?: never;
+ errorText: string;
+ callProviderMetadata?: ProviderMetadata;
+ approval?: {
+ id: string;
+ approved: true;
+ reason?: string;
+ };
+ } | {
+ state: 'output-denied';
+ input: unknown;
+ output?: never;
+ errorText?: never;
+ callProviderMetadata?: ProviderMetadata;
+ approval: {
+ id: string;
+ approved: false;
+ reason?: string;
+ };
+ });
  /**
- * Prunes model messages from a list of model messages.
- *
- * @param messages - The list of model messages to prune.
- * @param reasoning - How to remove reasoning content from assistant messages. Default is `'none'`.
- * @param toolCalls - How to prune tool call/results/approval content. Default is `[]`.
- * @param emptyMessages - Whether to keep or remove messages whose content is empty after pruning. Default is `'remove'`.
- *
- * @returns The pruned list of model messages.
+ * Type guard to check if a message part is a text part.
  */
- declare function pruneMessages({ messages, reasoning, toolCalls, emptyMessages, }: {
- messages: ModelMessage[];
- reasoning?: 'all' | 'before-last-message' | 'none';
- toolCalls?: 'all' | 'before-last-message' | `before-last-${number}-messages` | 'none' | Array<{
- type: 'all' | 'before-last-message' | `before-last-${number}-messages`;
- tools?: string[];
- }>;
- emptyMessages?: 'keep' | 'remove';
- }): ModelMessage[];
-
+ declare function isTextUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is TextUIPart;
  /**
- * Detects the first chunk in a buffer.
- *
- * @param buffer - The buffer to detect the first chunk in.
- *
- * @returns The first detected chunk, or `undefined` if no chunk was detected.
+ * Type guard to check if a message part is a file part.
  */
- type ChunkDetector = (buffer: string) => string | undefined | null;
+ declare function isFileUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is FileUIPart;
  /**
- * Smooths text and reasoning streaming output.
- *
- * @param delayInMs - The delay in milliseconds between each chunk. Defaults to 10ms. Can be set to `null` to skip the delay.
- * @param chunking - Controls how the text is chunked for streaming. Use "word" to stream word by word (default), "line" to stream line by line, provide a custom RegExp pattern for custom chunking, provide an Intl.Segmenter for locale-aware word segmentation (recommended for CJK languages), or provide a custom ChunkDetector function.
- *
- * @returns A transform stream that smooths text streaming output.
+ * Type guard to check if a message part is a reasoning part.
  */
- declare function smoothStream<TOOLS extends ToolSet>({ delayInMs, chunking, _internal: { delay }, }?: {
- delayInMs?: number | null;
- chunking?: 'word' | 'line' | RegExp | ChunkDetector | Intl.Segmenter;
- /**
- * Internal. For test use only. May change without notice.
- */
- _internal?: {
- delay?: (delayInMs: number | null) => Promise<void>;
- };
- }): (options: {
- tools: TOOLS;
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
-
+ declare function isReasoningUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is ReasoningUIPart;
  /**
- * Tool output when the tool execution has been denied (for static tools).
+ * Check if a message part is a static tool part.
+ *
+ * Static tools are tools for which the types are known at development time.
  */
- type StaticToolOutputDenied<TOOLS extends ToolSet> = ValueOf<{
- [NAME in keyof TOOLS]: {
- type: 'tool-output-denied';
- toolCallId: string;
- toolName: NAME & string;
- providerExecuted?: boolean;
- dynamic?: false | undefined;
- };
- }>;
+ declare function isStaticToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
  /**
- * Tool output when the tool execution has been denied.
+ * Check if a message part is a tool part.
+ *
+ * Tool parts are either static or dynamic tools.
+ *
+ * Use `isStaticToolUIPart` or `isDynamicToolUIPart` to check the type of the tool.
  */
- type TypedToolOutputDenied<TOOLS extends ToolSet> = StaticToolOutputDenied<TOOLS>;
-
+ declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
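
A sketch of the type guards in use, pulling the text and file parts out of a `UIMessage` (assuming the guards are exported):

    import { isTextUIPart, isFileUIPart, type UIMessage } from 'ai';

    function extractContent(message: UIMessage) {
      // `filter` narrows via the `part is ...` predicates declared above.
      const text = message.parts.filter(isTextUIPart).map(part => part.text).join('');
      const fileUrls = message.parts.filter(isFileUIPart).map(part => part.url);
      return { text, fileUrls };
    }
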
  /**
- The data types that can be used in the UI message for the UI message data parts.
+ * @deprecated Use isToolUIPart instead.
  */
- type UIDataTypes = Record<string, unknown>;
- type UITool = {
- input: unknown;
- output: unknown | undefined;
- };
+ declare const isToolOrDynamicToolUIPart: typeof isToolUIPart;
  /**
- * Infer the input and output types of a tool so it can be used as a UI tool.
+ * Returns the name of the static tool.
+ *
+ * The possible values are the keys of the tool set.
  */
- type InferUITool<TOOL extends Tool> = {
- input: InferToolInput<TOOL>;
- output: InferToolOutput<TOOL>;
- };
+ declare function getStaticToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
  /**
- * Infer the input and output types of a tool set so it can be used as a UI tool set.
+ * Returns the name of the tool (static or dynamic).
+ *
+ * This function will not restrict the name to the keys of the tool set.
+ * If you need to restrict the name to the keys of the tool set, use `getStaticToolName` instead.
  */
- type InferUITools<TOOLS extends ToolSet> = {
- [NAME in keyof TOOLS & string]: InferUITool<TOOLS[NAME]>;
- };
- type UITools = Record<string, UITool>;
+ declare function getToolName(part: ToolUIPart<UITools> | DynamicToolUIPart): string;
  /**
- AI SDK UI Messages. They are used in the client and to communicate between the frontend and the API routes.
+ * @deprecated Use getToolName instead.
  */
- interface UIMessage<METADATA = unknown, DATA_PARTS extends UIDataTypes = UIDataTypes, TOOLS extends UITools = UITools> {
- /**
- A unique identifier for the message.
- */
+ declare const getToolOrDynamicToolName: typeof getToolName;
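
A sketch combining `isToolUIPart` with `getToolName` to label every tool part, static or dynamic (imports assumed):

    import { isToolUIPart, getToolName, type UIMessage } from 'ai';

    function toolLabels(message: UIMessage): string[] {
      return message.parts
        .filter(isToolUIPart)
        .map(part => `${getToolName(part)} (${part.state})`);
    }
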
+ type InferUIMessageMetadata<T extends UIMessage> = T extends UIMessage<infer METADATA> ? METADATA : unknown;
+ type InferUIMessageData<T extends UIMessage> = T extends UIMessage<unknown, infer DATA_TYPES> ? DATA_TYPES : UIDataTypes;
+ type InferUIMessageTools<T extends UIMessage> = T extends UIMessage<unknown, UIDataTypes, infer TOOLS> ? TOOLS : UITools;
+ type InferUIMessageToolCall<UI_MESSAGE extends UIMessage> = ValueOf<{
+ [NAME in keyof InferUIMessageTools<UI_MESSAGE>]: ToolCall<NAME & string, InferUIMessageTools<UI_MESSAGE>[NAME] extends {
+ input: infer INPUT;
+ } ? INPUT : never> & {
+ dynamic?: false;
+ };
+ }> | (ToolCall<string, unknown> & {
+ dynamic: true;
+ });
+
+ declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
+ type: "text-start";
  id: string;
- /**
- The role of the message.
- */
- role: 'system' | 'user' | 'assistant';
- /**
- The metadata of the message.
- */
- metadata?: METADATA;
- /**
- The parts of the message. Use this for rendering the message in the UI.
-
- System messages should be avoided (set the system prompt on the server instead).
- They can have text parts.
-
- User messages can have text parts and file parts.
-
- Assistant messages can have text, reasoning, tool invocation, and file parts.
- */
- parts: Array<UIMessagePart<DATA_PARTS, TOOLS>>;
- }
- type UIMessagePart<DATA_TYPES extends UIDataTypes, TOOLS extends UITools> = TextUIPart | ReasoningUIPart | ToolUIPart<TOOLS> | DynamicToolUIPart | SourceUrlUIPart | SourceDocumentUIPart | FileUIPart | DataUIPart<DATA_TYPES> | StepStartUIPart;
- /**
- * A text part of a message.
- */
- type TextUIPart = {
- type: 'text';
- /**
- * The text content.
- */
- text: string;
- /**
- * The state of the text part.
- */
- state?: 'streaming' | 'done';
- /**
- * The provider metadata.
- */
- providerMetadata?: ProviderMetadata;
- };
- /**
- * A reasoning part of a message.
- */
- type ReasoningUIPart = {
- type: 'reasoning';
- /**
- * The reasoning text.
- */
- text: string;
- /**
- * The state of the reasoning part.
- */
- state?: 'streaming' | 'done';
- /**
- * The provider metadata.
- */
- providerMetadata?: ProviderMetadata;
- };
- /**
- * A source part of a message.
- */
- type SourceUrlUIPart = {
- type: 'source-url';
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "text-delta";
+ id: string;
+ delta: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "text-end";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "error";
+ errorText: string;
+ } | {
+ type: "tool-input-start";
+ toolCallId: string;
+ toolName: string;
+ providerExecuted?: boolean | undefined;
+ dynamic?: boolean | undefined;
+ title?: string | undefined;
+ } | {
+ type: "tool-input-delta";
+ toolCallId: string;
+ inputTextDelta: string;
+ } | {
+ type: "tool-input-available";
+ toolCallId: string;
+ toolName: string;
+ input: unknown;
+ providerExecuted?: boolean | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ dynamic?: boolean | undefined;
+ title?: string | undefined;
+ } | {
+ type: "tool-input-error";
+ toolCallId: string;
+ toolName: string;
+ input: unknown;
+ errorText: string;
+ providerExecuted?: boolean | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ dynamic?: boolean | undefined;
+ title?: string | undefined;
+ } | {
+ type: "tool-approval-request";
+ approvalId: string;
+ toolCallId: string;
+ } | {
+ type: "tool-output-available";
+ toolCallId: string;
+ output: unknown;
+ providerExecuted?: boolean | undefined;
+ dynamic?: boolean | undefined;
+ preliminary?: boolean | undefined;
+ } | {
+ type: "tool-output-error";
+ toolCallId: string;
+ errorText: string;
+ providerExecuted?: boolean | undefined;
+ dynamic?: boolean | undefined;
+ } | {
+ type: "tool-output-denied";
+ toolCallId: string;
+ } | {
+ type: "reasoning-start";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "reasoning-delta";
+ id: string;
+ delta: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "reasoning-end";
+ id: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "source-url";
  sourceId: string;
  url: string;
- title?: string;
- providerMetadata?: ProviderMetadata;
- };
- /**
- * A document source part of a message.
- */
- type SourceDocumentUIPart = {
- type: 'source-document';
+ title?: string | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "source-document";
  sourceId: string;
  mediaType: string;
  title: string;
- filename?: string;
- providerMetadata?: ProviderMetadata;
- };
- /**
- * A file part of a message.
- */
- type FileUIPart = {
- type: 'file';
- /**
- * IANA media type of the file.
- *
- * @see https://www.iana.org/assignments/media-types/media-types.xhtml
- */
- mediaType: string;
- /**
- * Optional filename of the file.
- */
- filename?: string;
- /**
- * The URL of the file.
- * It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
- */
+ filename?: string | undefined;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: "file";
  url: string;
- /**
- * The provider metadata.
- */
- providerMetadata?: ProviderMetadata;
- };
- /**
- * A step boundary part of a message.
- */
- type StepStartUIPart = {
- type: 'step-start';
- };
- type DataUIPart<DATA_TYPES extends UIDataTypes> = ValueOf<{
+ mediaType: string;
+ providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ } | {
+ type: `data-${string}`;
+ data: unknown;
+ id?: string | undefined;
+ transient?: boolean | undefined;
+ } | {
+ type: "start-step";
+ } | {
+ type: "finish-step";
+ } | {
+ type: "start";
+ messageId?: string | undefined;
+ messageMetadata?: unknown;
+ } | {
+ type: "finish";
+ finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
+ messageMetadata?: unknown;
+ } | {
+ type: "abort";
+ reason?: string | undefined;
+ } | {
+ type: "message-metadata";
+ messageMetadata: unknown;
+ }>;
+ type DataUIMessageChunk<DATA_TYPES extends UIDataTypes> = ValueOf<{
  [NAME in keyof DATA_TYPES & string]: {
  type: `data-${NAME}`;
  id?: string;
  data: DATA_TYPES[NAME];
+ transient?: boolean;
  };
  }>;
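
A sketch of a typed data chunk under `DataUIMessageChunk`, assuming an app-defined data part named `weather` (the name and payload are hypothetical):

    import type { DataUIMessageChunk } from 'ai';

    type MyDataTypes = { weather: { city: string; tempC: number } };

    const chunk: DataUIMessageChunk<MyDataTypes> = {
      type: 'data-weather',
      id: 'w1', // optional id for reconciliation
      data: { city: 'Berlin', tempC: 21 },
    };
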
- type asUITool<TOOL extends UITool | Tool> = TOOL extends Tool ? InferUITool<TOOL> : TOOL;
- /**
- * Check if a message part is a data part.
- */
- declare function isDataUIPart<DATA_TYPES extends UIDataTypes>(part: UIMessagePart<DATA_TYPES, UITools>): part is DataUIPart<DATA_TYPES>;
- /**
- * A UI tool invocation contains all the information needed to render a tool invocation in the UI.
- * It can be derived from a tool without knowing the tool name, and can be used to define
- * UI components for the tool.
- */
- type UIToolInvocation<TOOL extends UITool | Tool> = {
- /**
- * ID of the tool call.
- */
- toolCallId: string;
- title?: string;
- /**
- * Whether the tool call was executed by the provider.
- */
- providerExecuted?: boolean;
- } & ({
- state: 'input-streaming';
- input: DeepPartial<asUITool<TOOL>['input']> | undefined;
- output?: never;
- errorText?: never;
- approval?: never;
+ type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = {
+ type: 'text-start';
+ id: string;
+ providerMetadata?: ProviderMetadata;
  } | {
- state: 'input-available';
- input: asUITool<TOOL>['input'];
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval?: never;
+ type: 'text-delta';
+ delta: string;
+ id: string;
+ providerMetadata?: ProviderMetadata;
  } | {
- state: 'approval-requested';
- input: asUITool<TOOL>['input'];
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval: {
- id: string;
- approved?: never;
- reason?: never;
- };
+ type: 'text-end';
+ id: string;
+ providerMetadata?: ProviderMetadata;
  } | {
- state: 'approval-responded';
- input: asUITool<TOOL>['input'];
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval: {
- id: string;
- approved: boolean;
- reason?: string;
- };
- } | {
- state: 'output-available';
- input: asUITool<TOOL>['input'];
- output: asUITool<TOOL>['output'];
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- preliminary?: boolean;
- approval?: {
- id: string;
- approved: true;
- reason?: string;
- };
- } | {
- state: 'output-error';
- input: asUITool<TOOL>['input'] | undefined;
- rawInput?: unknown;
- output?: never;
- errorText: string;
- callProviderMetadata?: ProviderMetadata;
- approval?: {
- id: string;
- approved: true;
- reason?: string;
- };
- } | {
- state: 'output-denied';
- input: asUITool<TOOL>['input'];
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval: {
- id: string;
- approved: false;
- reason?: string;
- };
- });
- type ToolUIPart<TOOLS extends UITools = UITools> = ValueOf<{
- [NAME in keyof TOOLS & string]: {
- type: `tool-${NAME}`;
- } & UIToolInvocation<TOOLS[NAME]>;
- }>;
- type DynamicToolUIPart = {
- type: 'dynamic-tool';
- /**
- * Name of the tool that is being called.
- */
- toolName: string;
- /**
- * ID of the tool call.
- */
- toolCallId: string;
- title?: string;
- /**
- * Whether the tool call was executed by the provider.
- */
- providerExecuted?: boolean;
- } & ({
- state: 'input-streaming';
- input: unknown | undefined;
- output?: never;
- errorText?: never;
- approval?: never;
- } | {
- state: 'input-available';
- input: unknown;
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval?: never;
- } | {
- state: 'approval-requested';
- input: unknown;
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval: {
- id: string;
- approved?: never;
- reason?: never;
- };
- } | {
- state: 'approval-responded';
- input: unknown;
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval: {
- id: string;
- approved: boolean;
- reason?: string;
- };
- } | {
- state: 'output-available';
- input: unknown;
- output: unknown;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- preliminary?: boolean;
- approval?: {
- id: string;
- approved: true;
- reason?: string;
- };
- } | {
- state: 'output-error';
- input: unknown;
- output?: never;
- errorText: string;
- callProviderMetadata?: ProviderMetadata;
- approval?: {
- id: string;
- approved: true;
- reason?: string;
- };
- } | {
- state: 'output-denied';
- input: unknown;
- output?: never;
- errorText?: never;
- callProviderMetadata?: ProviderMetadata;
- approval: {
- id: string;
- approved: false;
- reason?: string;
- };
- });
- /**
- * Type guard to check if a message part is a text part.
- */
- declare function isTextUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is TextUIPart;
- /**
- * Type guard to check if a message part is a file part.
- */
- declare function isFileUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is FileUIPart;
- /**
- * Type guard to check if a message part is a reasoning part.
- */
- declare function isReasoningUIPart(part: UIMessagePart<UIDataTypes, UITools>): part is ReasoningUIPart;
- /**
- * Check if a message part is a static tool part.
- *
- * Static tools are tools for which the types are known at development time.
- */
- declare function isStaticToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS>;
- /**
- * Check if a message part is a tool part.
- *
- * Tool parts are either static or dynamic tools.
- *
- * Use `isStaticToolUIPart` or `isDynamicToolUIPart` to check the type of the tool.
- */
- declare function isToolUIPart<TOOLS extends UITools>(part: UIMessagePart<UIDataTypes, TOOLS>): part is ToolUIPart<TOOLS> | DynamicToolUIPart;
- /**
- * @deprecated Use isToolUIPart instead.
- */
- declare const isToolOrDynamicToolUIPart: typeof isToolUIPart;
- /**
- * Returns the name of the static tool.
- *
- * The possible values are the keys of the tool set.
- */
- declare function getStaticToolName<TOOLS extends UITools>(part: ToolUIPart<TOOLS>): keyof TOOLS;
- /**
- * Returns the name of the tool (static or dynamic).
- *
- * This function will not restrict the name to the keys of the tool set.
- * If you need to restrict the name to the keys of the tool set, use `getStaticToolName` instead.
- */
- declare function getToolName(part: ToolUIPart<UITools> | DynamicToolUIPart): string;
- /**
- * @deprecated Use getToolName instead.
- */
- declare const getToolOrDynamicToolName: typeof getToolName;
- type InferUIMessageMetadata<T extends UIMessage> = T extends UIMessage<infer METADATA> ? METADATA : unknown;
- type InferUIMessageData<T extends UIMessage> = T extends UIMessage<unknown, infer DATA_TYPES> ? DATA_TYPES : UIDataTypes;
- type InferUIMessageTools<T extends UIMessage> = T extends UIMessage<unknown, UIDataTypes, infer TOOLS> ? TOOLS : UITools;
- type InferUIMessageToolCall<UI_MESSAGE extends UIMessage> = ValueOf<{
- [NAME in keyof InferUIMessageTools<UI_MESSAGE>]: ToolCall<NAME & string, InferUIMessageTools<UI_MESSAGE>[NAME] extends {
- input: infer INPUT;
- } ? INPUT : never> & {
- dynamic?: false;
- };
- }> | (ToolCall<string, unknown> & {
- dynamic: true;
- });
-
- declare const uiMessageChunkSchema: _ai_sdk_provider_utils.LazySchema<{
- type: "text-start";
+ type: 'reasoning-start';
  id: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ providerMetadata?: ProviderMetadata;
  } | {
- type: "text-delta";
+ type: 'reasoning-delta';
  id: string;
  delta: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ providerMetadata?: ProviderMetadata;
  } | {
- type: "text-end";
+ type: 'reasoning-end';
  id: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ providerMetadata?: ProviderMetadata;
  } | {
- type: "error";
+ type: 'error';
  errorText: string;
  } | {
- type: "tool-input-start";
- toolCallId: string;
- toolName: string;
- providerExecuted?: boolean | undefined;
- dynamic?: boolean | undefined;
- title?: string | undefined;
- } | {
- type: "tool-input-delta";
- toolCallId: string;
- inputTextDelta: string;
- } | {
- type: "tool-input-available";
+ type: 'tool-input-available';
  toolCallId: string;
  toolName: string;
  input: unknown;
- providerExecuted?: boolean | undefined;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
- dynamic?: boolean | undefined;
- title?: string | undefined;
+ providerExecuted?: boolean;
+ providerMetadata?: ProviderMetadata;
+ dynamic?: boolean;
+ title?: string;
  } | {
- type: "tool-input-error";
+ type: 'tool-input-error';
  toolCallId: string;
  toolName: string;
  input: unknown;
+ providerExecuted?: boolean;
+ providerMetadata?: ProviderMetadata;
+ dynamic?: boolean;
  errorText: string;
- providerExecuted?: boolean | undefined;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
- dynamic?: boolean | undefined;
- title?: string | undefined;
+ title?: string;
  } | {
- type: "tool-approval-request";
+ type: 'tool-approval-request';
  approvalId: string;
  toolCallId: string;
  } | {
- type: "tool-output-available";
+ type: 'tool-output-available';
  toolCallId: string;
  output: unknown;
- providerExecuted?: boolean | undefined;
- dynamic?: boolean | undefined;
- preliminary?: boolean | undefined;
+ providerExecuted?: boolean;
+ dynamic?: boolean;
+ preliminary?: boolean;
  } | {
- type: "tool-output-error";
+ type: 'tool-output-error';
  toolCallId: string;
  errorText: string;
- providerExecuted?: boolean | undefined;
- dynamic?: boolean | undefined;
+ providerExecuted?: boolean;
+ dynamic?: boolean;
  } | {
- type: "tool-output-denied";
+ type: 'tool-output-denied';
  toolCallId: string;
  } | {
- type: "reasoning-start";
- id: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
- } | {
- type: "reasoning-delta";
- id: string;
- delta: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ type: 'tool-input-start';
+ toolCallId: string;
+ toolName: string;
+ providerExecuted?: boolean;
+ dynamic?: boolean;
+ title?: string;
  } | {
- type: "reasoning-end";
- id: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ type: 'tool-input-delta';
+ toolCallId: string;
+ inputTextDelta: string;
  } | {
- type: "source-url";
+ type: 'source-url';
  sourceId: string;
  url: string;
- title?: string | undefined;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ title?: string;
+ providerMetadata?: ProviderMetadata;
  } | {
- type: "source-document";
+ type: 'source-document';
  sourceId: string;
  mediaType: string;
  title: string;
- filename?: string | undefined;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
+ filename?: string;
+ providerMetadata?: ProviderMetadata;
  } | {
- type: "file";
+ type: 'file';
  url: string;
  mediaType: string;
- providerMetadata?: _ai_sdk_provider.SharedV3ProviderMetadata | undefined;
- } | {
- type: `data-${string}`;
- data: unknown;
- id?: string | undefined;
- transient?: boolean | undefined;
- } | {
- type: "start-step";
+ providerMetadata?: ProviderMetadata;
+ } | DataUIMessageChunk<DATA_TYPES> | {
+ type: 'start-step';
  } | {
- type: "finish-step";
+ type: 'finish-step';
  } | {
- type: "start";
- messageId?: string | undefined;
- messageMetadata?: unknown;
+ type: 'start';
+ messageId?: string;
+ messageMetadata?: METADATA;
  } | {
- type: "finish";
- finishReason?: "length" | "error" | "stop" | "content-filter" | "tool-calls" | "other" | undefined;
- messageMetadata?: unknown;
+ type: 'finish';
+ finishReason?: FinishReason;
+ messageMetadata?: METADATA;
  } | {
- type: "abort";
- reason?: string | undefined;
+ type: 'abort';
+ reason?: string;
  } | {
- type: "message-metadata";
- messageMetadata: unknown;
- }>;
- type DataUIMessageChunk<DATA_TYPES extends UIDataTypes> = ValueOf<{
- [NAME in keyof DATA_TYPES & string]: {
- type: `data-${NAME}`;
- id?: string;
- data: DATA_TYPES[NAME];
- transient?: boolean;
- };
- }>;
- type UIMessageChunk<METADATA = unknown, DATA_TYPES extends UIDataTypes = UIDataTypes> = {
+ type: 'message-metadata';
+ messageMetadata: METADATA;
+ };
+ type InferUIMessageChunk<T extends UIMessage> = UIMessageChunk<InferUIMessageMetadata<T>, InferUIMessageData<T>>;
+
+ type UIMessageStreamOnFinishCallback<UI_MESSAGE extends UIMessage> = (event: {
+ /**
+ * The updated list of UI messages.
+ */
+ messages: UI_MESSAGE[];
+ /**
+ * Indicates whether the response message is a continuation of the last original message,
+ * or if a new message was created.
+ */
+ isContinuation: boolean;
+ /**
+ * Indicates whether the stream was aborted.
+ */
+ isAborted: boolean;
+ /**
+ * The message that was sent to the client as a response
+ * (including the original message if it was extended).
+ */
+ responseMessage: UI_MESSAGE;
+ /**
+ * The reason why the generation finished.
+ */
+ finishReason?: FinishReason;
+ }) => PromiseLike<void> | void;
+
+ type UIMessageStreamResponseInit = ResponseInit & {
+ consumeSseStream?: (options: {
+ stream: ReadableStream<string>;
+ }) => PromiseLike<void> | void;
+ };
+
+ /**
+ * A type that combines AsyncIterable and ReadableStream.
+ * This allows a ReadableStream to be consumed using for-await-of syntax.
+ */
+ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
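
Because an `AsyncIterableStream` is both an `AsyncIterable` and a `ReadableStream`, it can be drained with `for await`; a minimal sketch:

    import type { AsyncIterableStream } from 'ai';

    async function collect(stream: AsyncIterableStream<string>): Promise<string> {
      let result = '';
      for await (const chunk of stream) {
        result += chunk;
      }
      return result;
    }
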
+
+ type ErrorHandler = (error: unknown) => void;
+
+ type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
+ /**
+ * The original messages. If they are provided, persistence mode is assumed,
+ * and a message ID is provided for the response message.
+ */
+ originalMessages?: UI_MESSAGE[];
+ /**
+ * Generate a message ID for the response message.
+ *
+ * If not provided, no message ID will be set for the response message (unless
+ * the original messages are provided and the last message is an assistant message).
+ */
+ generateMessageId?: IdGenerator;
+ onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
+ /**
+ * Extracts message metadata that will be sent to the client.
+ *
+ * Called on `start` and `finish` events.
+ */
+ messageMetadata?: (options: {
+ part: TextStreamPart<ToolSet>;
+ }) => InferUIMessageMetadata<UI_MESSAGE> | undefined;
+ /**
+ * Send reasoning parts to the client.
+ * Defaults to true.
+ */
+ sendReasoning?: boolean;
+ /**
+ * Send source parts to the client.
+ * Defaults to false.
+ */
+ sendSources?: boolean;
+ /**
+ * Send the finish event to the client.
+ * Set to false if you are using additional streamText calls
+ * that send additional data.
+ * Defaults to true.
+ */
+ sendFinish?: boolean;
+ /**
+ * Send the message start event to the client.
+ * Set to false if you are using additional streamText calls
+ * and the message start event has already been sent.
+ * Defaults to true.
+ */
+ sendStart?: boolean;
+ /**
+ * Process an error, e.g. to log it. Defaults to `() => 'An error occurred.'`.
+ *
+ * @return error message to include in the data stream.
+ */
+ onError?: (error: unknown) => string;
+ };
+ type ConsumeStreamOptions = {
+ onError?: ErrorHandler;
+ };
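
A sketch of an options object for the fields declared above (the error mapping is illustrative):

    import type { UIMessage, UIMessageStreamOptions } from 'ai';

    const options: UIMessageStreamOptions<UIMessage> = {
      sendReasoning: true, // stream reasoning parts to the client
      sendSources: false,  // keep source parts server-side
      onError: error =>
        error instanceof Error ? error.message : 'An error occurred.',
    };
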
+ /**
+ A result object for accessing different stream types and additional information.
+ */
+ interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
+ /**
+ The content that was generated in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
+ /**
+ The full text that has been generated by the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly text: PromiseLike<string>;
+ /**
+ The full reasoning that the model has generated.
+
+ Automatically consumes the stream.
+ */
+ readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
+ /**
+ The reasoning that has been generated by the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly reasoningText: PromiseLike<string | undefined>;
+ /**
+ Files that have been generated by the model in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly files: PromiseLike<GeneratedFile[]>;
+ /**
+ Sources that have been used as references in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly sources: PromiseLike<Source[]>;
+ /**
+ The tool calls that have been executed in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
+ /**
+ The static tool calls that have been executed in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
+ /**
+ The dynamic tool calls that have been executed in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
+ /**
+ The static tool results that have been generated in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
+ /**
+ The dynamic tool results that have been generated in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
+ /**
+ The tool results that have been generated in the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
+ /**
+ * The unified finish reason why the generation finished. Taken from the last step.
+ *
+ * Automatically consumes the stream.
+ */
+ readonly finishReason: PromiseLike<FinishReason>;
+ /**
+ * The raw reason why the generation finished (from the provider). Taken from the last step.
+ *
+ * Automatically consumes the stream.
+ */
+ readonly rawFinishReason: PromiseLike<string | undefined>;
+ /**
+ The token usage of the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly usage: PromiseLike<LanguageModelUsage>;
+ /**
+ The total token usage of the generated response.
+ When there are multiple steps, the usage is the sum of all step usages.
+
+ Automatically consumes the stream.
+ */
+ readonly totalUsage: PromiseLike<LanguageModelUsage>;
+ /**
+ Warnings from the model provider (e.g. unsupported settings) for the first step.
+
+ Automatically consumes the stream.
+ */
+ readonly warnings: PromiseLike<CallWarning[] | undefined>;
+ /**
+ Details for all steps.
+ You can use this to get information about intermediate steps,
+ such as the tool calls or the response headers.
+
+ Automatically consumes the stream.
+ */
+ readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
+ /**
+ Additional request information from the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly request: PromiseLike<LanguageModelRequestMetadata>;
+ /**
+ Additional response information from the last step.
+
+ Automatically consumes the stream.
+ */
+ readonly response: PromiseLike<LanguageModelResponseMetadata & {
+ /**
+ The response messages that were generated during the call. It consists of an assistant message,
+ potentially containing tool calls.
+
+ When there are tool results, there is an additional tool message with the tool results that are available.
+ If there are tools that do not have execute functions, they are not included in the tool results and
+ need to be added separately.
+ */
+ messages: Array<ResponseMessage>;
+ }>;
+ /**
+ Additional provider-specific metadata from the last step.
+ Metadata is passed through from the provider to the AI SDK and
+ enables provider-specific results that can be fully encapsulated in the provider.
+ */
+ readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
+ /**
+ A text stream that returns only the generated text deltas. You can use it
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
+ stream will throw the error.
+ */
+ readonly textStream: AsyncIterableStream<string>;
+ /**
+ A stream with all events, including text deltas, tool calls, tool results, and
+ errors.
+ You can use it as either an AsyncIterable or a ReadableStream.
+ Only errors that stop the stream, such as network errors, are thrown.
+ */
+ readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
+ /**
+ * A stream of partial outputs. It uses the `output` specification.
+ *
+ * @deprecated Use `partialOutputStream` instead.
+ */
+ readonly experimental_partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
+ /**
+ * A stream of partial parsed outputs. It uses the `output` specification.
+ */
+ readonly partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
+ /**
+ * A stream of individual array elements as they complete.
+ * Only available when using `output: Output.array()`.
+ */
+ readonly elementStream: AsyncIterableStream<InferElementOutput<OUTPUT>>;
+ /**
+ * The complete parsed output. It uses the `output` specification.
+ */
+ readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
+ /**
+ Consumes the stream without processing the parts.
+ This is useful to force the stream to finish.
+ It effectively removes the backpressure and allows the stream to finish,
+ triggering the `onFinish` callback and the promise resolution.
+
+ If an error occurs, it is passed to the optional `onError` callback.
+ */
+ consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
+ /**
+ Converts the result to a UI message stream.
+
+ @return A UI message stream.
+ */
+ toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
+ /**
+ * Writes UI message stream output to a Node.js response-like object.
+ */
+ pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
+ /**
+ Writes text delta output to a Node.js response-like object.
+ It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+ writes each text delta as a separate chunk.
+
+ @param response A Node.js response-like object (ServerResponse).
+ @param init Optional headers, status code, and status text.
+ */
+ pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
+ /**
+ Converts the result to a streamed response object with a stream data part stream.
+
+ @return A response object.
+ */
+ toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
+ /**
+ Creates a simple text stream response.
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
+ Non-text-delta events are ignored.
+ @param init Optional headers, status code, and status text.
+ */
+ toTextStreamResponse(init?: ResponseInit): Response;
+ }
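
A sketch of consuming a `StreamTextResult`, assuming `streamText` from this package produces one (prompt and model are placeholders):

    import { streamText, type LanguageModel } from 'ai';

    async function run(model: LanguageModel) {
      const result = streamText({ model, prompt: 'Write a haiku about diffs.' });
      // Stream deltas as they arrive...
      for await (const delta of result.textStream) {
        process.stdout.write(delta);
      }
      // ...then read the aggregated promises (these consume the stream too).
      console.log('\nfinish reason:', await result.finishReason);
      console.log('total usage:', await result.totalUsage);
    }
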
+ type TextStreamPart<TOOLS extends ToolSet> = {
2024
2196
  type: 'text-start';
2025
2197
  id: string;
2026
2198
  providerMetadata?: ProviderMetadata;
2027
- } | {
2028
- type: 'text-delta';
2029
- delta: string;
2030
- id: string;
2031
- providerMetadata?: ProviderMetadata;
2032
2199
  } | {
2033
2200
  type: 'text-end';
2034
2201
  id: string;
2035
2202
  providerMetadata?: ProviderMetadata;
2036
2203
  } | {
2037
- type: 'reasoning-start';
2204
+ type: 'text-delta';
2038
2205
  id: string;
2039
2206
  providerMetadata?: ProviderMetadata;
2207
+ text: string;
2040
2208
  } | {
2041
- type: 'reasoning-delta';
2209
+ type: 'reasoning-start';
2042
2210
  id: string;
2043
- delta: string;
2044
2211
  providerMetadata?: ProviderMetadata;
2045
2212
  } | {
2046
2213
  type: 'reasoning-end';
2047
2214
  id: string;
2048
2215
  providerMetadata?: ProviderMetadata;
2049
2216
  } | {
2050
- type: 'error';
2051
- errorText: string;
2052
- } | {
2053
- type: 'tool-input-available';
2054
- toolCallId: string;
2055
- toolName: string;
2056
- input: unknown;
2057
- providerExecuted?: boolean;
2058
- providerMetadata?: ProviderMetadata;
2059
- dynamic?: boolean;
2060
- title?: string;
2061
- } | {
2062
- type: 'tool-input-error';
2063
- toolCallId: string;
2064
- toolName: string;
2065
- input: unknown;
2066
- providerExecuted?: boolean;
2217
+ type: 'reasoning-delta';
2067
2218
  providerMetadata?: ProviderMetadata;
2068
- dynamic?: boolean;
2069
- errorText: string;
2070
- title?: string;
2071
- } | {
2072
- type: 'tool-approval-request';
2073
- approvalId: string;
2074
- toolCallId: string;
2075
- } | {
2076
- type: 'tool-output-available';
2077
- toolCallId: string;
2078
- output: unknown;
2079
- providerExecuted?: boolean;
2080
- dynamic?: boolean;
2081
- preliminary?: boolean;
2082
- } | {
2083
- type: 'tool-output-error';
2084
- toolCallId: string;
2085
- errorText: string;
2086
- providerExecuted?: boolean;
2087
- dynamic?: boolean;
2088
- } | {
2089
- type: 'tool-output-denied';
2090
- toolCallId: string;
2219
+ id: string;
2220
+ text: string;
2091
2221
  } | {
2092
2222
  type: 'tool-input-start';
2093
- toolCallId: string;
2223
+ id: string;
2094
2224
  toolName: string;
2225
+ providerMetadata?: ProviderMetadata;
2095
2226
  providerExecuted?: boolean;
2096
2227
  dynamic?: boolean;
2097
2228
  title?: string;
2098
2229
  } | {
2099
- type: 'tool-input-delta';
2100
- toolCallId: string;
2101
- inputTextDelta: string;
2102
- } | {
2103
- type: 'source-url';
2104
- sourceId: string;
2105
- url: string;
2106
- title?: string;
2230
+ type: 'tool-input-end';
2231
+ id: string;
2107
2232
  providerMetadata?: ProviderMetadata;
2108
2233
  } | {
2109
- type: 'source-document';
2110
- sourceId: string;
2111
- mediaType: string;
2112
- title: string;
2113
- filename?: string;
2234
+ type: 'tool-input-delta';
2235
+ id: string;
2236
+ delta: string;
2114
2237
  providerMetadata?: ProviderMetadata;
2115
- } | {
2238
+ } | ({
2239
+ type: 'source';
2240
+ } & Source) | {
2116
2241
  type: 'file';
2117
- url: string;
2118
- mediaType: string;
2119
- providerMetadata?: ProviderMetadata;
2120
- } | DataUIMessageChunk<DATA_TYPES> | {
2242
+ file: GeneratedFile;
2243
+ } | ({
2244
+ type: 'tool-call';
2245
+ } & TypedToolCall<TOOLS>) | ({
2246
+ type: 'tool-result';
2247
+ } & TypedToolResult<TOOLS>) | ({
2248
+ type: 'tool-error';
2249
+ } & TypedToolError<TOOLS>) | ({
2250
+ type: 'tool-output-denied';
2251
+ } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
2121
2252
  type: 'start-step';
2253
+ request: LanguageModelRequestMetadata;
2254
+ warnings: CallWarning[];
2122
2255
  } | {
2123
2256
  type: 'finish-step';
2257
+ response: LanguageModelResponseMetadata;
2258
+ usage: LanguageModelUsage;
2259
+ finishReason: FinishReason;
2260
+ rawFinishReason: string | undefined;
2261
+ providerMetadata: ProviderMetadata | undefined;
2124
2262
  } | {
2125
2263
  type: 'start';
2126
- messageId?: string;
2127
- messageMetadata?: METADATA;
2128
2264
  } | {
2129
2265
  type: 'finish';
2130
- finishReason?: FinishReason;
2131
- messageMetadata?: METADATA;
2266
+ finishReason: FinishReason;
2267
+ rawFinishReason: string | undefined;
2268
+ totalUsage: LanguageModelUsage;
2132
2269
  } | {
2133
2270
  type: 'abort';
2134
2271
  reason?: string;
2135
2272
  } | {
2136
- type: 'message-metadata';
2137
- messageMetadata: METADATA;
2273
+ type: 'error';
2274
+ error: unknown;
2275
+ } | {
2276
+ type: 'raw';
2277
+ rawValue: unknown;
2138
2278
  };
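
A sketch of consuming the reshaped union above via `fullStream`. Note that in this version text and reasoning deltas carry a `text` field plus an `id` (the old `delta` field is removed). The model id is a placeholder:

import { streamText } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // placeholder
  prompt: 'Tell me a short story.',
});

for await (const part of result.fullStream) {
  switch (part.type) {
    case 'text-delta':
      process.stdout.write(part.text); // `text` (with an `id`), not `delta`
      break;
    case 'reasoning-delta':
      break; // same { id, text } shape as text deltas
    case 'finish':
      console.log('\nfinished:', part.finishReason, part.totalUsage);
      break;
    case 'error':
      console.error(part.error);
      break;
  }
}
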
2139
- type InferUIMessageChunk<T extends UIMessage> = UIMessageChunk<InferUIMessageMetadata<T>, InferUIMessageData<T>>;
2140
2279
 
2141
- type UIMessageStreamOnFinishCallback<UI_MESSAGE extends UIMessage> = (event: {
2142
- /**
2143
- * The updated list of UI messages.
2144
- */
2145
- messages: UI_MESSAGE[];
2280
+ /**
2281
+ A transformation that is applied to the stream.
2282
+
2283
+ @param stopStream - A function that stops the source stream.
2284
+ @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
2285
+ */
2286
+ type StreamTextTransform<TOOLS extends ToolSet> = (options: {
2287
+ tools: TOOLS;
2288
+ stopStream: () => void;
2289
+ }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
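
A sketch of a transform matching the type above: it upper-cases text deltas and passes every other part through unchanged. It assumes `StreamTextTransform` and `ToolSet` are exported under these names; `stopStream` (unused here) could end the source stream early:

import { streamText, type StreamTextTransform, type ToolSet } from 'ai';

const upperCase = <TOOLS extends ToolSet>(): StreamTextTransform<TOOLS> =>
  ({ stopStream }) =>
    new TransformStream({
      transform(part, controller) {
        controller.enqueue(
          part.type === 'text-delta'
            ? { ...part, text: part.text.toUpperCase() }
            : part,
        );
      },
    });

const result = streamText({
  model: 'openai/gpt-4o', // placeholder
  prompt: 'Hello!',
  experimental_transform: upperCase(),
});
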
2290
+ /**
2291
+ Callback that is set using the `onError` option.
2292
+
2293
+ @param event - The event that is passed to the callback.
2294
+ */
2295
+ type StreamTextOnErrorCallback = (event: {
2296
+ error: unknown;
2297
+ }) => PromiseLike<void> | void;
2298
+ /**
2299
+ Callback that is set using the `onStepFinish` option.
2300
+
2301
+ @param stepResult - The result of the step.
2302
+ */
2303
+ type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
2304
+ /**
2305
+ Callback that is set using the `onChunk` option.
2306
+
2307
+ @param event - The event that is passed to the callback.
2308
+ */
2309
+ type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2310
+ chunk: Extract<TextStreamPart<TOOLS>, {
2311
+ type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
2312
+ }>;
2313
+ }) => PromiseLike<void> | void;
2314
+ /**
2315
+ Callback that is set using the `onFinish` option.
2316
+
2317
+ @param event - The event that is passed to the callback.
2318
+ */
2319
+ type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
2146
2320
  /**
2147
- * Indicates whether the response message is a continuation of the last original message,
2148
- * or if a new message was created.
2321
+ * Details for all steps.
2149
2322
  */
2150
- isContinuation: boolean;
2323
+ readonly steps: StepResult<TOOLS>[];
2151
2324
  /**
2152
- * Indicates whether the stream was aborted.
2325
+ * Total usage for all steps. This is the sum of the usage of all steps.
2153
2326
  */
2154
- isAborted: boolean;
2327
+ readonly totalUsage: LanguageModelUsage;
2155
2328
  /**
2156
- * The message that was sent to the client as a response
2157
- * (including the original message if it was extended).
2329
+ * Context that is passed into tool execution.
2330
+ *
2331
+ * Experimental (can break in patch releases).
2332
+ *
2333
+ * @default undefined
2158
2334
  */
2159
- responseMessage: UI_MESSAGE;
2335
+ experimental_context: unknown;
2336
+ }) => PromiseLike<void> | void;
2337
+ /**
2338
+ Callback that is set using the `onAbort` option.
2339
+
2340
+ @param event - The event that is passed to the callback.
2341
+ */
2342
+ type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
2160
2343
  /**
2161
- * The reason why the generation finished.
2344
+ Details for all previously finished steps.
2162
2345
  */
2163
- finishReason?: FinishReason;
2346
+ readonly steps: StepResult<TOOLS>[];
2164
2347
  }) => PromiseLike<void> | void;
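
The callback types above wire in as plain options. A sketch (placeholder model id; `consumeStream()` is assumed to still exist on the result, since this diff moves the result interface rather than deleting it):

import { streamText } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // placeholder
  prompt: 'Summarize the plot of Hamlet.',
  onChunk({ chunk }) {
    if (chunk.type === 'text-delta') process.stdout.write(chunk.text);
  },
  onError({ error }) {
    console.error('stream error:', error);
  },
  onStepFinish(step) {
    console.log('step finished:', step.finishReason);
  },
  onFinish({ steps, totalUsage }) {
    console.log(`done after ${steps.length} step(s)`, totalUsage);
  },
  onAbort({ steps }) {
    console.log('aborted after', steps.length, 'step(s)');
  },
});

// Drive the stream so the callbacks fire.
await result.consumeStream();
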
2348
+ /**
2349
+ Generate text and call tools for a given prompt using a language model.
2165
2350
 
2166
- type UIMessageStreamResponseInit = ResponseInit & {
2167
- consumeSseStream?: (options: {
2168
- stream: ReadableStream<string>;
2169
- }) => PromiseLike<void> | void;
2170
- };
2351
+ This function streams the output. If you do not want to stream the output, use `generateText` instead.
2171
2352
 
2172
- /**
2173
- * A type that combines AsyncIterable and ReadableStream.
2174
- * This allows a ReadableStream to be consumed using for-await-of syntax.
2175
- */
2176
- type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
2353
+ @param model - The language model to use.
2354
+ @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2177
2355
 
2178
- type ErrorHandler = (error: unknown) => void;
2356
+ @param system - A system message that will be part of the prompt.
2357
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2358
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2179
2359
 
2180
- type UIMessageStreamOptions<UI_MESSAGE extends UIMessage> = {
2181
- /**
2182
- * The original messages. If they are provided, persistence mode is assumed,
2183
- * and a message ID is provided for the response message.
2184
- */
2185
- originalMessages?: UI_MESSAGE[];
2186
- /**
2187
- * Generate a message ID for the response message.
2188
- *
2189
- * If not provided, no message ID will be set for the response message (unless
2190
- * the original messages are provided and the last message is an assistant message).
2191
- */
2192
- generateMessageId?: IdGenerator;
2193
- onFinish?: UIMessageStreamOnFinishCallback<UI_MESSAGE>;
2360
+ @param maxOutputTokens - Maximum number of tokens to generate.
2361
+ @param temperature - Temperature setting.
2362
+ The value is passed through to the provider. The range depends on the provider and model.
2363
+ It is recommended to set either `temperature` or `topP`, but not both.
2364
+ @param topP - Nucleus sampling.
2365
+ The value is passed through to the provider. The range depends on the provider and model.
2366
+ It is recommended to set either `temperature` or `topP`, but not both.
2367
+ @param topK - Only sample from the top K options for each subsequent token.
2368
+ Used to remove "long tail" low probability responses.
2369
+ Recommended for advanced use cases only. You usually only need to use temperature.
2370
+ @param presencePenalty - Presence penalty setting.
2371
+ It affects how likely the model is to repeat information that is already in the prompt.
2372
+ The value is passed through to the provider. The range depends on the provider and model.
2373
+ @param frequencyPenalty - Frequency penalty setting.
2374
+ It affects how likely the model is to repeatedly use the same words or phrases.
2375
+ The value is passed through to the provider. The range depends on the provider and model.
2376
+ @param stopSequences - Stop sequences.
2377
+ If set, the model will stop generating text when one of the stop sequences is generated.
2378
+ @param seed - The seed (integer) to use for random sampling.
2379
+ If set and supported by the model, calls will generate deterministic results.
2380
+
2381
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2382
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
2383
+ @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
2384
+ @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2385
+
2386
+ @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2387
+ @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
2388
+ @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2389
+ @param onFinish - Callback that is called when all steps are finished and the response is complete.
2390
+
2391
+ @return
2392
+ A result object for accessing different stream types and additional information.
2393
+ */
2394
+ declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string, never>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
2194
2395
  /**
2195
- * Extracts message metadata that will be send to the client.
2196
- *
2197
- * Called on `start` and `finish` events.
2396
+ The language model to use.
2198
2397
  */
2199
- messageMetadata?: (options: {
2200
- part: TextStreamPart<ToolSet>;
2201
- }) => InferUIMessageMetadata<UI_MESSAGE> | undefined;
2398
+ model: LanguageModel;
2202
2399
  /**
2203
- * Send reasoning parts to the client.
2204
- * Default to true.
2205
- */
2206
- sendReasoning?: boolean;
2400
+ The tools that the model can call. The model needs to support calling tools.
2401
+ */
2402
+ tools?: TOOLS;
2207
2403
  /**
2208
- * Send source parts to the client.
2209
- * Default to false.
2404
+ The tool choice strategy. Default: 'auto'.
2210
2405
  */
2211
- sendSources?: boolean;
2406
+ toolChoice?: ToolChoice<TOOLS>;
2212
2407
  /**
2213
- * Send the finish event to the client.
2214
- * Set to false if you are using additional streamText calls
2215
- * that send additional data.
2216
- * Default to true.
2408
+ Condition for stopping the generation when there are tool results in the last step.
2409
+ When the condition is an array, generation stops as soon as any one of the conditions is met.
2410
+
2411
+ @default stepCountIs(1)
2217
2412
  */
2218
- sendFinish?: boolean;
2413
+ stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
2219
2414
  /**
2220
- * Send the message start event to the client.
2221
- * Set to false if you are using additional streamText calls
2222
- * and the message start event has already been sent.
2223
- * Default to true.
2415
+ Optional telemetry configuration (experimental).
2224
2416
  */
2225
- sendStart?: boolean;
2417
+ experimental_telemetry?: TelemetrySettings;
2226
2418
  /**
2227
- * Process an error, e.g. to log it. Default to `() => 'An error occurred.'`.
2228
- *
2229
- * @return error message to include in the data stream.
2230
- */
2231
- onError?: (error: unknown) => string;
2232
- };
2233
- type ConsumeStreamOptions = {
2234
- onError?: ErrorHandler;
2235
- };
2236
- /**
2237
- A result object for accessing different stream types and additional information.
2419
+ Additional provider-specific options. They are passed through
2420
+ to the provider from the AI SDK and enable provider-specific
2421
+ functionality that can be fully encapsulated in the provider.
2238
2422
  */
2239
- interface StreamTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2423
+ providerOptions?: ProviderOptions;
2240
2424
  /**
2241
- The content that was generated in the last step.
2242
-
2243
- Automatically consumes the stream.
2425
+ * @deprecated Use `activeTools` instead.
2244
2426
  */
2245
- readonly content: PromiseLike<Array<ContentPart<TOOLS>>>;
2427
+ experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2246
2428
  /**
2247
- The full text that has been generated by the last step.
2248
-
2249
- Automatically consumes the stream.
2250
- */
2251
- readonly text: PromiseLike<string>;
2429
+ Limits the tools that are available for the model to call without
2430
+ changing the tool call and result types in the result.
2431
+ */
2432
+ activeTools?: Array<keyof NoInfer<TOOLS>>;
2252
2433
  /**
2253
- The full reasoning that the model has generated.
2254
-
2255
- Automatically consumes the stream.
2434
+ Optional specification for parsing structured outputs from the LLM response.
2256
2435
  */
2257
- readonly reasoning: PromiseLike<Array<ReasoningOutput>>;
2436
+ output?: OUTPUT;
2258
2437
  /**
2259
- The reasoning that has been generated by the last step.
2260
-
2261
- Automatically consumes the stream.
2262
- */
2263
- readonly reasoningText: PromiseLike<string | undefined>;
2438
+ Optional specification for parsing structured outputs from the LLM response.
2439
+
2440
+ @deprecated Use `output` instead.
2441
+ */
2442
+ experimental_output?: OUTPUT;
2264
2443
  /**
2265
- Files that have been generated by the model in the last step.
2266
-
2267
- Automatically consumes the stream.
2268
- */
2269
- readonly files: PromiseLike<GeneratedFile[]>;
2444
+ Optional function that you can use to provide different settings for a step.
2445
+
2446
+ @param options - The options for the step.
2447
+ @param options.steps - The steps that have been executed so far.
2448
+ @param options.stepNumber - The number of the step that is being executed.
2449
+ @param options.model - The model that is being used.
2450
+
2451
+ @returns An object that contains the settings for the step.
2452
+ If you return undefined (or leave individual settings undefined), the settings from the outer level will be used.
2453
+ */
2454
+ prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2270
2455
  /**
2271
- Sources that have been used as references in the last step.
2272
-
2273
- Automatically consumes the stream.
2456
+ A function that attempts to repair a tool call that failed to parse.
2274
2457
  */
2275
- readonly sources: PromiseLike<Source[]>;
2276
- /**
2277
- The tool calls that have been executed in the last step.
2278
-
2279
- Automatically consumes the stream.
2280
- */
2281
- readonly toolCalls: PromiseLike<TypedToolCall<TOOLS>[]>;
2282
- /**
2283
- The static tool calls that have been executed in the last step.
2284
-
2285
- Automatically consumes the stream.
2286
- */
2287
- readonly staticToolCalls: PromiseLike<StaticToolCall<TOOLS>[]>;
2288
- /**
2289
- The dynamic tool calls that have been executed in the last step.
2290
-
2291
- Automatically consumes the stream.
2292
- */
2293
- readonly dynamicToolCalls: PromiseLike<DynamicToolCall[]>;
2294
- /**
2295
- The static tool results that have been generated in the last step.
2296
-
2297
- Automatically consumes the stream.
2298
- */
2299
- readonly staticToolResults: PromiseLike<StaticToolResult<TOOLS>[]>;
2300
- /**
2301
- The dynamic tool results that have been generated in the last step.
2302
-
2303
- Automatically consumes the stream.
2304
- */
2305
- readonly dynamicToolResults: PromiseLike<DynamicToolResult[]>;
2458
+ experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2306
2459
  /**
2307
- The tool results that have been generated in the last step.
2308
-
2309
- Automatically consumes the stream.
2460
+ Optional stream transformations.
2461
+ They are applied in the order they are provided.
2462
+ The stream transformations must maintain the stream structure for streamText to work correctly.
2310
2463
  */
2311
- readonly toolResults: PromiseLike<TypedToolResult<TOOLS>[]>;
2464
+ experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2312
2465
  /**
2313
- * The unified finish reason why the generation finished. Taken from the last step.
2314
- *
2315
- * Automatically consumes the stream.
2466
+ Custom download function to use for URLs.
2467
+
2468
+ By default, files are downloaded if the model does not support the URL for the given media type.
2316
2469
  */
2317
- readonly finishReason: PromiseLike<FinishReason>;
2470
+ experimental_download?: DownloadFunction | undefined;
2318
2471
  /**
2319
- * The raw reason why the generation finished (from the provider). Taken from the last step.
2320
- *
2321
- * Automatically consumes the stream.
2472
+ Whether to include raw chunks from the provider in the stream.
2473
+ When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
2474
+ This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
2475
+ Defaults to false.
2322
2476
  */
2323
- readonly rawFinishReason: PromiseLike<string | undefined>;
2477
+ includeRawChunks?: boolean;
2324
2478
  /**
2325
- The token usage of the last step.
2326
-
2327
- Automatically consumes the stream.
2479
+ Callback that is called for each chunk of the stream.
2480
+ The stream processing will pause until the callback promise is resolved.
2328
2481
  */
2329
- readonly usage: PromiseLike<LanguageModelUsage>;
2330
- /**
2331
- The total token usage of the generated response.
2332
- When there are multiple steps, the usage is the sum of all step usages.
2333
-
2334
- Automatically consumes the stream.
2335
- */
2336
- readonly totalUsage: PromiseLike<LanguageModelUsage>;
2337
- /**
2338
- Warnings from the model provider (e.g. unsupported settings) for the first step.
2339
-
2340
- Automatically consumes the stream.
2341
- */
2342
- readonly warnings: PromiseLike<CallWarning[] | undefined>;
2482
+ onChunk?: StreamTextOnChunkCallback<TOOLS>;
2343
2483
  /**
2344
- Details for all steps.
2345
- You can use this to get information about intermediate steps,
2346
- such as the tool calls or the response headers.
2347
-
2348
- Automatically consumes the stream.
2484
+ Callback that is invoked when an error occurs during streaming.
2485
+ You can use it to log errors.
2486
+ The stream processing will pause until the callback promise is resolved.
2349
2487
  */
2350
- readonly steps: PromiseLike<Array<StepResult<TOOLS>>>;
2351
- /**
2352
- Additional request information from the last step.
2353
-
2354
- Automatically consumes the stream.
2355
- */
2356
- readonly request: PromiseLike<LanguageModelRequestMetadata>;
2357
- /**
2358
- Additional response information from the last step.
2359
-
2360
- Automatically consumes the stream.
2361
- */
2362
- readonly response: PromiseLike<LanguageModelResponseMetadata & {
2363
- /**
2364
- The response messages that were generated during the call. It consists of an assistant message,
2365
- potentially containing tool calls.
2366
-
2367
- When there are tool results, there is an additional tool message with the tool results that are available.
2368
- If there are tools that do not have execute functions, they are not included in the tool results and
2369
- need to be added separately.
2370
- */
2371
- messages: Array<ResponseMessage>;
2372
- }>;
2488
+ onError?: StreamTextOnErrorCallback;
2373
2489
  /**
2374
- Additional provider-specific metadata from the last step.
2375
- Metadata is passed through from the provider to the AI SDK and
2376
- enables provider-specific results that can be fully encapsulated in the provider.
2490
+ Callback that is called when the LLM response and all request tool executions
2491
+ (for tools that have an `execute` function) are finished.
2492
+
2493
+ The usage is the combined usage of all steps.
2377
2494
  */
2378
- readonly providerMetadata: PromiseLike<ProviderMetadata | undefined>;
2379
- /**
2380
- A text stream that returns only the generated text deltas. You can use it
2381
- as either an AsyncIterable or a ReadableStream. When an error occurs, the
2382
- stream will throw the error.
2383
- */
2384
- readonly textStream: AsyncIterableStream<string>;
2495
+ onFinish?: StreamTextOnFinishCallback<TOOLS>;
2496
+ onAbort?: StreamTextOnAbortCallback<TOOLS>;
2385
2497
  /**
2386
- A stream with all events, including text deltas, tool calls, tool results, and
2387
- errors.
2388
- You can use it as either an AsyncIterable or a ReadableStream.
2389
- Only errors that stop the stream, such as network errors, are thrown.
2390
- */
2391
- readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
2498
+ Callback that is called when each step (LLM call) is finished, including intermediate steps.
2499
+ */
2500
+ onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
2392
2501
  /**
2393
- * A stream of partial outputs. It uses the `output` specification.
2502
+ * Context that is passed into tool execution.
2394
2503
  *
2395
- * @deprecated Use `partialOutputStream` instead.
2504
+ * Experimental (can break in patch releases).
2505
+ *
2506
+ * @default undefined
2396
2507
  */
2397
- readonly experimental_partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
2508
+ experimental_context?: unknown;
2398
2509
  /**
2399
- * A stream of partial parsed outputs. It uses the `output` specification.
2510
+ Internal. For test use only. May change without notice.
2400
2511
  */
2401
- readonly partialOutputStream: AsyncIterableStream<InferPartialOutput<OUTPUT>>;
2512
+ _internal?: {
2513
+ now?: () => number;
2514
+ generateId?: IdGenerator;
2515
+ };
2516
+ }): StreamTextResult<TOOLS, OUTPUT>;
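
A sketch of a multi-step tool-calling call against the signature above, assuming `zod` for the input schema; the model id and the weather stub are placeholders:

import { streamText, tool, stepCountIs } from 'ai';
import { z } from 'zod';

const result = streamText({
  model: 'openai/gpt-4o', // placeholder
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }), // stubbed result
    }),
  },
  stopWhen: stepCountIs(5), // allow up to 5 LLM steps for tool round-trips
  prompt: 'What is the weather in Berlin?',
});

for await (const text of result.textStream) {
  process.stdout.write(text);
}
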
2517
+ type EnrichedStreamPart<TOOLS extends ToolSet, PARTIAL_OUTPUT> = {
2518
+ part: TextStreamPart<TOOLS>;
2519
+ partialOutput: PARTIAL_OUTPUT | undefined;
2520
+ };
2521
+
2522
+ interface Output<OUTPUT = any, PARTIAL = any, ELEMENT = any> {
2402
2523
  /**
2403
- * The complete parsed output. It uses the `output` specification.
2524
+ * The name of the output mode.
2404
2525
  */
2405
- readonly output: PromiseLike<InferCompleteOutput<OUTPUT>>;
2406
- /**
2407
- Consumes the stream without processing the parts.
2408
- This is useful to force the stream to finish.
2409
- It effectively removes the backpressure and allows the stream to finish,
2410
- triggering the `onFinish` callback and the promise resolution.
2411
-
2412
- If an error occurs, it is passed to the optional `onError` callback.
2413
- */
2414
- consumeStream(options?: ConsumeStreamOptions): PromiseLike<void>;
2526
+ name: string;
2415
2527
  /**
2416
- Converts the result to a UI message stream.
2417
-
2418
- @return A UI message stream.
2419
- */
2420
- toUIMessageStream<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamOptions<UI_MESSAGE>): AsyncIterableStream<InferUIMessageChunk<UI_MESSAGE>>;
2421
- /**
2422
- *Writes UI message stream output to a Node.js response-like object.
2528
+ * The response format to use for the model.
2423
2529
  */
2424
- pipeUIMessageStreamToResponse<UI_MESSAGE extends UIMessage>(response: ServerResponse, options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): void;
2530
+ responseFormat: PromiseLike<LanguageModelV3CallOptions['responseFormat']>;
2425
2531
  /**
2426
- Writes text delta output to a Node.js response-like object.
2427
- It sets a `Content-Type` header to `text/plain; charset=utf-8` and
2428
- writes each text delta as a separate chunk.
2429
-
2430
- @param response A Node.js response-like object (ServerResponse).
2431
- @param init Optional headers, status code, and status text.
2432
- */
2433
- pipeTextStreamToResponse(response: ServerResponse, init?: ResponseInit): void;
2532
+ * Parses the complete output of the model.
2533
+ */
2534
+ parseCompleteOutput(options: {
2535
+ text: string;
2536
+ }, context: {
2537
+ response: LanguageModelResponseMetadata;
2538
+ usage: LanguageModelUsage;
2539
+ finishReason: FinishReason;
2540
+ }): Promise<OUTPUT>;
2434
2541
  /**
2435
- Converts the result to a streamed response object with a stream data part stream.
2436
-
2437
- @return A response object.
2438
- */
2439
- toUIMessageStreamResponse<UI_MESSAGE extends UIMessage>(options?: UIMessageStreamResponseInit & UIMessageStreamOptions<UI_MESSAGE>): Response;
2542
+ * Parses the partial output of the model.
2543
+ */
2544
+ parsePartialOutput(options: {
2545
+ text: string;
2546
+ }): Promise<{
2547
+ partial: PARTIAL;
2548
+ } | undefined>;
2440
2549
  /**
2441
- Creates a simple text stream response.
2442
- Each text delta is encoded as UTF-8 and sent as a separate chunk.
2443
- Non-text-delta events are ignored.
2444
- @param init Optional headers, status code, and status text.
2445
- */
2446
- toTextStreamResponse(init?: ResponseInit): Response;
2550
+ * Creates a stream transform that emits individual elements as they complete.
2551
+ */
2552
+ createElementStreamTransform(): TransformStream<EnrichedStreamPart<any, PARTIAL>, ELEMENT> | undefined;
2447
2553
  }
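
For orientation, a hypothetical implementation of the `Output` interface above: a plain-text mode that trims the final output. The `{ type: 'text' }` response format shape is an assumption, as is the `Output.Output` access path (assuming the `output` namespace declared later in this file is re-exported as `Output`, as in earlier SDK versions):

import { Output } from 'ai';

// Hypothetical custom output mode: plain text, trimmed on completion.
const trimmedText: Output.Output<string, string, never> = {
  name: 'trimmed-text',
  responseFormat: Promise.resolve({ type: 'text' }), // assumed shape
  async parseCompleteOutput({ text }) {
    return text.trim();
  },
  async parsePartialOutput({ text }) {
    return { partial: text };
  },
  createElementStreamTransform() {
    return undefined; // text output exposes no element stream
  },
};
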
2448
- type TextStreamPart<TOOLS extends ToolSet> = {
2449
- type: 'text-start';
2450
- id: string;
2451
- providerMetadata?: ProviderMetadata;
2452
- } | {
2453
- type: 'text-end';
2454
- id: string;
2455
- providerMetadata?: ProviderMetadata;
2456
- } | {
2457
- type: 'text-delta';
2458
- id: string;
2459
- providerMetadata?: ProviderMetadata;
2460
- text: string;
2461
- } | {
2462
- type: 'reasoning-start';
2463
- id: string;
2464
- providerMetadata?: ProviderMetadata;
2465
- } | {
2466
- type: 'reasoning-end';
2467
- id: string;
2468
- providerMetadata?: ProviderMetadata;
2469
- } | {
2470
- type: 'reasoning-delta';
2471
- providerMetadata?: ProviderMetadata;
2472
- id: string;
2473
- text: string;
2474
- } | {
2475
- type: 'tool-input-start';
2476
- id: string;
2477
- toolName: string;
2478
- providerMetadata?: ProviderMetadata;
2479
- providerExecuted?: boolean;
2480
- dynamic?: boolean;
2481
- title?: string;
2482
- } | {
2483
- type: 'tool-input-end';
2484
- id: string;
2485
- providerMetadata?: ProviderMetadata;
2486
- } | {
2487
- type: 'tool-input-delta';
2488
- id: string;
2489
- delta: string;
2490
- providerMetadata?: ProviderMetadata;
2491
- } | ({
2492
- type: 'source';
2493
- } & Source) | {
2494
- type: 'file';
2495
- file: GeneratedFile;
2496
- } | ({
2497
- type: 'tool-call';
2498
- } & TypedToolCall<TOOLS>) | ({
2499
- type: 'tool-result';
2500
- } & TypedToolResult<TOOLS>) | ({
2501
- type: 'tool-error';
2502
- } & TypedToolError<TOOLS>) | ({
2503
- type: 'tool-output-denied';
2504
- } & StaticToolOutputDenied<TOOLS>) | ToolApprovalRequestOutput<TOOLS> | {
2505
- type: 'start-step';
2506
- request: LanguageModelRequestMetadata;
2507
- warnings: CallWarning[];
2508
- } | {
2509
- type: 'finish-step';
2510
- response: LanguageModelResponseMetadata;
2511
- usage: LanguageModelUsage;
2512
- finishReason: FinishReason;
2513
- rawFinishReason: string | undefined;
2514
- providerMetadata: ProviderMetadata | undefined;
2515
- } | {
2516
- type: 'start';
2517
- } | {
2518
- type: 'finish';
2519
- finishReason: FinishReason;
2520
- rawFinishReason: string | undefined;
2521
- totalUsage: LanguageModelUsage;
2522
- } | {
2523
- type: 'abort';
2524
- reason?: string;
2525
- } | {
2526
- type: 'error';
2527
- error: unknown;
2528
- } | {
2529
- type: 'raw';
2530
- rawValue: unknown;
2531
- };
2532
-
2533
- /**
2534
- A transformation that is applied to the stream.
2535
-
2536
- @param stopStream - A function that stops the source stream.
2537
- @param tools - The tools that are accessible to and can be called by the model. The model needs to support calling tools.
2538
- */
2539
- type StreamTextTransform<TOOLS extends ToolSet> = (options: {
2540
- tools: TOOLS;
2541
- stopStream: () => void;
2542
- }) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>;
2543
- /**
2544
- Callback that is set using the `onError` option.
2545
-
2546
- @param event - The event that is passed to the callback.
2547
- */
2548
- type StreamTextOnErrorCallback = (event: {
2549
- error: unknown;
2550
- }) => PromiseLike<void> | void;
2551
2554
  /**
2552
- Callback that is set using the `onStepFinish` option.
2553
-
2554
- @param stepResult - The result of the step.
2555
+ * Output specification for text generation.
2556
+ * This is the default output mode that generates plain text.
2557
+ *
2558
+ * @returns An output specification for generating text.
2555
2559
  */
2556
- type StreamTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => PromiseLike<void> | void;
2560
+ declare const text: () => Output<string, string, never>;
2557
2561
  /**
2558
- Callback that is set using the `onChunk` option.
2559
-
2560
- @param event - The event that is passed to the callback.
2562
+ * Output specification for typed object generation using schemas.
2563
+ * When the model generates a text response, it will return an object that matches the schema.
2564
+ *
2565
+ * @param schema - The schema of the object to generate.
2566
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2567
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2568
+ *
2569
+ * @returns An output specification for generating objects with the specified schema.
2561
2570
  */
2562
- type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
2563
- chunk: Extract<TextStreamPart<TOOLS>, {
2564
- type: 'text-delta' | 'reasoning-delta' | 'source' | 'tool-call' | 'tool-input-start' | 'tool-input-delta' | 'tool-result' | 'raw';
2565
- }>;
2566
- }) => PromiseLike<void> | void;
2571
+ declare const object: <OBJECT>({ schema: inputSchema, name, description, }: {
2572
+ schema: FlexibleSchema<OBJECT>;
2573
+ /**
2574
+ * Optional name of the output that should be generated.
2575
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2576
+ */
2577
+ name?: string;
2578
+ /**
2579
+ * Optional description of the output that should be generated.
2580
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2581
+ */
2582
+ description?: string;
2583
+ }) => Output<OBJECT, DeepPartial<OBJECT>, never>;
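
A sketch of the object mode with streaming partials, assuming `zod`, a placeholder model id, and that the namespace declared below is re-exported as `Output` (as in earlier SDK versions):

import { streamText, Output } from 'ai';
import { z } from 'zod';

const result = streamText({
  model: 'openai/gpt-4o', // placeholder
  output: Output.object({
    schema: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
  }),
  prompt: 'Generate a lasagna recipe.',
});

// Partial objects as they stream in (DeepPartial of the schema type):
for await (const partial of result.partialOutputStream) {
  console.log(partial);
}

const recipe = await result.output; // fully parsed and validated
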
2567
2584
  /**
2568
- Callback that is set using the `onFinish` option.
2569
-
2570
- @param event - The event that is passed to the callback.
2585
+ * Output specification for array generation.
2586
+ * When the model generates a text response, it will return an array of elements.
2587
+ *
2588
+ * @param element - The schema of the array elements to generate.
2589
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2590
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2591
+ *
2592
+ * @returns An output specification for generating an array of elements.
2571
2593
  */
2572
- type StreamTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
2594
+ declare const array: <ELEMENT>({ element: inputElementSchema, name, description, }: {
2595
+ element: FlexibleSchema<ELEMENT>;
2573
2596
  /**
2574
- * Details for all steps.
2597
+ * Optional name of the output that should be generated.
2598
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2575
2599
  */
2576
- readonly steps: StepResult<TOOLS>[];
2600
+ name?: string;
2577
2601
  /**
2578
- * Total usage for all steps. This is the sum of the usage of all steps.
2602
+ * Optional description of the output that should be generated.
2603
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2579
2604
  */
2580
- readonly totalUsage: LanguageModelUsage;
2605
+ description?: string;
2606
+ }) => Output<Array<ELEMENT>, Array<ELEMENT>, ELEMENT>;
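
The third type parameter (`ELEMENT`) is what feeds `createElementStreamTransform` above. A minimal sketch under the same `Output` namespace assumption; `generateText` exposes the same `output` field via `GenerateTextResult` below:

import { generateText, Output } from 'ai';
import { z } from 'zod';

const { output } = await generateText({
  model: 'openai/gpt-4o', // placeholder
  output: Output.array({
    element: z.object({ title: z.string() }),
  }),
  prompt: 'List three fictional book titles.',
});
// `output` is typed Array<{ title: string }>
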
2607
+ /**
2608
+ * Output specification for choice generation.
2609
+ * When the model generates a text response, it will return one of the choice options.
2610
+ *
2611
+ * @param options - The available choices.
2612
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2613
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2614
+ *
2615
+ * @returns An output specification for generating a choice.
2616
+ */
2617
+ declare const choice: <CHOICE extends string>({ options: choiceOptions, name, description, }: {
2618
+ options: Array<CHOICE>;
2581
2619
  /**
2582
- * Context that is passed into tool execution.
2583
- *
2584
- * Experimental (can break in patch releases).
2585
- *
2586
- * @default undefined
2620
+ * Optional name of the output that should be generated.
2621
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2622
+ */
2623
+ name?: string;
2624
+ /**
2625
+ * Optional description of the output that should be generated.
2626
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2587
2627
  */
2588
- experimental_context: unknown;
2589
- }) => PromiseLike<void> | void;
2628
+ description?: string;
2629
+ }) => Output<CHOICE, CHOICE, never>;
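
A sketch of the choice mode for closed-set classification (same assumptions as above):

import { generateText, Output } from 'ai';

const { output } = await generateText({
  model: 'openai/gpt-4o', // placeholder
  output: Output.choice({
    options: ['positive', 'neutral', 'negative'],
  }),
  prompt: 'Classify the sentiment: "The update broke my workflow."',
});
// `output` is typed 'positive' | 'neutral' | 'negative'
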
2590
2630
  /**
2591
- Callback that is set using the `onAbort` option.
2592
-
2593
- @param event - The event that is passed to the callback.
2631
+ * Output specification for unstructured JSON generation.
2632
+ * When the model generates a text response, it will return a JSON object.
2633
+ *
2634
+ * @param name - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2635
+ * @param description - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2636
+ *
2637
+ * @returns An output specification for generating JSON.
2594
2638
  */
2595
- type StreamTextOnAbortCallback<TOOLS extends ToolSet> = (event: {
2639
+ declare const json: ({ name, description, }?: {
2596
2640
  /**
2597
- Details for all previously finished steps.
2641
+ * Optional name of the output that should be generated.
2642
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema name.
2598
2643
  */
2599
- readonly steps: StepResult<TOOLS>[];
2600
- }) => PromiseLike<void> | void;
2601
- /**
2602
- Generate a text and call tools for a given prompt using a language model.
2603
-
2604
- This function streams the output. If you do not want to stream the output, use `generateText` instead.
2605
-
2606
- @param model - The language model to use.
2607
- @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
2608
-
2609
- @param system - A system message that will be part of the prompt.
2610
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
2611
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
2612
-
2613
- @param maxOutputTokens - Maximum number of tokens to generate.
2614
- @param temperature - Temperature setting.
2615
- The value is passed through to the provider. The range depends on the provider and model.
2616
- It is recommended to set either `temperature` or `topP`, but not both.
2617
- @param topP - Nucleus sampling.
2618
- The value is passed through to the provider. The range depends on the provider and model.
2619
- It is recommended to set either `temperature` or `topP`, but not both.
2620
- @param topK - Only sample from the top K options for each subsequent token.
2621
- Used to remove "long tail" low probability responses.
2622
- Recommended for advanced use cases only. You usually only need to use temperature.
2623
- @param presencePenalty - Presence penalty setting.
2624
- It affects the likelihood of the model to repeat information that is already in the prompt.
2625
- The value is passed through to the provider. The range depends on the provider and model.
2626
- @param frequencyPenalty - Frequency penalty setting.
2627
- It affects the likelihood of the model to repeatedly use the same words or phrases.
2628
- The value is passed through to the provider. The range depends on the provider and model.
2629
- @param stopSequences - Stop sequences.
2630
- If set, the model will stop generating text when one of the stop sequences is generated.
2631
- @param seed - The seed (integer) to use for random sampling.
2632
- If set and supported by the model, calls will generate deterministic results.
2633
-
2634
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
2635
- @param abortSignal - An optional abort signal that can be used to cancel the call.
2636
- @param timeout - An optional timeout in milliseconds. The call will be aborted if it takes longer than the specified timeout.
2637
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
2644
+ name?: string;
2645
+ /**
2646
+ * Optional description of the output that should be generated.
2647
+ * Used by some providers for additional LLM guidance, e.g. via tool or schema description.
2648
+ */
2649
+ description?: string;
2650
+ }) => Output<JSONValue$1, JSONValue$1, never>;
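
And the schema-less JSON mode, which parses but does not validate (same assumptions):

import { generateText, Output } from 'ai';

const { output } = await generateText({
  model: 'openai/gpt-4o', // placeholder
  output: Output.json(),
  prompt: 'Describe a random user as a JSON object.',
});
// `output` is an arbitrary JSONValue; validate it yourself if needed.
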
2638
2651
 
2639
- @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
2640
- @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
2641
- @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
2642
- @param onFinish - Callback that is called when all steps are finished and the response is complete.
2652
+ type output_Output<OUTPUT = any, PARTIAL = any, ELEMENT = any> = Output<OUTPUT, PARTIAL, ELEMENT>;
2653
+ declare const output_array: typeof array;
2654
+ declare const output_choice: typeof choice;
2655
+ declare const output_json: typeof json;
2656
+ declare const output_object: typeof object;
2657
+ declare const output_text: typeof text;
2658
+ declare namespace output {
2659
+ export {
2660
+ output_Output as Output,
2661
+ output_array as array,
2662
+ output_choice as choice,
2663
+ output_json as json,
2664
+ output_object as object,
2665
+ output_text as text,
2666
+ };
2667
+ }
2643
2668
 
2644
- @return
2645
- A result object for accessing different stream types and additional information.
2669
+ /**
2670
+ The result of a `generateText` call.
2671
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
2646
2672
  */
2647
- declare function streamText<TOOLS extends ToolSet, OUTPUT extends Output = Output<string, string>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, timeout, headers, stopWhen, experimental_output, output, experimental_telemetry: telemetry, prepareStep, providerOptions, experimental_activeTools, activeTools, experimental_repairToolCall: repairToolCall, experimental_transform: transform, experimental_download: download, includeRawChunks, onChunk, onError, onFinish, onAbort, onStepFinish, experimental_context, _internal: { now, generateId }, ...settings }: CallSettings & Prompt & {
2673
+ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT extends Output> {
2648
2674
  /**
2649
- The language model to use.
2675
+ The content that was generated in the last step.
2650
2676
  */
2651
- model: LanguageModel;
2677
+ readonly content: Array<ContentPart<TOOLS>>;
2652
2678
  /**
2653
- The tools that the model can call. The model needs to support calling tools.
2654
- */
2655
- tools?: TOOLS;
2679
+ The text that was generated in the last step.
2680
+ */
2681
+ readonly text: string;
2656
2682
  /**
2657
- The tool choice strategy. Default: 'auto'.
2683
+ The full reasoning that the model has generated in the last step.
2658
2684
  */
2659
- toolChoice?: ToolChoice<TOOLS>;
2685
+ readonly reasoning: Array<ReasoningOutput>;
2660
2686
  /**
2661
- Condition for stopping the generation when there are tool results in the last step.
2662
- When the condition is an array, any of the conditions can be met to stop the generation.
2663
-
2664
- @default stepCountIs(1)
2687
+ The reasoning text that the model has generated in the last step. Can be undefined if the model
2688
+ has only generated text.
2665
2689
  */
2666
- stopWhen?: StopCondition<NoInfer<TOOLS>> | Array<StopCondition<NoInfer<TOOLS>>>;
2690
+ readonly reasoningText: string | undefined;
2667
2691
  /**
2668
- Optional telemetry configuration (experimental).
2692
+ The files that were generated in the last step.
2693
+ Empty array if no files were generated.
2694
+ */
2695
+ readonly files: Array<GeneratedFile>;
2696
+ /**
2697
+ Sources that have been used as references in the last step.
2669
2698
  */
2670
- experimental_telemetry?: TelemetrySettings;
2699
+ readonly sources: Array<Source>;
2671
2700
  /**
2672
- Additional provider-specific options. They are passed through
2673
- to the provider from the AI SDK and enable provider-specific
2674
- functionality that can be fully encapsulated in the provider.
2675
- */
2676
- providerOptions?: ProviderOptions;
2701
+ The tool calls that were made in the last step.
2702
+ */
2703
+ readonly toolCalls: Array<TypedToolCall<TOOLS>>;
2677
2704
  /**
2678
- * @deprecated Use `activeTools` instead.
2705
+ The static tool calls that were made in the last step.
2679
2706
  */
2680
- experimental_activeTools?: Array<keyof NoInfer<TOOLS>>;
2707
+ readonly staticToolCalls: Array<StaticToolCall<TOOLS>>;
2681
2708
  /**
2682
- Limits the tools that are available for the model to call without
2683
- changing the tool call and result types in the result.
2684
- */
2685
- activeTools?: Array<keyof NoInfer<TOOLS>>;
2709
+ The dynamic tool calls that were made in the last step.
2710
+ */
2711
+ readonly dynamicToolCalls: Array<DynamicToolCall>;
2686
2712
  /**
2687
- Optional specification for parsing structured outputs from the LLM response.
2713
+ The results of the tool calls from the last step.
2688
2714
  */
2689
- output?: OUTPUT;
2715
+ readonly toolResults: Array<TypedToolResult<TOOLS>>;
2690
2716
  /**
2691
- Optional specification for parsing structured outputs from the LLM response.
2692
-
2693
- @deprecated Use `output` instead.
2694
- */
2695
- experimental_output?: OUTPUT;
2717
+ The static tool results that were generated in the last step.
2718
+ */
2719
+ readonly staticToolResults: Array<StaticToolResult<TOOLS>>;
2696
2720
  /**
2697
- Optional function that you can use to provide different settings for a step.
2698
-
2699
- @param options - The options for the step.
2700
- @param options.steps - The steps that have been executed so far.
2701
- @param options.stepNumber - The number of the step that is being executed.
2702
- @param options.model - The model that is being used.
2703
-
2704
- @returns An object that contains the settings for the step.
2705
- If you return undefined (or for undefined settings), the settings from the outer level will be used.
2706
- */
2707
- prepareStep?: PrepareStepFunction<NoInfer<TOOLS>>;
2721
+ The dynamic tool results that were generated in the last step.
2722
+ */
2723
+ readonly dynamicToolResults: Array<DynamicToolResult>;
2708
2724
  /**
2709
- A function that attempts to repair a tool call that failed to parse.
2725
+ * The unified reason why the generation finished.
2710
2726
  */
2711
- experimental_repairToolCall?: ToolCallRepairFunction<TOOLS>;
2727
+ readonly finishReason: FinishReason;
2712
2728
  /**
2713
- Optional stream transformations.
2714
- They are applied in the order they are provided.
2715
- The stream transformations must maintain the stream structure for streamText to work correctly.
2729
+ * The raw reason why the generation finished (from the provider).
2716
2730
  */
2717
- experimental_transform?: StreamTextTransform<TOOLS> | Array<StreamTextTransform<TOOLS>>;
2731
+ readonly rawFinishReason: string | undefined;
2718
2732
  /**
2719
- Custom download function to use for URLs.
2720
-
2721
- By default, files are downloaded if the model does not support the URL for the given media type.
2733
+ The token usage of the last step.
2722
2734
  */
2723
- experimental_download?: DownloadFunction | undefined;
2735
+ readonly usage: LanguageModelUsage;
2724
2736
  /**
2725
- Whether to include raw chunks from the provider in the stream.
2726
- When enabled, you will receive raw chunks with type 'raw' that contain the unprocessed data from the provider.
2727
- This allows access to cutting-edge provider features not yet wrapped by the AI SDK.
2728
- Defaults to false.
2737
+ The total token usage of all steps.
2738
+ When there are multiple steps, the usage is the sum of all step usages.
2729
2739
  */
2730
- includeRawChunks?: boolean;
2740
+ readonly totalUsage: LanguageModelUsage;
2731
2741
  /**
2732
- Callback that is called for each chunk of the stream.
2733
- The stream processing will pause until the callback promise is resolved.
2742
+ Warnings from the model provider (e.g. unsupported settings).
2734
2743
  */
2735
- onChunk?: StreamTextOnChunkCallback<TOOLS>;
2744
+ readonly warnings: CallWarning[] | undefined;
2736
2745
  /**
2737
- Callback that is invoked when an error occurs during streaming.
2738
- You can use it to log errors.
2739
- The stream processing will pause until the callback promise is resolved.
2746
+ Additional request information.
2740
2747
  */
2741
- onError?: StreamTextOnErrorCallback;
2748
+ readonly request: LanguageModelRequestMetadata;
2742
2749
  /**
2743
- Callback that is called when the LLM response and all request tool executions
2744
- (for tools that have an `execute` function) are finished.
2745
-
2746
- The usage is the combined usage of all steps.
2750
+ Additional response information.
2747
2751
  */
2748
- onFinish?: StreamTextOnFinishCallback<TOOLS>;
2749
- onAbort?: StreamTextOnAbortCallback<TOOLS>;
2752
+ readonly response: LanguageModelResponseMetadata & {
2753
+ /**
2754
+ The response messages that were generated during the call. It consists of an assistant message,
2755
+ potentially containing tool calls.
2756
+
2757
+ When there are tool results, there is an additional tool message with the tool results that are available.
2758
+ If there are tools that do not have execute functions, they are not included in the tool results and
2759
+ need to be added separately.
2760
+ */
2761
+ messages: Array<ResponseMessage>;
2762
+ /**
2763
+ Response body (available only for providers that use HTTP requests).
2764
+ */
2765
+ body?: unknown;
2766
+ };
2750
2767
  /**
2751
- Callback that is called when each step (LLM call) is finished, including intermediate steps.
2752
- */
2753
- onStepFinish?: StreamTextOnStepFinishCallback<TOOLS>;
2768
+ Additional provider-specific metadata. It is passed through
2769
+ from the provider to the AI SDK and enables provider-specific
2770
+ results that can be fully encapsulated in the provider.
2771
+ */
2772
+ readonly providerMetadata: ProviderMetadata | undefined;
2754
2773
  /**
2755
- * Context that is passed into tool execution.
2756
- *
2757
- * Experimental (can break in patch releases).
2758
- *
2759
- * @default undefined
2774
+ Details for all steps.
2775
+ You can use this to get information about intermediate steps,
2776
+ such as the tool calls or the response headers.
2760
2777
  */
2761
- experimental_context?: unknown;
2778
+ readonly steps: Array<StepResult<TOOLS>>;
2762
2779
  /**
2763
- Internal. For test use only. May change without notice.
2780
+ The generated structured output. It uses the `output` specification.
2781
+
2782
+ @deprecated Use `output` instead.
2764
2783
  */
2765
- _internal?: {
2766
- now?: () => number;
2767
- generateId?: IdGenerator;
2768
- };
2769
- }): StreamTextResult<TOOLS, OUTPUT>;
2784
+ readonly experimental_output: InferCompleteOutput<OUTPUT>;
2785
+ /**
2786
+ The generated structured output. It uses the `output` specification.
2787
+
2788
+ */
2789
+ readonly output: InferCompleteOutput<OUTPUT>;
2790
+ }
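
A sketch of reading the result fields declared above (placeholder model id):

import { generateText } from 'ai';

const result = await generateText({
  model: 'openai/gpt-4o', // placeholder
  prompt: 'Write a haiku about the sea.',
});

console.log(result.text);              // text from the last step
console.log(result.finishReason);      // unified finish reason
console.log(result.rawFinishReason);   // provider's raw finish reason
console.log(result.totalUsage);        // usage summed across all steps
console.log(result.response.messages); // messages to persist for follow-up calls
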
2770
2791
 
2771
2792
  /**
2772
2793
  * Parameters for calling an agent.