ai 3.3.15 → 3.3.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,7 +1,7 @@
- import { Schema, DeepPartial, Attachment, JSONValue, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
+ import { Schema, DeepPartial, Attachment, JSONValue as JSONValue$1, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
  export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseComplexResponse, parseStreamPart, readDataStream } from '@ai-sdk/ui-utils';
- import { AttributeValue, Span } from '@opentelemetry/api';
- import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1StreamPart, AISDKError } from '@ai-sdk/provider';
+ import { AttributeValue } from '@opentelemetry/api';
+ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, AISDKError } from '@ai-sdk/provider';
  export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
  import { z } from 'zod';
  import { ServerResponse } from 'http';
@@ -592,63 +592,30 @@ Generate a structured, typed object for a given prompt and schema using a langua

  This function does not stream the output. If you want to stream the output, use `streamObject` instead.

- @param model - The language model to use.
-
- @param schema - The schema of the object that the model should generate.
- @param schemaName - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- @param schemaDescription - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- @param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
-
- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
- @param maxTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topK - Only sample from the top K options for each subsequent token.
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
-
- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
  @returns
  A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
- declare function generateObject<T>({ model, schema: inputSchema, schemaName, schemaDescription, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ declare function generateObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output?: 'object' | undefined;
  /**
  The language model to use.
- */
+ */
  model: LanguageModel;
  /**
  The schema of the object that the model should generate.
- */
- schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
+ */
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
  /**
  Optional name of the output that should be generated.
  Used by some providers for additional LLM guidance, e.g.
  via tool or schema name.
- */
+ */
  schemaName?: string;
  /**
  Optional description of the output that should be generated.
  Used by some providers for additional LLM guidance, e.g.
  via tool or schema description.
- */
+ */
  schemaDescription?: string;
  /**
  The mode to use for object generation.
@@ -662,32 +629,85 @@ The schema is converted in a JSON schema and used in one of the following ways
  Please note that most providers do not support all modes.

  Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
  */
+ experimental_telemetry?: TelemetrySettings;
+ }): Promise<GenerateObjectResult<OBJECT>>;
+ /**
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+
+ @return
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ */
+ declare function generateObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'array';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The element schema of the array that the model should generate.
+ */
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
+ /**
+ Optional name of the array that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema name.
+ */
+ schemaName?: string;
+ /**
+ Optional description of the array that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema description.
+ */
+ schemaDescription?: string;
+ /**
+ The mode to use for object generation.
+
+ The schema is converted in a JSON schema and used in one of the following ways
+
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.
+
+ Please note that most providers do not support all modes.
+
+ Default and recommended: 'auto' (best mode for the model).
+ */
  mode?: 'auto' | 'json' | 'tool';
  /**
- * Optional telemetry configuration (experimental).
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ }): Promise<GenerateObjectResult<Array<ELEMENT>>>;
+ /**
+ Generate JSON with any schema for a given prompt using a language model.
+
+ This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+
+ @returns
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ */
+ declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'no-schema';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The mode to use for object generation. Must be "json" for no-schema output.
+ */
+ mode?: 'json';
+ /**
+ Optional telemetry configuration (experimental).
  */
  experimental_telemetry?: TelemetrySettings;
- }): Promise<DefaultGenerateObjectResult<T>>;
- declare class DefaultGenerateObjectResult<T> implements GenerateObjectResult<T> {
- readonly object: GenerateObjectResult<T>['object'];
- readonly finishReason: GenerateObjectResult<T>['finishReason'];
- readonly usage: GenerateObjectResult<T>['usage'];
- readonly warnings: GenerateObjectResult<T>['warnings'];
- readonly rawResponse: GenerateObjectResult<T>['rawResponse'];
- readonly logprobs: GenerateObjectResult<T>['logprobs'];
- readonly experimental_providerMetadata: GenerateObjectResult<T>['experimental_providerMetadata'];
- constructor(options: {
- object: GenerateObjectResult<T>['object'];
- finishReason: GenerateObjectResult<T>['finishReason'];
- usage: GenerateObjectResult<T>['usage'];
- warnings: GenerateObjectResult<T>['warnings'];
- rawResponse: GenerateObjectResult<T>['rawResponse'];
- logprobs: GenerateObjectResult<T>['logprobs'];
- providerMetadata: GenerateObjectResult<T>['experimental_providerMetadata'];
- });
- toJsonResponse(init?: ResponseInit): Response;
- }
+ }): Promise<GenerateObjectResult<JSONValue>>;
  /**
  * @deprecated Use `generateObject` instead.
  */
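
The `generateObject` signature is now three overloads selected by the `output` option, replacing the single class-based return type. A minimal usage sketch of all three shapes; the `@ai-sdk/openai` provider and the `gpt-4o-mini` model id are assumptions, and any `LanguageModel` works in their place:

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider
import { z } from 'zod';

const model = openai('gpt-4o-mini');

// Default output ('object'): result.object is typed by the schema.
const { object: recipe } = await generateObject({
  model,
  schema: z.object({ name: z.string(), steps: z.array(z.string()) }),
  prompt: 'A simple pancake recipe.',
});

// output: 'array': the schema describes one element,
// and result.object is typed as an array of that element.
const { object: heroes } = await generateObject({
  model,
  output: 'array',
  schema: z.object({ name: z.string(), power: z.string() }),
  prompt: 'Generate three fictional superheroes.',
});

// output: 'no-schema': result.object is an untyped JSONValue.
const { object: json } = await generateObject({
  model,
  output: 'no-schema',
  prompt: 'Generate a JSON description of the weather in Berlin.',
});
```
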
@@ -698,7 +718,7 @@ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
  /**
  The result of a `streamObject` call that contains the partial object stream and additional information.
  */
- interface StreamObjectResult<T> {
+ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
  /**
  Warnings from the model provider (e.g. unsupported settings)
  */
@@ -725,14 +745,18 @@ interface StreamObjectResult<T> {
  /**
  The generated object (typed according to the schema). Resolved when the response is finished.
  */
- readonly object: Promise<T>;
+ readonly object: Promise<RESULT>;
  /**
  Stream of partial objects. It gets more complete as the stream progresses.

  Note that the partial object is not validated.
  If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
  */
- readonly partialObjectStream: AsyncIterableStream<DeepPartial<T>>;
+ readonly partialObjectStream: AsyncIterableStream<PARTIAL>;
+ /**
+ * Stream over complete array elements. Only available if the output strategy is set to `array`.
+ */
+ readonly elementStream: ELEMENT_STREAM;
  /**
  Text stream of the JSON representation of the generated object. It contains text chunks.
  When the stream is finished, the object is valid JSON that can be parsed.
@@ -742,7 +766,7 @@ interface StreamObjectResult<T> {
  Stream of different types of events, including partial objects, errors, and finish events.
  Only errors that stop the stream, such as network errors, are thrown.
  */
- readonly fullStream: AsyncIterableStream<ObjectStreamPart<T>>;
+ readonly fullStream: AsyncIterableStream<ObjectStreamPart<PARTIAL>>;
  /**
  Writes text delta output to a Node.js response-like object.
  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
@@ -779,76 +803,129 @@ type ObjectStreamInputPart = {
  };
  providerMetadata?: ProviderMetadata;
  };
- type ObjectStreamPart<T> = ObjectStreamInputPart | {
+ type ObjectStreamPart<PARTIAL> = ObjectStreamInputPart | {
  type: 'object';
- object: DeepPartial<T>;
+ object: PARTIAL;
  } | {
  type: 'text-delta';
  textDelta: string;
  };

+ type OnFinishCallback<RESULT> = (event: {
+ /**
+ The token usage of the generated response.
+ */
+ usage: CompletionTokenUsage$1;
+ /**
+ The generated object. Can be undefined if the final object does not match the schema.
+ */
+ object: RESULT | undefined;
+ /**
+ Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
+ */
+ error: unknown | undefined;
+ /**
+ Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ /**
+ Warnings from the model provider (e.g. unsupported settings).
+ */
+ warnings?: CallWarning[];
+ /**
+ Additional provider-specific metadata. They are passed through
+ from the provider to the AI SDK and enable provider-specific
+ results that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata: ProviderMetadata | undefined;
+ }) => Promise<void> | void;
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.

  This function streams the output. If you do not want to stream the output, use `generateObject` instead.

- @param model - The language model to use.
+ @return
+ A result object for accessing the partial object stream and additional information.
+ */
+ declare function streamObject<OBJECT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output?: 'object' | undefined;
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The schema of the object that the model should generate.
+ */
+ schema: z.Schema<OBJECT, z.ZodTypeDef, any> | Schema<OBJECT>;
+ /**
+ Optional name of the output that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema name.
+ */
+ schemaName?: string;
+ /**
+ Optional description of the output that should be generated.
+ Used by some providers for additional LLM guidance, e.g.
+ via tool or schema description.
+ */
+ schemaDescription?: string;
+ /**
+ The mode to use for object generation.

- @param schema - The schema of the object that the model should generate.
- @param schemaName - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
- @param schemaDescription - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
- @param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
+ The schema is converted in a JSON schema and used in one of the following ways

- @param system - A system message that will be part of the prompt.
- @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
- @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+ - 'auto': The provider will choose the best mode for the model.
+ - 'tool': A tool with the JSON schema as parameters is is provided and the provider is instructed to use it.
+ - 'json': The JSON schema and an instruction is injected into the prompt. If the provider supports JSON mode, it is enabled. If the provider supports JSON grammars, the grammar is used.

- @param maxTokens - Maximum number of tokens to generate.
- @param temperature - Temperature setting.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topP - Nucleus sampling.
- The value is passed through to the provider. The range depends on the provider and model.
- It is recommended to set either `temperature` or `topP`, but not both.
- @param topK - Only sample from the top K options for each subsequent token.
- Used to remove "long tail" low probability responses.
- Recommended for advanced use cases only. You usually only need to use temperature.
- @param presencePenalty - Presence penalty setting.
- It affects the likelihood of the model to repeat information that is already in the prompt.
- The value is passed through to the provider. The range depends on the provider and model.
- @param frequencyPenalty - Frequency penalty setting.
- It affects the likelihood of the model to repeatedly use the same words or phrases.
- The value is passed through to the provider. The range depends on the provider and model.
- @param seed - The seed (integer) to use for random sampling.
- If set and supported by the model, calls will generate deterministic results.
+ Please note that most providers do not support all modes.

- @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
- @param abortSignal - An optional abort signal that can be used to cancel the call.
- @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+ Default and recommended: 'auto' (best mode for the model).
+ */
+ mode?: 'auto' | 'json' | 'tool';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Callback that is called when the LLM response and the final object validation are finished.
+ */
+ onFinish?: OnFinishCallback<OBJECT>;
+ }): Promise<StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>>;
+ /**
+ Generate an array with structured, typed elements for a given prompt and element schema using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.

  @return
  A result object for accessing the partial object stream and additional information.
  */
- declare function streamObject<T>({ model, schema: inputSchema, schemaName, schemaDescription, mode, system, prompt, messages, maxRetries, abortSignal, headers, experimental_telemetry: telemetry, onFinish, ...settings }: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ declare function streamObject<ELEMENT>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'array';
  /**
  The language model to use.
- */
+ */
  model: LanguageModel;
  /**
- The schema of the object that the model should generate.
- */
- schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
+ The element schema of the array that the model should generate.
+ */
+ schema: z.Schema<ELEMENT, z.ZodTypeDef, any> | Schema<ELEMENT>;
  /**
- Optional name of the output that should be generated.
+ Optional name of the array that should be generated.
  Used by some providers for additional LLM guidance, e.g.
  via tool or schema name.
- */
+ */
  schemaName?: string;
  /**
- Optional description of the output that should be generated.
+ Optional description of the array that should be generated.
  Used by some providers for additional LLM guidance, e.g.
  via tool or schema description.
- */
+ */
  schemaDescription?: string;
  /**
  The mode to use for object generation.
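
The default ('object') overload of `streamObject` above resolves to a `StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>` and accepts the new `OnFinishCallback` type. A minimal sketch of calling it and consuming `partialObjectStream`; the provider and model id are assumptions:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider
import { z } from 'zod';

const result = await streamObject({
  model: openai('gpt-4o-mini'),
  schema: z.object({ title: z.string(), summary: z.string() }),
  prompt: 'Summarize the plot of Hamlet.',
  // Uses the OnFinishCallback<OBJECT> shape declared above.
  onFinish({ object, error, usage }) {
    // object is undefined when the final output fails schema validation;
    // error then carries e.g. a TypeValidationError.
    if (object === undefined) console.error('validation failed', error);
    else console.log('finished', usage);
  },
});

// Partial objects are unvalidated DeepPartial<OBJECT> snapshots.
for await (const partial of result.partialObjectStream) {
  console.log(partial);
}
```
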
@@ -862,91 +939,58 @@ The schema is converted in a JSON schema and used in one of the following ways
  Please note that most providers do not support all modes.

  Default and recommended: 'auto' (best mode for the model).
- */
+ */
  mode?: 'auto' | 'json' | 'tool';
  /**
  Optional telemetry configuration (experimental).
- */
+ */
  experimental_telemetry?: TelemetrySettings;
  /**
  Callback that is called when the LLM response and the final object validation are finished.
- */
- onFinish?: (event: {
- /**
- The token usage of the generated response.
- */
- usage: CompletionTokenUsage$1;
- /**
- The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
- */
- object: T | undefined;
- /**
- Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
- */
- error: unknown | undefined;
- /**
- Optional raw response data.
- */
- rawResponse?: {
- /**
- Response headers.
- */
- headers?: Record<string, string>;
- };
- /**
- Warnings from the model provider (e.g. unsupported settings).
- */
- warnings?: CallWarning[];
- /**
- Additional provider-specific metadata. They are passed through
- from the provider to the AI SDK and enable provider-specific
- results that can be fully encapsulated in the provider.
- */
- experimental_providerMetadata: ProviderMetadata | undefined;
- }) => Promise<void> | void;
- }): Promise<DefaultStreamObjectResult<T>>;
- declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
- private readonly originalStream;
- private readonly objectPromise;
- readonly warnings: StreamObjectResult<T>['warnings'];
- readonly usage: StreamObjectResult<T>['usage'];
- readonly experimental_providerMetadata: StreamObjectResult<T>['experimental_providerMetadata'];
- readonly rawResponse: StreamObjectResult<T>['rawResponse'];
- constructor({ stream, warnings, rawResponse, schema, onFinish, rootSpan, doStreamSpan, telemetry, startTimestamp, }: {
- stream: ReadableStream<string | Omit<LanguageModelV1StreamPart, 'text-delta'>>;
- warnings: StreamObjectResult<T>['warnings'];
- rawResponse?: StreamObjectResult<T>['rawResponse'];
- schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
- onFinish: Parameters<typeof streamObject<T>>[0]['onFinish'];
- rootSpan: Span;
- doStreamSpan: Span;
- telemetry: TelemetrySettings | undefined;
- startTimestamp: number;
- });
- get object(): Promise<T>;
- get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
- get textStream(): AsyncIterableStream<string>;
- get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
- pipeTextStreamToResponse(response: ServerResponse, init?: {
- headers?: Record<string, string>;
- status?: number;
- }): void;
- toTextStreamResponse(init?: ResponseInit): Response;
- }
+ */
+ onFinish?: OnFinishCallback<Array<ELEMENT>>;
+ }): Promise<StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>>;
+ /**
+ Generate JSON with any schema for a given prompt using a language model.
+
+ This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+ @return
+ A result object for accessing the partial object stream and additional information.
+ */
+ declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+ output: 'no-schema';
+ /**
+ The language model to use.
+ */
+ model: LanguageModel;
+ /**
+ The mode to use for object generation. Must be "json" for no-schema output.
+ */
+ mode?: 'json';
+ /**
+ Optional telemetry configuration (experimental).
+ */
+ experimental_telemetry?: TelemetrySettings;
+ /**
+ Callback that is called when the LLM response and the final object validation are finished.
+ */
+ onFinish?: OnFinishCallback<JSONValue>;
+ }): Promise<StreamObjectResult<JSONValue, JSONValue, never>>;
  /**
  * @deprecated Use `streamObject` instead.
  */
  declare const experimental_streamObject: typeof streamObject;

- type Parameters$1 = z.ZodTypeAny | Schema<any>;
- type inferParameters<PARAMETERS extends Parameters$1> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
+ type Parameters = z.ZodTypeAny | Schema<any>;
+ type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
  /**
  A tool contains the description and the schema of the input that the tool expects.
  This enables the language model to generate the input.

  The tool can also contain an optional execute function for the actual execution function of the tool.
  */
- interface CoreTool<PARAMETERS extends Parameters$1 = any, RESULT = any> {
+ interface CoreTool<PARAMETERS extends Parameters = any, RESULT = any> {
  /**
  An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
  */
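
With `output: 'array'` on `streamObject`, the `ELEMENT_STREAM` type parameter becomes `AsyncIterableStream<ELEMENT>` instead of `never`, making the new `elementStream` usable. A sketch under the same assumed provider, iterating complete array elements as they finish:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider
import { z } from 'zod';

const result = await streamObject({
  model: openai('gpt-4o-mini'),
  output: 'array',
  // The schema types a single element of the generated array.
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: 'List five capital cities.',
});

// Unlike partialObjectStream, elementStream only emits complete elements.
for await (const element of result.elementStream) {
  console.log(element.city, element.country);
}
```
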
@@ -966,12 +1010,12 @@ interface CoreTool<PARAMETERS extends Parameters$1 = any, RESULT = any> {
  /**
  Helper function for inferring the execute args of a tool.
  */
- declare function tool<PARAMETERS extends Parameters$1, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
  execute: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  }): CoreTool<PARAMETERS, RESULT> & {
  execute: (args: inferParameters<PARAMETERS>) => PromiseLike<RESULT>;
  };
- declare function tool<PARAMETERS extends Parameters$1, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+ declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
  execute?: undefined;
  }): CoreTool<PARAMETERS, RESULT> & {
  execute: undefined;
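
The `tool` helper is unchanged apart from the `Parameters$1` → `Parameters` rename; it still exists so that the `execute` arguments are inferred from the `parameters` schema via `inferParameters`. A short sketch (the weather tool itself is hypothetical):

```ts
import { tool } from 'ai';
import { z } from 'zod';

const weatherTool = tool({
  description: 'Get the current temperature for a city.',
  parameters: z.object({ city: z.string() }),
  // The argument is inferred as { city: string } from the zod schema.
  execute: async ({ city }) => ({ city, temperatureC: 21 }),
});
```
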
@@ -1406,6 +1450,7 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  toDataStreamResponse(options?: ResponseInit | {
  init?: ResponseInit;
  data?: StreamData;
+ getErrorMessage?: (error: unknown) => string;
  }): Response;
  /**
  Creates a simple text stream response.
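
The new `getErrorMessage` option on `toDataStreamResponse` lets callers control the error text forwarded in the data stream. A sketch in a route-handler style; the handler shape, request body, and model id are assumptions:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

export async function POST(req: Request): Promise<Response> {
  const { prompt } = await req.json(); // assumed request body shape

  const result = await streamText({
    model: openai('gpt-4o-mini'),
    prompt,
  });

  return result.toDataStreamResponse({
    // Maps stream errors to the message sent to the client.
    getErrorMessage: (error) =>
      error instanceof Error ? error.message : 'Unknown error',
  });
}
```
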
@@ -1565,64 +1610,7 @@ Callback that is called when the LLM response and all request tool executions
  */
  readonly experimental_providerMetadata: ProviderMetadata | undefined;
  }) => Promise<void> | void;
- }): Promise<DefaultStreamTextResult<TOOLS>>;
- declare class DefaultStreamTextResult<TOOLS extends Record<string, CoreTool>> implements StreamTextResult<TOOLS> {
- private originalStream;
- readonly warnings: StreamTextResult<TOOLS>['warnings'];
- readonly usage: StreamTextResult<TOOLS>['usage'];
- readonly finishReason: StreamTextResult<TOOLS>['finishReason'];
- readonly experimental_providerMetadata: StreamTextResult<TOOLS>['experimental_providerMetadata'];
- readonly text: StreamTextResult<TOOLS>['text'];
- readonly toolCalls: StreamTextResult<TOOLS>['toolCalls'];
- readonly toolResults: StreamTextResult<TOOLS>['toolResults'];
- readonly rawResponse: StreamTextResult<TOOLS>['rawResponse'];
- constructor({ stream, warnings, rawResponse, onChunk, onFinish, rootSpan, doStreamSpan, telemetry, startTimestamp, }: {
- stream: ReadableStream<TextStreamPart<TOOLS>>;
- warnings: StreamTextResult<TOOLS>['warnings'];
- rawResponse: StreamTextResult<TOOLS>['rawResponse'];
- onChunk: Parameters<typeof streamText>[0]['onChunk'];
- onFinish: Parameters<typeof streamText>[0]['onFinish'];
- rootSpan: Span;
- doStreamSpan: Span;
- telemetry: TelemetrySettings | undefined;
- startTimestamp: number;
- });
- /**
- Split out a new stream from the original stream.
- The original stream is replaced to allow for further splitting,
- since we do not know how many times the stream will be split.
-
- Note: this leads to buffering the stream content on the server.
- However, the LLM results are expected to be small enough to not cause issues.
- */
- private teeStream;
- get textStream(): AsyncIterableStream<string>;
- get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
- toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
- private toDataStream;
- pipeAIStreamToResponse(response: ServerResponse$1, init?: {
- headers?: Record<string, string>;
- status?: number;
- }): void;
- pipeDataStreamToResponse(response: ServerResponse$1, init?: {
- headers?: Record<string, string>;
- status?: number;
- }): void;
- pipeTextStreamToResponse(response: ServerResponse$1, init?: {
- headers?: Record<string, string>;
- status?: number;
- }): void;
- toAIStreamResponse(options?: ResponseInit | {
- init?: ResponseInit;
- data?: StreamData;
- }): Response;
- toDataStreamResponse(options?: ResponseInit | {
- init?: ResponseInit;
- data?: StreamData;
- getErrorMessage?: (error: unknown) => string;
- }): Response;
- toTextStreamResponse(init?: ResponseInit): Response;
- }
+ }): Promise<StreamTextResult<TOOLS>>;
  /**
  * @deprecated Use `streamText` instead.
  */
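
`streamText` is now typed against the public `StreamTextResult<TOOLS>` interface rather than the removed `DefaultStreamTextResult` class, so callers should rely only on the documented interface surface. A minimal consumption sketch; provider and model id are assumptions:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider

const result = await streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Write a haiku about TypeScript.',
});

// textStream is part of the StreamTextResult interface contract.
for await (const delta of result.textStream) {
  process.stdout.write(delta);
}
```
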
@@ -2083,7 +2071,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
  * })
  * ```
  */
- experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
+ experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue$1) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
  /**
  * @example
  * ```js
@@ -2122,7 +2110,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
  experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
  tool_call_id: string;
  function_name: string;
- tool_call_result: JSONValue;
+ tool_call_result: JSONValue$1;
  }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
  };
  interface ChatCompletionChunk {
@@ -2583,8 +2571,8 @@ declare class StreamData {
  private warningTimeout;
  constructor();
  close(): Promise<void>;
- append(value: JSONValue): void;
- appendMessageAnnotation(value: JSONValue): void;
+ append(value: JSONValue$1): void;
+ appendMessageAnnotation(value: JSONValue$1): void;
  }
  /**
  * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
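
`StreamData` keeps its shape; only its `JSONValue` parameter now refers to the `@ai-sdk/ui-utils` re-export (aliased `JSONValue$1` in this file) instead of the `@ai-sdk/provider` type. A short usage sketch:

```ts
import { StreamData } from 'ai';

const data = new StreamData();
data.append({ status: 'processing' });            // any JSON value
data.appendMessageAnnotation({ source: 'docs' }); // annotation on the message

// StreamData must be closed, otherwise its warning timeout fires.
await data.close();
```
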