ai 3.3.15 → 3.3.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +120 -105
- package/dist/index.d.ts +120 -105
- package/dist/index.js +108 -22
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +108 -22
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -7
package/dist/index.d.mts
CHANGED
@@ -1,7 +1,7 @@
-import { Schema, DeepPartial, Attachment, JSONValue, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
+import { Schema, DeepPartial, Attachment, JSONValue as JSONValue$1, CreateMessage, FunctionCall as FunctionCall$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, Function, FunctionCall, FunctionCallHandler, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, Tool, ToolCall, ToolCallHandler, ToolChoice, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseComplexResponse, parseStreamPart, readDataStream } from '@ai-sdk/ui-utils';
 import { AttributeValue, Span } from '@opentelemetry/api';
-import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1StreamPart, AISDKError } from '@ai-sdk/provider';
+import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1StreamPart, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { z } from 'zod';
 import { ServerResponse } from 'http';
@@ -592,63 +592,30 @@ Generate a structured, typed object for a given prompt and schema using a language model.
 
 This function does not stream the output. If you want to stream the output, use `streamObject` instead.
 
-@param model - The language model to use.
-
-@param schema - The schema of the object that the model should generate.
-@param schemaName - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
-@param schemaDescription - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
-@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
-
-@param system - A system message that will be part of the prompt.
-@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
-@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
-@param maxTokens - Maximum number of tokens to generate.
-@param temperature - Temperature setting.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topK - Only sample from the top K options for each subsequent token.
-Used to remove "long tail" low probability responses.
-Recommended for advanced use cases only. You usually only need to use temperature.
-@param presencePenalty - Presence penalty setting.
-It affects the likelihood of the model to repeat information that is already in the prompt.
-The value is passed through to the provider. The range depends on the provider and model.
-@param frequencyPenalty - Frequency penalty setting.
-It affects the likelihood of the model to repeatedly use the same words or phrases.
-The value is passed through to the provider. The range depends on the provider and model.
-@param seed - The seed (integer) to use for random sampling.
-If set and supported by the model, calls will generate deterministic results.
-
-@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
-@param abortSignal - An optional abort signal that can be used to cancel the call.
-@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
 @returns
 A result object that contains the generated object, the finish reason, the token usage, and additional information.
  */
-declare function generateObject<T>(
+declare function generateObject<T>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+    output?: 'object' | undefined;
     /**
 The language model to use.
     */
     model: LanguageModel;
     /**
 The schema of the object that the model should generate.
     */
     schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
     /**
 Optional name of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema name.
     */
     schemaName?: string;
     /**
 Optional description of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema description.
     */
     schemaDescription?: string;
     /**
 The mode to use for object generation.
@@ -662,13 +629,36 @@ The schema is converted in a JSON schema and used in one of the following ways
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
     */
     mode?: 'auto' | 'json' | 'tool';
     /**
 Optional telemetry configuration (experimental).
     */
     experimental_telemetry?: TelemetrySettings;
 }): Promise<DefaultGenerateObjectResult<T>>;
+/**
+Generate JSON with any schema for a given prompt using a language model.
+
+This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+
+@returns
+A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ */
+declare function generateObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+    output: 'no-schema';
+    /**
+The language model to use.
+    */
+    model: LanguageModel;
+    /**
+The mode to use for object generation. Must be "json" for no-schema output.
+    */
+    mode?: 'json';
+    /**
+Optional telemetry configuration (experimental).
+    */
+    experimental_telemetry?: TelemetrySettings;
+}): Promise<DefaultGenerateObjectResult<JSONValue>>;
 declare class DefaultGenerateObjectResult<T> implements GenerateObjectResult<T> {
     readonly object: GenerateObjectResult<T>['object'];
     readonly finishReason: GenerateObjectResult<T>['finishReason'];
@@ -792,63 +782,29 @@ Generate a structured, typed object for a given prompt and schema using a language model.
 
 This function streams the output. If you do not want to stream the output, use `generateObject` instead.
 
-@param model - The language model to use.
-
-@param schema - The schema of the object that the model should generate.
-@param schemaName - Optional name of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema name.
-@param schemaDescription - Optional description of the output that should be generated. Used by some providers for additional LLM guidance, e.g. via tool or schema description.
-@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
-
-@param system - A system message that will be part of the prompt.
-@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
-@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
-
-@param maxTokens - Maximum number of tokens to generate.
-@param temperature - Temperature setting.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling.
-The value is passed through to the provider. The range depends on the provider and model.
-It is recommended to set either `temperature` or `topP`, but not both.
-@param topK - Only sample from the top K options for each subsequent token.
-Used to remove "long tail" low probability responses.
-Recommended for advanced use cases only. You usually only need to use temperature.
-@param presencePenalty - Presence penalty setting.
-It affects the likelihood of the model to repeat information that is already in the prompt.
-The value is passed through to the provider. The range depends on the provider and model.
-@param frequencyPenalty - Frequency penalty setting.
-It affects the likelihood of the model to repeatedly use the same words or phrases.
-The value is passed through to the provider. The range depends on the provider and model.
-@param seed - The seed (integer) to use for random sampling.
-If set and supported by the model, calls will generate deterministic results.
-
-@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
-@param abortSignal - An optional abort signal that can be used to cancel the call.
-@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
-
 @return
 A result object for accessing the partial object stream and additional information.
  */
-declare function streamObject<T>(
+declare function streamObject<T>(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
     /**
 The language model to use.
     */
     model: LanguageModel;
     /**
 The schema of the object that the model should generate.
     */
     schema: z.Schema<T, z.ZodTypeDef, any> | Schema<T>;
     /**
 Optional name of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema name.
     */
     schemaName?: string;
     /**
 Optional description of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema description.
     */
     schemaDescription?: string;
     /**
 The mode to use for object generation.
@@ -862,49 +818,108 @@ The schema is converted in a JSON schema and used in one of the following ways
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
     */
     mode?: 'auto' | 'json' | 'tool';
     /**
 Optional telemetry configuration (experimental).
     */
     experimental_telemetry?: TelemetrySettings;
     /**
 Callback that is called when the LLM response and the final object validation are finished.
     */
     onFinish?: (event: {
         /**
 The token usage of the generated response.
         */
         usage: CompletionTokenUsage$1;
         /**
 The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
         */
         object: T | undefined;
         /**
 Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
         */
         error: unknown | undefined;
         /**
 Optional raw response data.
         */
         rawResponse?: {
             /**
 Response headers.
             */
             headers?: Record<string, string>;
         };
         /**
 Warnings from the model provider (e.g. unsupported settings).
         */
         warnings?: CallWarning[];
         /**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
         */
         experimental_providerMetadata: ProviderMetadata | undefined;
     }) => Promise<void> | void;
 }): Promise<DefaultStreamObjectResult<T>>;
+/**
+Generate JSON with any schema for a given prompt using a language model.
+
+This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+@return
+A result object for accessing the partial object stream and additional information.
+ */
+declare function streamObject(options: Omit<CallSettings, 'stopSequences'> & Prompt & {
+    output: 'no-schema';
+    /**
+The language model to use.
+    */
+    model: LanguageModel;
+    /**
+The mode to use for object generation. Must be "json" for no-schema output.
+    */
+    mode?: 'json';
+    /**
+Optional telemetry configuration (experimental).
+    */
+    experimental_telemetry?: TelemetrySettings;
+    /**
+Callback that is called when the LLM response and the final object validation are finished.
+    */
+    onFinish?: (event: {
+        /**
+The token usage of the generated response.
+        */
+        usage: CompletionTokenUsage$1;
+        /**
+The generated object (typed according to the schema). Can be undefined if the final object does not match the schema.
+        */
+        object: JSONValue | undefined;
+        /**
+Optional error object. This is e.g. a TypeValidationError when the final object does not match the schema.
+        */
+        error: unknown | undefined;
+        /**
+Optional raw response data.
+        */
+        rawResponse?: {
+            /**
+Response headers.
+            */
+            headers?: Record<string, string>;
+        };
+        /**
+Warnings from the model provider (e.g. unsupported settings).
+        */
+        warnings?: CallWarning[];
+        /**
+Additional provider-specific metadata. They are passed through
+from the provider to the AI SDK and enable provider-specific
+results that can be fully encapsulated in the provider.
+        */
+        experimental_providerMetadata: ProviderMetadata | undefined;
+    }) => Promise<void> | void;
+}): Promise<DefaultStreamObjectResult<JSONValue>>;
 declare class DefaultStreamObjectResult<T> implements StreamObjectResult<T> {
     private readonly originalStream;
     private readonly objectPromise;
@@ -2083,7 +2098,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
      * })
      * ```
      */
-    experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
+    experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue$1) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
     /**
      * @example
      * ```js
@@ -2122,7 +2137,7 @@ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
     experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
         tool_call_id: string;
         function_name: string;
-        tool_call_result: JSONValue;
+        tool_call_result: JSONValue$1;
     }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
 };
 interface ChatCompletionChunk {
@@ -2583,8 +2598,8 @@ declare class StreamData {
     private warningTimeout;
     constructor();
     close(): Promise<void>;
-    append(value: JSONValue): void;
-    appendMessageAnnotation(value: JSONValue): void;
+    append(value: JSONValue$1): void;
+    appendMessageAnnotation(value: JSONValue$1): void;
 }
 /**
  * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).