ai 5.0.0-canary.10 → 5.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +19 -44
- package/dist/index.d.ts +19 -44
- package/dist/index.js +374 -640
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +341 -607
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -5
- package/dist/internal/index.d.ts +7 -5
- package/dist/internal/index.js +23 -15
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +23 -15
- package/dist/internal/index.mjs.map +1 -1
- package/dist/mcp-stdio/index.js +3 -3
- package/dist/mcp-stdio/index.js.map +1 -1
- package/dist/mcp-stdio/index.mjs +3 -3
- package/dist/mcp-stdio/index.mjs.map +1 -1
- package/dist/test/index.d.mts +3 -7
- package/dist/test/index.d.ts +3 -7
- package/dist/test/index.js +3 -7
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +3 -7
- package/dist/test/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED

```diff
@@ -1,5 +1,23 @@
 # ai

+## 5.0.0-canary.12
+
+### Patch Changes
+
+- Updated dependencies [e86be6f]
+  - @ai-sdk/provider@2.0.0-canary.9
+  - @ai-sdk/provider-utils@3.0.0-canary.10
+
+## 5.0.0-canary.11
+
+### Patch Changes
+
+- 8e64e9c: feat (ai): allow using provider default temperature by specifying null
+- Updated dependencies [95857aa]
+- Updated dependencies [7ea4132]
+  - @ai-sdk/provider@2.0.0-canary.8
+  - @ai-sdk/provider-utils@3.0.0-canary.9
+
 ## 5.0.0-canary.10

 ### Patch Changes
```
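The `5.0.0-canary.11` entry above adds support for passing `null` as the temperature to defer to the provider's default instead of the SDK default of `0`. A minimal sketch of that call, assuming the `@ai-sdk/openai` provider and the model id purely for illustration:

```ts
// Sketch only: provider, model id, and prompt are illustrative assumptions.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Summarize this changelog entry in one sentence.',
  // null defers to the provider's default temperature (added in 5.0.0-canary.11);
  // omitting the setting would keep the SDK default of 0.
  temperature: null,
});

console.log(text);
```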
package/dist/index.d.mts
CHANGED

```diff
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
 export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason,
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -59,12 +59,6 @@ Can be one of the following:
 */
 type FinishReason = LanguageModelV2FinishReason;
 /**
-Log probabilities for each token and its top log probabilities.
-
-@deprecated Will become a provider extension in the future.
-*/
-type LogProbs = LanguageModelV2LogProbs;
-/**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
@@ -1216,10 +1210,11 @@ type CallSettings = {
 1 (very random).

 It is recommended to set either `temperature` or `topP`, but not both.
+Use `null` to use the provider's default temperature.

 @default 0
 */
-temperature?: number;
+temperature?: number | null;
 /**
 Nucleus sampling. This is a number between 0 and 1.

@@ -2840,11 +2835,6 @@ type StepResult<TOOLS extends ToolSet> = {
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -2961,13 +2951,6 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if it was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -2977,13 +2960,7 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {

 interface Output$1<OUTPUT, PARTIAL> {
 readonly type: 'object' | 'text';
-
-system: string | undefined;
-model: LanguageModel;
-}): string | undefined;
-responseFormat: (options: {
-model: LanguageModel;
-}) => LanguageModelV2CallOptions['responseFormat'];
+responseFormat: LanguageModelV2CallOptions['responseFormat'];
 parsePartial(options: {
 text: string;
 }): {
@@ -3511,7 +3488,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'step-finish';
 messageId: string;
-logprobs?: LogProbs;
 request: LanguageModelRequestMetadata;
 warnings: CallWarning[] | undefined;
 response: LanguageModelResponseMetadata;
@@ -3524,10 +3500,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 finishReason: FinishReason;
 usage: LanguageModelUsage$1;
 providerMetadata: ProviderMetadata | undefined;
-/**
- * @deprecated will be moved into provider metadata
- */
-logprobs?: LogProbs;
 /**
  * @deprecated use response on step-finish instead
  */
@@ -3891,13 +3863,6 @@ interface GenerateObjectResult<OBJECT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -4078,7 +4043,6 @@ type ObjectStreamPart<PARTIAL> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 response: LanguageModelResponseMetadata;
 providerMetadata?: ProviderMetadata;
@@ -4533,8 +4497,20 @@ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetr
 * Applies default settings for a language model.
 */
 declare function defaultSettingsMiddleware({ settings, }: {
-settings: Partial<
-
+settings: Partial<{
+maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'] | null;
+temperature?: LanguageModelV2CallOptions['temperature'] | null;
+stopSequences?: LanguageModelV2CallOptions['stopSequences'] | null;
+topP?: LanguageModelV2CallOptions['topP'] | null;
+topK?: LanguageModelV2CallOptions['topK'] | null;
+presencePenalty?: LanguageModelV2CallOptions['presencePenalty'] | null;
+frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'] | null;
+responseFormat?: LanguageModelV2CallOptions['responseFormat'] | null;
+seed?: LanguageModelV2CallOptions['seed'] | null;
+tools?: LanguageModelV2CallOptions['tools'] | null;
+toolChoice?: LanguageModelV2CallOptions['toolChoice'] | null;
+headers?: LanguageModelV2CallOptions['headers'];
+providerOptions?: LanguageModelV2CallOptions['providerOptions'];
 }>;
 }): LanguageModelV2Middleware;

@@ -4728,7 +4704,6 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 providerMetadata?: ProviderMetadata;
 } | {
@@ -5024,4 +4999,4 @@ declare namespace llamaindexAdapter {
 };
 }

-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
```
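The reworked `defaultSettingsMiddleware` above now accepts an explicit `null` for individual call settings, mirroring the nullable `temperature` in `CallSettings`. A minimal usage sketch, assuming the `@ai-sdk/openai` provider and model id for illustration only:

```ts
// Sketch only: the wrapped provider and model id are illustrative assumptions.
import { wrapLanguageModel, defaultSettingsMiddleware } from 'ai';
import { openai } from '@ai-sdk/openai';

const model = wrapLanguageModel({
  model: openai('gpt-4o'),
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: null,     // defer to the provider's default temperature
      maxOutputTokens: 1024, // used when the call does not set its own value
      seed: 42,
    },
  }),
});
```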
package/dist/index.d.ts
CHANGED

```diff
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
 export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason,
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -59,12 +59,6 @@ Can be one of the following:
 */
 type FinishReason = LanguageModelV2FinishReason;
 /**
-Log probabilities for each token and its top log probabilities.
-
-@deprecated Will become a provider extension in the future.
-*/
-type LogProbs = LanguageModelV2LogProbs;
-/**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
@@ -1216,10 +1210,11 @@ type CallSettings = {
 1 (very random).

 It is recommended to set either `temperature` or `topP`, but not both.
+Use `null` to use the provider's default temperature.

 @default 0
 */
-temperature?: number;
+temperature?: number | null;
 /**
 Nucleus sampling. This is a number between 0 and 1.

@@ -2840,11 +2835,6 @@ type StepResult<TOOLS extends ToolSet> = {
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -2961,13 +2951,6 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if it was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -2977,13 +2960,7 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {

 interface Output$1<OUTPUT, PARTIAL> {
 readonly type: 'object' | 'text';
-
-system: string | undefined;
-model: LanguageModel;
-}): string | undefined;
-responseFormat: (options: {
-model: LanguageModel;
-}) => LanguageModelV2CallOptions['responseFormat'];
+responseFormat: LanguageModelV2CallOptions['responseFormat'];
 parsePartial(options: {
 text: string;
 }): {
@@ -3511,7 +3488,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'step-finish';
 messageId: string;
-logprobs?: LogProbs;
 request: LanguageModelRequestMetadata;
 warnings: CallWarning[] | undefined;
 response: LanguageModelResponseMetadata;
@@ -3524,10 +3500,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 finishReason: FinishReason;
 usage: LanguageModelUsage$1;
 providerMetadata: ProviderMetadata | undefined;
-/**
- * @deprecated will be moved into provider metadata
- */
-logprobs?: LogProbs;
 /**
  * @deprecated use response on step-finish instead
  */
@@ -3891,13 +3863,6 @@ interface GenerateObjectResult<OBJECT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -4078,7 +4043,6 @@ type ObjectStreamPart<PARTIAL> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 response: LanguageModelResponseMetadata;
 providerMetadata?: ProviderMetadata;
@@ -4533,8 +4497,20 @@ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetr
 * Applies default settings for a language model.
 */
 declare function defaultSettingsMiddleware({ settings, }: {
-settings: Partial<
-
+settings: Partial<{
+maxOutputTokens?: LanguageModelV2CallOptions['maxOutputTokens'] | null;
+temperature?: LanguageModelV2CallOptions['temperature'] | null;
+stopSequences?: LanguageModelV2CallOptions['stopSequences'] | null;
+topP?: LanguageModelV2CallOptions['topP'] | null;
+topK?: LanguageModelV2CallOptions['topK'] | null;
+presencePenalty?: LanguageModelV2CallOptions['presencePenalty'] | null;
+frequencyPenalty?: LanguageModelV2CallOptions['frequencyPenalty'] | null;
+responseFormat?: LanguageModelV2CallOptions['responseFormat'] | null;
+seed?: LanguageModelV2CallOptions['seed'] | null;
+tools?: LanguageModelV2CallOptions['tools'] | null;
+toolChoice?: LanguageModelV2CallOptions['toolChoice'] | null;
+headers?: LanguageModelV2CallOptions['headers'];
+providerOptions?: LanguageModelV2CallOptions['providerOptions'];
 }>;
 }): LanguageModelV2Middleware;

@@ -4728,7 +4704,6 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 providerMetadata?: ProviderMetadata;
 } | {
@@ -5024,4 +4999,4 @@ declare namespace llamaindexAdapter {
 };
 }

-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
```
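Both declaration files also drop `logprobs` from the result types and from the `step-finish`/`finish` stream parts. A hedged sketch of consuming the full stream after this change, reading only fields the diff shows remain on the `finish` part; provider, model id, and prompt are illustrative assumptions:

```ts
// Sketch only: provider, model id, and prompt are illustrative assumptions.
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Write a haiku about type declarations.',
});

for await (const part of result.fullStream) {
  if (part.type === 'finish') {
    // earlier canaries also exposed an optional `logprobs` field here; it is removed now
    console.log(part.finishReason, part.usage, part.providerMetadata);
  }
}
```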