ai 5.0.0-canary.11 → 5.0.0-canary.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/dist/index.d.mts +2 -34
- package/dist/index.d.ts +2 -34
- package/dist/index.js +0 -15
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +0 -15
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
 export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason,
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -59,12 +59,6 @@ Can be one of the following:
 */
 type FinishReason = LanguageModelV2FinishReason;
 /**
-Log probabilities for each token and its top log probabilities.
-
-@deprecated Will become a provider extension in the future.
-*/
-type LogProbs = LanguageModelV2LogProbs;
-/**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
@@ -2841,11 +2835,6 @@ type StepResult<TOOLS extends ToolSet> = {
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -2962,13 +2951,6 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if it was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -3506,7 +3488,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'step-finish';
 messageId: string;
-logprobs?: LogProbs;
 request: LanguageModelRequestMetadata;
 warnings: CallWarning[] | undefined;
 response: LanguageModelResponseMetadata;
@@ -3519,10 +3500,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 finishReason: FinishReason;
 usage: LanguageModelUsage$1;
 providerMetadata: ProviderMetadata | undefined;
-/**
-* @deprecated will be moved into provider metadata
-*/
-logprobs?: LogProbs;
 /**
 * @deprecated use response on step-finish instead
 */
@@ -3886,13 +3863,6 @@ interface GenerateObjectResult<OBJECT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -4073,7 +4043,6 @@ type ObjectStreamPart<PARTIAL> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 response: LanguageModelResponseMetadata;
 providerMetadata?: ProviderMetadata;
@@ -4735,7 +4704,6 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 providerMetadata?: ProviderMetadata;
 } | {
@@ -5031,4 +4999,4 @@ declare namespace llamaindexAdapter {
 };
 }
 
-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
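The typings diff above removes the deprecated `LogProbs` alias and every `logprobs` field from the result and stream-part types. A minimal TypeScript sketch of the consumer-side impact, assuming a hypothetical helper that previously read the field from a `GenerateTextResult` (the `readLogprobs` name and the `@ts-expect-error` annotation are illustrative, not part of the package):

```ts
import type { GenerateTextResult, ToolSet } from 'ai';

// Against 5.0.0-canary.11 this compiled, because GenerateTextResult still
// declared a deprecated `logprobs` field. With the typings above it is gone,
// so the access below becomes a type error.
function readLogprobs(result: GenerateTextResult<ToolSet, never>) {
  // @ts-expect-error `logprobs` is no longer declared on GenerateTextResult
  return result.logprobs;
}

// The removed JSDoc said the field "will become a provider extension", so
// per-token data is expected to surface via provider-specific metadata
// (result.providerMetadata) rather than a dedicated top-level field.
```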
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
 export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason,
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -59,12 +59,6 @@ Can be one of the following:
 */
 type FinishReason = LanguageModelV2FinishReason;
 /**
-Log probabilities for each token and its top log probabilities.
-
-@deprecated Will become a provider extension in the future.
-*/
-type LogProbs = LanguageModelV2LogProbs;
-/**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
@@ -2841,11 +2835,6 @@ type StepResult<TOOLS extends ToolSet> = {
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -2962,13 +2951,6 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if it was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -3506,7 +3488,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'step-finish';
 messageId: string;
-logprobs?: LogProbs;
 request: LanguageModelRequestMetadata;
 warnings: CallWarning[] | undefined;
 response: LanguageModelResponseMetadata;
@@ -3519,10 +3500,6 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 finishReason: FinishReason;
 usage: LanguageModelUsage$1;
 providerMetadata: ProviderMetadata | undefined;
-/**
-* @deprecated will be moved into provider metadata
-*/
-logprobs?: LogProbs;
 /**
 * @deprecated use response on step-finish instead
 */
@@ -3886,13 +3863,6 @@ interface GenerateObjectResult<OBJECT> {
 body?: unknown;
 };
 /**
-Logprobs for the completion.
-`undefined` if the mode does not support logprobs or if was not enabled.
-
-@deprecated Will become a provider extension in the future.
-*/
-readonly logprobs: LogProbs | undefined;
-/**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
 results that can be fully encapsulated in the provider.
@@ -4073,7 +4043,6 @@ type ObjectStreamPart<PARTIAL> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 response: LanguageModelResponseMetadata;
 providerMetadata?: ProviderMetadata;
@@ -4735,7 +4704,6 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 } | {
 type: 'finish';
 finishReason: FinishReason;
-logprobs?: LogProbs;
 usage: LanguageModelUsage$1;
 providerMetadata?: ProviderMetadata;
 } | {
@@ -5031,4 +4999,4 @@ declare namespace llamaindexAdapter {
 };
 }
 
-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
package/dist/index.js
CHANGED
@@ -4274,7 +4274,6 @@ async function generateObject({
 let warnings;
 let response;
 let request;
-let logprobs;
 let resultProviderMetadata;
 const standardizedPrompt = standardizePrompt({
 prompt: { system, prompt, messages },
@@ -4374,7 +4373,6 @@ async function generateObject({
 finishReason = generateResult.finishReason;
 usage = generateResult.usage;
 warnings = generateResult.warnings;
-logprobs = generateResult.logprobs;
 resultProviderMetadata = generateResult.providerMetadata;
 request = (_a17 = generateResult.request) != null ? _a17 : {};
 response = generateResult.responseData;
@@ -4448,7 +4446,6 @@ async function generateObject({
 warnings,
 request,
 response,
-logprobs,
 providerMetadata: resultProviderMetadata
 });
 }
@@ -4463,7 +4460,6 @@ var DefaultGenerateObjectResult = class {
 this.providerMetadata = options.providerMetadata;
 this.response = options.response;
 this.request = options.request;
-this.logprobs = options.logprobs;
 }
 toJsonResponse(init) {
 var _a17;
@@ -5732,7 +5728,6 @@ async function generateText({
 finishReason: currentModelResponse.finishReason,
 usage: currentUsage,
 warnings: currentModelResponse.warnings,
-logprobs: currentModelResponse.logprobs,
 request: (_b = currentModelResponse.request) != null ? _b : {},
 response: {
 ...currentModelResponse.response,
@@ -5795,7 +5790,6 @@ async function generateText({
 ...currentModelResponse.response,
 messages: responseMessages
 },
-logprobs: currentModelResponse.logprobs,
 steps,
 providerMetadata: currentModelResponse.providerMetadata
 });
@@ -5892,7 +5886,6 @@ var DefaultGenerateTextResult = class {
 this.response = options.response;
 this.steps = options.steps;
 this.providerMetadata = options.providerMetadata;
-this.logprobs = options.logprobs;
 this.outputResolver = options.outputResolver;
 this.sources = options.sources;
 }
@@ -6412,7 +6405,6 @@ function runToolsTransformation({
 finishChunk = {
 type: "finish",
 finishReason: chunk.finishReason,
-logprobs: chunk.logprobs,
 usage: calculateLanguageModelUsage2(chunk.usage),
 providerMetadata: chunk.providerMetadata
 };
@@ -6734,7 +6726,6 @@ var DefaultStreamTextResult = class {
 finishReason: part.finishReason,
 usage: part.usage,
 warnings: part.warnings,
-logprobs: part.logprobs,
 request: part.request,
 response: {
 ...part.response,
@@ -6798,7 +6789,6 @@ var DefaultStreamTextResult = class {
 self.stepsPromise.resolve(recordedSteps);
 await (onFinish == null ? void 0 : onFinish({
 finishReason,
-logprobs: void 0,
 usage,
 text: recordedFullText,
 reasoningText: lastStep.reasoningText,
@@ -7005,7 +6995,6 @@ var DefaultStreamTextResult = class {
 let stepFirstChunk = true;
 let stepText = "";
 let fullStepText = stepType2 === "continue" ? previousStepText : "";
-let stepLogProbs;
 let stepResponse = {
 id: generateId3(),
 timestamp: currentDate(),
@@ -7130,7 +7119,6 @@ var DefaultStreamTextResult = class {
 stepUsage = chunk.usage;
 stepFinishReason = chunk.finishReason;
 stepProviderMetadata = chunk.providerMetadata;
-stepLogProbs = chunk.logprobs;
 const msToFinish = now2() - startTimestampMs;
 doStreamSpan.addEvent("ai.stream.finish");
 doStreamSpan.setAttributes({
@@ -7218,7 +7206,6 @@ var DefaultStreamTextResult = class {
 finishReason: stepFinishReason,
 usage: stepUsage,
 providerMetadata: stepProviderMetadata,
-logprobs: stepLogProbs,
 request: stepRequest,
 response: {
 ...stepResponse,
@@ -7235,7 +7222,6 @@ var DefaultStreamTextResult = class {
 finishReason: stepFinishReason,
 usage: combinedUsage,
 providerMetadata: stepProviderMetadata,
-logprobs: stepLogProbs,
 response: {
 ...stepResponse,
 headers: response == null ? void 0 : response.headers
@@ -8037,7 +8023,6 @@ function simulateStreamingMiddleware() {
 type: "finish",
 finishReason: result.finishReason,
 usage: result.usage,
-logprobs: result.logprobs,
 providerMetadata: result.providerMetadata
 });
 controller.close();
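The compiled `streamText` path above likewise stops threading `logprobs` through its 'step-finish' and 'finish' stream parts and through the `onFinish` payload (the removed `logprobs: void 0` line shows it was already hard-coded to `undefined` there). A minimal sketch of a consumer under the new shape, assuming a hypothetical `model` instance supplied by whichever provider is configured:

```ts
import { streamText, type LanguageModel } from 'ai';

// Sketch only: `model` stands in for any provider-supplied language model.
async function run(model: LanguageModel) {
  const result = streamText({
    model,
    prompt: 'Write a haiku about package diffs.',
    onFinish({ finishReason, usage, text }) {
      // In 5.0.0-canary.12 this payload no longer includes `logprobs`.
      console.log(finishReason, usage, text);
    },
  });

  for await (const part of result.fullStream) {
    if (part.type === 'step-finish' || part.type === 'finish') {
      // Neither part carries a `logprobs` field after this change.
      console.log(part.type);
    }
  }
}
```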