ai 5.0.0-canary.8 → 5.0.0-canary.9
- package/CHANGELOG.md +18 -0
- package/dist/index.d.mts +172 -37
- package/dist/index.d.ts +172 -37
- package/dist/index.js +255 -152
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +247 -142
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,23 @@
 # ai
 
+## 5.0.0-canary.9
+
+### Major Changes
+
+- a847c3e: chore: rename reasoning to reasoningText etc
+- b32e192: chore (ai): rename reasoning to reasoningText, rename reasoningDetails to reasoning (streamText, generateText)
+
+### Patch Changes
+
+- cb9c9e4: remove deprecated `experimental_wrapLanguageModel`
+- 8aa9e20: feat: add speech with experimental_generateSpeech
+- Updated dependencies [5d142ab]
+- Updated dependencies [b6b43c7]
+- Updated dependencies [8aa9e20]
+- Updated dependencies [3795467]
+  - @ai-sdk/provider-utils@3.0.0-canary.8
+  - @ai-sdk/provider@2.0.0-canary.7
+
 ## 5.0.0-canary.8
 
 ### Major Changes
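The headline change in this release is the reasoning rename on the text-generation results. A minimal sketch of how the renamed fields surface when calling `generateText` against this canary; the provider import, model id, and prompt are illustrative assumptions, not part of this diff:

```ts
// Sketch only: assumes @ai-sdk/openai at a matching canary and OPENAI_API_KEY set.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('gpt-4o'), // hypothetical model id
  prompt: 'Explain the Doppler effect in one paragraph.',
});

// Up to 5.0.0-canary.8, `result.reasoning` was the reasoning text and
// `result.reasoningDetails` held the structured parts. From canary.9 on,
// `reasoningText` is the plain string (or undefined) and `reasoning` is the
// structured Array<Reasoning>.
console.log(result.reasoningText);
console.log(result.reasoning);
```

The same rename shows up on `StepResult` and on the `reasoning` / `reasoningText` promises of `streamText` results, as the typing changes below illustrate.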
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
-export {
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError,
+export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -606,6 +606,60 @@ type JSONValue = null | string | number | boolean | {
     [value: string]: JSONValue;
 } | Array<JSONValue>;
 
+/**
+Transcription model that is used by the AI SDK Core functions.
+ */
+type TranscriptionModel = TranscriptionModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+type TranscriptionWarning = TranscriptionModelV1CallWarning;
+
+type TranscriptionModelResponseMetadata = {
+    /**
+Timestamp for the start of the generated response.
+     */
+    timestamp: Date;
+    /**
+The ID of the response model that was used to generate the response.
+     */
+    modelId: string;
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+};
+
+/**
+Speech model that is used by the AI SDK Core functions.
+ */
+type SpeechModel = SpeechModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+type SpeechWarning = SpeechModelV1CallWarning;
+
+type SpeechModelResponseMetadata = {
+    /**
+Timestamp for the start of the generated response.
+     */
+    timestamp: Date;
+    /**
+The ID of the response model that was used to generate the response.
+     */
+    modelId: string;
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+    /**
+Response body.
+     */
+    body?: unknown;
+};
+
 declare const getOriginalFetch$1: () => typeof fetch;
 declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, restoreMessagesOnFailure, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, }: {
     api: string;
@@ -1534,7 +1588,7 @@ interface GeneratedFile {
     readonly mediaType: string;
 }
 
-type
+type Reasoning = {
     type: 'text';
     text: string;
     signature?: string;
@@ -2752,8 +2806,11 @@ type StepResult<TOOLS extends ToolSet> = {
     /**
 The reasoning that was generated during the generation.
      */
-    readonly reasoning:
-
+    readonly reasoning: Array<Reasoning>;
+    /**
+The reasoning text that was generated during the generation.
+     */
+    readonly reasoningText: string | undefined;
     /**
 The files that were generated during the generation.
      */
@@ -2834,19 +2891,19 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
      */
     readonly text: string;
     /**
+The full reasoning that the model has generated.
+     */
+    readonly reasoning: Array<Reasoning>;
+    /**
 The reasoning text that the model has generated. Can be undefined if the model
 has only generated text.
      */
-    readonly
+    readonly reasoningText: string | undefined;
     /**
 The files that were generated. Empty array if no files were generated.
      */
     readonly files: Array<GeneratedFile>;
     /**
-The full reasoning that the model has generated.
-     */
-    readonly reasoningDetails: Array<ReasoningDetail>;
-    /**
 Sources that have been used as input to generate the response.
 For multi-step generation, the sources are accumulated from all steps.
      */
@@ -3267,17 +3324,17 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
      */
     readonly text: Promise<string>;
     /**
-The reasoning that has
+The full reasoning that the model has generated.
 
 Resolved when the response is finished.
-
-    readonly reasoning: Promise<
+     */
+    readonly reasoning: Promise<Array<Reasoning>>;
     /**
-The
+The reasoning that has been generated by the last step.
 
 Resolved when the response is finished.
-
-    readonly
+     */
+    readonly reasoningText: Promise<string | undefined>;
     /**
 The tool calls that have been executed in the last step.
 
@@ -4263,25 +4320,109 @@ Callback that is called when the LLM response and the final object validation ar
 }): StreamObjectResult<JSONValue$1, JSONValue$1, never>;
 
 /**
-
-
-
-
+ * A generated audio file.
+ */
+interface GeneratedAudioFile extends GeneratedFile {
+    /**
+     * Audio format of the file (e.g., 'mp3', 'wav', etc.)
+     */
+    readonly format: string;
+}
 
-
+/**
+The result of a `generateSpeech` call.
+It contains the audio data and additional information.
+ */
+interface SpeechResult {
     /**
-
+     * The audio data as a base64 encoded string or binary data.
      */
-
+    readonly audio: GeneratedAudioFile;
     /**
-
+Warnings for the call, e.g. unsupported settings.
+     */
+    readonly warnings: Array<SpeechWarning>;
+    /**
+Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
      */
-
+    readonly responses: Array<SpeechModelResponseMetadata>;
     /**
-
+Provider metadata from the provider.
+     */
+    readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
+}
+
+/**
+Generates speech audio using a speech model.
+
+@param model - The speech model to use.
+@param text - The text to convert to speech.
+@param voice - The voice to use for speech generation.
+@param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
+@param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
+@param speed - The speed of the speech generation.
+@param providerOptions - Additional provider-specific options that are passed through to the provider
+as body parameters.
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the generated audio data.
+ */
+declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+    /**
+The speech model to use.
+     */
+    model: SpeechModelV1;
+    /**
+The text to convert to speech.
      */
+    text: string;
+    /**
+The voice to use for speech generation.
+     */
+    voice?: string;
+    /**
+     * The desired output format for the audio e.g. "mp3", "wav", etc.
+     */
+    outputFormat?: 'mp3' | 'wav' | (string & {});
+    /**
+Instructions for the speech generation e.g. "Speak in a slow and steady tone".
+     */
+    instructions?: string;
+    /**
+The speed of the speech generation.
+     */
+    speed?: number;
+    /**
+Additional provider-specific options that are passed through to the provider
+as body parameters.
+
+The outer record is keyed by the provider name, and the inner
+record is keyed by the provider-specific metadata key.
+```ts
+{
+"openai": {}
+}
+```
+     */
+    providerOptions?: ProviderOptions;
+    /**
+Maximum number of retries per speech model call. Set to 0 to disable retries.
+
+@default 2
+     */
+    maxRetries?: number;
+    /**
+Abort signal.
+     */
+    abortSignal?: AbortSignal;
+    /**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+     */
     headers?: Record<string, string>;
-}
+}): Promise<SpeechResult>;
 
 /**
 The result of a `transcribe` call.
@@ -4434,15 +4575,6 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
     modelId?: string;
     providerId?: string;
 }) => LanguageModelV2;
-/**
- * @deprecated Use `wrapLanguageModel` instead.
- */
-declare const experimental_wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
-    model: LanguageModelV2;
-    middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
-    modelId?: string;
-    providerId?: string;
-}) => LanguageModelV2;
 
 /**
  * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
@@ -4553,6 +4685,9 @@ declare class InvalidArgumentError extends AISDKError {
 }
 
 type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
+    type: 'stream-start';
+    warnings: LanguageModelV2CallWarning[];
+} | {
     type: 'text';
     text: string;
 } | {
@@ -4889,4 +5024,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, transcribe as experimental_transcribe,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
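The new `experimental_generateSpeech` export works against the `SpeechResult` and `GeneratedAudioFile` declarations added above. A usage sketch, assuming a provider that ships a speech model; the `openai.speech('tts-1')` factory and the `'alloy'` voice are illustrative assumptions:

```ts
// Sketch only: the speech model factory and voice name are assumptions.
import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai';
import { writeFile } from 'node:fs/promises';

const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'), // assumed provider speech model id
  text: 'Hello from the AI SDK.',
  voice: 'alloy',                // assumed voice id
  outputFormat: 'mp3',
});

// GeneratedAudioFile extends GeneratedFile, so the binary accessors are
// available alongside the new `format` field.
await writeFile(`speech.${audio.format}`, audio.uint8Array);
console.warn(warnings);
```

The result also carries `responses` (per-call `SpeechModelResponseMetadata`) and `providerMetadata` for provider-specific details.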
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
-export {
-import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError,
+export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -606,6 +606,60 @@ type JSONValue = null | string | number | boolean | {
     [value: string]: JSONValue;
 } | Array<JSONValue>;
 
+/**
+Transcription model that is used by the AI SDK Core functions.
+ */
+type TranscriptionModel = TranscriptionModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+type TranscriptionWarning = TranscriptionModelV1CallWarning;
+
+type TranscriptionModelResponseMetadata = {
+    /**
+Timestamp for the start of the generated response.
+     */
+    timestamp: Date;
+    /**
+The ID of the response model that was used to generate the response.
+     */
+    modelId: string;
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+};
+
+/**
+Speech model that is used by the AI SDK Core functions.
+ */
+type SpeechModel = SpeechModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+ */
+type SpeechWarning = SpeechModelV1CallWarning;
+
+type SpeechModelResponseMetadata = {
+    /**
+Timestamp for the start of the generated response.
+     */
+    timestamp: Date;
+    /**
+The ID of the response model that was used to generate the response.
+     */
+    modelId: string;
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+    /**
+Response body.
+     */
+    body?: unknown;
+};
+
 declare const getOriginalFetch$1: () => typeof fetch;
 declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, restoreMessagesOnFailure, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, }: {
     api: string;
@@ -1534,7 +1588,7 @@ interface GeneratedFile {
     readonly mediaType: string;
 }
 
-type
+type Reasoning = {
     type: 'text';
     text: string;
     signature?: string;
@@ -2752,8 +2806,11 @@ type StepResult<TOOLS extends ToolSet> = {
     /**
 The reasoning that was generated during the generation.
      */
-    readonly reasoning:
-
+    readonly reasoning: Array<Reasoning>;
+    /**
+The reasoning text that was generated during the generation.
+     */
+    readonly reasoningText: string | undefined;
     /**
 The files that were generated during the generation.
      */
@@ -2834,19 +2891,19 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
      */
     readonly text: string;
     /**
+The full reasoning that the model has generated.
+     */
+    readonly reasoning: Array<Reasoning>;
+    /**
 The reasoning text that the model has generated. Can be undefined if the model
 has only generated text.
      */
-    readonly
+    readonly reasoningText: string | undefined;
     /**
 The files that were generated. Empty array if no files were generated.
      */
     readonly files: Array<GeneratedFile>;
     /**
-The full reasoning that the model has generated.
-     */
-    readonly reasoningDetails: Array<ReasoningDetail>;
-    /**
 Sources that have been used as input to generate the response.
 For multi-step generation, the sources are accumulated from all steps.
      */
@@ -3267,17 +3324,17 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
      */
     readonly text: Promise<string>;
     /**
-The reasoning that has
+The full reasoning that the model has generated.
 
 Resolved when the response is finished.
-
-    readonly reasoning: Promise<
+     */
+    readonly reasoning: Promise<Array<Reasoning>>;
     /**
-The
+The reasoning that has been generated by the last step.
 
 Resolved when the response is finished.
-
-    readonly
+     */
+    readonly reasoningText: Promise<string | undefined>;
     /**
 The tool calls that have been executed in the last step.
 
@@ -4263,25 +4320,109 @@ Callback that is called when the LLM response and the final object validation ar
 }): StreamObjectResult<JSONValue$1, JSONValue$1, never>;
 
 /**
-
-
-
-
+ * A generated audio file.
+ */
+interface GeneratedAudioFile extends GeneratedFile {
+    /**
+     * Audio format of the file (e.g., 'mp3', 'wav', etc.)
+     */
+    readonly format: string;
+}
 
-
+/**
+The result of a `generateSpeech` call.
+It contains the audio data and additional information.
+ */
+interface SpeechResult {
     /**
-
+     * The audio data as a base64 encoded string or binary data.
      */
-
+    readonly audio: GeneratedAudioFile;
     /**
-
+Warnings for the call, e.g. unsupported settings.
+     */
+    readonly warnings: Array<SpeechWarning>;
+    /**
+Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
      */
-
+    readonly responses: Array<SpeechModelResponseMetadata>;
     /**
-
+Provider metadata from the provider.
+     */
+    readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
+}
+
+/**
+Generates speech audio using a speech model.
+
+@param model - The speech model to use.
+@param text - The text to convert to speech.
+@param voice - The voice to use for speech generation.
+@param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
+@param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
+@param speed - The speed of the speech generation.
+@param providerOptions - Additional provider-specific options that are passed through to the provider
+as body parameters.
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the generated audio data.
+ */
+declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+    /**
+The speech model to use.
+     */
+    model: SpeechModelV1;
+    /**
+The text to convert to speech.
      */
+    text: string;
+    /**
+The voice to use for speech generation.
+     */
+    voice?: string;
+    /**
+     * The desired output format for the audio e.g. "mp3", "wav", etc.
+     */
+    outputFormat?: 'mp3' | 'wav' | (string & {});
+    /**
+Instructions for the speech generation e.g. "Speak in a slow and steady tone".
+     */
+    instructions?: string;
+    /**
+The speed of the speech generation.
+     */
+    speed?: number;
+    /**
+Additional provider-specific options that are passed through to the provider
+as body parameters.
+
+The outer record is keyed by the provider name, and the inner
+record is keyed by the provider-specific metadata key.
+```ts
+{
+"openai": {}
+}
+```
+     */
+    providerOptions?: ProviderOptions;
+    /**
+Maximum number of retries per speech model call. Set to 0 to disable retries.
+
+@default 2
+     */
+    maxRetries?: number;
+    /**
+Abort signal.
+     */
+    abortSignal?: AbortSignal;
+    /**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+     */
     headers?: Record<string, string>;
-}
+}): Promise<SpeechResult>;
 
 /**
 The result of a `transcribe` call.
@@ -4434,15 +4575,6 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
     modelId?: string;
     providerId?: string;
 }) => LanguageModelV2;
-/**
- * @deprecated Use `wrapLanguageModel` instead.
- */
-declare const experimental_wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
-    model: LanguageModelV2;
-    middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
-    modelId?: string;
-    providerId?: string;
-}) => LanguageModelV2;
 
 /**
  * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
@@ -4553,6 +4685,9 @@ declare class InvalidArgumentError extends AISDKError {
 }
 
 type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
+    type: 'stream-start';
+    warnings: LanguageModelV2CallWarning[];
+} | {
     type: 'text';
     text: string;
 } | {
@@ -4889,4 +5024,4 @@ declare namespace llamaindexAdapter {
     };
 }
 
-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, transcribe as experimental_transcribe,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
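Since the deprecated `experimental_wrapLanguageModel` declaration is gone from both type bundles, call sites move to `wrapLanguageModel`, which keeps the same `{ model, middleware, modelId?, providerId? }` shape. A migration sketch; the wrapped model and the choice of middleware are illustrative:

```ts
// Sketch only: the model id and middleware choice are assumptions.
import { wrapLanguageModel, extractReasoningMiddleware } from 'ai';
import { openai } from '@ai-sdk/openai';

// Before: experimental_wrapLanguageModel({ model, middleware }), removed in canary.9.
const model = wrapLanguageModel({
  model: openai('gpt-4o'), // hypothetical model id
  middleware: extractReasoningMiddleware({ tagName: 'think' }),
});
```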