ai 5.0.0-canary.7 → 5.0.0-canary.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +55 -0
- package/dist/index.d.mts +243 -97
- package/dist/index.d.ts +243 -97
- package/dist/index.js +453 -299
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +445 -289
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +6 -6
- package/dist/internal/index.d.ts +6 -6
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs.map +1 -1
- package/dist/test/index.d.mts +14 -14
- package/dist/test/index.d.ts +14 -14
- package/dist/test/index.js +5 -5
- package/dist/test/index.js.map +1 -1
- package/dist/test/index.mjs +4 -4
- package/dist/test/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
-export {
-import {
-export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError,
+export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV1, ImageModelV1CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2LogProbs, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
+export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { z } from 'zod';
@@ -11,11 +11,11 @@ import { ServerResponse as ServerResponse$1 } from 'http';
 /**
 Embedding model that is used by the AI SDK Core functions.
 */
-type EmbeddingModel<VALUE> =
+type EmbeddingModel<VALUE> = EmbeddingModelV2<VALUE>;
 /**
 Embedding.
 */
-type Embedding =
+type Embedding = EmbeddingModelV2Embedding;

 /**
 Image model that is used by the AI SDK Core functions.
@@ -85,10 +85,6 @@ type ToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'requ
 type: 'tool';
 toolName: Extract<keyof TOOLS, string>;
 };
-/**
- * @deprecated Use `ToolChoice` instead.
- */
-type CoreToolChoice<TOOLS extends Record<string, unknown>> = ToolChoice<TOOLS>;

 type LanguageModelRequestMetadata = {
 /**
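With the deprecated `CoreToolChoice` alias removed, the remaining `ToolChoice` shape is used directly. A minimal sketch of a forced tool choice under that type; the tool set and tool name here are illustrative, not part of this diff:

```ts
import type { ToolChoice } from 'ai';

// Illustrative tool set; only its keys matter for ToolChoice.
type MyTools = { getWeather: unknown; getTime: unknown };

// Force the model to call one specific tool.
const choice: ToolChoice<MyTools> = {
  type: 'tool',
  toolName: 'getWeather',
};
```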
@@ -159,14 +155,14 @@ Additional provider-specific metadata that is returned from the provider.
 This is needed to enable provider-specific functionality that can be
 fully encapsulated in the provider.
 */
-type ProviderMetadata =
+type ProviderMetadata = SharedV2ProviderMetadata;
 /**
 Additional provider-specific options.

 They are passed through to the provider from the AI SDK and enable
 provider-specific functionality that can be fully encapsulated in the provider.
 */
-type ProviderOptions =
+type ProviderOptions = SharedV2ProviderOptions;

 /**
 Represents the number of tokens used in a prompt and completion.
@@ -610,6 +606,60 @@ type JSONValue = null | string | number | boolean | {
 [value: string]: JSONValue;
 } | Array<JSONValue>;

+/**
+Transcription model that is used by the AI SDK Core functions.
+*/
+type TranscriptionModel = TranscriptionModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+*/
+type TranscriptionWarning = TranscriptionModelV1CallWarning;
+
+type TranscriptionModelResponseMetadata = {
+/**
+Timestamp for the start of the generated response.
+*/
+timestamp: Date;
+/**
+The ID of the response model that was used to generate the response.
+*/
+modelId: string;
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+};
+
+/**
+Speech model that is used by the AI SDK Core functions.
+*/
+type SpeechModel = SpeechModelV1;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+*/
+type SpeechWarning = SpeechModelV1CallWarning;
+
+type SpeechModelResponseMetadata = {
+/**
+Timestamp for the start of the generated response.
+*/
+timestamp: Date;
+/**
+The ID of the response model that was used to generate the response.
+*/
+modelId: string;
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+/**
+Response body.
+*/
+body?: unknown;
+};
+
 declare const getOriginalFetch$1: () => typeof fetch;
 declare function callChatApi({ api, body, streamProtocol, credentials, headers, abortController, restoreMessagesOnFailure, onResponse, onUpdate, onFinish, onToolCall, generateId, fetch, lastMessage, }: {
 api: string;
@@ -1010,13 +1060,17 @@ interface EmbedResult<VALUE> {
 */
 readonly usage: EmbeddingModelUsage;
 /**
-Optional
+Optional response data.
 */
-readonly
+readonly response?: {
 /**
 Response headers.
 */
 headers?: Record<string, string>;
+/**
+The response body.
+*/
+body?: unknown;
 };
 }

@@ -1032,7 +1086,7 @@ Embed a value using an embedding model. The type of the value is defined by the

 @returns A result object that contains the embedding, the value, and additional information.
 */
-declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
+declare function embed<VALUE>({ model, value, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
 /**
 The embedding model to use.
 */
@@ -1056,6 +1110,12 @@ declare function embed<VALUE>({ model, value, maxRetries: maxRetriesArg, abortSi
 Only applicable for HTTP-based providers.
 */
 headers?: Record<string, string>;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
 /**
 * Optional telemetry configuration (experimental).
 */
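The `providerOptions` pass-through added to `embed` (and to `embedMany` below) sits alongside the existing call options, and the new optional `response` field on `EmbedResult` exposes raw headers and body. A minimal sketch, assuming the `@ai-sdk/openai` provider; the model ID and the empty option object are illustrative:

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embedding, usage, response } = await embed({
  model: openai.embedding('text-embedding-3-small'), // illustrative model ID
  value: 'sunny day at the beach',
  // New in this release: provider-specific options, keyed by provider name.
  providerOptions: { openai: {} },
});

console.log(embedding.length, usage, response?.headers);
```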
@@ -1079,6 +1139,19 @@ interface EmbedManyResult<VALUE> {
 The embedding token usage.
 */
 readonly usage: EmbeddingModelUsage;
+/**
+Optional raw response data.
+*/
+readonly responses?: Array<{
+/**
+Response headers.
+*/
+headers?: Record<string, string>;
+/**
+The response body.
+*/
+body?: unknown;
+} | undefined>;
 }

 /**
@@ -1097,7 +1170,7 @@ has a limit on how many embeddings can be generated in a single call.

 @returns A result object that contains the embeddings, the value, and additional information.
 */
-declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, experimental_telemetry: telemetry, }: {
+declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, abortSignal, headers, providerOptions, experimental_telemetry: telemetry, }: {
 /**
 The embedding model to use.
 */
@@ -1125,6 +1198,12 @@ declare function embedMany<VALUE>({ model, values, maxRetries: maxRetriesArg, ab
 * Optional telemetry configuration (experimental).
 */
 experimental_telemetry?: TelemetrySettings;
+/**
+Additional provider-specific options. They are passed through
+to the provider from the AI SDK and enable provider-specific
+functionality that can be fully encapsulated in the provider.
+*/
+providerOptions?: ProviderOptions;
 }): Promise<EmbedManyResult<VALUE>>;

 type CallSettings = {
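The same pattern applies to `embedMany`, whose result now also carries an optional per-call `responses` array. A sketch under the same assumptions as above:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embeddings, responses } = await embedMany({
  model: openai.embedding('text-embedding-3-small'), // illustrative model ID
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
  providerOptions: { openai: {} },
});

// One optional metadata entry per underlying provider call.
responses?.forEach(r => console.log(r?.headers));
console.log(embeddings.length);
```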
@@ -1509,7 +1588,7 @@ interface GeneratedFile {
 readonly mediaType: string;
 }

-type
+type Reasoning = {
 type: 'text';
 text: string;
 signature?: string;
@@ -1839,26 +1918,22 @@ Optional conversion function that maps the tool result to multi-part tool conten
 }> & ({
 /**
 Function tool.
-
+*/
 type?: undefined | 'function';
 } | {
 /**
 Provider-defined tool.
-
+*/
 type: 'provider-defined';
 /**
 The ID of the tool. Should follow the format `<provider-name>.<tool-name>`.
-
+*/
 id: `${string}.${string}`;
 /**
 The arguments for configuring the tool. Must match the expected arguments defined by the provider for this tool.
-
+*/
 args: Record<string, unknown>;
 });
-/**
- * @deprecated Use `Tool` instead.
- */
-type CoreTool<PARAMETERS extends ToolParameters = any, RESULT = any> = Tool<PARAMETERS, RESULT>;
 /**
 Helper function for inferring the execute args of a tool.
 */
@@ -2693,10 +2768,6 @@ type ToolCallUnion<TOOLS extends ToolSet> = ValueOf<{
 args: TOOLS[NAME] extends Tool<infer PARAMETERS> ? PARAMETERS : never;
 };
 }>;
-/**
- * @deprecated Use `ToolCallUnion` instead.
- */
-type CoreToolCallUnion<TOOLS extends ToolSet> = ToolCallUnion<ToolSet>;
 type ToolCallArray<TOOLS extends ToolSet> = Array<ToolCallUnion<TOOLS>>;

 type ToToolsWithDefinedExecute<TOOLS extends ToolSet> = {
@@ -2712,10 +2783,6 @@ type ToToolResultObject<TOOLS extends ToolSet> = ValueOf<{
 };
 }>;
 type ToolResultUnion<TOOLS extends ToolSet> = ToToolResultObject<ToToolsWithDefinedExecute<TOOLS>>;
-/**
- * @deprecated Use `ToolResultUnion` instead.
- */
-type CoreToolResultUnion<TOOLS extends ToolSet> = ToolResultUnion<TOOLS>;
 type ToolResultArray<TOOLS extends ToolSet> = Array<ToolResultUnion<TOOLS>>;

 /**
@@ -2739,8 +2806,11 @@ type StepResult<TOOLS extends ToolSet> = {
 /**
 The reasoning that was generated during the generation.
 */
-readonly reasoning:
-
+readonly reasoning: Array<Reasoning>;
+/**
+The reasoning text that was generated during the generation.
+*/
+readonly reasoningText: string | undefined;
 /**
 The files that were generated during the generation.
 */
@@ -2821,19 +2891,19 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 */
 readonly text: string;
 /**
+The full reasoning that the model has generated.
+*/
+readonly reasoning: Array<Reasoning>;
+/**
 The reasoning text that the model has generated. Can be undefined if the model
 has only generated text.
 */
-readonly
+readonly reasoningText: string | undefined;
 /**
 The files that were generated. Empty array if no files were generated.
 */
 readonly files: Array<GeneratedFile>;
 /**
-The full reasoning that the model has generated.
-*/
-readonly reasoningDetails: Array<ReasoningDetail>;
-/**
 Sources that have been used as input to generate the response.
 For multi-step generation, the sources are accumulated from all steps.
 */
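On `generateText` results (and on each `StepResult`), `reasoningDetails` is replaced by `reasoning: Array<Reasoning>`, and the plain-text view moves to `reasoningText`. A minimal sketch of reading both, assuming a reasoning-capable model; the provider and model ID are illustrative, not part of this diff:

```ts
import { generateText } from 'ai';
import { anthropic } from '@ai-sdk/anthropic';

const result = await generateText({
  model: anthropic('claude-3-7-sonnet-20250219'), // illustrative model ID
  prompt: 'How many "r" characters are in strawberry?',
});

// New field names in this release:
console.log(result.reasoningText); // string | undefined
for (const part of result.reasoning) {
  // Reasoning parts with type 'text' carry the text and an optional signature.
  if (part.type === 'text') console.log(part.text, part.signature);
}
```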
@@ -3021,13 +3091,13 @@ declare function convertToCoreMessages<TOOLS extends ToolSet = never>(messages:
 type ToolCallRepairFunction<TOOLS extends ToolSet> = (options: {
 system: string | undefined;
 messages: CoreMessage[];
-toolCall:
+toolCall: LanguageModelV2ToolCall;
 tools: TOOLS;
 parameterSchema: (options: {
 toolName: string;
 }) => JSONSchema7$1;
 error: NoSuchToolError | InvalidToolArgumentsError;
-}) => Promise<
+}) => Promise<LanguageModelV2ToolCall | null>;

 /**
 Callback that is set using the `onStepFinish` option.
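The repair callback now receives a `LanguageModelV2ToolCall` and resolves to either a repaired `LanguageModelV2ToolCall` or `null` to skip repair. A minimal conforming sketch; the `toolName` property on the V2 tool call shape is an assumption here, and the logic is illustrative only:

```ts
import type { ToolCallRepairFunction, ToolSet } from 'ai';

// Minimal sketch: log the failed call and skip repair by returning null,
// which leaves the original NoSuchToolError / InvalidToolArgumentsError in place.
const repairToolCall: ToolCallRepairFunction<ToolSet> = async ({ toolCall, error }) => {
  console.warn(`tool call "${toolCall.toolName}" failed validation:`, error.message);
  return null;
};
```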
@@ -3254,17 +3324,17 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
 */
 readonly text: Promise<string>;
 /**
-The reasoning that has
+The full reasoning that the model has generated.

 Resolved when the response is finished.
-
-readonly reasoning: Promise<
+*/
+readonly reasoning: Promise<Array<Reasoning>>;
 /**
-The
+The reasoning that has been generated by the last step.

 Resolved when the response is finished.
-
-readonly
+*/
+readonly reasoningText: Promise<string | undefined>;
 /**
 The tool calls that have been executed in the last step.

@@ -3401,23 +3471,26 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
 toTextStreamResponse(init?: ResponseInit): Response;
 }
 type TextStreamPart<TOOLS extends ToolSet> = {
-type: 'text
-
+type: 'text';
+text: string;
 } | {
 type: 'reasoning';
-
+reasoningType: 'text';
+text: string;
 } | {
-type: 'reasoning
+type: 'reasoning';
+reasoningType: 'signature';
 signature: string;
 } | {
-type: '
+type: 'reasoning';
+reasoningType: 'redacted';
 data: string;
-} | {
-type: 'source';
-source: Source;
 } | ({
+type: 'source';
+} & Source) | {
 type: 'file';
-
+file: GeneratedFile;
+} | ({
 type: 'tool-call';
 } & ToolCallUnion<TOOLS>) | {
 type: 'tool-call-streaming-start';
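Stream parts are restructured: `text` and `reasoning` parts now carry their payload inline (with a `reasoningType` discriminator), `source` parts are flattened into the `Source` object, and `file` parts wrap a `GeneratedFile`. A sketch of consuming `fullStream` against the new shapes; the provider and model ID are illustrative:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = streamText({
  model: openai('gpt-4o'), // illustrative model ID
  prompt: 'Summarize the plot of Hamlet.',
});

for await (const part of result.fullStream) {
  switch (part.type) {
    case 'text':
      process.stdout.write(part.text); // text payload is now `text`
      break;
    case 'reasoning':
      // new discriminator on reasoning parts
      if (part.reasoningType === 'text') process.stdout.write(part.text);
      break;
    case 'file':
      console.log('generated file with media type', part.file.mediaType);
      break;
  }
}
```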
@@ -3524,7 +3597,7 @@ Callback that is set using the `onChunk` option.
 */
 type StreamTextOnChunkCallback<TOOLS extends ToolSet> = (event: {
 chunk: Extract<TextStreamPart<TOOLS>, {
-type: 'text
+type: 'text' | 'reasoning' | 'source' | 'tool-call' | 'tool-call-streaming-start' | 'tool-call-delta' | 'tool-result';
 }>;
 }) => Promise<void> | void;
 /**
@@ -4247,25 +4320,109 @@ Callback that is called when the LLM response and the final object validation ar
 }): StreamObjectResult<JSONValue$1, JSONValue$1, never>;

 /**
-
-
-
-
+* A generated audio file.
+*/
+interface GeneratedAudioFile extends GeneratedFile {
+/**
+* Audio format of the file (e.g., 'mp3', 'wav', etc.)
+*/
+readonly format: string;
+}

-
+/**
+The result of a `generateSpeech` call.
+It contains the audio data and additional information.
+*/
+interface SpeechResult {
 /**
-
+* The audio data as a base64 encoded string or binary data.
 */
-
+readonly audio: GeneratedAudioFile;
 /**
-
+Warnings for the call, e.g. unsupported settings.
+*/
+readonly warnings: Array<SpeechWarning>;
+/**
+Response metadata from the provider. There may be multiple responses if we made multiple calls to the model.
 */
-
+readonly responses: Array<SpeechModelResponseMetadata>;
 /**
-
+Provider metadata from the provider.
+*/
+readonly providerMetadata: Record<string, Record<string, JSONValue$1>>;
+}
+
+/**
+Generates speech audio using a speech model.
+
+@param model - The speech model to use.
+@param text - The text to convert to speech.
+@param voice - The voice to use for speech generation.
+@param outputFormat - The output format to use for speech generation e.g. "mp3", "wav", etc.
+@param instructions - Instructions for the speech generation e.g. "Speak in a slow and steady tone".
+@param speed - The speed of the speech generation.
+@param providerOptions - Additional provider-specific options that are passed through to the provider
+as body parameters.
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+@param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.
+
+@returns A result object that contains the generated audio data.
+*/
+declare function generateSpeech({ model, text, voice, outputFormat, instructions, speed, providerOptions, maxRetries: maxRetriesArg, abortSignal, headers, }: {
+/**
+The speech model to use.
+*/
+model: SpeechModelV1;
+/**
+The text to convert to speech.
 */
+text: string;
+/**
+The voice to use for speech generation.
+*/
+voice?: string;
+/**
+* The desired output format for the audio e.g. "mp3", "wav", etc.
+*/
+outputFormat?: 'mp3' | 'wav' | (string & {});
+/**
+Instructions for the speech generation e.g. "Speak in a slow and steady tone".
+*/
+instructions?: string;
+/**
+The speed of the speech generation.
+*/
+speed?: number;
+/**
+Additional provider-specific options that are passed through to the provider
+as body parameters.
+
+The outer record is keyed by the provider name, and the inner
+record is keyed by the provider-specific metadata key.
+```ts
+{
+"openai": {}
+}
+```
+*/
+providerOptions?: ProviderOptions;
+/**
+Maximum number of retries per speech model call. Set to 0 to disable retries.
+
+@default 2
+*/
+maxRetries?: number;
+/**
+Abort signal.
+*/
+abortSignal?: AbortSignal;
+/**
+Additional headers to include in the request.
+Only applicable for HTTP-based providers.
+*/
 headers?: Record<string, string>;
-}
+}): Promise<SpeechResult>;

 /**
 The result of a `transcribe` call.
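The new `generateSpeech` function is exported as `experimental_generateSpeech` (see the export list at the end of this file). A minimal sketch against the declared signature; the provider, model ID, and voice ID are illustrative assumptions:

```ts
import { experimental_generateSpeech as generateSpeech } from 'ai';
import { openai } from '@ai-sdk/openai';

const { audio, warnings, responses } = await generateSpeech({
  model: openai.speech('tts-1'), // illustrative; any SpeechModelV1 works
  text: 'Hello from the AI SDK.',
  voice: 'alloy', // illustrative voice ID
  outputFormat: 'mp3',
  speed: 1.0,
});

// GeneratedAudioFile extends GeneratedFile, so format and mediaType are both available.
console.log(audio.format, audio.mediaType, warnings, responses[0]?.modelId);
```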
@@ -4377,7 +4534,7 @@ declare function transcribe({ model, audio, providerOptions, maxRetries: maxRetr
 */
 declare function defaultSettingsMiddleware({ settings, }: {
 settings: Partial<LanguageModelV2CallOptions & {
-providerOptions?:
+providerOptions?: SharedV2ProviderOptions;
 }>;
 }): LanguageModelV2Middleware;

@@ -4418,15 +4575,6 @@ declare const wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, p
 modelId?: string;
 providerId?: string;
 }) => LanguageModelV2;
-/**
- * @deprecated Use `wrapLanguageModel` instead.
- */
-declare const experimental_wrapLanguageModel: ({ model, middleware: middlewareArg, modelId, providerId, }: {
-model: LanguageModelV2;
-middleware: LanguageModelV2Middleware | LanguageModelV2Middleware[];
-modelId?: string;
-providerId?: string;
-}) => LanguageModelV2;

 /**
 * Creates a custom provider with specified language models, text embedding models, and an optional fallback provider.
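With the deprecated `experimental_wrapLanguageModel` removed, middleware is applied via `wrapLanguageModel`, and `defaultSettingsMiddleware` now types its `providerOptions` as `SharedV2ProviderOptions`. A sketch combining the two; the model ID, the temperature default, and the empty provider option object are illustrative:

```ts
import { wrapLanguageModel, defaultSettingsMiddleware } from 'ai';
import { openai } from '@ai-sdk/openai';

// Wrap a model so every call inherits default settings unless overridden.
const modelWithDefaults = wrapLanguageModel({
  model: openai('gpt-4o'), // illustrative model ID
  middleware: defaultSettingsMiddleware({
    settings: {
      temperature: 0.2, // illustrative default call setting
      providerOptions: { openai: {} }, // now typed as SharedV2ProviderOptions
    },
  }),
});
```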
@@ -4497,21 +4645,13 @@ declare const experimental_createProviderRegistry: typeof createProviderRegistry
 *
 * @param vector1 - The first vector.
 * @param vector2 - The second vector.
- * @param options - Optional configuration.
- * @param options.throwErrorForEmptyVectors - If true, throws an error for empty vectors. Default: false.
 *
 * @returns The cosine similarity between vector1 and vector2.
 * @returns 0 if either vector is the zero vector.
 *
- * @throws {InvalidArgumentError} If throwErrorForEmptyVectors is true and vectors are empty.
 * @throws {InvalidArgumentError} If the vectors do not have the same length.
 */
-declare function cosineSimilarity(vector1: number[], vector2: number[]
-/**
- * @deprecated will be removed in 5.0
- */
-throwErrorForEmptyVectors?: boolean;
-}): number;
+declare function cosineSimilarity(vector1: number[], vector2: number[]): number;

 /**
 * Creates a ReadableStream that emits the provided values with an optional delay between each value.
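The deprecated options bag (`throwErrorForEmptyVectors`) is dropped, leaving a plain two-argument call. A short sketch; the embedding model ID is illustrative:

```ts
import { cosineSimilarity, embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'), // illustrative model ID
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});

// New signature: no options object; returns 0 for zero vectors and
// throws InvalidArgumentError on mismatched lengths.
console.log(cosineSimilarity(embeddings[0], embeddings[1]));
```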
@@ -4545,23 +4685,29 @@ declare class InvalidArgumentError extends AISDKError {
 }

 type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
-type: '
-
+type: 'stream-start';
+warnings: LanguageModelV2CallWarning[];
+} | {
+type: 'text';
+text: string;
 } | {
 type: 'reasoning';
-
+reasoningType: 'text';
+text: string;
 } | {
-type: 'reasoning
+type: 'reasoning';
+reasoningType: 'signature';
 signature: string;
 } | {
-type: '
+type: 'reasoning';
+reasoningType: 'redacted';
 data: string;
-} |
+} | {
 type: 'file';
-
-type: 'source';
-source: Source;
+file: GeneratedFile;
 } | ({
+type: 'source';
+} & Source) | ({
 type: 'tool-call';
 } & ToolCallUnion<TOOLS>) | {
 type: 'tool-call-streaming-start';
@@ -4878,4 +5024,4 @@ declare namespace llamaindexAdapter {
 };
 }

-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage,
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };