ai 5.0.0-canary.12 → 5.0.0-canary.13
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +38 -183
- package/dist/index.d.ts +38 -183
- package/dist/index.js +185 -479
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +175 -469
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +196 -32
- package/dist/internal/index.d.ts +196 -32
- package/dist/internal/index.js +468 -33
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +463 -33
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/mcp-stdio.d.ts +1 -0
- package/package.json +7 -4
- package/test.d.ts +1 -0
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,6 @@
 import { ToolCall, ToolResult, FetchFunction, Validator, IDGenerator } from '@ai-sdk/provider-utils';
 export { IDGenerator, ToolCall, ToolResult, createIdGenerator, generateId } from '@ai-sdk/provider-utils';
-import { EmbeddingModelV2, EmbeddingModelV2Embedding,
+import { EmbeddingModelV2, EmbeddingModelV2Embedding, ImageModelV2, ImageModelV2CallWarning, LanguageModelV2, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2Source, SharedV2ProviderMetadata, SharedV2ProviderOptions, TranscriptionModelV1, TranscriptionModelV1CallWarning, SpeechModelV1, SpeechModelV1CallWarning, JSONValue as JSONValue$1, JSONObject, LanguageModelV2CallOptions, AISDKError, LanguageModelV2ToolCall, JSONSchema7 as JSONSchema7$1, JSONParseError, TypeValidationError, LanguageModelV2Middleware, ProviderV2, NoSuchModelError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { ServerResponse } from 'node:http';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
@@ -20,12 +20,12 @@ type Embedding = EmbeddingModelV2Embedding;
 /**
 Image model that is used by the AI SDK Core functions.
 */
-type ImageModel =
+type ImageModel = ImageModelV2;
 /**
 Warning from the model provider for this call. The call will proceed, but e.g.
 some settings might not be supported, which can lead to suboptimal results.
 */
-type ImageGenerationWarning =
+type ImageGenerationWarning = ImageModelV2CallWarning;
 
 type ImageModelResponseMetadata = {
 /**
@@ -324,14 +324,10 @@ type ReasoningUIPart = {
 * The reasoning text.
 */
 reasoning: string;
-
-
-
-
-} | {
-type: 'redacted';
-data: string;
-}>;
+/**
+* The provider metadata.
+*/
+providerMetadata?: Record<string, any>;
 };
 /**
 * A tool invocation part of a message.
@@ -725,11 +721,10 @@ declare const dataStreamParts: readonly [DataStreamPart<"0", "text", string>, Da
 };
 }>, DataStreamPart<"f", "start_step", {
 messageId: string;
-}>, DataStreamPart<"g", "reasoning",
-
-
-
-}>, DataStreamPart<"k", "file", {
+}>, DataStreamPart<"g", "reasoning", {
+text: string;
+providerMetadata?: Record<string, any> | undefined;
+}>, DataStreamPart<"h", "source", LanguageModelV2Source>, DataStreamPart<"l", "reasoning_part_finish", {}>, DataStreamPart<"k", "file", {
 data: string;
 mimeType: string;
 }>];
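A hedged sketch of what the reshaped "g" (reasoning) part and the new "l" (reasoning_part_finish) part imply for code that writes the data stream, using the exported formatDataStreamPart; the wire strings in the final comment are illustrative.

import { formatDataStreamPart } from 'ai';

// "reasoning" now takes an object with text (and optional providerMetadata),
// and a separate "reasoning_part_finish" part closes the reasoning block.
const reasoningChunk = formatDataStreamPart('reasoning', { text: 'Let me check.' });
const reasoningEnd = formatDataStreamPart('reasoning_part_finish', {});
// roughly: 'g:{"text":"Let me check."}\n' and 'l:{}\n'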
@@ -817,14 +812,14 @@ declare function getMessageParts(message: Message | CreateMessage | UIMessage):
 */
 declare function isDeepEqualData(obj1: any, obj2: any): boolean;
 
-declare function parsePartialJson(jsonText: string | undefined): {
+declare function parsePartialJson(jsonText: string | undefined): Promise<{
 value: JSONValue$1 | undefined;
 state: 'undefined-input' | 'successful-parse' | 'repaired-parse' | 'failed-parse';
-}
+}>;
 
 declare function prepareAttachmentsForRequest(attachmentsFromOptions: FileList | Array<Attachment> | undefined): Promise<Attachment[]>;
 
-declare function processDataStream({ stream, onTextPart, onReasoningPart,
+declare function processDataStream({ stream, onTextPart, onReasoningPart, onReasoningPartFinish, onSourcePart, onFilePart, onDataPart, onErrorPart, onToolCallStreamingStartPart, onToolCallDeltaPart, onToolCallPart, onToolResultPart, onMessageAnnotationsPart, onFinishMessagePart, onFinishStepPart, onStartStepPart, }: {
 stream: ReadableStream<Uint8Array>;
 onTextPart?: (streamPart: (DataStreamPartType & {
 type: 'text';
@@ -832,11 +827,8 @@ declare function processDataStream({ stream, onTextPart, onReasoningPart, onReas
 onReasoningPart?: (streamPart: (DataStreamPartType & {
 type: 'reasoning';
 })['value']) => Promise<void> | void;
-
-type: '
-})['value']) => Promise<void> | void;
-onRedactedReasoningPart?: (streamPart: (DataStreamPartType & {
-type: 'redacted_reasoning';
+onReasoningPartFinish?: (streamPart: (DataStreamPartType & {
+type: 'reasoning_part_finish';
 })['value']) => Promise<void> | void;
 onFilePart?: (streamPart: (DataStreamPartType & {
 type: 'file';
@@ -1381,26 +1373,6 @@ interface ReasoningPart {
 */
 text: string;
 /**
-An optional signature for verifying that the reasoning originated from the model.
-*/
-signature?: string;
-/**
-Additional provider-specific metadata. They are passed through
-to the provider from the AI SDK and enable provider-specific
-functionality that can be fully encapsulated in the provider.
-*/
-providerOptions?: ProviderOptions;
-}
-/**
-Redacted reasoning content part of a prompt.
-*/
-interface RedactedReasoningPart {
-type: 'redacted-reasoning';
-/**
-Redacted reasoning data.
-*/
-data: string;
-/**
 Additional provider-specific metadata. They are passed through
 to the provider from the AI SDK and enable provider-specific
 functionality that can be fully encapsulated in the provider.
@@ -1518,7 +1490,7 @@ declare const coreAssistantMessageSchema: z.ZodType<CoreAssistantMessage>;
 Content of an assistant message.
 It can be a string or an array of text, image, reasoning, redacted reasoning, and tool call parts.
 */
-type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart |
+type AssistantContent = string | Array<TextPart | FilePart | ReasoningPart | ToolCallPart>;
 /**
 A tool message. It contains the result of one or more tool calls.
 */
@@ -1583,15 +1555,6 @@ interface GeneratedFile {
 readonly mediaType: string;
 }
 
-type Reasoning = {
-type: 'text';
-text: string;
-signature?: string;
-} | {
-type: 'redacted';
-data: string;
-};
-
 declare const JSONRPCRequestSchema: z.ZodObject<z.objectUtil.extendShape<{
 jsonrpc: z.ZodLiteral<"2.0">;
 id: z.ZodUnion<[z.ZodString, z.ZodNumber]>;
@@ -2801,7 +2764,7 @@ type StepResult<TOOLS extends ToolSet> = {
 /**
 The reasoning that was generated during the generation.
 */
-readonly reasoning: Array<
+readonly reasoning: Array<ReasoningPart>;
 /**
 The reasoning text that was generated during the generation.
 */
@@ -2883,7 +2846,7 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
 /**
 The full reasoning that the model has generated.
 */
-readonly reasoning: Array<
+readonly reasoning: Array<ReasoningPart>;
 /**
 The reasoning text that the model has generated. Can be undefined if the model
 has only generated text.
@@ -2963,16 +2926,16 @@ interface Output$1<OUTPUT, PARTIAL> {
 responseFormat: LanguageModelV2CallOptions['responseFormat'];
 parsePartial(options: {
 text: string;
-}): {
+}): Promise<{
 partial: PARTIAL;
-} | undefined
+} | undefined>;
 parseOutput(options: {
 text: string;
 }, context: {
 response: LanguageModelResponseMetadata;
 usage: LanguageModelUsage$1;
 finishReason: FinishReason;
-}): OUTPUT
+}): Promise<OUTPUT>;
 }
 declare const text: () => Output$1<string, string>;
 declare const object: <OUTPUT>({ schema: inputSchema, }: {
@@ -3305,7 +3268,7 @@ interface StreamTextResult<TOOLS extends ToolSet, PARTIAL_OUTPUT> {
 
 Resolved when the response is finished.
 */
-readonly reasoning: Promise<Array<
+readonly reasoning: Promise<Array<ReasoningPart>>;
 /**
 The reasoning that has been generated by the last step.
 
@@ -3452,16 +3415,10 @@ type TextStreamPart<TOOLS extends ToolSet> = {
 text: string;
 } | {
 type: 'reasoning';
-reasoningType: 'text';
 text: string;
+providerMetadata?: ProviderMetadata;
 } | {
-type: 'reasoning';
-reasoningType: 'signature';
-signature: string;
-} | {
-type: 'reasoning';
-reasoningType: 'redacted';
-data: string;
+type: 'reasoning-part-finish';
 } | ({
 type: 'source';
 } & Source) | {
@@ -3776,7 +3733,7 @@ declare function generateImage({ model, prompt, n, size, aspectRatio, seed, prov
 /**
 The image model to use.
 */
-model:
+model: ImageModelV2;
 /**
 The prompt that should be used to generate the image.
 */
@@ -3896,26 +3853,26 @@ A result object that contains the generated object, the finish reason, the token
 declare function generateObject<TYPE extends SCHEMA extends z.Schema ? Output extends 'array' ? Array<z.infer<SCHEMA>> : z.infer<SCHEMA> : SCHEMA extends Schema<infer T> ? Output extends 'array' ? Array<T> : T : never, SCHEMA extends z.Schema | Schema = z.Schema<JSONValue$1>, Output extends 'object' | 'array' | 'enum' | 'no-schema' = TYPE extends string ? 'enum' : 'object'>(options: Omit<CallSettings, 'stopSequences'> & Prompt & (Output extends 'enum' ? {
 /**
 The enum values that the model should use.
-
+*/
 enum: Array<TYPE>;
 mode?: 'json';
 output: 'enum';
 } : Output extends 'no-schema' ? {} : {
 /**
 The schema of the object that the model should generate.
-
+*/
 schema: SCHEMA;
 /**
 Optional name of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema name.
-
+*/
 schemaName?: string;
 /**
 Optional description of the output that should be generated.
 Used by some providers for additional LLM guidance, e.g.
 via tool or schema description.
-
+*/
 schemaDescription?: string;
 /**
 The mode to use for object generation.
@@ -3929,28 +3886,28 @@ The schema is converted into a JSON schema and used in one of the following ways
 Please note that most providers do not support all modes.
 
 Default and recommended: 'auto' (best mode for the model).
-
+*/
 mode?: 'auto' | 'json' | 'tool';
 }) & {
 output?: Output;
 /**
 The language model to use.
-
+*/
 model: LanguageModel;
 /**
 A function that attempts to repair the raw output of the mode
 to enable JSON parsing.
-
+*/
 experimental_repairText?: RepairTextFunction;
 /**
 Optional telemetry configuration (experimental).
-
+*/
 experimental_telemetry?: TelemetrySettings;
 /**
 Additional provider-specific options. They are passed through
 to the provider from the AI SDK and enable provider-specific
 functionality that can be fully encapsulated in the provider.
-*/
+*/
 providerOptions?: ProviderOptions;
 /**
 * Internal. For test use only. May change without notice.
@@ -4668,16 +4625,10 @@ type SingleRequestTextStreamPart<TOOLS extends ToolSet> = {
 text: string;
 } | {
 type: 'reasoning';
-reasoningType: 'text';
 text: string;
+providerMetadata?: ProviderMetadata;
 } | {
-type: 'reasoning';
-reasoningType: 'signature';
-signature: string;
-} | {
-type: 'reasoning';
-reasoningType: 'redacted';
-data: string;
+type: 'reasoning-part-finish';
 } | {
 type: 'file';
 file: GeneratedFile;
@@ -4903,100 +4854,4 @@ declare class RetryError extends AISDKError {
 static isInstance(error: unknown): error is RetryError;
 }
 
-
-* Configuration options and helper callback methods for stream lifecycle events.
-*/
-interface StreamCallbacks {
-/** `onStart`: Called once when the stream is initialized. */
-onStart?: () => Promise<void> | void;
-/**
-* `onCompletion`: Called for each tokenized message.
-*
-* @deprecated Use `onFinal` instead.
-*/
-onCompletion?: (completion: string) => Promise<void> | void;
-/** `onFinal`: Called once when the stream is closed with the final completion message. */
-onFinal?: (completion: string) => Promise<void> | void;
-/** `onToken`: Called for each tokenized message. */
-onToken?: (token: string) => Promise<void> | void;
-/** `onText`: Called for each text chunk. */
-onText?: (text: string) => Promise<void> | void;
-}
-
-type LangChainImageDetail = 'auto' | 'low' | 'high';
-type LangChainMessageContentText = {
-type: 'text';
-text: string;
-};
-type LangChainMessageContentImageUrl = {
-type: 'image_url';
-image_url: string | {
-url: string;
-detail?: LangChainImageDetail;
-};
-};
-type LangChainMessageContentComplex = LangChainMessageContentText | LangChainMessageContentImageUrl | (Record<string, any> & {
-type?: 'text' | 'image_url' | string;
-}) | (Record<string, any> & {
-type?: never;
-});
-type LangChainMessageContent = string | LangChainMessageContentComplex[];
-type LangChainAIMessageChunk = {
-content: LangChainMessageContent;
-};
-type LangChainStreamEvent = {
-event: string;
-data: any;
-};
-/**
-Converts LangChain output streams to an AI SDK Data Stream.
-
-The following streams are supported:
-- `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
-- `string` streams (LangChain `StringOutputParser` output)
-*/
-declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array<ArrayBufferLike>>;
-declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
-init?: ResponseInit;
-data?: StreamData;
-callbacks?: StreamCallbacks;
-}): Response;
-declare function mergeIntoDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options: {
-dataStream: DataStreamWriter;
-callbacks?: StreamCallbacks;
-}): void;
-
-declare namespace langchainAdapter {
-export {
-mergeIntoDataStream$1 as mergeIntoDataStream,
-toDataStream$1 as toDataStream,
-toDataStreamResponse$1 as toDataStreamResponse,
-};
-}
-
-type EngineResponse = {
-delta: string;
-};
-declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<Uint8Array<ArrayBufferLike>>;
-declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
-init?: ResponseInit;
-data?: StreamData;
-callbacks?: StreamCallbacks;
-}): Response;
-declare function mergeIntoDataStream(stream: AsyncIterable<EngineResponse>, options: {
-dataStream: DataStreamWriter;
-callbacks?: StreamCallbacks;
-}): void;
-
-declare const llamaindexAdapter_mergeIntoDataStream: typeof mergeIntoDataStream;
-declare const llamaindexAdapter_toDataStream: typeof toDataStream;
-declare const llamaindexAdapter_toDataStreamResponse: typeof toDataStreamResponse;
-declare namespace llamaindexAdapter {
-export {
-llamaindexAdapter_mergeIntoDataStream as mergeIntoDataStream,
-llamaindexAdapter_toDataStream as toDataStream,
-llamaindexAdapter_toDataStreamResponse as toDataStreamResponse,
-};
-}
-
-export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
+export { AssistantContent, Attachment, CallSettings, CallWarning, ChatRequest, ChatRequestOptions, ChunkDetector, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataStreamOptions, DataStreamPart, DataStreamString, DataStreamWriter, DeepPartial, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, FilePart, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, IdGenerator, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelResponseMetadata, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolArgumentsError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage$1 as LanguageModelUsage, MCPClientError, MCPTransport, Message, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputSpecifiedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, Prompt, Provider, ProviderMetadata, ProviderOptions, ProviderRegistryProvider, ReasoningUIPart, RepairTextFunction, RequestOptions, RetryError, Schema, SourceUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StepResult, StepStartUIPart, StreamData, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextPart, TextStreamPart, TextUIPart, Tool, ToolCallPart, ToolCallRepairError, ToolCallRepairFunction, ToolCallUnion, ToolChoice, ToolContent, ToolExecutionError, ToolExecutionOptions, ToolInvocation, ToolInvocationUIPart, ToolResultPart, ToolResultUnion, ToolSet, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, UIMessage, UseChatOptions, UseCompletionOptions, UserContent, appendClientMessage, appendResponseMessages, asSchema, callChatApi, callCompletionApi, convertToCoreMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createDataStream, createDataStreamResponse, createProviderRegistry, customProvider, defaultSettingsMiddleware, embed, embedMany, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractMaxToolInvocationStep, extractReasoningMiddleware, fillMessageParts, formatDataStreamPart, generateObject, generateText, getMessageParts, getTextFromDataUrl, isAssistantMessageWithCompletedToolCalls, isDeepEqualData, jsonSchema, parseDataStreamPart, parsePartialJson, pipeDataStreamToResponse, prepareAttachmentsForRequest, processDataStream, processTextStream, shouldResubmitMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, streamObject, streamText, tool, updateToolCallResult, wrapLanguageModel, zodSchema };
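One practical consequence of the new export list above: LangChainAdapter and LlamaIndexAdapter (along with the removed StreamCallbacks helpers) are no longer exported from the package root, so imports like the commented one below stop type-checking against this build; core exports remain available.

// No longer available from the root export as of 5.0.0-canary.13:
// import { LangChainAdapter, LlamaIndexAdapter } from 'ai';

// Core exports such as these remain:
import { streamText, generateText, processDataStream } from 'ai';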