ai 5.1.0-beta.23 → 5.1.0-beta.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +221 -189
- package/dist/index.d.ts +221 -189
- package/dist/index.js +44 -15
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +44 -15
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +1 -1
- package/dist/internal/index.mjs +1 -1
- package/package.json +1 -1
package/dist/index.d.ts
CHANGED
@@ -1,160 +1,16 @@
 export { createGateway, gateway } from '@ai-sdk/gateway';
-import {
+import { Tool, InferToolInput, InferToolOutput, AssistantModelMessage, ToolModelMessage, ReasoningPart, ModelMessage, Schema, SystemModelMessage, UserModelMessage, ProviderOptions, IdGenerator, ToolCall, InferSchema, FlexibleSchema, DataContent, Validator, StandardSchemaV1, Resolvable, FetchFunction } from '@ai-sdk/provider-utils';
 export { AssistantContent, AssistantModelMessage, DataContent, FilePart, IdGenerator, ImagePart, InferToolInput, InferToolOutput, ModelMessage, Schema, SystemModelMessage, TextPart, Tool, ToolApprovalRequest, ToolApprovalResponse, ToolCallOptions, ToolCallPart, ToolContent, ToolExecuteFunction, ToolModelMessage, ToolResultPart, UserContent, UserModelMessage, asSchema, createIdGenerator, dynamicTool, generateId, jsonSchema, parseJsonEventStream, tool, zodSchema } from '@ai-sdk/provider-utils';
-import { AttributeValue, Tracer } from '@opentelemetry/api';
 import * as _ai_sdk_provider from '@ai-sdk/provider';
 import { EmbeddingModelV3, EmbeddingModelV2, EmbeddingModelV3Embedding, ImageModelV3, ImageModelV3CallWarning, ImageModelV3ProviderMetadata, JSONValue as JSONValue$1, LanguageModelV3, LanguageModelV2, LanguageModelV3FinishReason, LanguageModelV3CallWarning, LanguageModelV3Source, LanguageModelV3Middleware, SharedV3ProviderMetadata, SpeechModelV3, SpeechModelV3CallWarning, TranscriptionModelV3, TranscriptionModelV3CallWarning, LanguageModelV3Usage, LanguageModelV3CallOptions, AISDKError, LanguageModelV3ToolCall, JSONSchema7, JSONParseError, TypeValidationError, ProviderV3, ProviderV2, NoSuchModelError, JSONObject } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, JSONSchema7, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TooManyEmbeddingValuesForCallError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
-import
+import { ServerResponse } from 'node:http';
 import * as z4 from 'zod/v4';
 import { z } from 'zod/v4';
-import
+import * as z3 from 'zod/v3';
+import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { ServerResponse as ServerResponse$1 } from 'http';

-type CallSettings = {
-    /**
-     Maximum number of tokens to generate.
-     */
-    maxOutputTokens?: number;
-    /**
-     Temperature setting. The range depends on the provider and model.
-
-     It is recommended to set either `temperature` or `topP`, but not both.
-     */
-    temperature?: number;
-    /**
-     Nucleus sampling. This is a number between 0 and 1.
-
-     E.g. 0.1 would mean that only tokens with the top 10% probability mass
-     are considered.
-
-     It is recommended to set either `temperature` or `topP`, but not both.
-     */
-    topP?: number;
-    /**
-     Only sample from the top K options for each subsequent token.
-
-     Used to remove "long tail" low probability responses.
-     Recommended for advanced use cases only. You usually only need to use temperature.
-     */
-    topK?: number;
-    /**
-     Presence penalty setting. It affects the likelihood of the model to
-     repeat information that is already in the prompt.
-
-     The presence penalty is a number between -1 (increase repetition)
-     and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-     */
-    presencePenalty?: number;
-    /**
-     Frequency penalty setting. It affects the likelihood of the model
-     to repeatedly use the same words or phrases.
-
-     The frequency penalty is a number between -1 (increase repetition)
-     and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-     */
-    frequencyPenalty?: number;
-    /**
-     Stop sequences.
-     If set, the model will stop generating text when one of the stop sequences is generated.
-     Providers may have limits on the number of stop sequences.
-     */
-    stopSequences?: string[];
-    /**
-     The seed (integer) to use for random sampling. If set and supported
-     by the model, calls will generate deterministic results.
-     */
-    seed?: number;
-    /**
-     Maximum number of retries. Set to 0 to disable retries.
-
-     @default 2
-     */
-    maxRetries?: number;
-    /**
-     Abort signal.
-     */
-    abortSignal?: AbortSignal;
-    /**
-     Additional HTTP headers to be sent with the request.
-     Only applicable for HTTP-based providers.
-     */
-    headers?: Record<string, string | undefined>;
-};
-
-/**
- Prompt part of the AI function options.
- It contains a system message, a simple text prompt, or a list of messages.
- */
-type Prompt = {
-    /**
-     System message to include in the prompt. Can be used with `prompt` or `messages`.
-     */
-    system?: string;
-} & ({
-    /**
-     A prompt. It can be either a text prompt or a list of messages.
-
-     You can either use `prompt` or `messages` but not both.
-     */
-    prompt: string | Array<ModelMessage>;
-    /**
-     A list of messages.
-
-     You can either use `prompt` or `messages` but not both.
-     */
-    messages?: never;
-} | {
-    /**
-     A list of messages.
-
-     You can either use `prompt` or `messages` but not both.
-     */
-    messages: Array<ModelMessage>;
-    /**
-     A prompt. It can be either a text prompt or a list of messages.
-
-     You can either use `prompt` or `messages` but not both.
-     */
-    prompt?: never;
-});
-
-/**
- * Telemetry configuration.
- */
-type TelemetrySettings = {
-    /**
-     * Enable or disable telemetry. Disabled by default while experimental.
-     */
-    isEnabled?: boolean;
-    /**
-     * Enable or disable input recording. Enabled by default.
-     *
-     * You might want to disable input recording to avoid recording sensitive
-     * information, to reduce data transfers, or to increase performance.
-     */
-    recordInputs?: boolean;
-    /**
-     * Enable or disable output recording. Enabled by default.
-     *
-     * You might want to disable output recording to avoid recording sensitive
-     * information, to reduce data transfers, or to increase performance.
-     */
-    recordOutputs?: boolean;
-    /**
-     * Identifier for this function. Used to group telemetry data by function.
-     */
-    functionId?: string;
-    /**
-     * Additional information to include in the telemetry data.
-     */
-    metadata?: Record<string, AttributeValue>;
-    /**
-     * A custom tracer to use for the telemetry data.
-     */
-    tracer?: Tracer;
-};
-
 /**
 Embedding model that is used by the AI SDK Core functions.
 */
@@ -374,31 +230,6 @@ type EmbeddingModelUsage = {
     tokens: number;
 };

-/**
- * Experimental. Can change in patch versions without warning.
- *
- * Download function. Called with the array of URLs and a boolean indicating
- * whether the URL is supported by the model.
- *
- * The download function can decide for each URL:
- * - to return null (which means that the URL should be passed to the model)
- * - to download the asset and return the data (incl. retries, authentication, etc.)
- *
- * Should throw DownloadError if the download fails.
- *
- * Should return an array of objects sorted by the order of the requested downloads.
- * For each object, the data should be a Uint8Array if the URL was downloaded.
- * For each object, the mediaType should be the media type of the downloaded asset.
- * For each object, the data should be null if the URL should be passed through as is.
- */
-type DownloadFunction = (options: Array<{
-    url: URL;
-    isUrlSupportedByModel: boolean;
-}>) => PromiseLike<Array<{
-    data: Uint8Array;
-    mediaType: string | undefined;
-} | null>>;
-
 /**
  * A generated file.
  */
@@ -807,6 +638,175 @@ interface GenerateTextResult<TOOLS extends ToolSet, OUTPUT> {
     readonly experimental_output: OUTPUT;
 }

+type CallSettings = {
+    /**
+     Maximum number of tokens to generate.
+     */
+    maxOutputTokens?: number;
+    /**
+     Temperature setting. The range depends on the provider and model.
+
+     It is recommended to set either `temperature` or `topP`, but not both.
+     */
+    temperature?: number;
+    /**
+     Nucleus sampling. This is a number between 0 and 1.
+
+     E.g. 0.1 would mean that only tokens with the top 10% probability mass
+     are considered.
+
+     It is recommended to set either `temperature` or `topP`, but not both.
+     */
+    topP?: number;
+    /**
+     Only sample from the top K options for each subsequent token.
+
+     Used to remove "long tail" low probability responses.
+     Recommended for advanced use cases only. You usually only need to use temperature.
+     */
+    topK?: number;
+    /**
+     Presence penalty setting. It affects the likelihood of the model to
+     repeat information that is already in the prompt.
+
+     The presence penalty is a number between -1 (increase repetition)
+     and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+     */
+    presencePenalty?: number;
+    /**
+     Frequency penalty setting. It affects the likelihood of the model
+     to repeatedly use the same words or phrases.
+
+     The frequency penalty is a number between -1 (increase repetition)
+     and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+     */
+    frequencyPenalty?: number;
+    /**
+     Stop sequences.
+     If set, the model will stop generating text when one of the stop sequences is generated.
+     Providers may have limits on the number of stop sequences.
+     */
+    stopSequences?: string[];
+    /**
+     The seed (integer) to use for random sampling. If set and supported
+     by the model, calls will generate deterministic results.
+     */
+    seed?: number;
+    /**
+     Maximum number of retries. Set to 0 to disable retries.
+
+     @default 2
+     */
+    maxRetries?: number;
+    /**
+     Abort signal.
+     */
+    abortSignal?: AbortSignal;
+    /**
+     Additional HTTP headers to be sent with the request.
+     Only applicable for HTTP-based providers.
+     */
+    headers?: Record<string, string | undefined>;
+};
+
+/**
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
+ */
+type Prompt = {
+    /**
+     System message to include in the prompt. Can be used with `prompt` or `messages`.
+     */
+    system?: string;
+} & ({
+    /**
+     A prompt. It can be either a text prompt or a list of messages.
+
+     You can either use `prompt` or `messages` but not both.
+     */
+    prompt: string | Array<ModelMessage>;
+    /**
+     A list of messages.
+
+     You can either use `prompt` or `messages` but not both.
+     */
+    messages?: never;
+} | {
+    /**
+     A list of messages.
+
+     You can either use `prompt` or `messages` but not both.
+     */
+    messages: Array<ModelMessage>;
+    /**
+     A prompt. It can be either a text prompt or a list of messages.
+
+     You can either use `prompt` or `messages` but not both.
+     */
+    prompt?: never;
+});
+
+/**
+ * Telemetry configuration.
+ */
+type TelemetrySettings = {
+    /**
+     * Enable or disable telemetry. Disabled by default while experimental.
+     */
+    isEnabled?: boolean;
+    /**
+     * Enable or disable input recording. Enabled by default.
+     *
+     * You might want to disable input recording to avoid recording sensitive
+     * information, to reduce data transfers, or to increase performance.
+     */
+    recordInputs?: boolean;
+    /**
+     * Enable or disable output recording. Enabled by default.
+     *
+     * You might want to disable output recording to avoid recording sensitive
+     * information, to reduce data transfers, or to increase performance.
+     */
+    recordOutputs?: boolean;
+    /**
+     * Identifier for this function. Used to group telemetry data by function.
+     */
+    functionId?: string;
+    /**
+     * Additional information to include in the telemetry data.
+     */
+    metadata?: Record<string, AttributeValue>;
+    /**
+     * A custom tracer to use for the telemetry data.
+     */
+    tracer?: Tracer;
+};
+
+/**
+ * Experimental. Can change in patch versions without warning.
+ *
+ * Download function. Called with the array of URLs and a boolean indicating
+ * whether the URL is supported by the model.
+ *
+ * The download function can decide for each URL:
+ * - to return null (which means that the URL should be passed to the model)
+ * - to download the asset and return the data (incl. retries, authentication, etc.)
+ *
+ * Should throw DownloadError if the download fails.
+ *
+ * Should return an array of objects sorted by the order of the requested downloads.
+ * For each object, the data should be a Uint8Array if the URL was downloaded.
+ * For each object, the mediaType should be the media type of the downloaded asset.
+ * For each object, the data should be null if the URL should be passed through as is.
+ */
+type DownloadFunction = (options: Array<{
+    url: URL;
+    isUrlSupportedByModel: boolean;
+}>) => PromiseLike<Array<{
+    data: Uint8Array;
+    mediaType: string | undefined;
+} | null>>;
+
 /**
 Create a type from an object with all keys and nested keys set to optional.
 The helper supports normal objects and Zod schemas (which are resolved automatically).
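Note: the `DownloadFunction` type is not new; it moves from the top of the file into this section. It backs the `experimental_download` option of `generateText`/`streamText`. A minimal sketch of a conforming implementation, assuming `DownloadError` and the `Experimental_DownloadFunction` alias are imported from `ai` (both appear in the export list further down); the exact `DownloadError` constructor arguments are an assumption:

import { DownloadError, type Experimental_DownloadFunction } from 'ai';

// Download unsupported URLs ourselves; pass model-supported URLs through.
const download: Experimental_DownloadFunction = requests =>
  Promise.all(
    requests.map(async ({ url, isUrlSupportedByModel }) => {
      if (isUrlSupportedByModel) {
        return null; // null: hand the URL to the model as-is
      }
      const response = await fetch(url);
      if (!response.ok) {
        // assumed constructor shape
        throw new DownloadError({
          url: url.toString(),
          statusCode: response.status,
          statusText: response.statusText,
        });
      }
      return {
        data: new Uint8Array(await response.arrayBuffer()),
        mediaType: response.headers.get('content-type') ?? undefined,
      };
    }),
  );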
@@ -991,6 +991,21 @@ Callback that is set using the `onStepFinish` option.
 */
 type GenerateTextOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
 /**
+Callback that is set using the `onFinish` option.
+
+@param event - The event that is passed to the callback.
+ */
+type GenerateTextOnFinishCallback<TOOLS extends ToolSet> = (event: StepResult<TOOLS> & {
+    /**
+     Details for all steps.
+     */
+    readonly steps: StepResult<TOOLS>[];
+    /**
+     Total usage for all steps. This is the sum of the usage of all steps.
+     */
+    readonly totalUsage: LanguageModelUsage;
+}) => PromiseLike<void> | void;
+/**
 Generate a text and call tools for a given prompt using a language model.

 This function does not stream the output. If you want to stream the output, use `streamText` instead.
@@ -1032,11 +1047,12 @@ If set and supported by the model, calls will generate deterministic results.
 @param experimental_generateMessageId - Generate a unique ID for each message.

 @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
+@param onFinish - Callback that is called when all steps are finished and the response is complete.

 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never>({ model: modelArg, tools, toolChoice, system, prompt, messages, maxRetries: maxRetriesArg, abortSignal, headers, stopWhen, experimental_output: output, experimental_telemetry: telemetry, providerOptions, experimental_activeTools, activeTools, experimental_prepareStep, prepareStep, experimental_repairToolCall: repairToolCall, experimental_download: download, experimental_context, _internal: { generateId, currentDate, }, onStepFinish, onFinish, ...settings }: CallSettings & Prompt & {
     /**
     The language model to use.
     */

@@ -1098,9 +1114,13 @@ A function that attempts to repair a tool call that failed to parse.
     */
     experimental_repairToolCall?: ToolCallRepairFunction<NoInfer<TOOLS>>;
     /**
-
-
+     * Callback that is called when each step (LLM call) is finished, including intermediate steps.
+     */
     onStepFinish?: GenerateTextOnStepFinishCallback<NoInfer<TOOLS>>;
+    /**
+     * Callback that is called when all steps are finished and the response is complete.
+     */
+    onFinish?: GenerateTextOnFinishCallback<NoInfer<TOOLS>>;
     /**
     * Context that is passed into tool execution.
     *
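The two hunks above wire the new `onFinish` option into `generateText`, typed by `GenerateTextOnFinishCallback`. A sketch of how it might be used; the model id and tool are placeholders, while the `event` fields (`steps`, `totalUsage`) come straight from the callback type:

import { generateText, stepCountIs, tool } from 'ai';
import { z } from 'zod';

const result = await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  tools: {
    weather: tool({
      description: 'Get the weather for a city.',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }),
    }),
  },
  stopWhen: stepCountIs(5),
  prompt: 'What is the weather in Berlin?',
  onStepFinish: step => {
    console.log('step done:', step.finishReason);
  },
  // New in this diff: fires once after the final step.
  onFinish: event => {
    console.log('steps:', event.steps.length);
    console.log('total usage:', event.totalUsage);
  },
});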
@@ -1244,13 +1264,10 @@ If set and supported by the model, calls will generate deterministic results.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 @param headers - Additional HTTP headers to be sent with the request. Only applicable for HTTP-based providers.

-@param maxSteps - Maximum number of sequential LLM calls (steps), e.g. when you use tool calls.
-
 @param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
 @param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
 @param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
-@param onFinish - Callback that is called when
-(for tools that have an `execute` function) are finished.
+@param onFinish - Callback that is called when all steps are finished and the response is complete.

 @return
 A result object for accessing different stream types and additional information.
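`streamText` already had `onFinish`; this hunk only rewords its description to match the `generateText` callback. For contrast, a sketch of the streaming variant (placeholder model id; assumes the stream is consumed so the callback can fire at the end):

import { streamText } from 'ai';

const result = streamText({
  model: 'openai/gpt-4o', // placeholder model id
  prompt: 'Write a haiku about package diffs.',
  onFinish: event => {
    // Per the updated doc: called when all steps are finished
    // and the response is complete.
    console.log('total usage:', event.totalUsage);
  },
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}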
@@ -2279,6 +2296,16 @@ type TextStreamPart<TOOLS extends ToolSet> = {
     rawValue: unknown;
 };

+/**
+Callback that is set using the `onStepFinish` option.
+
+@param stepResult - The result of the step.
+ */
+type AgentOnStepFinishCallback<TOOLS extends ToolSet> = (stepResult: StepResult<TOOLS>) => Promise<void> | void;
+
+/**
+ * Configuration options for an agent.
+ */
 type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never> = CallSettings & {
     /**
      * The name of the agent.

@@ -2335,7 +2362,7 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
     /**
     Callback that is called when each step (LLM call) is finished, including intermediate steps.
     */
-    onStepFinish?:
+    onStepFinish?: AgentOnStepFinishCallback<NoInfer<TOOLS>>;
     /**
     Additional provider-specific options. They are passed through
     to the provider from the AI SDK and enable provider-specific
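With `AgentSettings.onStepFinish` now given the named `AgentOnStepFinishCallback` type, the step result is typed against the agent's tool set. A sketch with placeholder model and settings:

import { Agent, stepCountIs } from 'ai';

const agent = new Agent({
  model: 'openai/gpt-4o', // placeholder model id
  system: 'You are a terse assistant.',
  stopWhen: stepCountIs(3),
  // stepResult is a StepResult<TOOLS> via AgentOnStepFinishCallback.
  onStepFinish: stepResult => {
    console.log('step usage:', stepResult.usage);
  },
});

const { text } = await agent.generate({ prompt: 'Summarize this diff.' });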
@@ -2350,14 +2377,8 @@ type AgentSettings<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = never
      * @default undefined
      */
     experimental_context?: unknown;
-    /**
-     * Internal. For test use only. May change without notice.
-     */
-    _internal?: {
-        generateId?: IdGenerator;
-        currentDate?: () => Date;
-    };
 };
+
 /**
  * The Agent class provides a structured way to encapsulate LLM configuration, tools,
  * and behavior into reusable components.

@@ -2378,7 +2399,13 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
     * The tools that the agent can use.
     */
    get tools(): TOOLS;
+    /**
+     * Generates an output from the agent (non-streaming).
+     */
    generate(options: Prompt): Promise<GenerateTextResult<TOOLS, OUTPUT>>;
+    /**
+     * Streams an output from the agent (streaming).
+     */
    stream(options: Prompt): StreamTextResult<TOOLS, OUTPUT_PARTIAL>;
    /**
     * Creates a response object that streams UI messages to the client.

@@ -2387,7 +2414,12 @@ declare class Agent<TOOLS extends ToolSet, OUTPUT = never, OUTPUT_PARTIAL = neve
        messages: UIMessage<never, never, InferUITools<TOOLS>>[];
    }): Response;
 }
+
+/**
+ * Infer the type of the tools of an agent.
+ */
 type InferAgentTools<AGENT> = AGENT extends Agent<infer TOOLS, any, any> ? TOOLS : never;
+
 /**
  * Infer the UI message type of an agent.
  */
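This hunk documents `InferAgentTools`, on which `InferAgentUIMessage` builds. A type-level sketch using `InferAgentUIMessage` (which appears in the export list below; `InferAgentTools` does not, so it may remain internal), with a placeholder agent:

import { Agent, tool, type InferAgentUIMessage } from 'ai';
import { z } from 'zod';

const weatherAgent = new Agent({
  model: 'openai/gpt-4o', // placeholder model id
  tools: {
    weather: tool({
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }),
    }),
  },
});

// UI message type carrying this agent's tool parts (e.g. for useChat).
type WeatherAgentUIMessage = InferAgentUIMessage<typeof weatherAgent>;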
@@ -4740,4 +4772,4 @@ declare global {
     var AI_SDK_LOG_WARNINGS: LogWarningsFunction | undefined | false;
 }

-export { AbstractChat, Agent, AgentSettings, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferAgentUIMessage, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoOutputSpecifiedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, MCPClient as experimental_MCPClient, MCPClientConfig as experimental_MCPClientConfig, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDeepEqualData, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, readUIMessageStream, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapLanguageModel, wrapProvider };
+export { AbstractChat, Agent, AgentOnStepFinishCallback, AgentSettings, AsyncIterableStream, CallSettings, CallWarning, ChatAddToolApproveResponseFunction, ChatInit, ChatOnDataCallback, ChatOnErrorCallback, ChatOnFinishCallback, ChatOnToolCallCallback, ChatRequestOptions, ChatState, ChatStatus, ChatTransport, ChunkDetector, CompletionRequestOptions, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreToolMessage, CoreUserMessage, CreateUIMessage, DataUIPart, DeepPartial, DefaultChatTransport, DownloadError, DynamicToolCall, DynamicToolError, DynamicToolResult, DynamicToolUIPart, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, ErrorHandler, Agent as Experimental_Agent, AgentSettings as Experimental_AgentSettings, DownloadFunction as Experimental_DownloadFunction, GenerateImageResult as Experimental_GenerateImageResult, GeneratedFile as Experimental_GeneratedImage, InferAgentUIMessage as Experimental_InferAgentUIMessage, LogWarningsFunction as Experimental_LogWarningsFunction, SpeechResult as Experimental_SpeechResult, TranscriptionResult as Experimental_TranscriptionResult, Warning as Experimental_Warning, FileUIPart, FinishReason, GenerateObjectResult, GenerateTextOnFinishCallback, GenerateTextOnStepFinishCallback, GenerateTextResult, GeneratedAudioFile, GeneratedFile, HttpChatTransport, HttpChatTransportInitOptions, ImageModel, ImageGenerationWarning as ImageModelCallWarning, ImageModelProviderMetadata, ImageModelResponseMetadata, InferAgentUIMessage, InferUIDataParts, InferUIMessageChunk, InferUITool, InferUITools, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidStreamPartError, InvalidToolInputError, JSONRPCError, JSONRPCMessage, JSONRPCNotification, JSONRPCRequest, JSONRPCResponse, JSONValue, JsonToSseTransformStream, LanguageModel, LanguageModelMiddleware, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, MCPClientError, MCPTransport, MessageConversionError, NoImageGeneratedError, NoObjectGeneratedError, NoOutputGeneratedError, NoOutputSpecifiedError, NoSpeechGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, output as Output, PrepareReconnectToStreamRequest, PrepareSendMessagesRequest, PrepareStepFunction, PrepareStepResult, Prompt, Provider, ProviderMetadata, ProviderRegistryProvider, ReasoningOutput, ReasoningUIPart, RepairTextFunction, RetryError, SafeValidateUIMessagesResult, SerialJobExecutor, SourceDocumentUIPart, SourceUrlUIPart, SpeechModel, SpeechModelResponseMetadata, SpeechWarning, StaticToolCall, StaticToolError, StaticToolOutputDenied, StaticToolResult, StepResult, StepStartUIPart, StopCondition, StreamObjectOnFinishCallback, StreamObjectResult, StreamTextOnChunkCallback, StreamTextOnErrorCallback, StreamTextOnFinishCallback, StreamTextOnStepFinishCallback, StreamTextResult, StreamTextTransform, TelemetrySettings, TextStreamChatTransport, TextStreamPart, TextUIPart, ToolApprovalRequestOutput, ToolCallRepairError, ToolCallRepairFunction, ToolChoice, ToolSet, ToolUIPart, TranscriptionModel, TranscriptionModelResponseMetadata, TranscriptionWarning, TypedToolCall, TypedToolError, TypedToolOutputDenied, TypedToolResult, UIDataPartSchemas, UIDataTypes, UIMessage, UIMessageChunk, UIMessagePart, UIMessageStreamOnFinishCallback, UIMessageStreamOptions, UIMessageStreamWriter, UITool, UIToolInvocation, UITools, UI_MESSAGE_STREAM_HEADERS, UnsupportedModelVersionError, UseCompletionOptions, assistantModelMessageSchema, callCompletionApi, consumeStream, convertFileListToFileUIParts, convertToCoreMessages, convertToModelMessages, coreAssistantMessageSchema, coreMessageSchema, coreSystemMessageSchema, coreToolMessageSchema, coreUserMessageSchema, cosineSimilarity, createProviderRegistry, createTextStreamResponse, createUIMessageStream, createUIMessageStreamResponse, customProvider, defaultSettingsMiddleware, embed, embedMany, MCPClient as experimental_MCPClient, MCPClientConfig as experimental_MCPClientConfig, createMCPClient as experimental_createMCPClient, experimental_createProviderRegistry, experimental_customProvider, generateImage as experimental_generateImage, generateSpeech as experimental_generateSpeech, transcribe as experimental_transcribe, extractReasoningMiddleware, generateObject, generateText, getTextFromDataUrl, getToolName, getToolOrDynamicToolName, hasToolCall, isDeepEqualData, isToolOrDynamicToolUIPart, isToolUIPart, lastAssistantMessageIsCompleteWithApprovalResponses, lastAssistantMessageIsCompleteWithToolCalls, modelMessageSchema, parsePartialJson, pipeTextStreamToResponse, pipeUIMessageStreamToResponse, readUIMessageStream, safeValidateUIMessages, simulateReadableStream, simulateStreamingMiddleware, smoothStream, stepCountIs, streamObject, streamText, systemModelMessageSchema, toolModelMessageSchema, uiMessageChunkSchema, userModelMessageSchema, validateUIMessages, wrapLanguageModel, wrapProvider };
package/dist/index.js
CHANGED
@@ -809,7 +809,7 @@ function detectMediaType({
 var import_provider_utils2 = require("@ai-sdk/provider-utils");

 // src/version.ts
-var VERSION = true ? "5.1.0-beta.
+var VERSION = true ? "5.1.0-beta.25" : "0.0.0-test";

 // src/util/download/download.ts
 var download = async ({ url }) => {

@@ -2459,6 +2459,7 @@ async function generateText({
     currentDate = () => /* @__PURE__ */ new Date()
   } = {},
   onStepFinish,
+  onFinish,
   ...settings
 }) {
   const model = resolveLanguageModel(modelArg);

@@ -2800,8 +2801,43 @@ async function generateText({
     })
   );
   const lastStep = steps[steps.length - 1];
+  const totalUsage = steps.reduce(
+    (totalUsage2, step) => {
+      return addLanguageModelUsage(totalUsage2, step.usage);
+    },
+    {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0,
+      reasoningTokens: void 0,
+      cachedInputTokens: void 0
+    }
+  );
+  await (onFinish == null ? void 0 : onFinish({
+    finishReason: lastStep.finishReason,
+    usage: lastStep.usage,
+    content: lastStep.content,
+    text: lastStep.text,
+    reasoningText: lastStep.reasoningText,
+    reasoning: lastStep.reasoning,
+    files: lastStep.files,
+    sources: lastStep.sources,
+    toolCalls: lastStep.toolCalls,
+    staticToolCalls: lastStep.staticToolCalls,
+    dynamicToolCalls: lastStep.dynamicToolCalls,
+    toolResults: lastStep.toolResults,
+    staticToolResults: lastStep.staticToolResults,
+    dynamicToolResults: lastStep.dynamicToolResults,
+    request: lastStep.request,
+    response: lastStep.response,
+    warnings: lastStep.warnings,
+    providerMetadata: lastStep.providerMetadata,
+    steps,
+    totalUsage
+  }));
   return new DefaultGenerateTextResult({
     steps,
+    totalUsage,
     resolvedOutput: await (output == null ? void 0 : output.parseOutput(
       { text: lastStep.text },
       {

@@ -2847,6 +2883,7 @@ var DefaultGenerateTextResult = class {
   constructor(options) {
     this.steps = options.steps;
     this.resolvedOutput = options.resolvedOutput;
+    this.totalUsage = options.totalUsage;
   }
   get finalStep() {
     return this.steps[this.steps.length - 1];

@@ -2905,20 +2942,6 @@ var DefaultGenerateTextResult = class {
   get usage() {
     return this.finalStep.usage;
   }
-  get totalUsage() {
-    return this.steps.reduce(
-      (totalUsage, step) => {
-        return addLanguageModelUsage(totalUsage, step.usage);
-      },
-      {
-        inputTokens: void 0,
-        outputTokens: void 0,
-        totalTokens: void 0,
-        reasoningTokens: void 0,
-        cachedInputTokens: void 0
-      }
-    );
-  }
   get experimental_output() {
     if (this.resolvedOutput == null) {
       throw new NoOutputSpecifiedError();
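Behavioral note on the hunks above: `totalUsage` moves from a lazy getter (recomputed on each access) to a value computed once inside `generateText` via the `addLanguageModelUsage` reduce, stored on the result, and also handed to the new `onFinish` callback. Reading it is unchanged; a sketch with a placeholder model id:

import { generateText } from 'ai';

const result = await generateText({
  model: 'openai/gpt-4o', // placeholder model id
  prompt: 'Hello!',
});

// usage covers the final step; totalUsage sums usage across all steps.
console.log(result.usage.totalTokens, result.totalUsage.totalTokens);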
@@ -6311,6 +6334,9 @@ var Agent = class {
   get tools() {
     return this.settings.tools;
   }
+  /**
+   * Generates an output from the agent (non-streaming).
+   */
   async generate(options) {
     var _a17;
     return generateText({

@@ -6319,6 +6345,9 @@ var Agent = class {
       ...options
     });
   }
+  /**
+   * Streams an output from the agent (streaming).
+   */
   stream(options) {
     var _a17;
     return streamText({