ai 4.0.0-canary.1 → 4.0.0-canary.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +129 -0
- package/README.md +1 -1
- package/dist/index.d.mts +96 -675
- package/dist/index.d.ts +96 -675
- package/dist/index.js +1219 -1584
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1209 -1562
- package/dist/index.mjs.map +1 -1
- package/package.json +10 -24
- package/react/dist/index.d.ts +0 -17
- package/rsc/dist/index.d.ts +19 -19
- package/rsc/dist/rsc-server.d.mts +19 -19
- package/rsc/dist/rsc-server.mjs +9 -132
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/react/dist/index.server.d.mts +0 -17
- package/react/dist/index.server.d.ts +0 -17
- package/react/dist/index.server.js +0 -50
- package/react/dist/index.server.js.map +0 -1
- package/react/dist/index.server.mjs +0 -23
- package/react/dist/index.server.mjs.map +0 -1
package/dist/index.d.ts
CHANGED
@@ -1,14 +1,12 @@
-import { ToolInvocation, Attachment, Schema, DeepPartial,
+import { ToolInvocation, Attachment, Schema, DeepPartial, JSONValue as JSONValue$1, AssistantMessage, DataMessage } from '@ai-sdk/ui-utils';
 export { AssistantMessage, AssistantStatus, Attachment, ChatRequest, ChatRequestOptions, CreateMessage, DataMessage, DeepPartial, IdGenerator, JSONValue, Message, RequestOptions, Schema, StreamPart, ToolInvocation, UseAssistantOptions, formatStreamPart, jsonSchema, parseStreamPart, processDataProtocolResponse, readDataStream } from '@ai-sdk/ui-utils';
+export { ToolCall as CoreToolCall, ToolResult as CoreToolResult, generateId } from '@ai-sdk/provider-utils';
 import { AttributeValue, Tracer } from '@opentelemetry/api';
 import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, JSONValue, LanguageModelV1CallOptions, NoSuchModelError, AISDKError } from '@ai-sdk/provider';
 export { AISDKError, APICallError, EmptyResponseBodyError, InvalidPromptError, InvalidResponseDataError, JSONParseError, LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1Prompt, LanguageModelV1StreamPart, LoadAPIKeyError, NoContentGeneratedError, NoSuchModelError, TypeValidationError, UnsupportedFunctionalityError } from '@ai-sdk/provider';
 import { z } from 'zod';
 import { ServerResponse } from 'http';
 import { ServerResponse as ServerResponse$1 } from 'node:http';
-import { AssistantStream } from 'openai/lib/AssistantStream';
-import { Run } from 'openai/resources/beta/threads/runs/runs';
-export { ToolCall as CoreToolCall, ToolResult as CoreToolResult } from '@ai-sdk/provider-utils';
 
 /**
 * Telemetry configuration.
@@ -46,33 +44,6 @@ type TelemetrySettings = {
 tracer?: Tracer;
 };
 
-/**
-Represents the number of tokens used in a prompt and completion.
-*/
-type LanguageModelUsage$1 = {
-/**
-The number of tokens used in the prompt.
-*/
-promptTokens: number;
-/**
-The number of tokens used in the completion.
-*/
-completionTokens: number;
-/**
-The total number of tokens used (promptTokens + completionTokens).
-*/
-totalTokens: number;
-};
-/**
-Represents the number of tokens used in an embedding.
-*/
-type EmbeddingModelUsage$1 = {
-/**
-The number of tokens used in the embedding.
-*/
-tokens: number;
-};
-
 /**
 Embedding model that is used by the AI SDK Core functions.
 */
@@ -147,10 +118,6 @@ type LanguageModelResponseMetadata = {
 */
 headers?: Record<string, string>;
 };
-/**
-@deprecated Use `LanguageModelResponseMetadata` instead.
-*/
-type LanguageModelResponseMetadataWithHeaders = LanguageModelResponseMetadata;
 
 /**
 * Provider for language and text embedding models.
@@ -188,19 +155,31 @@ functionality that can be fully encapsulated in the provider.
 type ProviderMetadata = LanguageModelV1ProviderMetadata;
 
 /**
-
-*/
-type TokenUsage = LanguageModelUsage$1;
-/**
-* @deprecated Use LanguageModelUsage instead.
+Represents the number of tokens used in a prompt and completion.
 */
-type
-
+type LanguageModelUsage = {
+/**
+The number of tokens used in the prompt.
+*/
+promptTokens: number;
+/**
+The number of tokens used in the completion.
+*/
+completionTokens: number;
+/**
+The total number of tokens used (promptTokens + completionTokens).
+*/
+totalTokens: number;
+};
 /**
-
+Represents the number of tokens used in an embedding.
 */
-type
-
+type EmbeddingModelUsage = {
+/**
+The number of tokens used in the embedding.
+*/
+tokens: number;
+};
 
 /**
 The result of an `embed` call.
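The deprecated `TokenUsage`, `CompletionTokenUsage`, and `EmbeddingTokenUsage` aliases are gone in this range; the canonical names are now `LanguageModelUsage` and `EmbeddingModelUsage`. A minimal sketch of reading them, assuming an illustrative `@ai-sdk/openai` provider and model ids:

```ts
import { generateText, embed, type LanguageModelUsage, type EmbeddingModelUsage } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider; any LanguageModelV1 provider works

// Token usage of a text generation call.
const { usage } = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Say hello.',
});
const textUsage: LanguageModelUsage = usage; // promptTokens, completionTokens, totalTokens

// Token usage of an embedding call.
const { usage: embeddingUsage } = await embed({
  model: openai.embedding('text-embedding-3-small'),
  value: 'hello',
});
const embedUsage: EmbeddingModelUsage = embeddingUsage; // tokens
```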
@@ -218,7 +197,7 @@ interface EmbedResult<VALUE> {
 /**
 The embedding token usage.
 */
-readonly usage: EmbeddingModelUsage
+readonly usage: EmbeddingModelUsage;
 /**
 Optional raw response data.
 */
@@ -288,7 +267,7 @@ interface EmbedManyResult<VALUE> {
 /**
 The embedding token usage.
 */
-readonly usage: EmbeddingModelUsage
+readonly usage: EmbeddingModelUsage;
 }
 
 /**
@@ -561,10 +540,6 @@ type CoreSystemMessage = {
 */
 experimental_providerMetadata?: ProviderMetadata;
 };
-/**
-* @deprecated Use `CoreMessage` instead.
-*/
-type ExperimentalMessage = CoreMessage;
 /**
 A user message. It can contain text or a combination of text and images.
 */
@@ -578,10 +553,6 @@ type CoreUserMessage = {
 */
 experimental_providerMetadata?: ProviderMetadata;
 };
-/**
-* @deprecated Use `CoreUserMessage` instead.
-*/
-type ExperimentalUserMessage = CoreUserMessage;
 /**
 Content of a user message. It can be a string or an array of text and image parts.
 */
@@ -599,10 +570,6 @@ type CoreAssistantMessage = {
 */
 experimental_providerMetadata?: ProviderMetadata;
 };
-/**
-* @deprecated Use `CoreAssistantMessage` instead.
-*/
-type ExperimentalAssistantMessage = CoreAssistantMessage;
 /**
 Content of an assistant message. It can be a string or an array of text and tool call parts.
 */
@@ -620,10 +587,6 @@ type CoreToolMessage = {
 */
 experimental_providerMetadata?: ProviderMetadata;
 };
-/**
-* @deprecated Use `CoreToolMessage` instead.
-*/
-type ExperimentalToolMessage = CoreToolMessage;
 /**
 Content of a tool message. It is an array of tool result parts.
 */
@@ -635,7 +598,7 @@ It can be a user message, an assistant message, or a tool message.
 type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
 
 type UIMessage = {
-role: 'system' | 'user' | 'assistant' | '
+role: 'system' | 'user' | 'assistant' | 'data';
 content: string;
 toolInvocations?: ToolInvocation[];
 experimental_attachments?: Attachment[];
@@ -675,23 +638,12 @@ interface GenerateObjectResult<T> {
 /**
 The token usage of the generated text.
 */
-readonly usage: LanguageModelUsage
+readonly usage: LanguageModelUsage;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-Optional raw response data.
-
-@deprecated Use `response.headers` instead.
-*/
-readonly rawResponse?: {
-/**
-Response headers.
-*/
-headers?: Record<string, string>;
-};
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -929,10 +881,6 @@ functionality that can be fully encapsulated in the provider.
 currentDate?: () => Date;
 };
 }): Promise<GenerateObjectResult<JSONValue>>;
-/**
-* @deprecated Use `generateObject` instead.
-*/
-declare const experimental_generateObject: typeof generateObject;
 
 type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
 
@@ -943,11 +891,11 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 /**
 Warnings from the model provider (e.g. unsupported settings)
 */
-readonly warnings: CallWarning[] | undefined
+readonly warnings: Promise<CallWarning[] | undefined>;
 /**
 The token usage of the generated response. Resolved when the response is finished.
 */
-readonly usage: Promise<LanguageModelUsage
+readonly usage: Promise<LanguageModelUsage>;
 /**
 Additional provider-specific metadata. They are passed through
 from the provider to the AI SDK and enable provider-specific
@@ -955,17 +903,6 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
 */
 readonly experimental_providerMetadata: Promise<ProviderMetadata | undefined>;
 /**
-Optional raw response data.
-
-@deprecated Use `response` instead.
-*/
-readonly rawResponse?: {
-/**
-Response headers.
-*/
-headers?: Record<string, string>;
-};
-/**
 Additional request information from the last step.
 */
 readonly request: Promise<LanguageModelRequestMetadata>;
@@ -1030,7 +967,7 @@ type ObjectStreamPart<PARTIAL> = {
 type: 'finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
-usage: LanguageModelUsage
+usage: LanguageModelUsage;
 response: LanguageModelResponseMetadata;
 providerMetadata?: ProviderMetadata;
 };
@@ -1039,7 +976,7 @@ type OnFinishCallback<RESULT> = (event: {
 /**
 The token usage of the generated response.
 */
-usage: LanguageModelUsage
+usage: LanguageModelUsage;
 /**
 The generated object. Can be undefined if the final object does not match the schema.
 */
@@ -1049,17 +986,6 @@ type OnFinishCallback<RESULT> = (event: {
 */
 error: unknown | undefined;
 /**
-Optional raw response data.
-
-@deprecated Use `response` instead.
-*/
-rawResponse?: {
-/**
-Response headers.
-*/
-headers?: Record<string, string>;
-};
-/**
 Response metadata.
 */
 response: LanguageModelResponseMetadata;
@@ -1140,7 +1066,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<DeepPartial<OBJECT>, OBJECT, never>;
 /**
 Generate an array with structured, typed elements for a given prompt and element schema using a language model.
 
@@ -1207,7 +1133,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
+}): StreamObjectResult<Array<ELEMENT>, Array<ELEMENT>, AsyncIterableStream<ELEMENT>>;
 /**
 Generate JSON with any schema for a given prompt using a language model.
 
@@ -1248,11 +1174,7 @@ Callback that is called when the LLM response and the final object validation ar
 currentDate?: () => Date;
 now?: () => number;
 };
-}):
-/**
-* @deprecated Use `streamObject` instead.
-*/
-declare const experimental_streamObject: typeof streamObject;
+}): StreamObjectResult<JSONValue, JSONValue, never>;
 
 type Parameters = z.ZodTypeAny | Schema<any>;
 type inferParameters<PARAMETERS extends Parameters> = PARAMETERS extends Schema<any> ? PARAMETERS['_type'] : PARAMETERS extends z.ZodTypeAny ? z.infer<PARAMETERS> : never;
@@ -1323,10 +1245,6 @@ declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARA
 }): CoreTool<PARAMETERS, RESULT> & {
 execute: undefined;
 };
-/**
-* @deprecated Use `CoreTool` instead.
-*/
-type ExperimentalTool = CoreTool;
 
 /**
 Converts an array of messages from useChat into an array of CoreMessages that can be used
@@ -1431,7 +1349,7 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
 /**
 The token usage of the generated text.
 */
-readonly usage: LanguageModelUsage
+readonly usage: LanguageModelUsage;
 /**
 Warnings from the model provider (e.g. unsupported settings).
 */
@@ -1442,17 +1360,6 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
 */
 readonly logprobs: LogProbs | undefined;
 /**
-Optional raw response data.
-
-@deprecated Use `response.headers` instead.
-*/
-readonly rawResponse?: {
-/**
-Response headers.
-*/
-readonly headers?: Record<string, string>;
-};
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -1508,40 +1415,18 @@ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
 /**
 The token usage of the generated text.
 */
-readonly usage: LanguageModelUsage
+readonly usage: LanguageModelUsage;
 /**
 Warnings from the model provider (e.g. unsupported settings)
 */
 readonly warnings: CallWarning[] | undefined;
 /**
-@deprecated use `response.messages` instead.
-*/
-readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
-/**
-Response information for every roundtrip.
-You can use this to get information about intermediate steps, such as the tool calls or the response headers.
-
-@deprecated use `steps` instead.
-*/
-readonly roundtrips: Array<StepResult<TOOLS>>;
-/**
 Details for all steps.
 You can use this to get information about intermediate steps,
 such as the tool calls or the response headers.
 */
 readonly steps: Array<StepResult<TOOLS>>;
 /**
-Optional raw response data.
-
-@deprecated Use `response.headers` instead.
-*/
-readonly rawResponse?: {
-/**
-Response headers.
-*/
-readonly headers?: Record<string, string>;
-};
-/**
 Additional request information.
 */
 readonly request: LanguageModelRequestMetadata;
@@ -1620,7 +1505,7 @@ If set and supported by the model, calls will generate deterministic results.
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
 */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers,
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1634,25 +1519,6 @@ The tool choice strategy. Default: 'auto'.
 */
 toolChoice?: CoreToolChoice<TOOLS>;
 /**
-@deprecated Use `maxToolRoundtrips` instead.
-*/
-maxAutomaticRoundtrips?: number;
-/**
-Maximum number of automatic roundtrips for tool calls.
-
-An automatic tool call roundtrip is another LLM call with the
-tool call results when all tool calls of the last assistant
-message have results.
-
-A maximum number is required to prevent infinite loops in the
-case of misconfigured tools.
-
-By default, it's set to 0, which will disable the feature.
-
-@deprecated Use `maxSteps` instead (which is `maxToolRoundtrips` + 1).
-*/
-maxToolRoundtrips?: number;
-/**
 Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
 
 A maximum number is required to prevent infinite loops in the case of misconfigured tools.
@@ -1661,10 +1527,6 @@ By default, it's set to 1, which means that only a single LLM call is made.
 */
 maxSteps?: number;
 /**
-@deprecated Use `experimental_continueSteps` instead.
-*/
-experimental_continuationSteps?: boolean;
-/**
 When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
 
 By default, it's set to false.
@@ -1697,10 +1559,26 @@ changing the tool call and result types in the result.
 currentDate?: () => Date;
 };
 }): Promise<GenerateTextResult<TOOLS>>;
+
 /**
-*
+* A stream wrapper to send custom JSON-encoded data back to the client.
+*/
+declare class StreamData {
+private encoder;
+private controller;
+stream: ReadableStream<Uint8Array>;
+private isClosed;
+private warningTimeout;
+constructor();
+close(): Promise<void>;
+append(value: JSONValue$1): void;
+appendMessageAnnotation(value: JSONValue$1): void;
+}
+/**
+* A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
+* This assumes every chunk is a 'text' chunk.
 */
-declare
+declare function createStreamDataTransformer(): TransformStream<any, any>;
 
 /**
 A result object for accessing different stream types and additional information.
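The `StreamData` class moves into the core entry point in this hunk. A minimal sketch of how it can be combined with `streamText` and `toDataStreamResponse` (the `@ai-sdk/openai` import and model id are illustrative, not part of the diff):

```ts
import { streamText, StreamData } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

export async function POST(req: Request) {
  const { prompt } = await req.json();

  // Custom JSON values streamed to the client alongside the model output.
  const data = new StreamData();
  data.append({ startedAt: Date.now() });

  // Note: per the new declaration, streamText returns StreamTextResult directly (no await needed).
  const result = streamText({
    model: openai('gpt-4o-mini'),
    prompt,
    onFinish() {
      // Close the StreamData once the model stream has finished.
      data.close();
    },
  });

  // Merge the StreamData into the data stream response.
  return result.toDataStreamResponse({ data });
}
```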
@@ -1709,14 +1587,14 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 /**
 Warnings from the model provider (e.g. unsupported settings) for the first step.
 */
-readonly warnings: CallWarning[] | undefined
+readonly warnings: Promise<CallWarning[] | undefined>;
 /**
 The total token usage of the generated response.
 When there are multiple steps, the usage is the sum of all step usages.
 
 Resolved when the response is finished.
 */
-readonly usage: Promise<LanguageModelUsage
+readonly usage: Promise<LanguageModelUsage>;
 /**
 The reason why the generation finished. Taken from the last step.
 
@@ -1748,21 +1626,6 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 */
 readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
 /**
-Optional raw response data.
-
-@deprecated Use `response` instead.
-*/
-readonly rawResponse?: {
-/**
-Response headers.
-*/
-headers?: Record<string, string>;
-};
-/**
-@deprecated use `response.messages` instead.
-*/
-readonly responseMessages: Promise<Array<CoreAssistantMessage | CoreToolMessage>>;
-/**
 Details for all steps.
 You can use this to get information about intermediate steps,
 such as the tool calls or the response headers.
@@ -1800,18 +1663,6 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 */
 readonly fullStream: AsyncIterableStream<TextStreamPart<TOOLS>>;
 /**
-Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
-It can be used with the `useChat` and `useCompletion` hooks.
-
-@param callbacks
-Stream callbacks that will be called when the stream emits events.
-
-@returns A data stream.
-
-@deprecated Use `toDataStream` instead.
-*/
-toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
-/**
 Converts the result to a data stream.
 
 @param data an optional StreamData object that will be merged into the stream.
@@ -1826,28 +1677,17 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 sendUsage?: boolean;
 }): ReadableStream<Uint8Array>;
 /**
-Writes stream data output to a Node.js response-like object.
-It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-writes each stream data part as a separate chunk.
-
-@param response A Node.js response-like object (ServerResponse).
-@param init Optional headers and status code.
-
-@deprecated Use `pipeDataStreamToResponse` instead.
-*/
-pipeAIStreamToResponse(response: ServerResponse$1, init?: {
-headers?: Record<string, string>;
-status?: number;
-}): void;
-/**
 Writes data stream output to a Node.js response-like object.
 
 @param response A Node.js response-like object (ServerResponse).
-@param options
-
+@param options.status The status code.
+@param options.statusText The status text.
+@param options.headers The headers.
+@param options.data The stream data.
+@param options.getErrorMessage An optional function that converts an error to an error message.
+@param options.sendUsage Whether to send the usage information to the client. Defaults to true.
 */
-pipeDataStreamToResponse(response: ServerResponse$1, options?: ResponseInit
-init?: ResponseInit;
+pipeDataStreamToResponse(response: ServerResponse$1, options?: ResponseInit & {
 data?: StreamData;
 getErrorMessage?: (error: unknown) => string;
 sendUsage?: boolean;
@@ -1865,28 +1705,16 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
 Converts the result to a streamed response object with a stream data part stream.
 It can be used with the `useChat` and `useCompletion` hooks.
 
-@param options
-
-
-@
-
-@
-*/
-toAIStreamResponse(options?: ResponseInit | {
-init?: ResponseInit;
-data?: StreamData;
-}): Response;
-/**
-Converts the result to a streamed response object with a stream data part stream.
-It can be used with the `useChat` and `useCompletion` hooks.
-
-@param options An object with an init property (ResponseInit) and a data property.
-You can also pass in a ResponseInit directly (deprecated).
+@param options.status The status code.
+@param options.statusText The status text.
+@param options.headers The headers.
+@param options.data The stream data.
+@param options.getErrorMessage An optional function that converts an error to an error message.
+@param options.sendUsage Whether to send the usage information to the client. Defaults to true.
 
 @return A response object.
 */
-toDataStreamResponse(options?: ResponseInit
-init?: ResponseInit;
+toDataStreamResponse(options?: ResponseInit & {
 data?: StreamData;
 getErrorMessage?: (error: unknown) => string;
 sendUsage?: boolean;
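Both `pipeDataStreamToResponse` and `toDataStreamResponse` now take a flat `ResponseInit & { data, getErrorMessage, sendUsage }` options object instead of the old nested `init` shape. A minimal Node.js sketch under that assumption (provider import and model id are illustrative):

```ts
import { createServer } from 'node:http';
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider

createServer((req, res) => {
  const result = streamText({
    model: openai('gpt-4o-mini'),
    prompt: 'Write a haiku about diffs.',
  });

  // Writes the data stream to the ServerResponse; ResponseInit-style fields
  // (status, headers) sit alongside the stream-specific options.
  result.pipeDataStreamToResponse(res, {
    status: 200,
    headers: { 'x-example': 'ai-canary' },
    sendUsage: true,
    getErrorMessage: error =>
      error instanceof Error ? error.message : 'Unknown error',
  });
}).listen(3000);
```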
@@ -1920,7 +1748,7 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 type: 'step-finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
-usage: LanguageModelUsage
+usage: LanguageModelUsage;
 response: LanguageModelResponseMetadata;
 experimental_providerMetadata?: ProviderMetadata;
 isContinued: boolean;
@@ -1928,7 +1756,7 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
 type: 'finish';
 finishReason: FinishReason;
 logprobs?: LogProbs;
-usage: LanguageModelUsage
+usage: LanguageModelUsage;
 response: LanguageModelResponseMetadata;
 experimental_providerMetadata?: ProviderMetadata;
 } | {
@@ -1983,7 +1811,7 @@ If set and supported by the model, calls will generate deterministic results.
 @return
 A result object for accessing different stream types and additional information.
 */
-declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers,
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_toolCallStreaming: toolCallStreaming, experimental_activeTools: activeTools, onChunk, onFinish, onStepFinish, _internal: { now, generateId, currentDate, }, ...settings }: CallSettings & Prompt & {
 /**
 The language model to use.
 */
@@ -1997,21 +1825,6 @@ The tool choice strategy. Default: 'auto'.
 */
 toolChoice?: CoreToolChoice<TOOLS>;
 /**
-Maximum number of automatic roundtrips for tool calls.
-
-An automatic tool call roundtrip is another LLM call with the
-tool call results when all tool calls of the last assistant
-message have results.
-
-A maximum number is required to prevent infinite loops in the
-case of misconfigured tools.
-
-By default, it's set to 0, which will disable the feature.
-
-@deprecated Use `maxSteps` instead (which is `maxToolRoundtrips` + 1).
-*/
-maxToolRoundtrips?: number;
-/**
 Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
 
 A maximum number is required to prevent infinite loops in the case of misconfigured tools.
@@ -2063,15 +1876,6 @@ The usage is the combined usage of all steps.
 Details for all steps.
 */
 readonly steps: StepResult<TOOLS>[];
-/**
-The response messages that were generated during the call. It consists of an assistant message,
-potentially containing tool calls.
-
-When there are tool results, there is an additional tool message with the tool results that are available.
-If there are tools that do not have execute functions, they are not included in the tool results and
-need to be added separately.
-*/
-readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
 }) => Promise<void> | void;
 /**
 Callback that is called when each step (LLM call) is finished, including intermediate steps.
@@ -2085,11 +1889,7 @@ need to be added separately.
 generateId?: () => string;
 currentDate?: () => Date;
 };
-}):
-/**
-* @deprecated Use `streamText` instead.
-*/
-declare const experimental_streamText: typeof streamText;
+}): StreamTextResult<TOOLS>;
 
 /**
 * Experimental middleware for LanguageModelV1.
@@ -2187,88 +1987,12 @@ declare class NoSuchProviderError extends NoSuchModelError {
 message?: string;
 });
 static isInstance(error: unknown): error is NoSuchProviderError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isNoSuchProviderError(error: unknown): error is NoSuchProviderError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-stack: string | undefined;
-modelId: string;
-modelType: "languageModel" | "textEmbeddingModel";
-providerId: string;
-availableProviders: string[];
-};
-}
-
-/**
-* Provides for language and text embedding models.
-*
-* @deprecated Use `ProviderV1` instead.
-*/
-interface experimental_Provider {
-/**
-Returns the language model with the given id in the format `providerId:modelId`.
-The model id is then passed to the provider function to get the model.
-
-@param {string} id - The id of the model to return.
-
-@throws {NoSuchModelError} If no model with the given id exists.
-@throws {NoSuchProviderError} If no provider with the given id exists.
-
-@returns {LanguageModel} The language model associated with the id.
-*/
-languageModel?: (modelId: string) => LanguageModel;
-/**
-Returns the text embedding model with the given id in the format `providerId:modelId`.
-The model id is then passed to the provider function to get the model.
-
-@param {string} id - The id of the model to return.
-
-@throws {NoSuchModelError} If no model with the given id exists.
-@throws {NoSuchProviderError} If no provider with the given id exists.
-
-@returns {LanguageModel} The language model associated with the id.
-*/
-textEmbeddingModel?: (modelId: string) => EmbeddingModel<string>;
-/**
-Returns the text embedding model with the given id in the format `providerId:modelId`.
-The model id is then passed to the provider function to get the model.
-
-@param {string} id - The id of the model to return.
-
-@throws {NoSuchModelError} If no model with the given id exists.
-@throws {NoSuchProviderError} If no provider with the given id exists.
-
-@returns {LanguageModel} The language model associated with the id.
-
-@deprecated use `textEmbeddingModel` instead.
-*/
-textEmbedding?: (modelId: string) => EmbeddingModel<string>;
 }
 
-/**
-Registry for managing models. It enables getting a model with a string id.
-
-@deprecated Use `experimental_Provider` instead.
-*/
-type experimental_ProviderRegistry = Provider;
-/**
-* @deprecated Use `experimental_ProviderRegistry` instead.
-*/
-type experimental_ModelRegistry = experimental_ProviderRegistry;
 /**
 * Creates a registry for the given providers.
 */
-declare function experimental_createProviderRegistry(providers: Record<string,
-/**
-* @deprecated Use `experimental_createProviderRegistry` instead.
-*/
-declare const experimental_createModelRegistry: typeof experimental_createProviderRegistry;
+declare function experimental_createProviderRegistry(providers: Record<string, Provider>): Provider;
 
 /**
 * Calculates the cosine similarity between two vectors. This is a useful metric for
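`experimental_createProviderRegistry` now takes a `Record<string, Provider>` and returns a plain `Provider`; the old `experimental_Provider` / model-registry aliases are gone. A minimal sketch, assuming illustrative provider packages and model ids:

```ts
import { experimental_createProviderRegistry as createProviderRegistry } from 'ai';
import { openai } from '@ai-sdk/openai';       // illustrative
import { anthropic } from '@ai-sdk/anthropic'; // illustrative

// Each value must satisfy the Provider interface (languageModel / textEmbeddingModel).
const registry = createProviderRegistry({ openai, anthropic });

// Models are addressed as `providerId:modelId` on the returned Provider.
const model = registry.languageModel('openai:gpt-4o-mini');
const embedder = registry.textEmbeddingModel('openai:text-embedding-3-small');
```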
@@ -2293,17 +2017,6 @@ declare class InvalidArgumentError extends AISDKError {
 message: string;
 });
 static isInstance(error: unknown): error is InvalidArgumentError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isInvalidArgumentError(error: unknown): error is InvalidArgumentError;
-toJSON(): {
-name: string;
-message: string;
-stack: string | undefined;
-parameter: string;
-value: unknown;
-};
 }
 
 declare const symbol$7: unique symbol;
@@ -2318,21 +2031,6 @@ declare class InvalidToolArgumentsError extends AISDKError {
 cause: unknown;
 });
 static isInstance(error: unknown): error is InvalidToolArgumentsError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isInvalidToolArgumentsError(error: unknown): error is InvalidToolArgumentsError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-cause: unknown;
-stack: string | undefined;
-toolName: string;
-toolArgs: string;
-};
 }
 
 declare const symbol$6: unique symbol;
@@ -2346,20 +2044,6 @@ declare class NoSuchToolError extends AISDKError {
 message?: string;
 });
 static isInstance(error: unknown): error is NoSuchToolError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isNoSuchToolError(error: unknown): error is NoSuchToolError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-stack: string | undefined;
-toolName: string;
-availableTools: string[] | undefined;
-};
 }
 
 declare const symbol$5: unique symbol;
@@ -2372,19 +2056,6 @@ declare class NoObjectGeneratedError extends AISDKError {
 message?: string;
 });
 static isInstance(error: unknown): error is NoObjectGeneratedError;
-/**
-* @deprecated Use isInstance instead.
-*/
-static isNoObjectGeneratedError(error: unknown): error is NoObjectGeneratedError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-cause: unknown;
-message: string;
-stack: string | undefined;
-};
 }
 
 declare const symbol$4: unique symbol;
@@ -2397,20 +2068,6 @@ declare class InvalidDataContentError extends AISDKError {
 message?: string;
 });
 static isInstance(error: unknown): error is InvalidDataContentError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isInvalidDataContentError(error: unknown): error is InvalidDataContentError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-stack: string | undefined;
-cause: unknown;
-content: unknown;
-};
 }
 
 declare const symbol$3: unique symbol;
@@ -2422,19 +2079,6 @@ declare class InvalidMessageRoleError extends AISDKError {
 message?: string;
 });
 static isInstance(error: unknown): error is InvalidMessageRoleError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isInvalidMessageRoleError(error: unknown): error is InvalidMessageRoleError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-stack: string | undefined;
-role: string;
-};
 }
 
 declare const symbol$2: unique symbol;
@@ -2462,21 +2106,6 @@ declare class DownloadError extends AISDKError {
 cause?: unknown;
 });
 static isInstance(error: unknown): error is DownloadError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isDownloadError(error: unknown): error is DownloadError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-url: string;
-statusCode: number | undefined;
-statusText: string | undefined;
-cause: unknown;
-};
 }
 
 declare const symbol: unique symbol;
@@ -2492,154 +2121,8 @@ declare class RetryError extends AISDKError {
 errors: Array<unknown>;
 });
 static isInstance(error: unknown): error is RetryError;
-/**
-* @deprecated use `isInstance` instead
-*/
-static isRetryError(error: unknown): error is RetryError;
-/**
-* @deprecated Do not use this method. It will be removed in the next major version.
-*/
-toJSON(): {
-name: string;
-message: string;
-reason: RetryErrorReason;
-lastError: unknown;
-errors: unknown[];
-};
 }
 
-interface FunctionCallPayload {
-name: string;
-arguments: Record<string, unknown>;
-}
-interface ToolCallPayload {
-tools: {
-id: string;
-type: 'function';
-func: {
-name: string;
-arguments: Record<string, unknown>;
-};
-}[];
-}
-/**
-* Configuration options and helper callback methods for AIStream stream lifecycle events.
-* @interface
-*/
-interface AIStreamCallbacksAndOptions {
-/** `onStart`: Called once when the stream is initialized. */
-onStart?: () => Promise<void> | void;
-/** `onCompletion`: Called for each tokenized message. */
-onCompletion?: (completion: string) => Promise<void> | void;
-/** `onFinal`: Called once when the stream is closed with the final completion message. */
-onFinal?: (completion: string) => Promise<void> | void;
-/** `onToken`: Called for each tokenized message. */
-onToken?: (token: string) => Promise<void> | void;
-/** `onText`: Called for each text chunk. */
-onText?: (text: string) => Promise<void> | void;
-/**
-* @deprecated This flag is no longer used and only retained for backwards compatibility.
-* You can remove it from your code.
-*/
-experimental_streamData?: boolean;
-}
-/**
-* Options for the AIStreamParser.
-* @interface
-* @property {string} event - The event (type) from the server side event stream.
-*/
-interface AIStreamParserOptions {
-event?: string;
-}
-/**
-* Custom parser for AIStream data.
-* @interface
-* @param {string} data - The data to be parsed.
-* @param {AIStreamParserOptions} options - The options for the parser.
-* @returns {string | void} The parsed data or void.
-*/
-interface AIStreamParser {
-(data: string, options: AIStreamParserOptions): string | void | {
-isText: false;
-content: string;
-};
-}
-/**
-* Creates a TransformStream that parses events from an EventSource stream using a custom parser.
-* @param {AIStreamParser} customParser - Function to handle event data.
-* @returns {TransformStream<Uint8Array, string>} TransformStream parsing events.
-*/
-declare function createEventStreamTransformer(customParser?: AIStreamParser): TransformStream<Uint8Array, string | {
-isText: false;
-content: string;
-}>;
-/**
-* Creates a transform stream that encodes input messages and invokes optional callback functions.
-* The transform stream uses the provided callbacks to execute custom logic at different stages of the stream's lifecycle.
-* - `onStart`: Called once when the stream is initialized.
-* - `onToken`: Called for each tokenized message.
-* - `onCompletion`: Called every time an AIStream completion message is received. This can occur multiple times when using e.g. OpenAI functions
-* - `onFinal`: Called once when the stream is closed with the final completion message.
-*
-* This function is useful when you want to process a stream of messages and perform specific actions during the stream's lifecycle.
-*
-* @param {AIStreamCallbacksAndOptions} [callbacks] - An object containing the callback functions.
-* @return {TransformStream<string, Uint8Array>} A transform stream that encodes input messages as Uint8Array and allows the execution of custom logic through callbacks.
-*
-* @example
-* const callbacks = {
-* onStart: async () => console.log('Stream started'),
-* onToken: async (token) => console.log(`Token: ${token}`),
-* onCompletion: async (completion) => console.log(`Completion: ${completion}`)
-* onFinal: async () => data.close()
-* };
-* const transformer = createCallbacksTransformer(callbacks);
-*/
-declare function createCallbacksTransformer(cb: AIStreamCallbacksAndOptions | undefined): TransformStream<string | {
-isText: false;
-content: string;
-}, Uint8Array>;
-/**
-* Returns a stateful function that, when invoked, trims leading whitespace
-* from the input text. The trimming only occurs on the first invocation, ensuring that
-* subsequent calls do not alter the input text. This is particularly useful in scenarios
-* where a text stream is being processed and only the initial whitespace should be removed.
-*
-* @return {function(string): string} A function that takes a string as input and returns a string
-* with leading whitespace removed if it is the first invocation; otherwise, it returns the input unchanged.
-*
-* @example
-* const trimStart = trimStartOfStreamHelper();
-* const output1 = trimStart(" text"); // "text"
-* const output2 = trimStart(" text"); // " text"
-*
-*/
-declare function trimStartOfStreamHelper(): (text: string) => string;
-/**
-* Returns a ReadableStream created from the response, parsed and handled with custom logic.
-* The stream goes through two transformation stages, first parsing the events and then
-* invoking the provided callbacks.
-*
-* For 2xx HTTP responses:
-* - The function continues with standard stream processing.
-*
-* For non-2xx HTTP responses:
-* - If the response body is defined, it asynchronously extracts and decodes the response body.
-* - It then creates a custom ReadableStream to propagate a detailed error message.
-*
-* @param {Response} response - The response.
-* @param {AIStreamParser} customParser - The custom parser function.
-* @param {AIStreamCallbacksAndOptions} callbacks - The callbacks.
-* @return {ReadableStream} The AIStream.
-* @throws Will throw an error if the response is not OK.
-*/
-declare function AIStream(response: Response, customParser?: AIStreamParser, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
-/**
-* Implements ReadableStream.from(asyncIterable), which isn't documented in MDN and isn't implemented in node.
-* https://github.com/whatwg/streams/commit/8d7a0bf26eb2cc23e884ddbaac7c1da4b91cf2bc
-*/
-declare function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>): ReadableStream<T>;
-
 /**
 You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
 */
@@ -2658,14 +2141,6 @@ The process parameter is a callback in which you can run the assistant on thread
 */
 type AssistantResponseCallback = (options: {
 /**
-@deprecated use variable from outer scope instead.
-*/
-threadId: string;
-/**
-@deprecated use variable from outer scope instead.
-*/
-messageId: string;
-/**
 Forwards an assistant message (non-streaming) to the client.
 */
 sendMessage: (message: AssistantMessage) => void;
@@ -2676,7 +2151,7 @@ type AssistantResponseCallback = (options: {
 /**
 Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
 */
-forwardStream: (stream:
+forwardStream: (stream: any) => Promise<any | undefined>;
 }) => Promise<void>;
 /**
 The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`.
@@ -2684,34 +2159,21 @@ It is designed to facilitate streaming assistant responses to the `useAssistant`
 It receives an assistant thread and a current message, and can send messages and data messages to the client.
 */
 declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
-/**
-@deprecated Use `AssistantResponse` instead.
-*/
-declare const experimental_AssistantResponse: typeof AssistantResponse;
 
 /**
-*
+* Configuration options and helper callback methods for stream lifecycle events.
 */
-
-
-
-
-
-
-
-
-
-
-
-/**
-* A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
-* This assumes every chunk is a 'text' chunk.
-*/
-declare function createStreamDataTransformer(): TransformStream<any, any>;
-/**
-@deprecated Use `StreamData` instead.
-*/
-declare class experimental_StreamData extends StreamData {
+interface StreamCallbacks {
+/** `onStart`: Called once when the stream is initialized. */
+onStart?: () => Promise<void> | void;
+/** `onCompletion`: Called for each tokenized message. */
+onCompletion?: (completion: string) => Promise<void> | void;
+/** `onFinal`: Called once when the stream is closed with the final completion message. */
+onFinal?: (completion: string) => Promise<void> | void;
+/** `onToken`: Called for each tokenized message. */
+onToken?: (token: string) => Promise<void> | void;
+/** `onText`: Called for each text chunk. */
+onText?: (text: string) => Promise<void> | void;
 }
 
 type LangChainImageDetail = 'auto' | 'low' | 'high';
@@ -2740,33 +2202,21 @@ type LangChainStreamEvent = {
 data: any;
 };
 /**
-Converts LangChain output streams to
-
-The following streams are supported:
-- `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
-- `string` streams (LangChain `StringOutputParser` output)
-
-@deprecated Use `toDataStream` instead.
-*/
-declare function toAIStream(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
-/**
-Converts LangChain output streams to AIStream.
+Converts LangChain output streams to an AI SDK Data Stream.
 
 The following streams are supported:
 - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
 - `string` streams (LangChain `StringOutputParser` output)
 */
-declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?:
+declare function toDataStream$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<any>;
 declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, options?: {
 init?: ResponseInit;
 data?: StreamData;
-callbacks?:
+callbacks?: StreamCallbacks;
 }): Response;
 
-declare const langchainAdapter_toAIStream: typeof toAIStream;
 declare namespace langchainAdapter {
 export {
-langchainAdapter_toAIStream as toAIStream,
 toDataStream$1 as toDataStream,
 toDataStreamResponse$1 as toDataStreamResponse,
 };
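The LangChain and LlamaIndex adapters now accept the new `StreamCallbacks` shape instead of the removed `AIStreamCallbacksAndOptions`, and the `toAIStream` export is gone. A minimal sketch, assuming the `@langchain/openai` package and an illustrative model id:

```ts
import { LangChainAdapter } from 'ai';
import { ChatOpenAI } from '@langchain/openai'; // assumed LangChain provider package

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const model = new ChatOpenAI({ model: 'gpt-4o-mini' });
  // model.stream() yields AIMessageChunk values, one of the supported stream types.
  const stream = await model.stream(prompt);

  // `callbacks` uses the new StreamCallbacks interface.
  return LangChainAdapter.toDataStreamResponse(stream, {
    callbacks: {
      onStart: () => console.log('stream started'),
      onFinal: completion => console.log('done, length:', completion.length),
    },
  });
}
```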
@@ -2775,11 +2225,11 @@ declare namespace langchainAdapter {
 type EngineResponse = {
 delta: string;
 };
-declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?:
+declare function toDataStream(stream: AsyncIterable<EngineResponse>, callbacks?: StreamCallbacks): ReadableStream<any>;
 declare function toDataStreamResponse(stream: AsyncIterable<EngineResponse>, options?: {
 init?: ResponseInit;
 data?: StreamData;
-callbacks?:
+callbacks?: StreamCallbacks;
 }): Response;
 
 declare const llamaindexAdapter_toDataStream: typeof toDataStream;
@@ -2791,33 +2241,4 @@ declare namespace llamaindexAdapter {
 };
 }
 
-
-* A utility function to stream a ReadableStream to a Node.js response-like object.
-*
-* @deprecated Use `pipeDataStreamToResponse` (part of `StreamTextResult`) instead.
-*/
-declare function streamToResponse(res: ReadableStream, response: ServerResponse$1, init?: {
-headers?: Record<string, string>;
-status?: number;
-}, data?: StreamData): void;
-
-/**
-* A utility class for streaming text responses.
-*
-* @deprecated Use `streamText.toDataStreamResponse()` (if you did send StreamData)
-* or a regular `Response` instead (if you did not send any StreamData):
-*
-* ```ts
-* return new Response(stream, {
-* status: 200,
-* contentType: 'text/plain; charset=utf-8',
-* })
-* ```
-*/
-declare class StreamingTextResponse extends Response {
-constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
-}
-
-declare const generateId: (size?: number) => string;
-
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AssistantContent, AssistantResponse, CallWarning, CompletionTokenUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, EmbeddingTokenUsage, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelResponseMetadataWithHeaders, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, StreamingTextResponse, TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolCallPayload, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createCallbacksTransformer, createEventStreamTransformer, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, experimental_wrapLanguageModel, generateId, generateObject, generateText, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };