ai 3.1.0-canary.4 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +982 -24
- package/dist/index.d.ts +982 -24
- package/dist/index.js +1748 -175
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1723 -174
- package/dist/index.mjs.map +1 -1
- package/package.json +11 -28
- package/prompts/dist/index.d.mts +13 -1
- package/prompts/dist/index.d.ts +13 -1
- package/prompts/dist/index.js +13 -0
- package/prompts/dist/index.js.map +1 -1
- package/prompts/dist/index.mjs +12 -0
- package/prompts/dist/index.mjs.map +1 -1
- package/react/dist/index.d.mts +23 -6
- package/react/dist/index.d.ts +27 -8
- package/react/dist/index.js +154 -141
- package/react/dist/index.js.map +1 -1
- package/react/dist/index.mjs +153 -141
- package/react/dist/index.mjs.map +1 -1
- package/react/dist/index.server.d.mts +4 -2
- package/react/dist/index.server.d.ts +4 -2
- package/react/dist/index.server.js.map +1 -1
- package/react/dist/index.server.mjs.map +1 -1
- package/rsc/dist/index.d.ts +385 -20
- package/rsc/dist/rsc-client.d.mts +1 -1
- package/rsc/dist/rsc-client.mjs +2 -0
- package/rsc/dist/rsc-client.mjs.map +1 -1
- package/rsc/dist/rsc-server.d.mts +367 -20
- package/rsc/dist/rsc-server.mjs +676 -35
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/rsc/dist/rsc-shared.d.mts +24 -9
- package/rsc/dist/rsc-shared.mjs +98 -4
- package/rsc/dist/rsc-shared.mjs.map +1 -1
- package/solid/dist/index.d.mts +7 -3
- package/solid/dist/index.d.ts +7 -3
- package/solid/dist/index.js +106 -107
- package/solid/dist/index.js.map +1 -1
- package/solid/dist/index.mjs +106 -107
- package/solid/dist/index.mjs.map +1 -1
- package/svelte/dist/index.d.mts +7 -3
- package/svelte/dist/index.d.ts +7 -3
- package/svelte/dist/index.js +109 -109
- package/svelte/dist/index.js.map +1 -1
- package/svelte/dist/index.mjs +109 -109
- package/svelte/dist/index.mjs.map +1 -1
- package/vue/dist/index.d.mts +7 -3
- package/vue/dist/index.d.ts +7 -3
- package/vue/dist/index.js +106 -107
- package/vue/dist/index.js.map +1 -1
- package/vue/dist/index.mjs +106 -107
- package/vue/dist/index.mjs.map +1 -1
- package/ai-model-specification/dist/index.d.mts +0 -665
- package/ai-model-specification/dist/index.d.ts +0 -665
- package/ai-model-specification/dist/index.js +0 -716
- package/ai-model-specification/dist/index.js.map +0 -1
- package/ai-model-specification/dist/index.mjs +0 -656
- package/ai-model-specification/dist/index.mjs.map +0 -1
- package/core/dist/index.d.mts +0 -626
- package/core/dist/index.d.ts +0 -626
- package/core/dist/index.js +0 -1918
- package/core/dist/index.js.map +0 -1
- package/core/dist/index.mjs +0 -1873
- package/core/dist/index.mjs.map +0 -1
- package/openai/dist/index.d.mts +0 -429
- package/openai/dist/index.d.ts +0 -429
- package/openai/dist/index.js +0 -1231
- package/openai/dist/index.js.map +0 -1
- package/openai/dist/index.mjs +0 -1195
- package/openai/dist/index.mjs.map +0 -1
package/dist/index.d.mts
CHANGED
@@ -1,9 +1,861 @@
+import { z } from 'zod';
+import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
+import { ServerResponse } from 'node:http';
 import { AssistantStream } from 'openai/lib/AssistantStream';
 import { Run } from 'openai/resources/beta/threads/runs/runs';
-import { ChatCompletionResponseChunk } from '@mistralai/mistralai';
-import { ServerResponse } from 'node:http';
 
-
+type TokenUsage = {
+    promptTokens: number;
+    completionTokens: number;
+    totalTokens: number;
+};
+
+type CallSettings = {
+    /**
+    Maximum number of tokens to generate.
+    */
+    maxTokens?: number;
+    /**
+    Temperature setting. This is a number between 0 (almost no randomness) and
+    1 (very random).
+
+    It is recommended to set either `temperature` or `topP`, but not both.
+
+    @default 0
+    */
+    temperature?: number;
+    /**
+    Nucleus sampling. This is a number between 0 and 1.
+
+    E.g. 0.1 would mean that only tokens with the top 10% probability mass
+    are considered.
+
+    It is recommended to set either `temperature` or `topP`, but not both.
+    */
+    topP?: number;
+    /**
+    Presence penalty setting. It affects the likelihood of the model to
+    repeat information that is already in the prompt.
+
+    The presence penalty is a number between -1 (increase repetition)
+    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+    @default 0
+    */
+    presencePenalty?: number;
+    /**
+    Frequency penalty setting. It affects the likelihood of the model
+    to repeatedly use the same words or phrases.
+
+    The frequency penalty is a number between -1 (increase repetition)
+    and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+    @default 0
+    */
+    frequencyPenalty?: number;
+    /**
+    The seed (integer) to use for random sampling. If set and supported
+    by the model, calls will generate deterministic results.
+    */
+    seed?: number;
+    /**
+    Maximum number of retries. Set to 0 to disable retries.
+
+    @default 2
+    */
+    maxRetries?: number;
+    /**
+    Abort signal.
+    */
+    abortSignal?: AbortSignal;
+};
+
+/**
+Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+ */
+type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+/**
+Converts data content to a base64-encoded string.
+
+@param content - Data content to convert.
+@returns Base64-encoded string.
+ */
+declare function convertDataContentToBase64String(content: DataContent): string;
+/**
+Converts data content to a Uint8Array.
+
+@param content - Data content to convert.
+@returns Uint8Array.
+ */
+declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;
+
+/**
+Text content part of a prompt. It contains a string of text.
+ */
+interface TextPart$1 {
+    type: 'text';
+    /**
+    The text content.
+    */
+    text: string;
+}
+/**
+Image content part of a prompt. It contains an image.
+ */
+interface ImagePart {
+    type: 'image';
+    /**
+    Image data. Can either be:
+
+    - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+    - URL: a URL that points to the image
+    */
+    image: DataContent | URL;
+    /**
+    Optional mime type of the image.
+    */
+    mimeType?: string;
+}
+/**
+Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
+interface ToolCallPart {
+    type: 'tool-call';
+    /**
+    ID of the tool call. This ID is used to match the tool call with the tool result.
+    */
+    toolCallId: string;
+    /**
+    Name of the tool that is being called.
+    */
+    toolName: string;
+    /**
+    Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+    */
+    args: unknown;
+}
+/**
+Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
+interface ToolResultPart {
+    type: 'tool-result';
+    /**
+    ID of the tool call that this result is associated with.
+    */
+    toolCallId: string;
+    /**
+    Name of the tool that generated this result.
+    */
+    toolName: string;
+    /**
+    Result of the tool call. This is a JSON-serializable object.
+    */
+    result: unknown;
+    /**
+    Optional flag if the result is an error or an error message.
+    */
+    isError?: boolean;
+}
+
+/**
+A message that can be used in the `messages` field of a prompt.
+It can be a user message, an assistant message, or a tool message.
+ */
+type CoreMessage = CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
+/**
+ * @deprecated Use `CoreMessage` instead.
+ */
+type ExperimentalMessage = CoreMessage;
+/**
+A user message. It can contain text or a combination of text and images.
+ */
+type CoreUserMessage = {
+    role: 'user';
+    content: UserContent;
+};
+/**
+ * @deprecated Use `CoreUserMessage` instead.
+ */
+type ExperimentalUserMessage = CoreUserMessage;
+/**
+Content of a user message. It can be a string or an array of text and image parts.
+ */
+type UserContent = string | Array<TextPart$1 | ImagePart>;
+/**
+An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
+type CoreAssistantMessage = {
+    role: 'assistant';
+    content: AssistantContent;
+};
+/**
+ * @deprecated Use `CoreAssistantMessage` instead.
+ */
+type ExperimentalAssistantMessage = CoreAssistantMessage;
+/**
+Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
+type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
+/**
+A tool message. It contains the result of one or more tool calls.
+ */
+type CoreToolMessage = {
+    role: 'tool';
+    content: ToolContent;
+};
+/**
+ * @deprecated Use `CoreToolMessage` instead.
+ */
+type ExperimentalToolMessage = CoreToolMessage;
+/**
+Content of a tool message. It is an array of tool result parts.
+ */
+type ToolContent = Array<ToolResultPart>;
+
+/**
+Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
+type Prompt = {
+    /**
+    System message to include in the prompt. Can be used with `prompt` or `messages`.
+    */
+    system?: string;
+    /**
+    A simple text prompt. You can either use `prompt` or `messages` but not both.
+    */
+    prompt?: string;
+    /**
+    A list of messsages. You can either use `prompt` or `messages` but not both.
+    */
+    messages?: Array<CoreMessage>;
+};
+
+/**
+Language model that is used by the AI SDK Core functions.
+*/
+type LanguageModel = LanguageModelV1;
+/**
+Reason why a language model finished generating a response.
+
+Can be one of the following:
+- `stop`: model generated stop sequence
+- `length`: model generated maximum number of tokens
+- `content-filter`: content filter violation stopped the model
+- `tool-calls`: model triggered tool calls
+- `error`: model stopped because of an error
+- `other`: model stopped for other reasons
+*/
+type FinishReason = LanguageModelV1FinishReason;
+/**
+Log probabilities for each token and its top log probabilities.
+*/
+type LogProbs = LanguageModelV1LogProbs;
+/**
+Warning from the model provider for this call. The call will proceed, but e.g.
+some settings might not be supported, which can lead to suboptimal results.
+*/
+type CallWarning = LanguageModelV1CallWarning;
+
+/**
+Generate a structured, typed object for a given prompt and schema using a language model.
+
+This function does not stream the output. If you want to stream the output, use `streamObject` instead.
+
+@param model - The language model to use.
+
+@param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The value is passed through to the provider. The range depends on the provider and model.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The value is passed through to the provider. The range depends on the provider and model.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@returns
+A result object that contains the generated object, the finish reason, the token usage, and additional information.
+ */
+declare function generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+    */
+    model: LanguageModel;
+    /**
+    The schema of the object that the model should generate.
+    */
+    schema: z.Schema<T>;
+    /**
+    The mode to use for object generation. Not all models support all modes.
+
+    Default and recommended: 'auto' (best mode for the model).
+    */
+    mode?: 'auto' | 'json' | 'tool' | 'grammar';
+}): Promise<GenerateObjectResult<T>>;
+/**
+The result of a `generateObject` call.
+ */
+declare class GenerateObjectResult<T> {
+    /**
+    The generated object (typed according to the schema).
+    */
+    readonly object: T;
+    /**
+    The reason why the generation finished.
+    */
+    readonly finishReason: FinishReason;
+    /**
+    The token usage of the generated text.
+    */
+    readonly usage: TokenUsage;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+    */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    Optional raw response data.
+    */
+    rawResponse?: {
+        /**
+        Response headers.
+        */
+        headers?: Record<string, string>;
+    };
+    /**
+    Logprobs for the completion.
+    `undefined` if the mode does not support logprobs or if was not enabled
+    */
+    readonly logprobs: LogProbs | undefined;
+    constructor(options: {
+        object: T;
+        finishReason: FinishReason;
+        usage: TokenUsage;
+        warnings: CallWarning[] | undefined;
+        rawResponse?: {
+            headers?: Record<string, string>;
+        };
+        logprobs: LogProbs | undefined;
+    });
+}
+/**
+ * @deprecated Use `generateObject` instead.
+ */
+declare const experimental_generateObject: typeof generateObject;
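For context: the new `generateObject` API pairs a model with a Zod schema and returns a typed result. A minimal sketch (not part of the diff; it assumes the separate `@ai-sdk/openai` provider package, an illustrative model id, and a configured `OPENAI_API_KEY`):

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: provider package installed
import { z } from 'zod';

// The model output is validated and typed according to the schema.
const { object } = await generateObject({
  model: openai('gpt-4-turbo'), // model id illustrative
  schema: z.object({
    city: z.string(),
    population: z.number(),
  }),
  prompt: 'Describe a fictional city.',
});

console.log(object.city, object.population); // fully typed access
```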
+
+type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
+
+/**
+Create a type from an object with all keys and nested keys set to optional.
+The helper supports normal objects and Zod schemas (which are resolved automatically).
+It always recurses into arrays.
+
+Adopted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
+*/
+type DeepPartial<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends z.Schema<any> ? DeepPartial<T['_type']> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartial<ItemType | undefined>> : Array<DeepPartial<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
+type PartialMap<KeyType, ValueType> = {} & Map<DeepPartial<KeyType>, DeepPartial<ValueType>>;
+type PartialSet<T> = {} & Set<DeepPartial<T>>;
+type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartial<KeyType>, DeepPartial<ValueType>>;
+type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartial<T>>;
+type PartialObject<ObjectType extends object> = {
+    [KeyType in keyof ObjectType]?: DeepPartial<ObjectType[KeyType]>;
+};
+
+/**
+Generate a structured, typed object for a given prompt and schema using a language model.
+
+This function streams the output. If you do not want to stream the output, use `generateObject` instead.
+
+@param model - The language model to use.
+
+@param schema - The schema of the object that the model should generate.
+@param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The value is passed through to the provider. The range depends on the provider and model.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The value is passed through to the provider. The range depends on the provider and model.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@return
+A result object for accessing the partial object stream and additional information.
+ */
+declare function streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+    */
+    model: LanguageModel;
+    /**
+    The schema of the object that the model should generate.
+    */
+    schema: z.Schema<T>;
+    /**
+    The mode to use for object generation. Not all models support all modes.
+
+    Default and recommended: 'auto' (best mode for the model).
+    */
+    mode?: 'auto' | 'json' | 'tool' | 'grammar';
+}): Promise<StreamObjectResult<T>>;
+type ObjectStreamPartInput = {
+    type: 'error';
+    error: unknown;
+} | {
+    type: 'finish';
+    finishReason: FinishReason;
+    logprobs?: LogProbs;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+};
+type ObjectStreamPart<T> = ObjectStreamPartInput | {
+    type: 'object';
+    object: DeepPartial<T>;
+};
+/**
+The result of a `streamObject` call that contains the partial object stream and additional information.
+ */
+declare class StreamObjectResult<T> {
+    private readonly originalStream;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+    */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    Optional raw response data.
+    */
+    rawResponse?: {
+        /**
+        Response headers.
+        */
+        headers?: Record<string, string>;
+    };
+    constructor({ stream, warnings, rawResponse, }: {
+        stream: ReadableStream<string | ObjectStreamPartInput>;
+        warnings: CallWarning[] | undefined;
+        rawResponse?: {
+            headers?: Record<string, string>;
+        };
+    });
+    get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
+    get fullStream(): AsyncIterableStream<ObjectStreamPart<T>>;
+}
+/**
+ * @deprecated Use `streamObject` instead.
+ */
+declare const experimental_streamObject: typeof streamObject;
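The `partialObjectStream` getter is where the `DeepPartial` machinery above pays off: each emitted snapshot is a progressively more complete version of the schema type. A hedged sketch under the same provider assumptions as before:

```ts
import { streamObject } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: provider package installed
import { z } from 'zod';

const result = await streamObject({
  model: openai('gpt-4-turbo'), // model id illustrative
  schema: z.object({ title: z.string(), tags: z.array(z.string()) }),
  prompt: 'Suggest a blog post topic.',
});

// Each snapshot is a DeepPartial<T> that grows as tokens arrive,
// e.g. {} -> { title: '...' } -> { title: '...', tags: [...] }.
for await (const partial of result.partialObjectStream) {
  console.log(partial);
}
```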
+
+/**
+A tool contains the description and the schema of the input that the tool expects.
+This enables the language model to generate the input.
+
+The tool can also contain an optional execute function for the actual execution function of the tool.
+*/
+interface CoreTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
+    /**
+    An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+    */
+    description?: string;
+    /**
+    The schema of the input that the tool expects. The language model will use this to generate the input.
+    Use descriptions to make the input understandable for the language model.
+    */
+    parameters: PARAMETERS;
+    /**
+    An optional execute function for the actual execution function of the tool.
+    If not provided, the tool will not be executed automatically.
+    */
+    execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+}
+/**
+Helper function for inferring the execute args of a tool.
+*/
+declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+    execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+}): CoreTool<PARAMETERS, RESULT> & {
+    execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+};
+declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: CoreTool<PARAMETERS, RESULT> & {
+    execute?: undefined;
+}): CoreTool<PARAMETERS, RESULT> & {
+    execute: undefined;
+};
+/**
+ * @deprecated Use `CoreTool` instead.
+ */
+type ExperimentalTool = CoreTool;
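As the overloads show, `tool()` is an identity helper that exists purely so TypeScript can infer the `args` type of `execute` from the `parameters` schema. A short illustration (the tool name and result are invented for the example):

```ts
import { tool } from 'ai';
import { z } from 'zod';

export const weatherTool = tool({
  description: 'Get the weather for a city',
  parameters: z.object({ city: z.string() }),
  execute: async ({ city }) => {
    // `city` is inferred as string from the schema above.
    return { city, temperature: 21 }; // placeholder result
  },
});
```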
+
+/**
+Create a union of the given object's values, and optionally specify which keys to get the values from.
+
+Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
+
+@example
+```
+// data.json
+{
+    'foo': 1,
+    'bar': 2,
+    'biz': 3
+}
+
+// main.ts
+import type {ValueOf} from 'type-fest';
+import data = require('./data.json');
+
+export function getData(name: string): ValueOf<typeof data> {
+    return data[name];
+}
+
+export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
+    return data[name];
+}
+
+// file.ts
+import {getData, onlyBar} from './main';
+
+getData('foo');
+//=> 1
+
+onlyBar('foo');
+//=> TypeError ...
+
+onlyBar('bar');
+//=> 2
+```
+ * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
+ */
+type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
+
+type ToToolCall<TOOLS extends Record<string, CoreTool>> = ValueOf<{
+    [NAME in keyof TOOLS]: {
+        type: 'tool-call';
+        toolCallId: string;
+        toolName: NAME & string;
+        args: z.infer<TOOLS[NAME]['parameters']>;
+    };
+}>;
+type ToToolCallArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolCall<TOOLS>>;
+
+type ToToolsWithExecute<TOOLS extends Record<string, CoreTool>> = {
+    [K in keyof TOOLS as TOOLS[K] extends {
+        execute: any;
+    } ? K : never]: TOOLS[K];
+};
+type ToToolsWithDefinedExecute<TOOLS extends Record<string, CoreTool>> = {
+    [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
+};
+type ToToolResultObject<TOOLS extends Record<string, CoreTool>> = ValueOf<{
+    [NAME in keyof TOOLS]: {
+        type: 'tool-result';
+        toolCallId: string;
+        toolName: NAME & string;
+        args: z.infer<TOOLS[NAME]['parameters']>;
+        result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
+    };
+}>;
+type ToToolResult<TOOLS extends Record<string, CoreTool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
+type ToToolResultArray<TOOLS extends Record<string, CoreTool>> = Array<ToToolResult<TOOLS>>;
+
+/**
+Generate a text and call tools for a given prompt using a language model.
+
+This function does not stream the output. If you want to stream the output, use `streamText` instead.
+
+@param model - The language model to use.
+@param tools - The tools that the model can call. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The value is passed through to the provider. The range depends on the provider and model.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The value is passed through to the provider. The range depends on the provider and model.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@returns
+A result object that contains the generated text, the results of the tool calls, and additional information.
+ */
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+    */
+    model: LanguageModel;
+    /**
+    The tools that the model can call. The model needs to support calling tools.
+    */
+    tools?: TOOLS;
+}): Promise<GenerateTextResult<TOOLS>>;
+/**
+The result of a `generateText` call.
+It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
+ */
+declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
+    /**
+    The generated text.
+    */
+    readonly text: string;
+    /**
+    The tool calls that were made during the generation.
+    */
+    readonly toolCalls: ToToolCallArray<TOOLS>;
+    /**
+    The results of the tool calls.
+    */
+    readonly toolResults: ToToolResultArray<TOOLS>;
+    /**
+    The reason why the generation finished.
+    */
+    readonly finishReason: FinishReason;
+    /**
+    The token usage of the generated text.
+    */
+    readonly usage: TokenUsage;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+    */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    Optional raw response data.
+    */
+    rawResponse?: {
+        /**
+        Response headers.
+        */
+        headers?: Record<string, string>;
+    };
+    /**
+    Logprobs for the completion.
+    `undefined` if the mode does not support logprobs or if was not enabled
+    */
+    readonly logprobs: LogProbs | undefined;
+    constructor(options: {
+        text: string;
+        toolCalls: ToToolCallArray<TOOLS>;
+        toolResults: ToToolResultArray<TOOLS>;
+        finishReason: FinishReason;
+        usage: TokenUsage;
+        warnings: CallWarning[] | undefined;
+        rawResponse?: {
+            headers?: Record<string, string>;
+        };
+        logprobs: LogProbs | undefined;
+    });
+}
+/**
+ * @deprecated Use `generateText` instead.
+ */
+declare const experimental_generateText: typeof generateText;
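Putting `CoreTool` and `generateText` together: tools that carry an `execute` function are run automatically, and the `ToToolResultArray` mapped types above make `toolResults` typed per tool. A sketch under the same provider assumption as earlier examples:

```ts
import { generateText, tool } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: provider package installed
import { z } from 'zod';

const { text, toolCalls, toolResults } = await generateText({
  model: openai('gpt-4-turbo'), // model id illustrative
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, temperature: 21 }), // placeholder
    }),
  },
  prompt: 'What is the weather in Berlin?',
});

// toolResults is typed: each entry has args { city: string } and the
// awaited return type of the matching execute function.
console.log(text, toolCalls, toolResults);
```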
+
+/**
+Generate a text and call tools for a given prompt using a language model.
+
+This function streams the output. If you do not want to stream the output, use `generateText` instead.
+
+@param model - The language model to use.
+@param tools - The tools that the model can call. The model needs to support calling tools.
+
+@param system - A system message that will be part of the prompt.
+@param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
+@param messages - A list of messages. You can either use `prompt` or `messages` but not both.
+
+@param maxTokens - Maximum number of tokens to generate.
+@param temperature - Temperature setting.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
+It is recommended to set either `temperature` or `topP`, but not both.
+@param presencePenalty - Presence penalty setting.
+It affects the likelihood of the model to repeat information that is already in the prompt.
+The value is passed through to the provider. The range depends on the provider and model.
+@param frequencyPenalty - Frequency penalty setting.
+It affects the likelihood of the model to repeatedly use the same words or phrases.
+The value is passed through to the provider. The range depends on the provider and model.
+@param seed - The seed (integer) to use for random sampling.
+If set and supported by the model, calls will generate deterministic results.
+
+@param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+@param abortSignal - An optional abort signal that can be used to cancel the call.
+
+@return
+A result object for accessing different stream types and additional information.
+ */
+declare function streamText<TOOLS extends Record<string, CoreTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+    /**
+    The language model to use.
+    */
+    model: LanguageModel;
+    /**
+    The tools that the model can call. The model needs to support calling tools.
+    */
+    tools?: TOOLS;
+}): Promise<StreamTextResult<TOOLS>>;
+type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
+    type: 'text-delta';
+    textDelta: string;
+} | ({
+    type: 'tool-call';
+} & ToToolCall<TOOLS>) | {
+    type: 'error';
+    error: unknown;
+} | ({
+    type: 'tool-result';
+} & ToToolResult<TOOLS>) | {
+    type: 'finish';
+    finishReason: FinishReason;
+    logprobs?: LogProbs;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+};
+/**
+A result object for accessing different stream types and additional information.
+ */
+declare class StreamTextResult<TOOLS extends Record<string, CoreTool>> {
+    private readonly originalStream;
+    /**
+    Warnings from the model provider (e.g. unsupported settings)
+    */
+    readonly warnings: CallWarning[] | undefined;
+    /**
+    Optional raw response data.
+    */
+    rawResponse?: {
+        /**
+        Response headers.
+        */
+        headers?: Record<string, string>;
+    };
+    constructor({ stream, warnings, rawResponse, }: {
+        stream: ReadableStream<TextStreamPart<TOOLS>>;
+        warnings: CallWarning[] | undefined;
+        rawResponse?: {
+            headers?: Record<string, string>;
+        };
+    });
+    /**
+    A text stream that returns only the generated text deltas. You can use it
+    as either an AsyncIterable or a ReadableStream. When an error occurs, the
+    stream will throw the error.
+    */
+    get textStream(): AsyncIterableStream<string>;
+    /**
+    A stream with all events, including text deltas, tool calls, tool results, and
+    errors.
+    You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
+    stream will throw the error.
+    */
+    get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
+    /**
+    Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
+    It can be used with the `useChat` and `useCompletion` hooks.
+
+    @param callbacks
+    Stream callbacks that will be called when the stream emits events.
+
+    @returns an `AIStream` object.
+    */
+    toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
+    /**
+    Writes stream data output to a Node.js response-like object.
+    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+    writes each stream data part as a separate chunk.
+
+    @param response A Node.js response-like object (ServerResponse).
+    @param init Optional headers and status code.
+    */
+    pipeAIStreamToResponse(response: ServerResponse, init?: {
+        headers?: Record<string, string>;
+        status?: number;
+    }): void;
+    /**
+    Writes text delta output to a Node.js response-like object.
+    It sets a `Content-Type` header to `text/plain; charset=utf-8` and
+    writes each text delta as a separate chunk.
+
+    @param response A Node.js response-like object (ServerResponse).
+    @param init Optional headers and status code.
+    */
+    pipeTextStreamToResponse(response: ServerResponse, init?: {
+        headers?: Record<string, string>;
+        status?: number;
+    }): void;
+    /**
+    Converts the result to a streamed response object with a stream data part stream.
+    It can be used with the `useChat` and `useCompletion` hooks.
+
+    @param init Optional headers.
+
+    @return A response object.
+    */
+    toAIStreamResponse(init?: ResponseInit): Response;
+    /**
+    Creates a simple text stream response.
+    Each text delta is encoded as UTF-8 and sent as a separate chunk.
+    Non-text-delta events are ignored.
+
+    @param init Optional headers and status code.
+    */
+    toTextStreamResponse(init?: ResponseInit): Response;
+}
+/**
+ * @deprecated Use `streamText` instead.
+ */
+declare const experimental_streamText: typeof streamText;
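A sketch of the typical server-side use of `streamText`, converting the result into a `useChat`-compatible response (the Next.js App Router file location is illustrative, not prescribed by the package):

```ts
// app/api/chat/route.ts (location illustrative)
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumption: provider package installed

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = await streamText({
    model: openai('gpt-4-turbo'), // model id illustrative
    messages,
  });

  // Emits the prefixed stream-data protocol that useChat/useCompletion consume;
  // use toTextStreamResponse() instead for a plain text stream.
  return result.toAIStreamResponse();
}
```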
+
 interface FunctionCall$1 {
     /**
      * The arguments to call the function with, as generated by the model in JSON
      * format. Note that the model does not always generate valid JSON, and may
@@ -91,7 +943,7 @@ interface Message$1 {
      * contains the function call name and arguments. Otherwise, the field should
      * not be set. (Deprecated and replaced by tool_calls.)
      */
-    function_call?: string | FunctionCall;
+    function_call?: string | FunctionCall$1;
     data?: JSONValue;
     /**
      * If the assistant role makes a tool call, the `tool_calls` field contains
@@ -110,12 +962,12 @@ type ChatRequest = {
     messages: Message$1[];
     options?: RequestOptions;
     functions?: Array<Function>;
-    function_call?: FunctionCall;
+    function_call?: FunctionCall$1;
     data?: Record<string, string>;
     tools?: Array<Tool>;
     tool_choice?: ToolChoice;
 };
-type FunctionCallHandler = (chatMessages: Message$1[], functionCall: FunctionCall) => Promise<ChatRequest | void>;
+type FunctionCallHandler = (chatMessages: Message$1[], functionCall: FunctionCall$1) => Promise<ChatRequest | void>;
 type ToolCallHandler = (chatMessages: Message$1[], toolCalls: ToolCall[]) => Promise<ChatRequest | void>;
 type RequestOptions = {
     headers?: Record<string, string> | Headers;
@@ -124,7 +976,7 @@ type RequestOptions = {
 type ChatRequestOptions = {
     options?: RequestOptions;
     functions?: Array<Function>;
-    function_call?: FunctionCall;
+    function_call?: FunctionCall$1;
     tools?: Array<Tool>;
     tool_choice?: ToolChoice;
     data?: Record<string, string>;
@@ -207,6 +1059,8 @@ type UseChatOptions = {
      * handle the extra fields before forwarding the request to the AI service.
      */
     sendExtraMessageFields?: boolean;
+    /** Stream mode (default to "stream-data") */
+    streamMode?: 'stream-data' | 'text';
 };
 type UseCompletionOptions = {
     /**
@@ -263,6 +1117,8 @@ type UseCompletionOptions = {
     * ```
     */
     body?: object;
+    /** Stream mode (default to "stream-data") */
+    streamMode?: 'stream-data' | 'text';
 };
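The new `streamMode` option selects which wire format the client hooks expect: the default `'stream-data'` prefixed protocol, or a plain `'text'` stream such as the one produced by `toTextStreamResponse()`. A sketch (the component rendering is elided; `ai/react` is where the hook ships):

```ts
'use client';
import { useChat } from 'ai/react';

export function Chat() {
  // With streamMode: 'text', the hook consumes a plain text stream
  // instead of the default prefixed "stream-data" protocol.
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    streamMode: 'text',
  });
  // ...render messages and the input form here
  return null;
}
```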
 type JSONValue = null | string | number | boolean | {
     [x: string]: JSONValue;
@@ -293,7 +1149,7 @@ interface StreamPart<CODE extends string, NAME extends string, TYPE> {
 }
 declare const textStreamPart: StreamPart<'0', 'text', string>;
 declare const functionCallStreamPart: StreamPart<'1', 'function_call', {
-    function_call: FunctionCall;
+    function_call: FunctionCall$1;
 }>;
 declare const dataStreamPart: StreamPart<'2', 'data', Array<JSONValue>>;
 declare const errorStreamPart: StreamPart<'3', 'error', string>;
@@ -307,6 +1163,13 @@ declare const toolCallStreamPart: StreamPart<'7', 'tool_calls', {
     tool_calls: ToolCall[];
 }>;
 declare const messageAnnotationsStreamPart: StreamPart<'8', 'message_annotations', Array<JSONValue>>;
+type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallStreamPart | typeof messageAnnotationsStreamPart;
+/**
+ * Maps the type of a stream part to its value type.
+ */
+type StreamPartValueType = {
+    [P in StreamParts as P['name']]: ReturnType<P['parse']>['value'];
+};
 type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse>;
 /**
  * The map of prefixes for data in the stream
@@ -341,8 +1204,42 @@ declare const StreamStringPrefixes: {
     readonly tool_calls: "7";
     readonly message_annotations: "8";
 };
+/**
+Parses a stream part from a string.
+
+@param line The string to parse.
+@returns The parsed stream part.
+@throws An error if the string cannot be parsed.
+ */
+declare const parseStreamPart: (line: string) => StreamPartType;
+/**
+Prepends a string with a prefix from the `StreamChunkPrefixes`, JSON-ifies it,
+and appends a new line.
+
+It ensures type-safety for the part type and value.
+ */
+declare function formatStreamPart<T extends keyof StreamPartValueType>(type: T, value: StreamPartValueType[T]): StreamString;
+
+/**
+ * Generates a 7-character random string to use for IDs. Not secure.
+ */
+declare const generateId: (size?: number | undefined) => string;
+
+/**
+Converts a ReadableStreamDefaultReader into an async generator that yields
+StreamPart objects.
+
+@param reader
+Reader for the stream to read from.
+@param isAborted
+Optional function that returns true if the request has been aborted.
+If the function returns true, the generator will stop reading the stream.
+If the function is not provided, the generator will not stop reading the stream.
+ */
+declare function readDataStream(reader: ReadableStreamDefaultReader<Uint8Array>, { isAborted, }?: {
+    isAborted?: () => boolean;
+}): AsyncGenerator<StreamPartType>;
 
-declare const nanoid: (size?: number | undefined) => string;
 declare function createChunkDecoder(): (chunk: Uint8Array | undefined) => string;
 declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefined) => string;
 declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
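A small round-trip illustrating the newly exported stream-part helpers. Each wire line is `<prefix>:<json>` followed by a newline, per the `StreamStringPrefixes` map above; `readDataStream` applies the same parsing to a whole reader:

```ts
import { formatStreamPart, parseStreamPart } from 'ai';

// Encode: 'text' maps to prefix '0', the value is JSON-encoded.
const wire = formatStreamPart('text', 'Hello'); // '0:"Hello"\n'

// Decode a single line (without the trailing newline).
const part = parseStreamPart(wire.trim());
console.log(part); // { type: 'text', value: 'Hello' }
```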
@@ -350,10 +1247,6 @@ declare function createChunkDecoder(complex?: boolean): (chunk: Uint8Array | und
 
 declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n`;
 type StreamString = `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`;
-/**
- * A header sent to the client so it knows how to handle parsing the stream (as a deprecated text response or using the new prefixed protocol)
- */
-declare const COMPLEX_HEADER = "X-Experimental-Stream-Data";
 
 declare interface AzureChatCompletions {
     id: string;
@@ -481,7 +1374,7 @@ interface ChoiceDelta {
      * The name and arguments of a function that should be called, as generated by the
      * model.
      */
-    function_call?: FunctionCall;
+    function_call?: FunctionCall$1;
     /**
      * The role of the author of this message.
      */
@@ -607,10 +1500,8 @@ interface AIStreamCallbacksAndOptions {
     /** `onText`: Called for each text chunk. */
     onText?: (text: string) => Promise<void> | void;
     /**
-     *
-     *
-     *
-     * When StreamData is rolled out, this will be removed and the new protocol will be used by default.
+     * @deprecated This flag is no longer used and only retained for backwards compatibility.
+     * You can remove it from your code.
      */
     experimental_streamData?: boolean;
 }
@@ -797,18 +1688,54 @@ interface MessageStopEvent {
  */
 declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;
 
+/**
+You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
+ */
 type AssistantResponseSettings = {
+    /**
+    The thread ID that the response is associated with.
+    */
     threadId: string;
+    /**
+    The ID of the latest message that the response is associated with.
+    */
     messageId: string;
 };
+/**
+The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
+ */
 type AssistantResponseCallback = (options: {
+    /**
+    @deprecated use variable from outer scope instead.
+    */
     threadId: string;
+    /**
+    @deprecated use variable from outer scope instead.
+    */
     messageId: string;
+    /**
+    Forwards an assistant message (non-streaming) to the client.
+    */
     sendMessage: (message: AssistantMessage) => void;
+    /**
+    Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
+    */
    sendDataMessage: (message: DataMessage) => void;
+    /**
+    Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
+    */
     forwardStream: (stream: AssistantStream) => Promise<Run | undefined>;
 }) => Promise<void>;
-
+/**
+The `AssistantResponse` allows you to send a stream of assistant update to `useAssistant`.
+It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
+It receives an assistant thread and a current message, and can send messages and data messages to the client.
+ */
+declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
+/**
+@deprecated Use `AssistantResponse` instead.
+ */
+declare const experimental_AssistantResponse: typeof AssistantResponse;
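A sketch of `AssistantResponse` in a route handler. It assumes the official `openai` SDK v4 (whose `runs.stream` returns the `AssistantStream` expected by `forwardStream`); the `ASSISTANT_ID` environment variable is illustrative:

```ts
import { AssistantResponse } from 'ai';
import OpenAI from 'openai';

const client = new OpenAI();

export async function POST(req: Request) {
  const { threadId, message } = await req.json();

  // Add the incoming user message to the thread.
  const createdMessage = await client.beta.threads.messages.create(threadId, {
    role: 'user',
    content: message,
  });

  return AssistantResponse(
    { threadId, messageId: createdMessage.id },
    async ({ forwardStream }) => {
      // Run the assistant and forward its streamed events to the client.
      const runStream = client.beta.threads.runs.stream(threadId, {
        assistant_id: process.env.ASSISTANT_ID ?? '', // illustrative
      });
      await forwardStream(runStream);
    },
  );
}
```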
 
 interface AWSBedrockResponse {
     body?: AsyncIterable<{
@@ -817,6 +1744,7 @@ interface AWSBedrockResponse {
         };
     }>;
 }
+declare function AWSBedrockAnthropicMessagesStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
 declare function AWSBedrockAnthropicStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
 declare function AWSBedrockCohereStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
 declare function AWSBedrockLlama2Stream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
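A hedged sketch pairing the new `AWSBedrockAnthropicMessagesStream` with `@aws-sdk/client-bedrock-runtime`. The region, model ID, and request body follow Bedrock's Anthropic Messages format and are assumptions, not part of this package:

```ts
import { AWSBedrockAnthropicMessagesStream, StreamingTextResponse } from 'ai';
import { BedrockRuntimeClient, InvokeModelWithResponseStreamCommand } from '@aws-sdk/client-bedrock-runtime';

const client = new BedrockRuntimeClient({ region: 'us-east-1' }); // illustrative

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const response = await client.send(
    new InvokeModelWithResponseStreamCommand({
      modelId: 'anthropic.claude-3-haiku-20240307-v1:0', // illustrative
      contentType: 'application/json',
      accept: 'application/json',
      body: JSON.stringify({
        anthropic_version: 'bedrock-2023-05-31',
        max_tokens: 300,
        messages: [{ role: 'user', content: prompt }],
      }),
    }),
  );

  // The SDK response exposes `body` as an async iterable of chunks,
  // matching the AWSBedrockResponse shape above.
  return new StreamingTextResponse(AWSBedrockAnthropicMessagesStream(response));
}
```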
@@ -881,6 +1809,31 @@ declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
     };
 };
 
+interface ChatCompletionResponseChunk {
+    id: string;
+    object: 'chat.completion.chunk';
+    created: number;
+    model: string;
+    choices: ChatCompletionResponseChunkChoice[];
+}
+interface ChatCompletionResponseChunkChoice {
+    index: number;
+    delta: {
+        role?: string;
+        content?: string;
+        tool_calls?: ToolCalls[];
+    };
+    finish_reason: string;
+}
+interface FunctionCall {
+    name: string;
+    arguments: string;
+}
+interface ToolCalls {
+    id: 'null';
+    type: 'function';
+    function: FunctionCall;
+}
 declare function MistralStream(response: AsyncGenerator<ChatCompletionResponseChunk, void, unknown>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
 
 interface Prediction {
@@ -932,7 +1885,7 @@ declare function ReplicateStream(res: Prediction, cb?: AIStreamCallbacksAndOptio
 /**
  * A stream wrapper to send custom JSON-encoded data back to the client.
  */
-declare class experimental_StreamData {
+declare class StreamData {
     private encoder;
     private controller;
     stream: TransformStream<Uint8Array, Uint8Array>;
@@ -950,7 +1903,12 @@ declare class experimental_StreamData {
  * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
  * This assumes every chunk is a 'text' chunk.
  */
-declare function createStreamDataTransformer(
+declare function createStreamDataTransformer(): TransformStream<any, any>;
+/**
+@deprecated Use `StreamData` instead.
+ */
+declare class experimental_StreamData extends StreamData {
+}
 
 /**
  * This is a naive implementation of the streaming React response API.
@@ -979,7 +1937,7 @@ declare class experimental_StreamingReactResponse {
         content: string;
         data?: JSONValue[];
     }) => UINode | Promise<UINode>;
-    data?: experimental_StreamData;
+    data?: StreamData;
     generateId?: IdGenerator;
     });
 }
@@ -988,7 +1946,7 @@ declare class experimental_StreamingReactResponse {
  * A utility class for streaming text responses.
  */
 declare class StreamingTextResponse extends Response {
-    constructor(res: ReadableStream, init?: ResponseInit, data?: experimental_StreamData);
+    constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
 }
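A sketch of the renamed `StreamData` together with `StreamingTextResponse`, assuming the official `openai` SDK. The data channel rides alongside the text stream, and `close()` must be called once nothing more will be appended, or the response will not terminate:

```ts
import OpenAI from 'openai';
import { OpenAIStream, StreamData, StreamingTextResponse } from 'ai';

const client = new OpenAI();

export async function POST(req: Request) {
  const { messages } = await req.json();

  const response = await client.chat.completions.create({
    model: 'gpt-3.5-turbo', // model id illustrative
    stream: true,
    messages,
  });

  const data = new StreamData();
  data.append({ source: 'example' }); // any JSONValue

  const stream = OpenAIStream(response, {
    onFinal() {
      data.close(); // release the data channel when the text stream ends
    },
  });

  return new StreamingTextResponse(stream, {}, data);
}
```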
 /**
  * A utility function to stream a ReadableStream to a Node.js response-like object.
@@ -998,4 +1956,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
     status?: number;
 }): void;
 
-export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantMessage,
+export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };