ai 0.0.0-e27b4ed4-20240419203611

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/LICENSE +13 -0
  2. package/README.md +37 -0
  3. package/dist/index.d.mts +1770 -0
  4. package/dist/index.d.ts +1770 -0
  5. package/dist/index.js +2958 -0
  6. package/dist/index.js.map +1 -0
  7. package/dist/index.mjs +2887 -0
  8. package/dist/index.mjs.map +1 -0
  9. package/package.json +174 -0
  10. package/prompts/dist/index.d.mts +267 -0
  11. package/prompts/dist/index.d.ts +267 -0
  12. package/prompts/dist/index.js +178 -0
  13. package/prompts/dist/index.js.map +1 -0
  14. package/prompts/dist/index.mjs +146 -0
  15. package/prompts/dist/index.mjs.map +1 -0
  16. package/react/dist/index.d.mts +487 -0
  17. package/react/dist/index.d.ts +504 -0
  18. package/react/dist/index.js +1310 -0
  19. package/react/dist/index.js.map +1 -0
  20. package/react/dist/index.mjs +1271 -0
  21. package/react/dist/index.mjs.map +1 -0
  22. package/react/dist/index.server.d.mts +17 -0
  23. package/react/dist/index.server.d.ts +17 -0
  24. package/react/dist/index.server.js +50 -0
  25. package/react/dist/index.server.js.map +1 -0
  26. package/react/dist/index.server.mjs +23 -0
  27. package/react/dist/index.server.mjs.map +1 -0
  28. package/rsc/dist/index.d.ts +289 -0
  29. package/rsc/dist/index.mjs +18 -0
  30. package/rsc/dist/rsc-client.d.mts +1 -0
  31. package/rsc/dist/rsc-client.mjs +18 -0
  32. package/rsc/dist/rsc-client.mjs.map +1 -0
  33. package/rsc/dist/rsc-server.d.mts +225 -0
  34. package/rsc/dist/rsc-server.mjs +1246 -0
  35. package/rsc/dist/rsc-server.mjs.map +1 -0
  36. package/rsc/dist/rsc-shared.d.mts +94 -0
  37. package/rsc/dist/rsc-shared.mjs +346 -0
  38. package/rsc/dist/rsc-shared.mjs.map +1 -0
  39. package/solid/dist/index.d.mts +351 -0
  40. package/solid/dist/index.d.ts +351 -0
  41. package/solid/dist/index.js +1002 -0
  42. package/solid/dist/index.js.map +1 -0
  43. package/solid/dist/index.mjs +974 -0
  44. package/solid/dist/index.mjs.map +1 -0
  45. package/svelte/dist/index.d.mts +348 -0
  46. package/svelte/dist/index.d.ts +348 -0
  47. package/svelte/dist/index.js +1556 -0
  48. package/svelte/dist/index.js.map +1 -0
  49. package/svelte/dist/index.mjs +1528 -0
  50. package/svelte/dist/index.mjs.map +1 -0
  51. package/vue/dist/index.d.mts +345 -0
  52. package/vue/dist/index.d.ts +345 -0
  53. package/vue/dist/index.js +1002 -0
  54. package/vue/dist/index.js.map +1 -0
  55. package/vue/dist/index.mjs +964 -0
  56. package/vue/dist/index.mjs.map +1 -0
@@ -0,0 +1,1770 @@
1
+ import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1CallWarning } from '@ai-sdk/provider';
2
+ import { z } from 'zod';
3
+ import { AssistantStream } from 'openai/lib/AssistantStream';
4
+ import { Run } from 'openai/resources/beta/threads/runs/runs';
5
+ import { ChatCompletionResponseChunk } from '@mistralai/mistralai';
6
+ import { ServerResponse } from 'node:http';
7
+
8
+ type TokenUsage = {
9
+ promptTokens: number;
10
+ completionTokens: number;
11
+ totalTokens: number;
12
+ };
13
+
14
+ type CallSettings = {
15
+ /**
16
+ Maximum number of tokens to generate.
17
+ */
18
+ maxTokens?: number;
19
+ /**
20
+ Temperature setting. This is a number between 0 (almost no randomness) and
21
+ 1 (very random).
22
+
23
+ It is recommended to set either `temperature` or `topP`, but not both.
24
+
25
+ @default 0
26
+ */
27
+ temperature?: number;
28
+ /**
29
+ Nucleus sampling. This is a number between 0 and 1.
30
+
31
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
32
+ are considered.
33
+
34
+ It is recommended to set either `temperature` or `topP`, but not both.
35
+ */
36
+ topP?: number;
37
+ /**
38
+ Presence penalty setting. It affects the likelihood of the model to
39
+ repeat information that is already in the prompt.
40
+
41
+ The presence penalty is a number between -1 (increase repetition)
42
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
43
+
44
+ @default 0
45
+ */
46
+ presencePenalty?: number;
47
+ /**
48
+ Frequency penalty setting. It affects the likelihood of the model
49
+ to repeatedly use the same words or phrases.
50
+
51
+ The frequency penalty is a number between -1 (increase repetition)
52
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
53
+
54
+ @default 0
55
+ */
56
+ frequencyPenalty?: number;
57
+ /**
58
+ The seed (integer) to use for random sampling. If set and supported
59
+ by the model, calls will generate deterministic results.
60
+ */
61
+ seed?: number;
62
+ /**
63
+ Maximum number of retries. Set to 0 to disable retries.
64
+
65
+ @default 2
66
+ */
67
+ maxRetries?: number;
68
+ /**
69
+ Abort signal.
70
+ */
71
+ abortSignal?: AbortSignal;
72
+ };
73
+
74
+ /**
75
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
76
+ */
77
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
78
+ /**
79
+ Converts data content to a base64-encoded string.
80
+
81
+ @param content - Data content to convert.
82
+ @returns Base64-encoded string.
83
+ */
84
+ declare function convertDataContentToBase64String(content: DataContent): string;
85
+ /**
86
+ Converts data content to a Uint8Array.
87
+
88
+ @param content - Data content to convert.
89
+ @returns Uint8Array.
90
+ */
91
+ declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;
92
+
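For illustration, both conversion helpers accept any `DataContent` variant; a minimal sketch, assuming these functions are exported from the `ai` package:

```ts
import { convertDataContentToBase64String, convertDataContentToUint8Array } from 'ai';

// Bytes for the string "Hi".
const bytes = new Uint8Array([72, 105]);

const asBase64 = convertDataContentToBase64String(bytes); // "SGk="
const roundTripped = convertDataContentToUint8Array(asBase64); // Uint8Array [72, 105]
```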
93
+ /**
94
+ Text content part of a prompt. It contains a string of text.
95
+ */
96
+ interface TextPart$1 {
97
+ type: 'text';
98
+ /**
99
+ The text content.
100
+ */
101
+ text: string;
102
+ }
103
+ /**
104
+ Image content part of a prompt. It contains an image.
105
+ */
106
+ interface ImagePart {
107
+ type: 'image';
108
+ /**
109
+ Image data. Can either be:
110
+
111
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
112
+ - URL: a URL that points to the image
113
+ */
114
+ image: DataContent | URL;
115
+ /**
116
+ Optional mime type of the image.
117
+ */
118
+ mimeType?: string;
119
+ }
120
+ /**
121
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
122
+ */
123
+ interface ToolCallPart {
124
+ type: 'tool-call';
125
+ /**
126
+ ID of the tool call. This ID is used to match the tool call with the tool result.
127
+ */
128
+ toolCallId: string;
129
+ /**
130
+ Name of the tool that is being called.
131
+ */
132
+ toolName: string;
133
+ /**
134
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
135
+ */
136
+ args: unknown;
137
+ }
138
+ /**
139
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
140
+ */
141
+ interface ToolResultPart {
142
+ type: 'tool-result';
143
+ /**
144
+ ID of the tool call that this result is associated with.
145
+ */
146
+ toolCallId: string;
147
+ /**
148
+ Name of the tool that generated this result.
149
+ */
150
+ toolName: string;
151
+ /**
152
+ Result of the tool call. This is a JSON-serializable object.
153
+ */
154
+ result: unknown;
155
+ /**
156
+ Optional flag indicating whether the result is an error or an error message.
157
+ */
158
+ isError?: boolean;
159
+ }
160
+
161
+ /**
162
+ A message that can be used in the `messages` field of a prompt.
163
+ It can be a user message, an assistant message, or a tool message.
164
+ */
165
+ type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
166
+ /**
167
+ A user message. It can contain text or a combination of text and images.
168
+ */
169
+ type ExperimentalUserMessage = {
170
+ role: 'user';
171
+ content: UserContent;
172
+ };
173
+ /**
174
+ Content of a user message. It can be a string or an array of text and image parts.
175
+ */
176
+ type UserContent = string | Array<TextPart$1 | ImagePart>;
177
+ /**
178
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
179
+ */
180
+ type ExperimentalAssistantMessage = {
181
+ role: 'assistant';
182
+ content: AssistantContent;
183
+ };
184
+ /**
185
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
186
+ */
187
+ type AssistantContent = string | Array<TextPart$1 | ToolCallPart>;
188
+ /**
189
+ A tool message. It contains the result of one or more tool calls.
190
+ */
191
+ type ExperimentalToolMessage = {
192
+ role: 'tool';
193
+ content: ToolContent;
194
+ };
195
+ /**
196
+ Content of a tool message. It is an array of tool result parts.
197
+ */
198
+ type ToolContent = Array<ToolResultPart>;
199
+
200
+ /**
201
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
202
+ */
203
+ type Prompt = {
204
+ /**
205
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
206
+ */
207
+ system?: string;
208
+ /**
209
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
210
+ */
211
+ prompt?: string;
212
+ /**
213
+ A list of messages. You can either use `prompt` or `messages` but not both.
214
+ */
215
+ messages?: Array<ExperimentalMessage>;
216
+ };
217
+
218
+ /**
219
+ Generate a structured, typed object for a given prompt and schema using a language model.
220
+
221
+ This function does not stream the output. If you want to stream the output, use `experimental_streamObject` instead.
222
+
223
+ @param model - The language model to use.
224
+
225
+ @param schema - The schema of the object that the model should generate.
226
+ @param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
227
+
228
+ @param system - A system message that will be part of the prompt.
229
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
230
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
231
+
232
+ @param maxTokens - Maximum number of tokens to generate.
233
+ @param temperature - Temperature setting.
234
+ This is a number between 0 (almost no randomness) and 1 (very random).
235
+ It is recommended to set either `temperature` or `topP`, but not both.
236
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
237
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
238
+ It is recommended to set either `temperature` or `topP`, but not both.
239
+ @param presencePenalty - Presence penalty setting.
240
+ It affects the likelihood of the model to repeat information that is already in the prompt.
241
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
242
+ 0 means no penalty.
243
+ @param frequencyPenalty - Frequency penalty setting.
244
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
245
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
246
+ 0 means no penalty.
247
+ @param seed - The seed (integer) to use for random sampling.
248
+ If set and supported by the model, calls will generate deterministic results.
249
+
250
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
251
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
252
+
253
+ @returns
254
+ A result object that contains the generated object, the finish reason, the token usage, and additional information.
255
+ */
256
+ declare function experimental_generateObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
257
+ /**
258
+ The language model to use.
259
+ */
260
+ model: LanguageModelV1;
261
+ /**
262
+ The schema of the object that the model should generate.
263
+ */
264
+ schema: z.Schema<T>;
265
+ /**
266
+ The mode to use for object generation. Not all models support all modes.
267
+
268
+ Default and recommended: 'auto' (best mode for the model).
269
+ */
270
+ mode?: 'auto' | 'json' | 'tool' | 'grammar';
271
+ }): Promise<GenerateObjectResult<T>>;
272
+ /**
273
+ The result of a `generateObject` call.
274
+ */
275
+ declare class GenerateObjectResult<T> {
276
+ /**
277
+ The generated object (typed according to the schema).
278
+ */
279
+ readonly object: T;
280
+ /**
281
+ The reason why the generation finished.
282
+ */
283
+ readonly finishReason: LanguageModelV1FinishReason;
284
+ /**
285
+ The token usage of the generated text.
286
+ */
287
+ readonly usage: TokenUsage;
288
+ /**
289
+ Warnings from the model provider (e.g. unsupported settings)
290
+ */
291
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
292
+ constructor(options: {
293
+ object: T;
294
+ finishReason: LanguageModelV1FinishReason;
295
+ usage: TokenUsage;
296
+ warnings: LanguageModelV1CallWarning[] | undefined;
297
+ });
298
+ }
299
+
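A minimal usage sketch of `experimental_generateObject`, based on the signature above; the `model` argument is assumed to be any `LanguageModelV1` instance obtained from a provider package:

```ts
import { experimental_generateObject } from 'ai';
import { LanguageModelV1 } from '@ai-sdk/provider';
import { z } from 'zod';

async function extractRecipe(model: LanguageModelV1) {
  const { object, finishReason, usage } = await experimental_generateObject({
    model,
    schema: z.object({
      name: z.string(),
      ingredients: z.array(z.string()),
    }),
    prompt: 'Generate a simple lasagna recipe.',
  });

  // `object` is typed from the schema: { name: string; ingredients: string[] }
  return { object, finishReason, usage };
}
```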
300
+ type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;
301
+
302
+ /**
303
+ Create a type from an object with all keys and nested keys set to optional.
304
+ The helper supports normal objects and Zod schemas (which are resolved automatically).
305
+ It always recurses into arrays.
306
+
307
+ Adapted from [type-fest](https://github.com/sindresorhus/type-fest/tree/main) PartialDeep.
308
+ */
309
+ type DeepPartial<T> = T extends null | undefined | string | number | boolean | symbol | bigint | void | Date | RegExp | ((...arguments_: any[]) => unknown) | (new (...arguments_: any[]) => unknown) ? T : T extends Map<infer KeyType, infer ValueType> ? PartialMap<KeyType, ValueType> : T extends Set<infer ItemType> ? PartialSet<ItemType> : T extends ReadonlyMap<infer KeyType, infer ValueType> ? PartialReadonlyMap<KeyType, ValueType> : T extends ReadonlySet<infer ItemType> ? PartialReadonlySet<ItemType> : T extends z.Schema<any> ? DeepPartial<T['_type']> : T extends object ? T extends ReadonlyArray<infer ItemType> ? ItemType[] extends T ? readonly ItemType[] extends T ? ReadonlyArray<DeepPartial<ItemType | undefined>> : Array<DeepPartial<ItemType | undefined>> : PartialObject<T> : PartialObject<T> : unknown;
310
+ type PartialMap<KeyType, ValueType> = {} & Map<DeepPartial<KeyType>, DeepPartial<ValueType>>;
311
+ type PartialSet<T> = {} & Set<DeepPartial<T>>;
312
+ type PartialReadonlyMap<KeyType, ValueType> = {} & ReadonlyMap<DeepPartial<KeyType>, DeepPartial<ValueType>>;
313
+ type PartialReadonlySet<T> = {} & ReadonlySet<DeepPartial<T>>;
314
+ type PartialObject<ObjectType extends object> = {
315
+ [KeyType in keyof ObjectType]?: DeepPartial<ObjectType[KeyType]>;
316
+ };
317
+
318
+ /**
319
+ Generate a structured, typed object for a given prompt and schema using a language model.
320
+
321
+ This function streams the output. If you do not want to stream the output, use `experimental_generateObject` instead.
322
+
323
+ @param model - The language model to use.
324
+
325
+ @param schema - The schema of the object that the model should generate.
326
+ @param mode - The mode to use for object generation. Not all models support all modes. Defaults to 'auto'.
327
+
328
+ @param system - A system message that will be part of the prompt.
329
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
330
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
331
+
332
+ @param maxTokens - Maximum number of tokens to generate.
333
+ @param temperature - Temperature setting.
334
+ This is a number between 0 (almost no randomness) and 1 (very random).
335
+ It is recommended to set either `temperature` or `topP`, but not both.
336
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
337
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
338
+ It is recommended to set either `temperature` or `topP`, but not both.
339
+ @param presencePenalty - Presence penalty setting.
340
+ It affects the likelihood of the model to repeat information that is already in the prompt.
341
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
342
+ 0 means no penalty.
343
+ @param frequencyPenalty - Frequency penalty setting.
344
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
345
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
346
+ 0 means no penalty.
347
+ @param seed - The seed (integer) to use for random sampling.
348
+ If set and supported by the model, calls will generate deterministic results.
349
+
350
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
351
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
352
+
353
+ @return
354
+ A result object for accessing the partial object stream and additional information.
355
+ */
356
+ declare function experimental_streamObject<T>({ model, schema, mode, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
357
+ /**
358
+ The language model to use.
359
+ */
360
+ model: LanguageModelV1;
361
+ /**
362
+ The schema of the object that the model should generate.
363
+ */
364
+ schema: z.Schema<T>;
365
+ /**
366
+ The mode to use for object generation. Not all models support all modes.
367
+
368
+ Default and recommended: 'auto' (best mode for the model).
369
+ */
370
+ mode?: 'auto' | 'json' | 'tool' | 'grammar';
371
+ }): Promise<StreamObjectResult<T>>;
372
+ /**
373
+ The result of a `streamObject` call that contains the partial object stream and additional information.
374
+ */
375
+ declare class StreamObjectResult<T> {
376
+ private readonly originalStream;
377
+ /**
378
+ Warnings from the model provider (e.g. unsupported settings)
379
+ */
380
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
381
+ constructor({ stream, warnings, }: {
382
+ stream: ReadableStream<string | ErrorStreamPart>;
383
+ warnings: LanguageModelV1CallWarning[] | undefined;
384
+ });
385
+ get partialObjectStream(): AsyncIterableStream<DeepPartial<T>>;
386
+ }
387
+ type ErrorStreamPart = {
388
+ type: 'error';
389
+ error: unknown;
390
+ };
391
+
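A corresponding sketch for `experimental_streamObject`, showing how `partialObjectStream` yields progressively filled-in `DeepPartial<T>` values (the provider-supplied `model` is again an assumption):

```ts
import { experimental_streamObject } from 'ai';
import { LanguageModelV1 } from '@ai-sdk/provider';
import { z } from 'zod';

async function streamCharacter(model: LanguageModelV1) {
  const result = await experimental_streamObject({
    model,
    schema: z.object({ name: z.string(), backstory: z.string() }),
    prompt: 'Invent a character for a space opera.',
  });

  // Fields fill in as tokens arrive, e.g. {} -> { name: 'Ada' } -> { name: 'Ada', backstory: '...' }
  for await (const partial of result.partialObjectStream) {
    console.log(partial);
  }
}
```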
392
+ /**
393
+ A tool contains the description and the schema of the input that the tool expects.
394
+ This enables the language model to generate the input.
395
+
396
+ The tool can also contain an optional execute function that performs the actual execution of the tool.
397
+ */
398
+ interface ExperimentalTool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
399
+ /**
400
+ An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
401
+ */
402
+ description?: string;
403
+ /**
404
+ The schema of the input that the tool expects. The language model will use this to generate the input.
405
+ Use descriptions to make the input understandable for the language model.
406
+ */
407
+ parameters: PARAMETERS;
408
+ /**
409
+ An optional execute function for the actual execution function of the tool.
410
+ If not provided, the tool will not be executed automatically.
411
+ */
412
+ execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
413
+ }
414
+ /**
415
+ Helper function for inferring the execute args of a tool.
416
+ */
417
+ declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: ExperimentalTool<PARAMETERS, RESULT> & {
418
+ execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
419
+ }): ExperimentalTool<PARAMETERS, RESULT> & {
420
+ execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
421
+ };
422
+ declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: ExperimentalTool<PARAMETERS, RESULT> & {
423
+ execute?: undefined;
424
+ }): ExperimentalTool<PARAMETERS, RESULT> & {
425
+ execute: undefined;
426
+ };
427
+
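The `tool` helper exists only for type inference: it ties the `execute` argument type to the `parameters` schema. A small sketch:

```ts
import { tool } from 'ai';
import { z } from 'zod';

const weatherTool = tool({
  description: 'Get the current weather for a city.',
  parameters: z.object({ city: z.string() }),
  // The argument is inferred as { city: string } from the parameters schema.
  execute: async ({ city }) => ({ city, temperatureC: 21 }),
});
```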
428
+ /**
429
+ Create a union of the given object's values, and optionally specify which keys to get the values from.
430
+
431
+ Please upvote [this issue](https://github.com/microsoft/TypeScript/issues/31438) if you want to have this type as a built-in in TypeScript.
432
+
433
+ @example
434
+ ```
435
+ // data.json
436
+ {
437
+ 'foo': 1,
438
+ 'bar': 2,
439
+ 'biz': 3
440
+ }
441
+
442
+ // main.ts
443
+ import type {ValueOf} from 'type-fest';
444
+ import data = require('./data.json');
445
+
446
+ export function getData(name: string): ValueOf<typeof data> {
447
+ return data[name];
448
+ }
449
+
450
+ export function onlyBar(name: string): ValueOf<typeof data, 'bar'> {
451
+ return data[name];
452
+ }
453
+
454
+ // file.ts
455
+ import {getData, onlyBar} from './main';
456
+
457
+ getData('foo');
458
+ //=> 1
459
+
460
+ onlyBar('foo');
461
+ //=> TypeError ...
462
+
463
+ onlyBar('bar');
464
+ //=> 2
465
+ ```
466
+ * @see https://github.com/sindresorhus/type-fest/blob/main/source/value-of.d.ts
467
+ */
468
+ type ValueOf<ObjectType, ValueType extends keyof ObjectType = keyof ObjectType> = ObjectType[ValueType];
469
+
470
+ type ToToolCall<TOOLS extends Record<string, ExperimentalTool>> = ValueOf<{
471
+ [NAME in keyof TOOLS]: {
472
+ type: 'tool-call';
473
+ toolCallId: string;
474
+ toolName: NAME & string;
475
+ args: z.infer<TOOLS[NAME]['parameters']>;
476
+ };
477
+ }>;
478
+ type ToToolCallArray<TOOLS extends Record<string, ExperimentalTool>> = Array<ToToolCall<TOOLS>>;
479
+
480
+ type ToToolsWithExecute<TOOLS extends Record<string, ExperimentalTool>> = {
481
+ [K in keyof TOOLS as TOOLS[K] extends {
482
+ execute: any;
483
+ } ? K : never]: TOOLS[K];
484
+ };
485
+ type ToToolsWithDefinedExecute<TOOLS extends Record<string, ExperimentalTool>> = {
486
+ [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
487
+ };
488
+ type ToToolResultObject<TOOLS extends Record<string, ExperimentalTool>> = ValueOf<{
489
+ [NAME in keyof TOOLS]: {
490
+ type: 'tool-result';
491
+ toolCallId: string;
492
+ toolName: NAME & string;
493
+ args: z.infer<TOOLS[NAME]['parameters']>;
494
+ result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
495
+ };
496
+ }>;
497
+ type ToToolResult<TOOLS extends Record<string, ExperimentalTool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
498
+ type ToToolResultArray<TOOLS extends Record<string, ExperimentalTool>> = Array<ToToolResult<TOOLS>>;
499
+
500
+ /**
501
+ Generate a text and call tools for a given prompt using a language model.
502
+
503
+ This function does not stream the output. If you want to stream the output, use `experimental_streamText` instead.
504
+
505
+ @param model - The language model to use.
506
+ @param tools - The tools that the model can call. The model needs to support calling tools.
507
+
508
+ @param system - A system message that will be part of the prompt.
509
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
510
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
511
+
512
+ @param maxTokens - Maximum number of tokens to generate.
513
+ @param temperature - Temperature setting.
514
+ This is a number between 0 (almost no randomness) and 1 (very random).
515
+ It is recommended to set either `temperature` or `topP`, but not both.
516
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
517
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
518
+ It is recommended to set either `temperature` or `topP`, but not both.
519
+ @param presencePenalty - Presence penalty setting.
520
+ It affects the likelihood of the model to repeat information that is already in the prompt.
521
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
522
+ 0 means no penalty.
523
+ @param frequencyPenalty - Frequency penalty setting.
524
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
525
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
526
+ 0 means no penalty.
527
+ @param seed - The seed (integer) to use for random sampling.
528
+ If set and supported by the model, calls will generate deterministic results.
529
+
530
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
531
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
532
+
533
+ @returns
534
+ A result object that contains the generated text, the results of the tool calls, and additional information.
535
+ */
536
+ declare function experimental_generateText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
537
+ /**
538
+ The language model to use.
539
+ */
540
+ model: LanguageModelV1;
541
+ /**
542
+ The tools that the model can call. The model needs to support calling tools.
543
+ */
544
+ tools?: TOOLS;
545
+ }): Promise<GenerateTextResult<TOOLS>>;
546
+ /**
547
+ The result of a `generateText` call.
548
+ It contains the generated text, the tool calls that were made during the generation, and the results of the tool calls.
549
+ */
550
+ declare class GenerateTextResult<TOOLS extends Record<string, ExperimentalTool>> {
551
+ /**
552
+ The generated text.
553
+ */
554
+ readonly text: string;
555
+ /**
556
+ The tool calls that were made during the generation.
557
+ */
558
+ readonly toolCalls: ToToolCallArray<TOOLS>;
559
+ /**
560
+ The results of the tool calls.
561
+ */
562
+ readonly toolResults: ToToolResultArray<TOOLS>;
563
+ /**
564
+ The reason why the generation finished.
565
+ */
566
+ readonly finishReason: LanguageModelV1FinishReason;
567
+ /**
568
+ The token usage of the generated text.
569
+ */
570
+ readonly usage: TokenUsage;
571
+ /**
572
+ Warnings from the model provider (e.g. unsupported settings)
573
+ */
574
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
575
+ constructor(options: {
576
+ text: string;
577
+ toolCalls: ToToolCallArray<TOOLS>;
578
+ toolResults: ToToolResultArray<TOOLS>;
579
+ finishReason: LanguageModelV1FinishReason;
580
+ usage: TokenUsage;
581
+ warnings: LanguageModelV1CallWarning[] | undefined;
582
+ });
583
+ }
584
+
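A usage sketch of `experimental_generateText` with a single tool, following the signature above (the `model` is again any provider-supplied `LanguageModelV1`):

```ts
import { experimental_generateText, tool } from 'ai';
import { LanguageModelV1 } from '@ai-sdk/provider';
import { z } from 'zod';

async function answerWithWeather(model: LanguageModelV1) {
  const { text, toolCalls, toolResults } = await experimental_generateText({
    model,
    tools: {
      weather: tool({
        description: 'Get the weather in a location.',
        parameters: z.object({ location: z.string() }),
        execute: async ({ location }) => ({ location, temperatureC: 18 }),
      }),
    },
    prompt: 'What is the weather in Berlin?',
  });

  // toolCalls / toolResults are typed per tool name and parameters schema.
  return { text, toolCalls, toolResults };
}
```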
585
+ /**
586
+ Generate a text and call tools for a given prompt using a language model.
587
+
588
+ This function streams the output. If you do not want to stream the output, use `experimental_generateText` instead.
589
+
590
+ @param model - The language model to use.
591
+ @param tools - The tools that the model can call. The model needs to support calling tools.
592
+
593
+ @param system - A system message that will be part of the prompt.
594
+ @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
595
+ @param messages - A list of messages. You can either use `prompt` or `messages` but not both.
596
+
597
+ @param maxTokens - Maximum number of tokens to generate.
598
+ @param temperature - Temperature setting.
599
+ This is a number between 0 (almost no randomness) and 1 (very random).
600
+ It is recommended to set either `temperature` or `topP`, but not both.
601
+ @param topP - Nucleus sampling. This is a number between 0 and 1.
602
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
603
+ It is recommended to set either `temperature` or `topP`, but not both.
604
+ @param presencePenalty - Presence penalty setting.
605
+ It affects the likelihood of the model to repeat information that is already in the prompt.
606
+ The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
607
+ 0 means no penalty.
608
+ @param frequencyPenalty - Frequency penalty setting.
609
+ It affects the likelihood of the model to repeatedly use the same words or phrases.
610
+ The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
611
+ 0 means no penalty.
612
+ @param seed - The seed (integer) to use for random sampling.
613
+ If set and supported by the model, calls will generate deterministic results.
614
+
615
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
616
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
617
+
618
+ @return
619
+ A result object for accessing different stream types and additional information.
620
+ */
621
+ declare function experimental_streamText<TOOLS extends Record<string, ExperimentalTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
622
+ /**
623
+ The language model to use.
624
+ */
625
+ model: LanguageModelV1;
626
+ /**
627
+ The tools that the model can call. The model needs to support calling tools.
628
+ */
629
+ tools?: TOOLS;
630
+ }): Promise<StreamTextResult<TOOLS>>;
631
+ type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> = {
632
+ type: 'text-delta';
633
+ textDelta: string;
634
+ } | ({
635
+ type: 'tool-call';
636
+ } & ToToolCall<TOOLS>) | {
637
+ type: 'error';
638
+ error: unknown;
639
+ } | ({
640
+ type: 'tool-result';
641
+ } & ToToolResult<TOOLS>) | {
642
+ type: 'finish';
643
+ finishReason: LanguageModelV1FinishReason;
644
+ usage: {
645
+ promptTokens: number;
646
+ completionTokens: number;
647
+ totalTokens: number;
648
+ };
649
+ };
650
+ /**
651
+ A result object for accessing different stream types and additional information.
652
+ */
653
+ declare class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
654
+ private readonly originalStream;
655
+ /**
656
+ Warnings from the model provider (e.g. unsupported settings)
657
+ */
658
+ readonly warnings: LanguageModelV1CallWarning[] | undefined;
659
+ constructor({ stream, warnings, }: {
660
+ stream: ReadableStream<TextStreamPart<TOOLS>>;
661
+ warnings: LanguageModelV1CallWarning[] | undefined;
662
+ });
663
+ /**
664
+ A text stream that returns only the generated text deltas. You can use it
665
+ as either an AsyncIterable or a ReadableStream. When an error occurs, the
666
+ stream will throw the error.
667
+ */
668
+ get textStream(): AsyncIterableStream<string>;
669
+ /**
670
+ A stream with all events, including text deltas, tool calls, tool results, and
671
+ errors.
672
+ You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
673
+ stream will throw the error.
674
+ */
675
+ get fullStream(): AsyncIterableStream<TextStreamPart<TOOLS>>;
676
+ /**
677
+ Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
678
+ It can be used with the `useChat` and `useCompletion` hooks.
679
+
680
+ @param callbacks
681
+ Stream callbacks that will be called when the stream emits events.
682
+
683
+ @returns an `AIStream` object.
684
+ */
685
+ toAIStream(callbacks?: AIStreamCallbacksAndOptions): ReadableStream<any>;
686
+ /**
687
+ Creates a simple text stream response.
688
+ Each text delta is encoded as UTF-8 and sent as a separate chunk.
689
+ Non-text-delta events are ignored.
690
+ */
691
+ toTextStreamResponse(init?: ResponseInit): Response;
692
+ }
693
+
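And a sketch of `experimental_streamText`, consuming the text deltas as an async iterable (alternatives such as `toAIStream()` and `toTextStreamResponse()` are listed on the class above):

```ts
import { experimental_streamText } from 'ai';
import { LanguageModelV1 } from '@ai-sdk/provider';

async function streamStory(model: LanguageModelV1) {
  const result = await experimental_streamText({
    model,
    prompt: 'Write a two-sentence story about a lighthouse.',
  });

  // textStream yields plain text deltas; stream errors are thrown here.
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
}
```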
694
+ interface FunctionCall {
695
+ /**
696
+ * The arguments to call the function with, as generated by the model in JSON
697
+ * format. Note that the model does not always generate valid JSON, and may
698
+ * hallucinate parameters not defined by your function schema. Validate the
699
+ * arguments in your code before calling your function.
700
+ */
701
+ arguments?: string;
702
+ /**
703
+ * The name of the function to call.
704
+ */
705
+ name?: string;
706
+ }
707
+ /**
708
+ * The tool calls generated by the model, such as function calls.
709
+ */
710
+ interface ToolCall {
711
+ id: string;
712
+ type: string;
713
+ function: {
714
+ name: string;
715
+ arguments: string;
716
+ };
717
+ }
718
+ /**
719
+ * Controls which (if any) function is called by the model.
720
+ * - none means the model will not call a function and instead generates a message.
721
+ * - auto means the model can pick between generating a message or calling a function.
722
+ * - Specifying a particular function via {"type": "function", "function": {"name": "my_function"}} forces the model to call that function.
723
+ * none is the default when no functions are present. auto is the default if functions are present.
724
+ */
725
+ type ToolChoice = 'none' | 'auto' | {
726
+ type: 'function';
727
+ function: {
728
+ name: string;
729
+ };
730
+ };
731
+ /**
732
+ * A list of tools the model may call. Currently, only functions are supported as a tool.
733
+ * Use this to provide a list of functions the model may generate JSON inputs for.
734
+ */
735
+ interface Tool {
736
+ type: 'function';
737
+ function: Function;
738
+ }
739
+ interface Function {
740
+ /**
741
+ * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
742
+ * underscores and dashes, with a maximum length of 64.
743
+ */
744
+ name: string;
745
+ /**
746
+ * The parameters the functions accepts, described as a JSON Schema object. See the
747
+ * [guide](/docs/guides/gpt/function-calling) for examples, and the
748
+ * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
749
+ * documentation about the format.
750
+ *
751
+ * To describe a function that accepts no parameters, provide the value
752
+ * `{"type": "object", "properties": {}}`.
753
+ */
754
+ parameters: Record<string, unknown>;
755
+ /**
756
+ * A description of what the function does, used by the model to choose when and
757
+ * how to call the function.
758
+ */
759
+ description?: string;
760
+ }
761
+ type IdGenerator = () => string;
762
+ /**
763
+ * Shared types between the API and UI packages.
764
+ */
765
+ interface Message$1 {
766
+ id: string;
767
+ tool_call_id?: string;
768
+ createdAt?: Date;
769
+ content: string;
770
+ ui?: string | JSX.Element | JSX.Element[] | null | undefined;
771
+ role: 'system' | 'user' | 'assistant' | 'function' | 'data' | 'tool';
772
+ /**
773
+ * If the message has a role of `function`, the `name` field is the name of the function.
774
+ * Otherwise, the name field should not be set.
775
+ */
776
+ name?: string;
777
+ /**
778
+ * If the assistant role makes a function call, the `function_call` field
779
+ * contains the function call name and arguments. Otherwise, the field should
780
+ * not be set. (Deprecated and replaced by tool_calls.)
781
+ */
782
+ function_call?: string | FunctionCall;
783
+ data?: JSONValue;
784
+ /**
785
+ * If the assistant role makes a tool call, the `tool_calls` field contains
786
+ * the tool call name and arguments. Otherwise, the field should not be set.
787
+ */
788
+ tool_calls?: string | ToolCall[];
789
+ /**
790
+ * Additional message-specific information added on the server via StreamData
791
+ */
792
+ annotations?: JSONValue[] | undefined;
793
+ }
794
+ type CreateMessage = Omit<Message$1, 'id'> & {
795
+ id?: Message$1['id'];
796
+ };
797
+ type ChatRequest = {
798
+ messages: Message$1[];
799
+ options?: RequestOptions;
800
+ functions?: Array<Function>;
801
+ function_call?: FunctionCall;
802
+ data?: Record<string, string>;
803
+ tools?: Array<Tool>;
804
+ tool_choice?: ToolChoice;
805
+ };
806
+ type FunctionCallHandler = (chatMessages: Message$1[], functionCall: FunctionCall) => Promise<ChatRequest | void>;
807
+ type ToolCallHandler = (chatMessages: Message$1[], toolCalls: ToolCall[]) => Promise<ChatRequest | void>;
808
+ type RequestOptions = {
809
+ headers?: Record<string, string> | Headers;
810
+ body?: object;
811
+ };
812
+ type ChatRequestOptions = {
813
+ options?: RequestOptions;
814
+ functions?: Array<Function>;
815
+ function_call?: FunctionCall;
816
+ tools?: Array<Tool>;
817
+ tool_choice?: ToolChoice;
818
+ data?: Record<string, string>;
819
+ };
820
+ type UseChatOptions = {
821
+ /**
822
+ * The API endpoint that accepts a `{ messages: Message[] }` object and returns
823
+ * a stream of tokens of the AI chat response. Defaults to `/api/chat`.
824
+ */
825
+ api?: string;
826
+ /**
827
+ * A unique identifier for the chat. If not provided, a random one will be
828
+ * generated. When provided, the `useChat` hook with the same `id` will
829
+ * have shared states across components.
830
+ */
831
+ id?: string;
832
+ /**
833
+ * Initial messages of the chat. Useful to load an existing chat history.
834
+ */
835
+ initialMessages?: Message$1[];
836
+ /**
837
+ * Initial input of the chat.
838
+ */
839
+ initialInput?: string;
840
+ /**
841
+ * Callback function to be called when a function call is received.
842
+ * If the function returns a `ChatRequest` object, the request will be sent
843
+ * automatically to the API and will be used to update the chat.
844
+ */
845
+ experimental_onFunctionCall?: FunctionCallHandler;
846
+ /**
847
+ * Callback function to be called when a tool call is received.
848
+ * If the function returns a `ChatRequest` object, the request will be sent
849
+ * automatically to the API and will be used to update the chat.
850
+ */
851
+ experimental_onToolCall?: ToolCallHandler;
852
+ /**
853
+ * Callback function to be called when the API response is received.
854
+ */
855
+ onResponse?: (response: Response) => void | Promise<void>;
856
+ /**
857
+ * Callback function to be called when the chat is finished streaming.
858
+ */
859
+ onFinish?: (message: Message$1) => void;
860
+ /**
861
+ * Callback function to be called when an error is encountered.
862
+ */
863
+ onError?: (error: Error) => void;
864
+ /**
865
+ * A function that generates ids for the messages.
866
+ * If not provided, nanoid is used by default.
867
+ */
868
+ generateId?: IdGenerator;
869
+ /**
870
+ * The credentials mode to be used for the fetch request.
871
+ * Possible values are: 'omit', 'same-origin', 'include'.
872
+ * Defaults to 'same-origin'.
873
+ */
874
+ credentials?: RequestCredentials;
875
+ /**
876
+ * HTTP headers to be sent with the API request.
877
+ */
878
+ headers?: Record<string, string> | Headers;
879
+ /**
880
+ * Extra body object to be sent with the API request.
881
+ * @example
882
+ * Send a `sessionId` to the API along with the messages.
883
+ * ```js
884
+ * useChat({
885
+ * body: {
886
+ * sessionId: '123',
887
+ * }
888
+ * })
889
+ * ```
890
+ */
891
+ body?: object;
892
+ /**
893
+ * Whether to send extra message fields such as `message.id` and `message.createdAt` to the API.
894
+ * Defaults to `false`. When set to `true`, the API endpoint might need to
895
+ * handle the extra fields before forwarding the request to the AI service.
896
+ */
897
+ sendExtraMessageFields?: boolean;
898
+ /** Stream mode (defaults to "stream-data") */
899
+ streamMode?: 'stream-data' | 'text';
900
+ };
901
+ type UseCompletionOptions = {
902
+ /**
903
+ * The API endpoint that accepts a `{ prompt: string }` object and returns
904
+ * a stream of tokens of the AI completion response. Defaults to `/api/completion`.
905
+ */
906
+ api?: string;
907
+ /**
908
+ * A unique identifier for the completion. If not provided, a random one will be
909
+ * generated. When provided, the `useCompletion` hook with the same `id` will
910
+ * have shared states across components.
911
+ */
912
+ id?: string;
913
+ /**
914
+ * Initial prompt input of the completion.
915
+ */
916
+ initialInput?: string;
917
+ /**
918
+ * Initial completion result. Useful to load an existing history.
919
+ */
920
+ initialCompletion?: string;
921
+ /**
922
+ * Callback function to be called when the API response is received.
923
+ */
924
+ onResponse?: (response: Response) => void | Promise<void>;
925
+ /**
926
+ * Callback function to be called when the completion is finished streaming.
927
+ */
928
+ onFinish?: (prompt: string, completion: string) => void;
929
+ /**
930
+ * Callback function to be called when an error is encountered.
931
+ */
932
+ onError?: (error: Error) => void;
933
+ /**
934
+ * The credentials mode to be used for the fetch request.
935
+ * Possible values are: 'omit', 'same-origin', 'include'.
936
+ * Defaults to 'same-origin'.
937
+ */
938
+ credentials?: RequestCredentials;
939
+ /**
940
+ * HTTP headers to be sent with the API request.
941
+ */
942
+ headers?: Record<string, string> | Headers;
943
+ /**
944
+ * Extra body object to be sent with the API request.
945
+ * @example
946
+ * Send a `sessionId` to the API along with the prompt.
947
+ * ```js
948
+ * useCompletion({
949
+ * body: {
950
+ * sessionId: '123',
951
+ * }
952
+ * })
953
+ * ```
954
+ */
955
+ body?: object;
956
+ /** Stream mode (defaults to "stream-data") */
957
+ streamMode?: 'stream-data' | 'text';
958
+ };
959
+ type JSONValue = null | string | number | boolean | {
960
+ [x: string]: JSONValue;
961
+ } | Array<JSONValue>;
962
+ type AssistantMessage = {
963
+ id: string;
964
+ role: 'assistant';
965
+ content: Array<{
966
+ type: 'text';
967
+ text: {
968
+ value: string;
969
+ };
970
+ }>;
971
+ };
972
+ type DataMessage = {
973
+ id?: string;
974
+ role: 'data';
975
+ data: JSONValue;
976
+ };
977
+
978
+ interface StreamPart<CODE extends string, NAME extends string, TYPE> {
979
+ code: CODE;
980
+ name: NAME;
981
+ parse: (value: JSONValue) => {
982
+ type: NAME;
983
+ value: TYPE;
984
+ };
985
+ }
986
+ declare const textStreamPart: StreamPart<'0', 'text', string>;
987
+ declare const functionCallStreamPart: StreamPart<'1', 'function_call', {
988
+ function_call: FunctionCall;
989
+ }>;
990
+ declare const dataStreamPart: StreamPart<'2', 'data', Array<JSONValue>>;
991
+ declare const errorStreamPart: StreamPart<'3', 'error', string>;
992
+ declare const assistantMessageStreamPart: StreamPart<'4', 'assistant_message', AssistantMessage>;
993
+ declare const assistantControlDataStreamPart: StreamPart<'5', 'assistant_control_data', {
994
+ threadId: string;
995
+ messageId: string;
996
+ }>;
997
+ declare const dataMessageStreamPart: StreamPart<'6', 'data_message', DataMessage>;
998
+ declare const toolCallStreamPart: StreamPart<'7', 'tool_calls', {
999
+ tool_calls: ToolCall[];
1000
+ }>;
1001
+ declare const messageAnnotationsStreamPart: StreamPart<'8', 'message_annotations', Array<JSONValue>>;
1002
+ type StreamParts = typeof textStreamPart | typeof functionCallStreamPart | typeof dataStreamPart | typeof errorStreamPart | typeof assistantMessageStreamPart | typeof assistantControlDataStreamPart | typeof dataMessageStreamPart | typeof toolCallStreamPart | typeof messageAnnotationsStreamPart;
1003
+ /**
1004
+ * Maps the type of a stream part to its value type.
1005
+ */
1006
+ type StreamPartValueType = {
1007
+ [P in StreamParts as P['name']]: ReturnType<P['parse']>['value'];
1008
+ };
1009
+ type StreamPartType = ReturnType<typeof textStreamPart.parse> | ReturnType<typeof functionCallStreamPart.parse> | ReturnType<typeof dataStreamPart.parse> | ReturnType<typeof errorStreamPart.parse> | ReturnType<typeof assistantMessageStreamPart.parse> | ReturnType<typeof assistantControlDataStreamPart.parse> | ReturnType<typeof dataMessageStreamPart.parse> | ReturnType<typeof toolCallStreamPart.parse> | ReturnType<typeof messageAnnotationsStreamPart.parse>;
1010
+ /**
1011
+ * The map of prefixes for data in the stream
1012
+ *
1013
+ * - 0: Text from the LLM response
1014
+ * - 1: (OpenAI) function_call responses
1015
+ * - 2: custom JSON added by the user using `Data`
1016
+ * - 7: (OpenAI) tool_call responses
1017
+ *
1018
+ * Example:
1019
+ * ```
1020
+ * 0:Vercel
1021
+ * 0:'s
1022
+ * 0: AI
1023
+ * 0: AI
1024
+ * 0: SDK
1025
+ * 0: is great
1026
+ * 0:!
1027
+ * 2: { "someJson": "value" }
1028
+ * 1: {"function_call": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}
1029
+ * 6: {"tool_call": {"id": "tool_0", "type": "function", "function": {"name": "get_current_weather", "arguments": "{\\n\\"location\\": \\"Charlottesville, Virginia\\",\\n\\"format\\": \\"celsius\\"\\n}"}}}
1030
+ *```
1031
+ */
1032
+ declare const StreamStringPrefixes: {
1033
+ readonly text: "0";
1034
+ readonly function_call: "1";
1035
+ readonly data: "2";
1036
+ readonly error: "3";
1037
+ readonly assistant_message: "4";
1038
+ readonly assistant_control_data: "5";
1039
+ readonly data_message: "6";
1040
+ readonly tool_calls: "7";
1041
+ readonly message_annotations: "8";
1042
+ };
1043
+ /**
1044
+ Parses a stream part from a string.
1045
+
1046
+ @param line The string to parse.
1047
+ @returns The parsed stream part.
1048
+ @throws An error if the string cannot be parsed.
1049
+ */
1050
+ declare const parseStreamPart: (line: string) => StreamPartType;
1051
+ /**
1052
+ Prepends a string with a prefix from the `StreamStringPrefixes`, JSON-ifies it,
1053
+ and appends a new line.
1054
+
1055
+ It ensures type-safety for the part type and value.
1056
+ */
1057
+ declare function formatStreamPart<T extends keyof StreamPartValueType>(type: T, value: StreamPartValueType[T]): StreamString;
1058
+
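Based on the prefix map and the description above, a round trip through the stream-part protocol looks roughly like this; the exact wire strings are inferred from "prefix + JSON + newline" and shown as assumptions:

```ts
import { formatStreamPart, parseStreamPart } from 'ai';

// Encode: prefix, ':', JSON value, '\n'  ->  expected shape: '0:"Hello"\n'
const line = formatStreamPart('text', 'Hello');

// Decode back into a discriminated { type, value } object.
const part = parseStreamPart(line);
// part: { type: 'text', value: 'Hello' }
```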
1059
+ /**
1060
+ * Generates a 7-character random string to use for IDs. Not secure.
1061
+ */
1062
+ declare const generateId: (size?: number | undefined) => string;
1063
+
1064
+ /**
1065
+ Converts a ReadableStreamDefaultReader into an async generator that yields
1066
+ StreamPart objects.
1067
+
1068
+ @param reader
1069
+ Reader for the stream to read from.
1070
+ @param isAborted
1071
+ Optional function that returns true if the request has been aborted.
1072
+ If the function returns true, the generator will stop reading the stream.
1073
+ If the function is not provided, the generator will not stop reading the stream.
1074
+ */
1075
+ declare function readDataStream(reader: ReadableStreamDefaultReader<Uint8Array>, { isAborted, }?: {
1076
+ isAborted?: () => boolean;
1077
+ }): AsyncGenerator<StreamPartType>;
1078
+
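A sketch of driving `readDataStream` from a `fetch` response body; the `/api/chat` endpoint and request shape are hypothetical:

```ts
import { readDataStream } from 'ai';

async function consumeDataStream() {
  const response = await fetch('/api/chat', {
    method: 'POST',
    body: JSON.stringify({ messages: [] }), // hypothetical request shape
  });
  const reader = response.body!.getReader();

  for await (const part of readDataStream(reader, { isAborted: () => false })) {
    if (part.type === 'text') {
      process.stdout.write(part.value);
    }
  }
}
```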
1079
+ declare function createChunkDecoder(): (chunk: Uint8Array | undefined) => string;
1080
+ declare function createChunkDecoder(complex: false): (chunk: Uint8Array | undefined) => string;
1081
+ declare function createChunkDecoder(complex: true): (chunk: Uint8Array | undefined) => StreamPartType[];
1082
+ declare function createChunkDecoder(complex?: boolean): (chunk: Uint8Array | undefined) => StreamPartType[] | string;
1083
+
1084
+ declare const isStreamStringEqualToType: (type: keyof typeof StreamStringPrefixes, value: string) => value is `0:${string}\n` | `1:${string}\n` | `2:${string}\n` | `3:${string}\n` | `4:${string}\n` | `5:${string}\n` | `6:${string}\n` | `7:${string}\n` | `8:${string}\n`;
1085
+ type StreamString = `${(typeof StreamStringPrefixes)[keyof typeof StreamStringPrefixes]}:${string}\n`;
1086
+
1087
+ declare interface AzureChatCompletions {
1088
+ id: string;
1089
+ created: Date;
1090
+ choices: AzureChatChoice[];
1091
+ systemFingerprint?: string;
1092
+ usage?: AzureCompletionsUsage;
1093
+ promptFilterResults: any[];
1094
+ }
1095
+ declare interface AzureChatChoice {
1096
+ message?: AzureChatResponseMessage;
1097
+ index: number;
1098
+ finishReason: string | null;
1099
+ delta?: AzureChatResponseMessage;
1100
+ }
1101
+ declare interface AzureChatResponseMessage {
1102
+ role: string;
1103
+ content: string | null;
1104
+ toolCalls: AzureChatCompletionsFunctionToolCall[];
1105
+ functionCall?: AzureFunctionCall;
1106
+ }
1107
+ declare interface AzureCompletionsUsage {
1108
+ completionTokens: number;
1109
+ promptTokens: number;
1110
+ totalTokens: number;
1111
+ }
1112
+ declare interface AzureFunctionCall {
1113
+ name: string;
1114
+ arguments: string;
1115
+ }
1116
+ declare interface AzureChatCompletionsFunctionToolCall {
1117
+ type: 'function';
1118
+ function: AzureFunctionCall;
1119
+ id: string;
1120
+ }
1121
+
1122
+ type OpenAIStreamCallbacks = AIStreamCallbacksAndOptions & {
1123
+ /**
1124
+ * @example
1125
+ * ```js
1126
+ * const response = await openai.chat.completions.create({
1127
+ * model: 'gpt-3.5-turbo-0613',
1128
+ * stream: true,
1129
+ * messages,
1130
+ * functions,
1131
+ * })
1132
+ *
1133
+ * const stream = OpenAIStream(response, {
1134
+ * experimental_onFunctionCall: async (functionCallPayload, createFunctionCallMessages) => {
1135
+ * // ... run your custom logic here
1136
+ * const result = await myFunction(functionCallPayload)
1137
+ *
1138
+ * // Ask for another completion, or return a string to send to the client as an assistant message.
1139
+ * return await openai.chat.completions.create({
1140
+ * model: 'gpt-3.5-turbo-0613',
1141
+ * stream: true,
1142
+ * // Append the relevant "assistant" and "function" call messages
1143
+ * messages: [...messages, ...createFunctionCallMessages(result)],
1144
+ * functions,
1145
+ * })
1146
+ * }
1147
+ * })
1148
+ * ```
1149
+ */
1150
+ experimental_onFunctionCall?: (functionCallPayload: FunctionCallPayload, createFunctionCallMessages: (functionCallResult: JSONValue) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
1151
+ /**
1152
+ * @example
1153
+ * ```js
1154
+ * const response = await openai.chat.completions.create({
1155
+ * model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
1156
+ * stream: true,
1157
+ * messages,
1158
+ * tools,
1159
+ * tool_choice: "auto", // auto is default, but we'll be explicit
1160
+ * })
1161
+ *
1162
+ * const stream = OpenAIStream(response, {
1163
+ * experimental_onToolCall: async (toolCallPayload, appendToolCallMessages) => {
1164
+ * let messages: CreateMessage[] = []
1165
+ * // There might be multiple tool calls, so we need to iterate through them
1166
+ * for (const tool of toolCallPayload.tools) {
1167
+ * // ... run your custom logic here
1168
+ * const result = await myFunction(tool.function)
1169
+ * // Append the relevant "assistant" and "tool" call messages
1170
+ * appendToolCallMessage({tool_call_id:tool.id, function_name:tool.function.name, tool_call_result:result})
1171
+ * }
1172
+ * // Ask for another completion, or return a string to send to the client as an assistant message.
1173
+ * return await openai.chat.completions.create({
1174
+ * model: 'gpt-3.5-turbo-1106', // or gpt-4-1106-preview
1175
+ * stream: true,
1176
+ * // Append the results messages, calling appendToolCallMessage without
1177
+ * // any arguments will just return the accumulated messages
1178
+ * messages: [...messages, ...appendToolCallMessage()],
1179
+ * tools,
1180
+ * tool_choice: "auto", // auto is default, but we'll be explicit
1181
+ * })
1182
+ * }
1183
+ * })
1184
+ * ```
1185
+ */
1186
+ experimental_onToolCall?: (toolCallPayload: ToolCallPayload, appendToolCallMessage: (result?: {
1187
+ tool_call_id: string;
1188
+ function_name: string;
1189
+ tool_call_result: JSONValue;
1190
+ }) => CreateMessage[]) => Promise<Response | undefined | void | string | AsyncIterableOpenAIStreamReturnTypes>;
1191
+ };
1192
+ interface ChatCompletionChunk {
1193
+ id: string;
1194
+ choices: Array<ChatCompletionChunkChoice>;
1195
+ created: number;
1196
+ model: string;
1197
+ object: string;
1198
+ }
1199
+ interface ChatCompletionChunkChoice {
1200
+ delta: ChoiceDelta;
1201
+ finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
1202
+ index: number;
1203
+ }
1204
+ interface ChoiceDelta {
1205
+ /**
1206
+ * The contents of the chunk message.
1207
+ */
1208
+ content?: string | null;
1209
+ /**
1210
+ * The name and arguments of a function that should be called, as generated by the
1211
+ * model.
1212
+ */
1213
+ function_call?: FunctionCall;
1214
+ /**
1215
+ * The role of the author of this message.
1216
+ */
1217
+ role?: 'system' | 'user' | 'assistant' | 'tool';
1218
+ tool_calls?: Array<DeltaToolCall>;
1219
+ }
1220
+ interface DeltaToolCall {
1221
+ index: number;
1222
+ /**
1223
+ * The ID of the tool call.
1224
+ */
1225
+ id?: string;
1226
+ /**
1227
+ * The function that the model called.
1228
+ */
1229
+ function?: ToolCallFunction;
1230
+ /**
1231
+ * The type of the tool. Currently, only `function` is supported.
1232
+ */
1233
+ type?: 'function';
1234
+ }
1235
+ interface ToolCallFunction {
1236
+ /**
1237
+ * The arguments to call the function with, as generated by the model in JSON
1238
+ * format. Note that the model does not always generate valid JSON, and may
1239
+ * hallucinate parameters not defined by your function schema. Validate the
1240
+ * arguments in your code before calling your function.
1241
+ */
1242
+ arguments?: string;
1243
+ /**
1244
+ * The name of the function to call.
1245
+ */
1246
+ name?: string;
1247
+ }
1248
+ /**
1249
+ * https://github.com/openai/openai-node/blob/3ec43ee790a2eb6a0ccdd5f25faa23251b0f9b8e/src/resources/completions.ts#L28C1-L64C1
1250
+ * Completions API. Streamed and non-streamed responses are the same.
1251
+ */
1252
+ interface Completion {
1253
+ /**
1254
+ * A unique identifier for the completion.
1255
+ */
1256
+ id: string;
1257
+ /**
1258
+ * The list of completion choices the model generated for the input prompt.
1259
+ */
1260
+ choices: Array<CompletionChoice>;
1261
+ /**
1262
+ * The Unix timestamp of when the completion was created.
1263
+ */
1264
+ created: number;
1265
+ /**
1266
+ * The model used for completion.
1267
+ */
1268
+ model: string;
1269
+ /**
1270
+ * The object type, which is always "text_completion"
1271
+ */
1272
+ object: string;
1273
+ /**
1274
+ * Usage statistics for the completion request.
1275
+ */
1276
+ usage?: CompletionUsage;
1277
+ }
1278
+ interface CompletionChoice {
1279
+ /**
1280
+ * The reason the model stopped generating tokens. This will be `stop` if the model
1281
+ * hit a natural stop point or a provided stop sequence, or `length` if the maximum
1282
+ * number of tokens specified in the request was reached.
1283
+ */
1284
+ finish_reason: 'stop' | 'length' | 'content_filter';
1285
+ index: number;
1286
+ logprobs: any | null;
1287
+ text: string;
1288
+ }
1289
+ interface CompletionUsage {
1290
+ /**
1291
+ * Usage statistics for the completion request.
1292
+ */
1293
+ /**
1294
+ * Number of tokens in the generated completion.
1295
+ */
1296
+ completion_tokens: number;
1297
+ /**
1298
+ * Number of tokens in the prompt.
1299
+ */
1300
+ prompt_tokens: number;
1301
+ /**
1302
+ * Total number of tokens used in the request (prompt + completion).
1303
+ */
1304
+ total_tokens: number;
1305
+ }
1306
+ type AsyncIterableOpenAIStreamReturnTypes = AsyncIterable<ChatCompletionChunk> | AsyncIterable<Completion> | AsyncIterable<AzureChatCompletions>;
1307
+ declare function OpenAIStream(res: Response | AsyncIterableOpenAIStreamReturnTypes, callbacks?: OpenAIStreamCallbacks): ReadableStream;
1308
+
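Putting `OpenAIStream` together with the callbacks documented above, a minimal route handler might look like this; `StreamingTextResponse` is assumed to be exported elsewhere in this package (it is referenced by `toAIStream` above):

```ts
import OpenAI from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai'; // StreamingTextResponse: assumed export

const client = new OpenAI();

export async function POST(req: Request) {
  const { messages } = await req.json();

  const response = await client.chat.completions.create({
    model: 'gpt-3.5-turbo',
    stream: true,
    messages,
  });

  // Convert the OpenAI SDK stream into a ReadableStream of encoded parts.
  const stream = OpenAIStream(response, {
    onFinal: completion => console.log('final completion:', completion),
  });

  return new StreamingTextResponse(stream);
}
```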
1309
+ interface FunctionCallPayload {
1310
+ name: string;
1311
+ arguments: Record<string, unknown>;
1312
+ }
1313
+ interface ToolCallPayload {
1314
+ tools: {
1315
+ id: string;
1316
+ type: 'function';
1317
+ func: {
1318
+ name: string;
1319
+ arguments: Record<string, unknown>;
1320
+ };
1321
+ }[];
1322
+ }
1323
+ /**
1324
+ * Configuration options and helper callback methods for AIStream stream lifecycle events.
1325
+ * @interface
1326
+ */
1327
+ interface AIStreamCallbacksAndOptions {
1328
+ /** `onStart`: Called once when the stream is initialized. */
1329
+ onStart?: () => Promise<void> | void;
1330
+ /** `onCompletion`: Called for each completion. It can occur multiple times when using e.g. OpenAI functions. */
1331
+ onCompletion?: (completion: string) => Promise<void> | void;
1332
+ /** `onFinal`: Called once when the stream is closed with the final completion message. */
1333
+ onFinal?: (completion: string) => Promise<void> | void;
1334
+ /** `onToken`: Called for each tokenized message. */
1335
+ onToken?: (token: string) => Promise<void> | void;
1336
+ /** `onText`: Called for each text chunk. */
1337
+ onText?: (text: string) => Promise<void> | void;
1338
+ /**
1339
+ * @deprecated This flag is no longer used and only retained for backwards compatibility.
1340
+ * You can remove it from your code.
1341
+ */
1342
+ experimental_streamData?: boolean;
1343
+ }
1344
+ /**
1345
+ * Options for the AIStreamParser.
1346
+ * @interface
1347
+ * @property {string} event - The event (type) from the server side event stream.
1348
+ */
1349
+ interface AIStreamParserOptions {
1350
+ event?: string;
1351
+ }
1352
+ /**
1353
+ * Custom parser for AIStream data.
1354
+ * @interface
1355
+ * @param {string} data - The data to be parsed.
1356
+ * @param {AIStreamParserOptions} options - The options for the parser.
1357
+ * @returns {string | void} The parsed data or void.
1358
+ */
1359
+ interface AIStreamParser {
1360
+ (data: string, options: AIStreamParserOptions): string | void | {
1361
+ isText: false;
1362
+ content: string;
1363
+ };
1364
+ }
1365
+ /**
1366
+ * Creates a TransformStream that parses events from an EventSource stream using a custom parser.
1367
+ * @param {AIStreamParser} customParser - Function to handle event data.
1368
+ * @returns {TransformStream<Uint8Array, string>} TransformStream parsing events.
1369
+ */
1370
+ declare function createEventStreamTransformer(customParser?: AIStreamParser): TransformStream<Uint8Array, string | {
1371
+ isText: false;
1372
+ content: string;
1373
+ }>;
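A minimal sketch of a custom `AIStreamParser` fed into `createEventStreamTransformer`; the SSE payload shape (`{ "delta": ... }`) and the `done` event name are hypothetical, not any specific provider's format:

```ts
import { createEventStreamTransformer } from 'ai';
import type { AIStreamParser } from 'ai';

// Hypothetical SSE payload: each `data:` line carries { "delta": "..." }.
const parseDelta: AIStreamParser = (data, { event }) => {
  if (event === 'done') return; // hypothetical terminal event, ignored
  const json = JSON.parse(data) as { delta?: string };
  return json.delta ?? '';
};

// Pipe the raw byte stream of an SSE response through the transformer.
export function toTextStream(response: Response) {
  return response.body!.pipeThrough(createEventStreamTransformer(parseDelta));
}
```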
1374
+ /**
1375
+ * Creates a transform stream that encodes input messages and invokes optional callback functions.
1376
+ * The transform stream uses the provided callbacks to execute custom logic at different stages of the stream's lifecycle.
1377
+ * - `onStart`: Called once when the stream is initialized.
1378
+ * - `onToken`: Called for each tokenized message.
1379
+ * - `onCompletion`: Called every time an AIStream completion message is received. This can occur multiple times when using e.g. OpenAI functions
1380
+ * - `onFinal`: Called once when the stream is closed with the final completion message.
1381
+ *
1382
+ * This function is useful when you want to process a stream of messages and perform specific actions during the stream's lifecycle.
1383
+ *
1384
+ * @param {AIStreamCallbacksAndOptions} [callbacks] - An object containing the callback functions.
1385
+ * @return {TransformStream<string, Uint8Array>} A transform stream that encodes input messages as Uint8Array and allows the execution of custom logic through callbacks.
1386
+ *
1387
+ * @example
1388
+ * const callbacks = {
1389
+ * onStart: async () => console.log('Stream started'),
1390
+ * onToken: async (token) => console.log(`Token: ${token}`),
1391
+ * onCompletion: async (completion) => console.log(`Completion: ${completion}`),
1392
+ * onFinal: async () => data.close()
1393
+ * };
1394
+ * const transformer = createCallbacksTransformer(callbacks);
1395
+ */
1396
+ declare function createCallbacksTransformer(cb: AIStreamCallbacksAndOptions | OpenAIStreamCallbacks | undefined): TransformStream<string | {
1397
+ isText: false;
1398
+ content: string;
1399
+ }, Uint8Array>;
1400
+ /**
1401
+ * Returns a stateful function that, when invoked, trims leading whitespace
1402
+ * from the input text. The trimming only occurs on the first invocation, ensuring that
1403
+ * subsequent calls do not alter the input text. This is particularly useful in scenarios
1404
+ * where a text stream is being processed and only the initial whitespace should be removed.
1405
+ *
1406
+ * @return {function(string): string} A function that takes a string as input and returns a string
1407
+ * with leading whitespace removed if it is the first invocation; otherwise, it returns the input unchanged.
1408
+ *
1409
+ * @example
1410
+ * const trimStart = trimStartOfStreamHelper();
1411
+ * const output1 = trimStart(" text"); // "text"
1412
+ * const output2 = trimStart(" text"); // " text"
1413
+ *
1414
+ */
1415
+ declare function trimStartOfStreamHelper(): (text: string) => string;
1416
+ /**
1417
+ * Returns a ReadableStream created from the response, parsed and handled with custom logic.
1418
+ * The stream goes through two transformation stages, first parsing the events and then
1419
+ * invoking the provided callbacks.
1420
+ *
1421
+ * For 2xx HTTP responses:
1422
+ * - The function continues with standard stream processing.
1423
+ *
1424
+ * For non-2xx HTTP responses:
1425
+ * - If the response body is defined, it asynchronously extracts and decodes the response body.
1426
+ * - It then creates a custom ReadableStream to propagate a detailed error message.
1427
+ *
1428
+ * @param {Response} response - The response.
1429
+ * @param {AIStreamParser} customParser - The custom parser function.
1430
+ * @param {AIStreamCallbacksAndOptions} callbacks - The callbacks.
1431
+ * @return {ReadableStream} The AIStream.
1432
+ * @throws Will throw an error if the response is not OK.
1433
+ */
1434
+ declare function AIStream(response: Response, customParser?: AIStreamParser, callbacks?: AIStreamCallbacksAndOptions): ReadableStream<Uint8Array>;
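A sketch of `AIStream` in a route handler; the endpoint URL, request body, and payload shape are assumptions for illustration only:

```ts
import { AIStream, StreamingTextResponse } from 'ai';
import type { AIStreamParser } from 'ai';

// Hypothetical payload: each SSE data line carries { "text": "..." }.
const parser: AIStreamParser = (data) => {
  const json = JSON.parse(data) as { text?: string };
  return json.text ?? '';
};

export async function POST(req: Request) {
  const { prompt } = await req.json();

  // Hypothetical streaming endpoint; replace with a real provider URL.
  const response = await fetch('https://example.com/v1/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt, stream: true }),
  });

  const stream = AIStream(response, parser, {
    onFinal: async (completion) => console.log('final length:', completion.length),
  });

  return new StreamingTextResponse(stream);
}
```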
1435
+ /**
1436
+ * Implements ReadableStream.from(asyncIterable), which isn't documented in MDN and isn't implemented in Node.js.
1437
+ * https://github.com/whatwg/streams/commit/8d7a0bf26eb2cc23e884ddbaac7c1da4b91cf2bc
1438
+ */
1439
+ declare function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>): ReadableStream<T>;
1440
+
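A small sketch of `readableFromAsyncIterable` round-tripping an async generator through a web `ReadableStream`:

```ts
import { readableFromAsyncIterable } from 'ai';

// Illustrative async generator producing a few text chunks.
async function* chunks() {
  yield 'Hello, ';
  yield 'world!';
}

async function main() {
  const stream: ReadableStream<string> = readableFromAsyncIterable(chunks());

  // Read the stream back out to show it yields the generator's values in order.
  const reader = stream.getReader();
  for (let r = await reader.read(); !r.done; r = await reader.read()) {
    process.stdout.write(r.value);
  }
}

main();
```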
1441
+ interface CompletionChunk {
1442
+ /**
1443
+ * Unique object identifier.
1444
+ *
1445
+ * The format and length of IDs may change over time.
1446
+ */
1447
+ id: string;
1448
+ /**
1449
+ * The resulting completion up to and excluding the stop sequences.
1450
+ */
1451
+ completion: string;
1452
+ /**
1453
+ * The model that handled the request.
1454
+ */
1455
+ model: string;
1456
+ /**
1457
+ * The reason that we stopped.
1458
+ *
1459
+ * This may be one of the following values:
1460
+ *
1461
+ * - `"stop_sequence"`: we reached a stop sequence — either provided by you via the
1462
+ * `stop_sequences` parameter, or a stop sequence built into the model
1463
+ * - `"max_tokens"`: we exceeded `max_tokens_to_sample` or the model's maximum
1464
+ */
1465
+ stop_reason: string | null;
1466
+ /**
1467
+ * Object type.
1468
+ *
1469
+ * For Text Completions, this is always `"completion"`.
1470
+ */
1471
+ type: 'completion';
1472
+ }
1473
+ interface Message {
1474
+ id: string;
1475
+ content: Array<ContentBlock>;
1476
+ model: string;
1477
+ role: 'assistant';
1478
+ stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
1479
+ stop_sequence: string | null;
1480
+ type: 'message';
1481
+ }
1482
+ interface ContentBlock {
1483
+ text: string;
1484
+ type: 'text';
1485
+ }
1486
+ interface TextDelta {
1487
+ text: string;
1488
+ type: 'text_delta';
1489
+ }
1490
+ interface ContentBlockDeltaEvent {
1491
+ delta: TextDelta;
1492
+ index: number;
1493
+ type: 'content_block_delta';
1494
+ }
1495
+ interface ContentBlockStartEvent {
1496
+ content_block: ContentBlock;
1497
+ index: number;
1498
+ type: 'content_block_start';
1499
+ }
1500
+ interface ContentBlockStopEvent {
1501
+ index: number;
1502
+ type: 'content_block_stop';
1503
+ }
1504
+ interface MessageDeltaEventDelta {
1505
+ stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | null;
1506
+ stop_sequence: string | null;
1507
+ }
1508
+ interface MessageDeltaEvent {
1509
+ delta: MessageDeltaEventDelta;
1510
+ type: 'message_delta';
1511
+ }
1512
+ type MessageStreamEvent = MessageStartEvent | MessageDeltaEvent | MessageStopEvent | ContentBlockStartEvent | ContentBlockDeltaEvent | ContentBlockStopEvent;
1513
+ interface MessageStartEvent {
1514
+ message: Message;
1515
+ type: 'message_start';
1516
+ }
1517
+ interface MessageStopEvent {
1518
+ type: 'message_stop';
1519
+ }
1520
+ /**
1521
+ * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
1522
+ * or the return value of `await client.completions.create({ stream: true })`
1523
+ * from the `@anthropic-ai/sdk` package.
1524
+ */
1525
+ declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;
1526
+
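A sketch using the `@anthropic-ai/sdk` Messages API (the model name and route shape are illustrative); the streamed result is an async iterable of message stream events, which `AnthropicStream` converts into a text stream:

```ts
import Anthropic from '@anthropic-ai/sdk';
import { AnthropicStream, StreamingTextResponse } from 'ai';

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

export async function POST(req: Request) {
  const { messages } = await req.json();

  // Streamed Messages API call; the model name is illustrative.
  const response = await anthropic.messages.create({
    model: 'claude-3-haiku-20240307',
    max_tokens: 1024,
    stream: true,
    messages,
  });

  return new StreamingTextResponse(AnthropicStream(response));
}
```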
1527
+ /**
1528
+ You can pass the thread and the latest message into the `AssistantResponse`. This establishes the context for the response.
1529
+ */
1530
+ type AssistantResponseSettings = {
1531
+ /**
1532
+ The thread ID that the response is associated with.
1533
+ */
1534
+ threadId: string;
1535
+ /**
1536
+ The ID of the latest message that the response is associated with.
1537
+ */
1538
+ messageId: string;
1539
+ };
1540
+ /**
1541
+ The process parameter is a callback in which you can run the assistant on threads, and send messages and data messages to the client.
1542
+ */
1543
+ type AssistantResponseCallback = (options: {
1544
+ /**
1545
+ @deprecated use variable from outer scope instead.
1546
+ */
1547
+ threadId: string;
1548
+ /**
1549
+ @deprecated use variable from outer scope instead.
1550
+ */
1551
+ messageId: string;
1552
+ /**
1553
+ Forwards an assistant message (non-streaming) to the client.
1554
+ */
1555
+ sendMessage: (message: AssistantMessage) => void;
1556
+ /**
1557
+ Send a data message to the client. You can use this to provide information for rendering custom UIs while the assistant is processing the thread.
1558
+ */
1559
+ sendDataMessage: (message: DataMessage) => void;
1560
+ /**
1561
+ Forwards the assistant response stream to the client. Returns the `Run` object after it completes, or when it requires an action.
1562
+ */
1563
+ forwardStream: (stream: AssistantStream) => Promise<Run | undefined>;
1564
+ }) => Promise<void>;
1565
+ /**
1566
+ The `AssistantResponse` allows you to send a stream of assistant updates to `useAssistant`.
1567
+ It is designed to facilitate streaming assistant responses to the `useAssistant` hook.
1568
+ It receives an assistant thread and a current message, and can send messages and data messages to the client.
1569
+ */
1570
+ declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
1571
+ /**
1572
+ @deprecated Use `AssistantResponse` instead.
1573
+ */
1574
+ declare const experimental_AssistantResponse: typeof AssistantResponse;
1575
+
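A sketch of a route handler built on `AssistantResponse`, assuming the `openai` SDK's beta Assistants API; the request shape and the `ASSISTANT_ID` environment variable are placeholders:

```ts
import OpenAI from 'openai';
import { AssistantResponse } from 'ai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

export async function POST(req: Request) {
  // Placeholder request shape: { threadId?: string; message: string }.
  const input: { threadId?: string; message: string } = await req.json();

  const threadId = input.threadId ?? (await openai.beta.threads.create({})).id;

  const createdMessage = await openai.beta.threads.messages.create(threadId, {
    role: 'user',
    content: input.message,
  });

  return AssistantResponse(
    { threadId, messageId: createdMessage.id },
    async ({ forwardStream }) => {
      // Run the assistant on the thread and forward its stream to the client.
      const runStream = openai.beta.threads.runs.stream(threadId, {
        assistant_id: process.env.ASSISTANT_ID ?? '', // placeholder assistant ID
      });

      await forwardStream(runStream);
    },
  );
}
```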
1576
+ interface AWSBedrockResponse {
1577
+ body?: AsyncIterable<{
1578
+ chunk?: {
1579
+ bytes?: Uint8Array;
1580
+ };
1581
+ }>;
1582
+ }
1583
+ declare function AWSBedrockAnthropicMessagesStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1584
+ declare function AWSBedrockAnthropicStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1585
+ declare function AWSBedrockCohereStream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1586
+ declare function AWSBedrockLlama2Stream(response: AWSBedrockResponse, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1587
+ declare function AWSBedrockStream(response: AWSBedrockResponse, callbacks: AIStreamCallbacksAndOptions | undefined, extractTextDeltaFromChunk: (chunk: any) => string): ReadableStream<any>;
1588
+
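A sketch with the AWS Bedrock runtime client; the model ID, request body, and the chunk field read by the extractor are assumptions (the dedicated helpers above cover common models, while `AWSBedrockStream` takes a custom text-delta extractor):

```ts
import {
  BedrockRuntimeClient,
  InvokeModelWithResponseStreamCommand,
} from '@aws-sdk/client-bedrock-runtime';
import { AWSBedrockStream, StreamingTextResponse } from 'ai';

const client = new BedrockRuntimeClient({ region: 'us-east-1' });

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const response = await client.send(
    new InvokeModelWithResponseStreamCommand({
      modelId: 'anthropic.claude-v2', // illustrative model ID
      contentType: 'application/json',
      accept: 'application/json',
      body: JSON.stringify({ prompt, max_tokens_to_sample: 300 }),
    }),
  );

  // The extractor receives each decoded JSON chunk; `completion` is the text
  // field used by Anthropic completion models on Bedrock (adjust per model).
  const stream = AWSBedrockStream(response, undefined, chunk => chunk.completion);

  return new StreamingTextResponse(stream);
}
```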
1589
+ interface StreamChunk {
1590
+ text?: string;
1591
+ eventType: 'stream-start' | 'search-queries-generation' | 'search-results' | 'text-generation' | 'citation-generation' | 'stream-end';
1592
+ }
1593
+ declare function CohereStream(reader: Response | AsyncIterable<StreamChunk>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1594
+
1595
+ interface GenerateContentResponse {
1596
+ candidates?: GenerateContentCandidate[];
1597
+ }
1598
+ interface GenerateContentCandidate {
1599
+ index: number;
1600
+ content: Content;
1601
+ }
1602
+ interface Content {
1603
+ role: string;
1604
+ parts: Part[];
1605
+ }
1606
+ type Part = TextPart | InlineDataPart;
1607
+ interface InlineDataPart {
1608
+ text?: never;
1609
+ }
1610
+ interface TextPart {
1611
+ text: string;
1612
+ inlineData?: never;
1613
+ }
1614
+ declare function GoogleGenerativeAIStream(response: {
1615
+ stream: AsyncIterable<GenerateContentResponse>;
1616
+ }, cb?: AIStreamCallbacksAndOptions): ReadableStream;
1617
+
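A sketch with the `@google/generative-ai` SDK (model name illustrative); `generateContentStream` resolves to an object with a `stream` property, which is the shape this helper expects:

```ts
import { GoogleGenerativeAI } from '@google/generative-ai';
import { GoogleGenerativeAIStream, StreamingTextResponse } from 'ai';

const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY ?? '');

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const model = genAI.getGenerativeModel({ model: 'gemini-pro' }); // illustrative model

  // generateContentStream resolves to { stream, response }.
  const streamingResponse = await model.generateContentStream(prompt);

  return new StreamingTextResponse(GoogleGenerativeAIStream(streamingResponse));
}
```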
1618
+ declare function HuggingFaceStream(res: AsyncGenerator<any>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1619
+
1620
+ type InkeepOnFinalMetadata = {
1621
+ chat_session_id: string;
1622
+ records_cited: any;
1623
+ };
1624
+ type InkeepChatResultCallbacks = {
1625
+ onFinal?: (completion: string, metadata?: InkeepOnFinalMetadata) => Promise<void> | void;
1626
+ onRecordsCited?: (records_cited: InkeepOnFinalMetadata['records_cited']) => void;
1627
+ };
1628
+ type InkeepAIStreamCallbacksAndOptions = AIStreamCallbacksAndOptions & InkeepChatResultCallbacks;
1629
+ declare function InkeepStream(res: Response, callbacks?: InkeepAIStreamCallbacksAndOptions): ReadableStream;
1630
+
1631
+ declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
1632
+ stream: ReadableStream<any>;
1633
+ writer: WritableStreamDefaultWriter<any>;
1634
+ handlers: {
1635
+ handleLLMNewToken: (token: string) => Promise<void>;
1636
+ handleLLMStart: (_llm: any, _prompts: string[], runId: string) => Promise<void>;
1637
+ handleLLMEnd: (_output: any, runId: string) => Promise<void>;
1638
+ handleLLMError: (e: Error, runId: string) => Promise<void>;
1639
+ handleChainStart: (_chain: any, _inputs: any, runId: string) => Promise<void>;
1640
+ handleChainEnd: (_outputs: any, runId: string) => Promise<void>;
1641
+ handleChainError: (e: Error, runId: string) => Promise<void>;
1642
+ handleToolStart: (_tool: any, _input: string, runId: string) => Promise<void>;
1643
+ handleToolEnd: (_output: string, runId: string) => Promise<void>;
1644
+ handleToolError: (e: Error, runId: string) => Promise<void>;
1645
+ };
1646
+ };
1647
+
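A wiring sketch for `LangChainStream`: the returned `handlers` are registered as a LangChain callback handler and the `stream` is returned to the client. The actual LangChain invocation is left as a comment because its API differs across versions:

```ts
import { LangChainStream, StreamingTextResponse } from 'ai';

export async function POST(_req: Request) {
  const { stream, handlers } = LangChainStream({
    onFinal: async (completion) => console.log('final length:', completion.length),
  });

  // Register `handlers` with your LangChain model or chain as a callback handler,
  // e.g. `model.invoke(prompt, { callbacks: [handlers] })` (the exact call depends
  // on your LangChain version). Do not await the invocation, so the stream can be
  // returned to the client immediately.

  return new StreamingTextResponse(stream);
}
```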
1648
+ declare function MistralStream(response: AsyncGenerator<ChatCompletionResponseChunk, void, unknown>, callbacks?: AIStreamCallbacksAndOptions): ReadableStream;
1649
+
1650
+ interface Prediction {
1651
+ id: string;
1652
+ status: 'starting' | 'processing' | 'succeeded' | 'failed' | 'canceled';
1653
+ version: string;
1654
+ input: object;
1655
+ output?: any;
1656
+ source: 'api' | 'web';
1657
+ error?: any;
1658
+ logs?: string;
1659
+ metrics?: {
1660
+ predict_time?: number;
1661
+ };
1662
+ webhook?: string;
1663
+ webhook_events_filter?: ('start' | 'output' | 'logs' | 'completed')[];
1664
+ created_at: string;
1665
+ updated_at?: string;
1666
+ completed_at?: string;
1667
+ urls: {
1668
+ get: string;
1669
+ cancel: string;
1670
+ stream?: string;
1671
+ };
1672
+ }
1673
+ /**
1674
+ * Stream predictions from Replicate.
1675
+ * Only certain models are supported and you must pass `stream: true` to
1676
+ * replicate.predictions.create().
1677
+ * @see https://github.com/replicate/replicate-javascript#streaming
1678
+ *
1679
+ * @example
1680
+ * const response = await replicate.predictions.create({
1681
+ * stream: true,
1682
+ * input: {
1683
+ * prompt: messages.join('\n')
1684
+ * },
1685
+ * version: '2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1'
1686
+ * })
1687
+ *
1688
+ * const stream = await ReplicateStream(response)
1689
+ * return new StreamingTextResponse(stream)
1690
+ *
1691
+ */
1692
+ declare function ReplicateStream(res: Prediction, cb?: AIStreamCallbacksAndOptions, options?: {
1693
+ headers?: Record<string, string>;
1694
+ }): Promise<ReadableStream>;
1695
+
1696
+ /**
1697
+ * A stream wrapper to send custom JSON-encoded data back to the client.
1698
+ */
1699
+ declare class StreamData {
1700
+ private encoder;
1701
+ private controller;
1702
+ stream: TransformStream<Uint8Array, Uint8Array>;
1703
+ private isClosedPromise;
1704
+ private isClosedPromiseResolver;
1705
+ private isClosed;
1706
+ private data;
1707
+ private messageAnnotations;
1708
+ constructor();
1709
+ close(): Promise<void>;
1710
+ append(value: JSONValue): void;
1711
+ appendMessageAnnotation(value: JSONValue): void;
1712
+ }
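A sketch combining `StreamData` with `OpenAIStream` and `StreamingTextResponse`; the appended payload is illustrative. Closing the data stream in `onFinal` keeps it open for the lifetime of the text stream:

```ts
import OpenAI from 'openai';
import { OpenAIStream, StreamData, StreamingTextResponse } from 'ai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

export async function POST(req: Request) {
  const { messages } = await req.json();

  const response = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo', // illustrative model
    stream: true,
    messages,
  });

  const data = new StreamData();
  data.append({ source: 'example' }); // illustrative JSON payload for the client

  const stream = OpenAIStream(response, {
    onFinal: async () => {
      await data.close(); // close the data stream once the text stream finishes
    },
  });

  // Passing `data` merges its chunks into the streamed response.
  return new StreamingTextResponse(stream, {}, data);
}
```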
1713
+ /**
1714
+ * A TransformStream for LLMs that do not have their own transform stream handlers managing encoding (e.g. OpenAIStream has one for function call handling).
1715
+ * This assumes every chunk is a 'text' chunk.
1716
+ */
1717
+ declare function createStreamDataTransformer(): TransformStream<any, any>;
1718
+ /**
1719
+ @deprecated Use `StreamData` instead.
1720
+ */
1721
+ declare class experimental_StreamData extends StreamData {
1722
+ }
1723
+
1724
+ /**
1725
+ * This is a naive implementation of the streaming React response API.
1726
+ * Currently, it can carry the original raw content, a data payload, and a special
1727
+ * UI payload, and stream them via "rows" (nested promises).
1728
+ * It must be used inside Server Actions so Flight can encode the React elements.
1729
+ *
1730
+ * It is naive in that, unlike the StreamingTextResponse, it does not send the diff
1731
+ * between the rows; instead it flushes the full payload on each row.
1732
+ */
1733
+
1734
+ type UINode = string | JSX.Element | JSX.Element[] | null | undefined;
1735
+ type Payload = {
1736
+ ui: UINode | Promise<UINode>;
1737
+ content: string;
1738
+ };
1739
+ type ReactResponseRow = Payload & {
1740
+ next: null | Promise<ReactResponseRow>;
1741
+ };
1742
+ /**
1743
+ * A utility class for streaming React responses.
1744
+ */
1745
+ declare class experimental_StreamingReactResponse {
1746
+ constructor(res: ReadableStream, options?: {
1747
+ ui?: (message: {
1748
+ content: string;
1749
+ data?: JSONValue[];
1750
+ }) => UINode | Promise<UINode>;
1751
+ data?: StreamData;
1752
+ generateId?: IdGenerator;
1753
+ });
1754
+ }
1755
+
1756
+ /**
1757
+ * A utility class for streaming text responses.
1758
+ */
1759
+ declare class StreamingTextResponse extends Response {
1760
+ constructor(res: ReadableStream, init?: ResponseInit, data?: StreamData);
1761
+ }
1762
+ /**
1763
+ * A utility function to stream a ReadableStream to a Node.js response-like object.
1764
+ */
1765
+ declare function streamToResponse(res: ReadableStream, response: ServerResponse, init?: {
1766
+ headers?: Record<string, string>;
1767
+ status?: number;
1768
+ }): void;
1769
+
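For Node.js servers that expose the classic `ServerResponse` API rather than returning a web `Response`, `streamToResponse` pipes a `ReadableStream` into the response object. A minimal sketch, with the upstream stream creation reduced to a placeholder:

```ts
import { createServer } from 'node:http';
import { streamToResponse } from 'ai';

// Placeholder: obtain a ReadableStream from any of the *Stream helpers above.
declare function getAIStream(): ReadableStream;

createServer((_req, res) => {
  // Write the stream to `res`, optionally setting status and headers.
  streamToResponse(getAIStream(), res, {
    status: 200,
    headers: { 'Content-Type': 'text/plain; charset=utf-8' },
  });
}).listen(3000);
```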
1770
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CreateMessage, DataContent, DataMessage, DeepPartial, ErrorStreamPart, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, Function, FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, LangChainStream, Message$1 as Message, MistralStream, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamToResponse, tool, trimStartOfStreamHelper };