ai 0.0.0-85f9a635-20240518005312 → 0.0.0-8777c42a-20250115032312

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +2863 -0
  2. package/README.md +99 -22
  3. package/dist/index.d.mts +1925 -1592
  4. package/dist/index.d.ts +1925 -1592
  5. package/dist/index.js +5500 -2961
  6. package/dist/index.js.map +1 -1
  7. package/dist/index.mjs +5497 -2916
  8. package/dist/index.mjs.map +1 -1
  9. package/package.json +39 -100
  10. package/react/dist/index.d.mts +8 -563
  11. package/react/dist/index.d.ts +8 -580
  12. package/react/dist/index.js +7 -1395
  13. package/react/dist/index.js.map +1 -1
  14. package/react/dist/index.mjs +12 -1383
  15. package/react/dist/index.mjs.map +1 -1
  16. package/rsc/dist/index.d.ts +340 -197
  17. package/rsc/dist/rsc-server.d.mts +339 -197
  18. package/rsc/dist/rsc-server.mjs +1295 -1347
  19. package/rsc/dist/rsc-server.mjs.map +1 -1
  20. package/rsc/dist/rsc-shared.d.mts +30 -23
  21. package/rsc/dist/rsc-shared.mjs +69 -105
  22. package/rsc/dist/rsc-shared.mjs.map +1 -1
  23. package/test/dist/index.d.mts +67 -0
  24. package/test/dist/index.d.ts +67 -0
  25. package/test/dist/index.js +131 -0
  26. package/test/dist/index.js.map +1 -0
  27. package/test/dist/index.mjs +101 -0
  28. package/test/dist/index.mjs.map +1 -0
  29. package/prompts/dist/index.d.mts +0 -324
  30. package/prompts/dist/index.d.ts +0 -324
  31. package/prompts/dist/index.js +0 -178
  32. package/prompts/dist/index.js.map +0 -1
  33. package/prompts/dist/index.mjs +0 -146
  34. package/prompts/dist/index.mjs.map +0 -1
  35. package/react/dist/index.server.d.mts +0 -17
  36. package/react/dist/index.server.d.ts +0 -17
  37. package/react/dist/index.server.js +0 -50
  38. package/react/dist/index.server.js.map +0 -1
  39. package/react/dist/index.server.mjs +0 -23
  40. package/react/dist/index.server.mjs.map +0 -1
  41. package/solid/dist/index.d.mts +0 -408
  42. package/solid/dist/index.d.ts +0 -408
  43. package/solid/dist/index.js +0 -1072
  44. package/solid/dist/index.js.map +0 -1
  45. package/solid/dist/index.mjs +0 -1044
  46. package/solid/dist/index.mjs.map +0 -1
  47. package/svelte/dist/index.d.mts +0 -484
  48. package/svelte/dist/index.d.ts +0 -484
  49. package/svelte/dist/index.js +0 -1778
  50. package/svelte/dist/index.js.map +0 -1
  51. package/svelte/dist/index.mjs +0 -1749
  52. package/svelte/dist/index.mjs.map +0 -1
  53. package/vue/dist/index.d.mts +0 -402
  54. package/vue/dist/index.d.ts +0 -402
  55. package/vue/dist/index.js +0 -1072
  56. package/vue/dist/index.js.map +0 -1
  57. package/vue/dist/index.mjs +0 -1034
  58. package/vue/dist/index.mjs.map +0 -1
@@ -1,8 +1,7 @@
- import * as react_jsx_runtime from 'react/jsx-runtime';
+ import { LanguageModelV1FinishReason, LanguageModelV1CallWarning, LanguageModelV1ProviderMetadata, LanguageModelV1 } from '@ai-sdk/provider';
  import { ReactNode } from 'react';
- import OpenAI from 'openai';
  import { z } from 'zod';
- import { LanguageModelV1 } from '@ai-sdk/provider';
+ import { ToolInvocation, Attachment } from '@ai-sdk/ui-utils';
 
  type AIAction<T = any, R = any> = (...args: T[]) => Promise<R>;
  type AIActions<T = any, R = any> = Record<string, AIAction<T, R>>;
@@ -27,11 +26,6 @@ type MutableAIState<AIState> = {
  update: (newState: ValueOrUpdater<AIState>) => void;
  done: ((newState: AIState) => void) | (() => void);
  };
- /**
- * StreamableValue is a value that can be streamed over the network via AI Actions.
- * To read the streamed values, use the `readStreamableValue` or `useStreamableValue` APIs.
- */
- type StreamableValue<T = any, E = any> = {};
 
  /**
  * Get the current AI state.
@@ -41,10 +35,10 @@ type StreamableValue<T = any, E = any> = {};
  * @example const state = getAIState() // Get the entire AI state
  * @example const field = getAIState('key') // Get the value of the key
  */
- declare function getAIState<AI extends AIProvider = any>(): InferAIState<AI, any>;
- declare function getAIState<AI extends AIProvider = any>(key: keyof InferAIState<AI, any>): InferAIState<AI, any>[typeof key];
+ declare function getAIState<AI extends AIProvider = any>(): Readonly<InferAIState<AI, any>>;
+ declare function getAIState<AI extends AIProvider = any>(key: keyof InferAIState<AI, any>): Readonly<InferAIState<AI, any>[typeof key]>;
  /**
- * Get the mutable AI state. Note that you must call `.close()` when finishing
+ * Get the mutable AI state. Note that you must call `.done()` when finishing
  * updating the AI state.
  *
  * @example
@@ -64,160 +58,45 @@ declare function getAIState<AI extends AIProvider = any>(key: keyof InferAIState
  declare function getMutableAIState<AI extends AIProvider = any>(): MutableAIState<InferAIState<AI, any>>;
  declare function getMutableAIState<AI extends AIProvider = any>(key: keyof InferAIState<AI, any>): MutableAIState<InferAIState<AI, any>[typeof key]>;
 
- /**
- * Create a piece of changable UI that can be streamed to the client.
- * On the client side, it can be rendered as a normal React node.
- */
- declare function createStreamableUI(initialValue?: React.ReactNode): {
- /**
- * The value of the streamable UI. This can be returned from a Server Action and received by the client.
- */
- value: react_jsx_runtime.JSX.Element;
+ declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
+ actions: Actions;
+ initialAIState?: AIState;
+ initialUIState?: UIState;
  /**
- * This method updates the current UI node. It takes a new UI node and replaces the old one.
+ * This function is called whenever the AI state is updated by an Action.
+ * You can use this to persist the AI state to a database, or to send it to a
+ * logging service.
  */
- update(value: React.ReactNode): any;
+ onSetAIState?: OnSetAIState<AIState>;
  /**
- * This method is used to append a new UI node to the end of the old one.
- * Once appended a new UI node, the previous UI node cannot be updated anymore.
- *
- * @example
- * ```jsx
- * const ui = createStreamableUI(<div>hello</div>)
- * ui.append(<div>world</div>)
+ * This function is used to retrieve the UI state based on the AI state.
+ * For example, to render the initial UI state based on a given AI state, or
+ * to sync the UI state when the application is already loaded.
  *
- * // The UI node will be:
- * // <>
- * // <div>hello</div>
- * // <div>world</div>
- * // </>
- * ```
- */
- append(value: React.ReactNode): any;
- /**
- * This method is used to signal that there is an error in the UI stream.
- * It will be thrown on the client side and caught by the nearest error boundary component.
- */
- error(error: any): any;
- /**
- * This method marks the UI node as finalized. You can either call it without any parameters or with a new UI node as the final state.
- * Once called, the UI node cannot be updated or appended anymore.
+ * If returning `undefined`, the client side UI state will not be updated.
  *
- * This method is always **required** to be called, otherwise the response will be stuck in a loading state.
- */
- done(...args: [] | [React.ReactNode]): any;
- };
- declare const STREAMABLE_VALUE_INTERNAL_LOCK: unique symbol;
- /**
- * Create a wrapped, changable value that can be streamed to the client.
- * On the client side, the value can be accessed via the readStreamableValue() API.
- */
- declare function createStreamableValue<T = any, E = any>(initialValue?: T | ReadableStream<T>): {
- /**
- * @internal This is an internal lock to prevent the value from being
- * updated by the user.
- */
- [STREAMABLE_VALUE_INTERNAL_LOCK]: boolean;
- /**
- * The value of the streamable. This can be returned from a Server Action and
- * received by the client. To read the streamed values, use the
- * `readStreamableValue` or `useStreamableValue` APIs.
- */
- readonly value: StreamableValue<T, E>;
- /**
- * This method updates the current value with a new one.
- */
- update(value: T): any;
- /**
- * This method is used to append a delta string to the current value. It
- * requires the current value of the streamable to be a string.
+ * This function must be annotated with the `"use server"` directive.
  *
  * @example
- * ```jsx
- * const streamable = createStreamableValue('hello');
- * streamable.append(' world');
+ * ```tsx
+ * onGetUIState: async () => {
+ * 'use server';
  *
- * // The value will be 'hello world'
- * ```
- */
- append(value: T): any;
- /**
- * This method is used to signal that there is an error in the value stream.
- * It will be thrown on the client side when consumed via
- * `readStreamableValue` or `useStreamableValue`.
- */
- error(error: any): any;
- /**
- * This method marks the value as finalized. You can either call it without
- * any parameters or with a new value as the final state.
- * Once called, the value cannot be updated or appended anymore.
+ * const currentAIState = getAIState();
+ * const externalAIState = await loadAIStateFromDatabase();
  *
- * This method is always **required** to be called, otherwise the response
- * will be stuck in a loading state.
- */
- done(...args: [] | [T]): any;
- };
-
- type Streamable$1 = ReactNode | Promise<ReactNode>;
- type Renderer$1<T> = (props: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;
- /**
- * `render` is a helper function to create a streamable UI from some LLMs.
- * This API only supports OpenAI's GPT models with Function Calling and Assistants Tools,
- * please use `streamUI` for compatibility with other providers.
- *
- * @deprecated It's recommended to use the `streamUI` API for compatibility with AI SDK Core APIs
- * and future features. This API will be removed in a future release.
- */
- declare function render<TS extends {
- [name: string]: z.Schema;
- } = {}, FS extends {
- [name: string]: z.Schema;
- } = {}>(options: {
- /**
- * The model name to use. Must be OpenAI SDK compatible. Tools and Functions are only supported
- * GPT models (3.5/4), OpenAI Assistants, Mistral small and large, and Fireworks firefunction-v1.
+ * if (currentAIState === externalAIState) return undefined;
  *
- * @example "gpt-3.5-turbo"
- */
- model: string;
- /**
- * The provider instance to use. Currently the only provider available is OpenAI.
- * This needs to match the model name.
+ * // Update current AI state and return the new UI state
+ * const state = getMutableAIState()
+ * state.done(externalAIState)
+ *
+ * return <div>...</div>;
+ * }
+ * ```
  */
- provider: OpenAI;
- messages: Parameters<typeof OpenAI.prototype.chat.completions.create>[0]['messages'];
- text?: Renderer$1<{
- /**
- * The full text content from the model so far.
- */
- content: string;
- /**
- * The new appended text content from the model since the last `text` call.
- */
- delta: string;
- /**
- * Whether the model is done generating text.
- * If `true`, the `content` will be the final output and this call will be the last.
- */
- done: boolean;
- }>;
- tools?: {
- [name in keyof TS]: {
- description?: string;
- parameters: TS[name];
- render: Renderer$1<z.infer<TS[name]>>;
- };
- };
- functions?: {
- [name in keyof FS]: {
- description?: string;
- parameters: FS[name];
- render: Renderer$1<z.infer<FS[name]>>;
- };
- };
- initial?: ReactNode;
- temperature?: number;
- }): ReactNode;
+ onGetUIState?: OnGetUIState<UIState>;
+ }): AIProvider<AIState, UIState, Actions>;
 
  type CallSettings = {
  /**
@@ -243,13 +122,18 @@ type CallSettings = {
  */
  topP?: number;
  /**
+ Only sample from the top K options for each subsequent token.
+
+ Used to remove "long tail" low probability responses.
+ Recommended for advanced use cases only. You usually only need to use temperature.
+ */
+ topK?: number;
+ /**
  Presence penalty setting. It affects the likelihood of the model to
  repeat information that is already in the prompt.
 
  The presence penalty is a number between -1 (increase repetition)
  and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-
- @default 0
  */
  presencePenalty?: number;
  /**
@@ -258,11 +142,15 @@ type CallSettings = {
 
  The frequency penalty is a number between -1 (increase repetition)
  and 1 (maximum penalty, decrease repetition). 0 means no penalty.
-
- @default 0
  */
  frequencyPenalty?: number;
  /**
+ Stop sequences.
+ If set, the model will stop generating text when one of the stop sequences is generated.
+ Providers may have limits on the number of stop sequences.
+ */
+ stopSequences?: string[];
+ /**
  The seed (integer) to use for random sampling. If set and supported
  by the model, calls will generate deterministic results.
  */
@@ -277,6 +165,66 @@ type CallSettings = {
  Abort signal.
  */
  abortSignal?: AbortSignal;
+ /**
+ Additional HTTP headers to be sent with the request.
+ Only applicable for HTTP-based providers.
+ */
+ headers?: Record<string, string | undefined>;
+ };
+
+ /**
+ Reason why a language model finished generating a response.
+
+ Can be one of the following:
+ - `stop`: model generated stop sequence
+ - `length`: model generated maximum number of tokens
+ - `content-filter`: content filter violation stopped the model
+ - `tool-calls`: model triggered tool calls
+ - `error`: model stopped because of an error
+ - `other`: model stopped for other reasons
+ */
+ type FinishReason = LanguageModelV1FinishReason;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type CallWarning = LanguageModelV1CallWarning;
+ /**
+ Tool choice for the generation. It supports the following settings:
+
+ - `auto` (default): the model can choose whether and which tools to call.
+ - `required`: the model must call a tool. It can choose which tool to call.
+ - `none`: the model must not call tools
+ - `{ type: 'tool', toolName: string (typed) }`: the model must call the specified tool
+ */
+ type CoreToolChoice<TOOLS extends Record<string, unknown>> = 'auto' | 'none' | 'required' | {
+ type: 'tool';
+ toolName: keyof TOOLS;
+ };
+
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ type ProviderMetadata = LanguageModelV1ProviderMetadata;
+
+ /**
+ Represents the number of tokens used in a prompt and completion.
+ */
+ type LanguageModelUsage = {
+ /**
+ The number of tokens used in the prompt.
+ */
+ promptTokens: number;
+ /**
+ The number of tokens used in the completion.
+ */
+ completionTokens: number;
+ /**
+ The total number of tokens used (promptTokens + completionTokens).
+ */
+ totalTokens: number;
  };
 
  /**
@@ -284,6 +232,15 @@ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffe
  */
  type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
 
+ type ToolResultContent = Array<{
+ type: 'text';
+ text: string;
+ } | {
+ type: 'image';
+ data: string;
+ mimeType?: string;
+ }>;
+
  /**
  Text content part of a prompt. It contains a string of text.
  */
@@ -293,6 +250,12 @@ interface TextPart {
  The text content.
  */
  text: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
  /**
  Image content part of a prompt. It contains an image.
@@ -310,6 +273,35 @@ interface ImagePart {
  Optional mime type of the image.
  */
  mimeType?: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ }
+ /**
+ File content part of a prompt. It contains a file.
+ */
+ interface FilePart {
+ type: 'file';
+ /**
+ File data. Can either be:
+
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ - URL: a URL that points to the image
+ */
+ data: DataContent | URL;
+ /**
+ Mime type of the file.
+ */
+ mimeType: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
  /**
  Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
@@ -328,6 +320,12 @@ interface ToolCallPart {
  Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
  */
  args: unknown;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
  /**
  Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
@@ -347,16 +345,21 @@ interface ToolResultPart {
  */
  result: unknown;
  /**
+ Multi-part content of the tool result. Only for tools that support multipart results.
+ */
+ experimental_content?: ToolResultContent;
+ /**
  Optional flag if the result is an error or an error message.
  */
  isError?: boolean;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  }
 
- /**
- A message that can be used in the `messages` field of a prompt.
- It can be a user message, an assistant message, or a tool message.
- */
- type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
  /**
  A system message. It can contain system information.
 
@@ -367,6 +370,12 @@ type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage |
  type CoreSystemMessage = {
  role: 'system';
  content: string;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
  /**
  A user message. It can contain text or a combination of text and images.
@@ -374,17 +383,29 @@ A user message. It can contain text or a combination of text and images.
  type CoreUserMessage = {
  role: 'user';
  content: UserContent;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
  /**
  Content of a user message. It can be a string or an array of text and image parts.
  */
- type UserContent = string | Array<TextPart | ImagePart>;
+ type UserContent = string | Array<TextPart | ImagePart | FilePart>;
  /**
  An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
  */
  type CoreAssistantMessage = {
  role: 'assistant';
  content: AssistantContent;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
  /**
  Content of an assistant message. It can be a string or an array of text and tool call parts.
@@ -396,14 +417,33 @@ A tool message. It contains the result of one or more tool calls.
  type CoreToolMessage = {
  role: 'tool';
  content: ToolContent;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
  };
  /**
  Content of a tool message. It is an array of tool result parts.
  */
  type ToolContent = Array<ToolResultPart>;
+ /**
+ A message that can be used in the `messages` field of a prompt.
+ It can be a user message, an assistant message, or a tool message.
+ */
+ type CoreMessage = CoreSystemMessage | CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
+
+ type UIMessage = {
+ role: 'system' | 'user' | 'assistant' | 'data';
+ content: string;
+ toolInvocations?: ToolInvocation[];
+ experimental_attachments?: Attachment[];
+ };
 
  /**
- Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ Prompt part of the AI function options.
+ It contains a system message, a simple text prompt, or a list of messages.
  */
  type Prompt = {
  /**
@@ -415,9 +455,9 @@ type Prompt = {
  */
  prompt?: string;
  /**
- A list of messsages. You can either use `prompt` or `messages` but not both.
+ A list of messages. You can either use `prompt` or `messages` but not both.
  */
- messages?: Array<CoreMessage>;
+ messages?: Array<CoreMessage> | Array<UIMessage>;
  };
 
  type Streamable = ReactNode | Promise<ReactNode>;
@@ -458,7 +498,7 @@ type RenderResult = {
  */
  declare function streamUI<TOOLS extends {
  [name: string]: z.ZodTypeAny;
- } = {}>({ model, tools, system, prompt, messages, maxRetries, abortSignal, initial, text, ...settings }: CallSettings & Prompt & {
+ } = {}>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, initial, text, experimental_providerMetadata: providerMetadata, onFinish, ...settings }: CallSettings & Prompt & {
  /**
  * The language model to use.
  */
@@ -469,48 +509,150 @@ declare function streamUI<TOOLS extends {
  tools?: {
  [name in keyof TOOLS]: RenderTool<TOOLS[name]>;
  };
+ /**
+ * The tool choice strategy. Default: 'auto'.
+ */
+ toolChoice?: CoreToolChoice<TOOLS>;
  text?: RenderText;
  initial?: ReactNode;
+ /**
+ Additional provider-specific metadata. They are passed through
+ to the provider from the AI SDK and enable provider-specific
+ functionality that can be fully encapsulated in the provider.
+ */
+ experimental_providerMetadata?: ProviderMetadata;
+ /**
+ * Callback that is called when the LLM response and the final object validation are finished.
+ */
+ onFinish?: (event: {
+ /**
+ * The reason why the generation finished.
+ */
+ finishReason: FinishReason;
+ /**
+ * The token usage of the generated response.
+ */
+ usage: LanguageModelUsage;
+ /**
+ * The final ui node that was generated.
+ */
+ value: ReactNode;
+ /**
+ * Warnings from the model provider (e.g. unsupported settings)
+ */
+ warnings?: CallWarning[];
+ /**
+ * Optional raw response data.
+ */
+ rawResponse?: {
+ /**
+ * Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ }) => Promise<void> | void;
  }): Promise<RenderResult>;
 
- declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
- actions: Actions;
- initialAIState?: AIState;
- initialUIState?: UIState;
+ type StreamableUIWrapper = {
  /**
- * This function is called whenever the AI state is updated by an Action.
- * You can use this to persist the AI state to a database, or to send it to a
- * logging service.
+ * The value of the streamable UI. This can be returned from a Server Action and received by the client.
  */
- onSetAIState?: OnSetAIState<AIState>;
+ readonly value: React.ReactNode;
  /**
- * This function is used to retrieve the UI state based on the AI state.
- * For example, to render the initial UI state based on a given AI state, or
- * to sync the UI state when the application is already loaded.
- *
- * If returning `undefined`, the client side UI state will not be updated.
- *
- * This function must be annotated with the `"use server"` directive.
+ * This method updates the current UI node. It takes a new UI node and replaces the old one.
+ */
+ update(value: React.ReactNode): StreamableUIWrapper;
+ /**
+ * This method is used to append a new UI node to the end of the old one.
+ * Once appended a new UI node, the previous UI node cannot be updated anymore.
  *
  * @example
- * ```tsx
- * onGetUIState: async () => {
- * 'use server';
+ * ```jsx
+ * const ui = createStreamableUI(<div>hello</div>)
+ * ui.append(<div>world</div>)
  *
- * const currentAIState = getAIState();
- * const externalAIState = await loadAIStateFromDatabase();
+ * // The UI node will be:
+ * // <>
+ * // <div>hello</div>
+ * // <div>world</div>
+ * // </>
+ * ```
+ */
+ append(value: React.ReactNode): StreamableUIWrapper;
+ /**
+ * This method is used to signal that there is an error in the UI stream.
+ * It will be thrown on the client side and caught by the nearest error boundary component.
+ */
+ error(error: any): StreamableUIWrapper;
+ /**
+ * This method marks the UI node as finalized. You can either call it without any parameters or with a new UI node as the final state.
+ * Once called, the UI node cannot be updated or appended anymore.
  *
- * if (currentAIState === externalAIState) return undefined;
+ * This method is always **required** to be called, otherwise the response will be stuck in a loading state.
+ */
+ done(...args: [React.ReactNode] | []): StreamableUIWrapper;
+ };
+ /**
+ * Create a piece of changeable UI that can be streamed to the client.
+ * On the client side, it can be rendered as a normal React node.
+ */
+ declare function createStreamableUI(initialValue?: React.ReactNode): StreamableUIWrapper;
+
+ declare const __internal_curr: unique symbol;
+ declare const __internal_error: unique symbol;
+ /**
+ * StreamableValue is a value that can be streamed over the network via AI Actions.
+ * To read the streamed values, use the `readStreamableValue` or `useStreamableValue` APIs.
+ */
+ type StreamableValue<T = any, E = any> = {
+ [__internal_curr]?: T;
+ [__internal_error]?: E;
+ };
+
+ /**
+ * Create a wrapped, changeable value that can be streamed to the client.
+ * On the client side, the value can be accessed via the readStreamableValue() API.
+ */
+ declare function createStreamableValue<T = any, E = any>(initialValue?: T | ReadableStream<T>): StreamableValueWrapper<T, E>;
+ type StreamableValueWrapper<T, E> = {
+ /**
+ * The value of the streamable. This can be returned from a Server Action and
+ * received by the client. To read the streamed values, use the
+ * `readStreamableValue` or `useStreamableValue` APIs.
+ */
+ readonly value: StreamableValue<T, E>;
+ /**
+ * This method updates the current value with a new one.
+ */
+ update(value: T): StreamableValueWrapper<T, E>;
+ /**
+ * This method is used to append a delta string to the current value. It
+ * requires the current value of the streamable to be a string.
  *
- * // Update current AI state and return the new UI state
- * const state = getMutableAIState()
- * state.done(externalAIState)
+ * @example
+ * ```jsx
+ * const streamable = createStreamableValue('hello');
+ * streamable.append(' world');
  *
- * return <div>...</div>;
- * }
+ * // The value will be 'hello world'
  * ```
  */
- onGetUIState?: OnGetUIState<UIState>;
- }): AIProvider<AIState, UIState, Actions>;
+ append(value: T): StreamableValueWrapper<T, E>;
+ /**
+ * This method is used to signal that there is an error in the value stream.
+ * It will be thrown on the client side when consumed via
+ * `readStreamableValue` or `useStreamableValue`.
+ */
+ error(error: any): StreamableValueWrapper<T, E>;
+ /**
+ * This method marks the value as finalized. You can either call it without
+ * any parameters or with a new value as the final state.
+ * Once called, the value cannot be updated or appended anymore.
+ *
+ * This method is always **required** to be called, otherwise the response
+ * will be stuck in a loading state.
+ */
+ done(...args: [T] | []): StreamableValueWrapper<T, E>;
+ };
 
- export { createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, render, streamUI };
+ export { createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, streamUI };
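
For orientation, a minimal sketch of calling the reworked `streamUI` declared above. The provider import (`@ai-sdk/openai`), the model id, the props of the `text` renderer, and the `value` field on the returned `RenderResult` are illustrative assumptions, not shown in this diff; any `LanguageModelV1` implementation should fit the `model` parameter.

```tsx
// Sketch only: '@ai-sdk/openai' and 'gpt-4o' are assumptions for illustration.
import { streamUI } from 'ai/rsc';
import { openai } from '@ai-sdk/openai';

export async function answer(question: string) {
  'use server';

  const result = await streamUI({
    model: openai('gpt-4o'),
    prompt: question,
    // Render streamed text as it arrives (renderer props assumed).
    text: ({ content, done }) => <p>{done ? content : `${content}…`}</p>,
    // New in this version: tool choice strategy plus a finish callback
    // that exposes the finish reason and token usage.
    toolChoice: 'auto',
    onFinish: ({ finishReason, usage }) => {
      console.log(finishReason, usage.totalTokens);
    },
  });

  // The rendered UI node is returned from the Server Action (field name assumed).
  return result.value;
}
```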
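The streamable helpers now return named wrapper types (`StreamableUIWrapper`, `StreamableValueWrapper`) whose `update`/`append`/`error`/`done` methods return the wrapper itself. A short server-side sketch of `createStreamableValue`, following the jsdoc above; the `ai/rsc` import path and the client-side readers (`readStreamableValue`, `useStreamableValue`) are taken from that jsdoc.

```tsx
import { createStreamableValue } from 'ai/rsc';

export async function streamStatus() {
  'use server';

  const status = createStreamableValue('queued');

  // Updates can continue after the action returns; the stream stays open
  // until .done() is called (required, or the client is stuck loading).
  (async () => {
    status.update('running');
    await new Promise((resolve) => setTimeout(resolve, 500));
    status.done('finished');
  })();

  // .value is the StreamableValue handed to the client, where it is read
  // with readStreamableValue() or the useStreamableValue() hook.
  return status.value;
}
```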