ai 3.1.0-canary.3 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +1 -1
  2. package/dist/index.d.mts +982 -24
  3. package/dist/index.d.ts +982 -24
  4. package/dist/index.js +1748 -175
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1723 -174
  7. package/dist/index.mjs.map +1 -1
  8. package/package.json +14 -31
  9. package/prompts/dist/index.d.mts +13 -1
  10. package/prompts/dist/index.d.ts +13 -1
  11. package/prompts/dist/index.js +13 -0
  12. package/prompts/dist/index.js.map +1 -1
  13. package/prompts/dist/index.mjs +12 -0
  14. package/prompts/dist/index.mjs.map +1 -1
  15. package/react/dist/index.d.mts +27 -6
  16. package/react/dist/index.d.ts +31 -8
  17. package/react/dist/index.js +155 -141
  18. package/react/dist/index.js.map +1 -1
  19. package/react/dist/index.mjs +154 -141
  20. package/react/dist/index.mjs.map +1 -1
  21. package/react/dist/index.server.d.mts +4 -2
  22. package/react/dist/index.server.d.ts +4 -2
  23. package/react/dist/index.server.js.map +1 -1
  24. package/react/dist/index.server.mjs.map +1 -1
  25. package/rsc/dist/index.d.ts +385 -20
  26. package/rsc/dist/rsc-client.d.mts +1 -1
  27. package/rsc/dist/rsc-client.mjs +2 -0
  28. package/rsc/dist/rsc-client.mjs.map +1 -1
  29. package/rsc/dist/rsc-server.d.mts +367 -20
  30. package/rsc/dist/rsc-server.mjs +676 -35
  31. package/rsc/dist/rsc-server.mjs.map +1 -1
  32. package/rsc/dist/rsc-shared.d.mts +24 -9
  33. package/rsc/dist/rsc-shared.mjs +98 -4
  34. package/rsc/dist/rsc-shared.mjs.map +1 -1
  35. package/solid/dist/index.d.mts +7 -3
  36. package/solid/dist/index.d.ts +7 -3
  37. package/solid/dist/index.js +106 -107
  38. package/solid/dist/index.js.map +1 -1
  39. package/solid/dist/index.mjs +106 -107
  40. package/solid/dist/index.mjs.map +1 -1
  41. package/svelte/dist/index.d.mts +7 -3
  42. package/svelte/dist/index.d.ts +7 -3
  43. package/svelte/dist/index.js +109 -109
  44. package/svelte/dist/index.js.map +1 -1
  45. package/svelte/dist/index.mjs +109 -109
  46. package/svelte/dist/index.mjs.map +1 -1
  47. package/vue/dist/index.d.mts +7 -3
  48. package/vue/dist/index.d.ts +7 -3
  49. package/vue/dist/index.js +106 -107
  50. package/vue/dist/index.js.map +1 -1
  51. package/vue/dist/index.mjs +106 -107
  52. package/vue/dist/index.mjs.map +1 -1
  53. package/ai-model-specification/dist/index.d.mts +0 -606
  54. package/ai-model-specification/dist/index.d.ts +0 -606
  55. package/ai-model-specification/dist/index.js +0 -617
  56. package/ai-model-specification/dist/index.js.map +0 -1
  57. package/ai-model-specification/dist/index.mjs +0 -560
  58. package/ai-model-specification/dist/index.mjs.map +0 -1
  59. package/core/dist/index.d.mts +0 -590
  60. package/core/dist/index.d.ts +0 -590
  61. package/core/dist/index.js +0 -1528
  62. package/core/dist/index.js.map +0 -1
  63. package/core/dist/index.mjs +0 -1481
  64. package/core/dist/index.mjs.map +0 -1
  65. package/provider/dist/index.d.mts +0 -429
  66. package/provider/dist/index.d.ts +0 -429
  67. package/provider/dist/index.js +0 -1194
  68. package/provider/dist/index.js.map +0 -1
  69. package/provider/dist/index.mjs +0 -1158
  70. package/provider/dist/index.mjs.map +0 -1
@@ -2,8 +2,7 @@ import * as react_jsx_runtime from 'react/jsx-runtime';
  import { ReactNode } from 'react';
  import OpenAI from 'openai';
  import { z } from 'zod';
-
- declare const STREAMABLE_VALUE_TYPE: unique symbol;
+ import { LanguageModelV1 } from '@ai-sdk/provider';

  type AIAction<T = any, R = any> = (...args: T[]) => Promise<R>;
  type AIActions<T = any, R = any> = Record<string, AIAction<T, R>>;
@@ -28,12 +27,11 @@ type MutableAIState<AIState> = {
  update: (newState: ValueOrUpdater<AIState>) => void;
  done: ((newState: AIState) => void) | (() => void);
  };
- type StreamableValue<T = any, E = any> = {
- type?: typeof STREAMABLE_VALUE_TYPE;
- curr?: T;
- error?: E;
- next?: Promise<StreamableValue<T, E>>;
- };
+ /**
+ * StreamableValue is a value that can be streamed over the network via AI Actions.
+ * To read the streamed values, use the `readStreamableValue` or `useStreamableValue` APIs.
+ */
+ type StreamableValue<T = any, E = any> = {};

  /**
  * Get the current AI state.
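The new opaque `StreamableValue` is meant to be consumed on the client with the `readStreamableValue` or `useStreamableValue` APIs named in the comment above. A minimal client-side sketch, assuming the `ai/rsc` entry point, a hypothetical `generate` Server Action, and async iteration over `readStreamableValue`, none of which are shown in this diff:

```tsx
// Client-side consumer sketch for a StreamableValue returned by a Server Action.
'use client';

import { readStreamableValue } from 'ai/rsc'; // assumed entry point
import { generate } from './actions';         // hypothetical action returning { output: StreamableValue<string> }

export async function readOutput() {
  const { output } = await generate();
  // Each update streamed by the server arrives as the iterator advances.
  for await (const delta of readStreamableValue(output)) {
    console.log(delta);
  }
}
```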
@@ -71,28 +69,102 @@ declare function getMutableAIState<AI extends AIProvider = any>(key: keyof Infer
  * On the client side, it can be rendered as a normal React node.
  */
  declare function createStreamableUI(initialValue?: React.ReactNode): {
+ /**
+ * The value of the streamable UI. This can be returned from a Server Action and received by the client.
+ */
  value: react_jsx_runtime.JSX.Element;
+ /**
+ * This method updates the current UI node. It takes a new UI node and replaces the old one.
+ */
  update(value: React.ReactNode): void;
+ /**
+ * This method is used to append a new UI node to the end of the old one.
+ * Once appended a new UI node, the previous UI node cannot be updated anymore.
+ *
+ * @example
+ * ```jsx
+ * const ui = createStreamableUI(<div>hello</div>)
+ * ui.append(<div>world</div>)
+ *
+ * // The UI node will be:
+ * // <>
+ * // <div>hello</div>
+ * // <div>world</div>
+ * // </>
+ * ```
+ */
  append(value: React.ReactNode): void;
+ /**
+ * This method is used to signal that there is an error in the UI stream.
+ * It will be thrown on the client side and caught by the nearest error boundary component.
+ */
  error(error: any): void;
+ /**
+ * This method marks the UI node as finalized. You can either call it without any parameters or with a new UI node as the final state.
+ * Once called, the UI node cannot be updated or appended anymore.
+ *
+ * This method is always **required** to be called, otherwise the response will be stuck in a loading state.
+ */
  done(...args: [] | [React.ReactNode]): void;
  };
+ declare const STREAMABLE_VALUE_INTERNAL_LOCK: unique symbol;
  /**
  * Create a wrapped, changable value that can be streamed to the client.
  * On the client side, the value can be accessed via the readStreamableValue() API.
  */
- declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
+ declare function createStreamableValue<T = any, E = any>(initialValue?: T | ReadableStream<T>): {
+ /**
+ * @internal This is an internal lock to prevent the value from being
+ * updated by the user.
+ */
+ [STREAMABLE_VALUE_INTERNAL_LOCK]: boolean;
+ /**
+ * The value of the streamable. This can be returned from a Server Action and
+ * received by the client. To read the streamed values, use the
+ * `readStreamableValue` or `useStreamableValue` APIs.
+ */
  readonly value: StreamableValue<T, E>;
+ /**
+ * This method updates the current value with a new one.
+ */
  update(value: T): void;
+ /**
+ * This method is used to append a delta string to the current value. It
+ * requires the current value of the streamable to be a string.
+ *
+ * @example
+ * ```jsx
+ * const streamable = createStreamableValue('hello');
+ * streamable.append(' world');
+ *
+ * // The value will be 'hello world'
+ * ```
+ */
+ append(value: T): void;
+ /**
+ * This method is used to signal that there is an error in the value stream.
+ * It will be thrown on the client side when consumed via
+ * `readStreamableValue` or `useStreamableValue`.
+ */
  error(error: any): void;
- done(...args: [
- ] | [T]): void;
+ /**
+ * This method marks the value as finalized. You can either call it without
+ * any parameters or with a new value as the final state.
+ * Once called, the value cannot be updated or appended anymore.
+ *
+ * This method is always **required** to be called, otherwise the response
+ * will be stuck in a loading state.
+ */
+ done(...args: [] | [T]): void;
  };
- type Streamable = ReactNode | Promise<ReactNode>;
- type Renderer<T> = (props: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
+
+ type Streamable$1 = ReactNode | Promise<ReactNode>;
+ type Renderer$1<T> = (props: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;
  /**
  * `render` is a helper function to create a streamable UI from some LLMs.
  * Currently, it only supports OpenAI's GPT models with Function Calling and Assistants Tools.
+ *
+ * @deprecated It's recommended to use the `experimental_streamUI` API for compatibility with the new core APIs.
  */
  declare function render<TS extends {
  [name: string]: z.Schema;
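The expanded `createStreamableValue` surface documented above (string `append`, the mandatory `done` call) is the producer side of the previous sketch. A minimal Server Action, again assuming the `ai/rsc` entry point and an illustrative action name:

```tsx
// Server Action sketch that produces the StreamableValue read by readStreamableValue.
'use server';

import { createStreamableValue } from 'ai/rsc'; // assumed entry point

export async function generate() {
  const streamable = createStreamableValue('');  // the initial value may also be a ReadableStream<T>

  (async () => {
    streamable.append('Hello');       // append requires the current value to be a string
    streamable.append(', world');     // value is now 'Hello, world'
    streamable.done('Hello, world!'); // done() is required, otherwise the client stays in a loading state
  })();

  return { output: streamable.value };
}
```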
@@ -112,7 +184,7 @@ declare function render<TS extends {
  */
  provider: OpenAI;
  messages: Parameters<typeof OpenAI.prototype.chat.completions.create>[0]['messages'];
- text?: Renderer<{
+ text?: Renderer$1<{
  /**
  * The full text content from the model so far.
  */
@@ -131,26 +203,301 @@ declare function render<TS extends {
  [name in keyof TS]: {
  description?: string;
  parameters: TS[name];
- render: Renderer<z.infer<TS[name]>>;
+ render: Renderer$1<z.infer<TS[name]>>;
  };
  };
  functions?: {
  [name in keyof FS]: {
  description?: string;
  parameters: FS[name];
- render: Renderer<z.infer<FS[name]>>;
+ render: Renderer$1<z.infer<FS[name]>>;
  };
  };
  initial?: ReactNode;
  temperature?: number;
  }): ReactNode;

- declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, unstable_onSetAIState: onSetAIState, unstable_onGetUIState: onGetUIState, }: {
+ type CallSettings = {
+ /**
+ Maximum number of tokens to generate.
+ */
+ maxTokens?: number;
+ /**
+ Temperature setting. This is a number between 0 (almost no randomness) and
+ 1 (very random).
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+
+ @default 0
+ */
+ temperature?: number;
+ /**
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+ */
+ topP?: number;
+ /**
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
+ */
+ presencePenalty?: number;
+ /**
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
+ */
+ frequencyPenalty?: number;
+ /**
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
+ */
+ seed?: number;
+ /**
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ };
+
+ /**
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+ */
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+
+ /**
+ Text content part of a prompt. It contains a string of text.
+ */
+ interface TextPart {
+ type: 'text';
+ /**
+ The text content.
+ */
+ text: string;
+ }
+ /**
+ Image content part of a prompt. It contains an image.
+ */
+ interface ImagePart {
+ type: 'image';
+ /**
+ Image data. Can either be:
+
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ - URL: a URL that points to the image
+ */
+ image: DataContent | URL;
+ /**
+ Optional mime type of the image.
+ */
+ mimeType?: string;
+ }
+ /**
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
+ interface ToolCallPart {
+ type: 'tool-call';
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that is being called.
+ */
+ toolName: string;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: unknown;
+ }
+ /**
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
+ interface ToolResultPart {
+ type: 'tool-result';
+ /**
+ ID of the tool call that this result is associated with.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that generated this result.
+ */
+ toolName: string;
+ /**
+ Result of the tool call. This is a JSON-serializable object.
+ */
+ result: unknown;
+ /**
+ Optional flag if the result is an error or an error message.
+ */
+ isError?: boolean;
+ }
+
+ /**
+ A message that can be used in the `messages` field of a prompt.
+ It can be a user message, an assistant message, or a tool message.
+ */
+ type CoreMessage = CoreUserMessage | CoreAssistantMessage | CoreToolMessage;
+ /**
+ A user message. It can contain text or a combination of text and images.
+ */
+ type CoreUserMessage = {
+ role: 'user';
+ content: UserContent;
+ };
+ /**
+ Content of a user message. It can be a string or an array of text and image parts.
+ */
+ type UserContent = string | Array<TextPart | ImagePart>;
+ /**
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
+ type CoreAssistantMessage = {
+ role: 'assistant';
+ content: AssistantContent;
+ };
+ /**
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
+ /**
+ A tool message. It contains the result of one or more tool calls.
+ */
+ type CoreToolMessage = {
+ role: 'tool';
+ content: ToolContent;
+ };
+ /**
+ Content of a tool message. It is an array of tool result parts.
+ */
+ type ToolContent = Array<ToolResultPart>;
+
+ /**
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
+ type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
+ system?: string;
+ /**
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: string;
+ /**
+ A list of messsages. You can either use `prompt` or `messages` but not both.
+ */
+ messages?: Array<CoreMessage>;
+ };
+
+ type Streamable = ReactNode | Promise<ReactNode>;
+ type Renderer<T extends Array<any>> = (...args: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
+ type RenderTool<PARAMETERS extends z.ZodTypeAny = any> = {
+ description?: string;
+ parameters: PARAMETERS;
+ generate?: Renderer<[
+ z.infer<PARAMETERS>,
+ {
+ toolName: string;
+ toolCallId: string;
+ }
+ ]>;
+ };
+ type RenderText = Renderer<[
+ {
+ /**
+ * The full text content from the model so far.
+ */
+ content: string;
+ /**
+ * The new appended text content from the model since the last `text` call.
+ */
+ delta: string;
+ /**
+ * Whether the model is done generating text.
+ * If `true`, the `content` will be the final output and this call will be the last.
+ */
+ done: boolean;
+ }
+ ]>;
+ type RenderResult = {
+ value: ReactNode;
+ } & Awaited<ReturnType<LanguageModelV1['doStream']>>;
+ /**
+ * `experimental_streamUI` is a helper function to create a streamable UI from LLMs.
+ */
+ declare function experimental_streamUI<TOOLS extends {
+ [name: string]: z.ZodTypeAny;
+ } = {}>({ model, tools, system, prompt, messages, maxRetries, abortSignal, initial, text, ...settings }: CallSettings & Prompt & {
+ /**
+ * The language model to use.
+ */
+ model: LanguageModelV1;
+ /**
+ * The tools that the model can call. The model needs to support calling tools.
+ */
+ tools?: {
+ [name in keyof TOOLS]: RenderTool<TOOLS[name]>;
+ };
+ text?: RenderText;
+ initial?: ReactNode;
+ }): Promise<RenderResult>;
+
+ declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
  actions: Actions;
  initialAIState?: AIState;
  initialUIState?: UIState;
- unstable_onSetAIState?: OnSetAIState<AIState>;
- unstable_onGetUIState?: OnGetUIState<UIState>;
+ /**
+ * This function is called whenever the AI state is updated by an Action.
+ * You can use this to persist the AI state to a database, or to send it to a
+ * logging service.
+ */
+ onSetAIState?: OnSetAIState<AIState>;
+ /**
+ * This function is used to retrieve the UI state based on the AI state.
+ * For example, to render the initial UI state based on a given AI state, or
+ * to sync the UI state when the application is already loaded.
+ *
+ * If returning `undefined`, the client side UI state will not be updated.
+ *
+ * This function must be annotated with the `"use server"` directive.
+ *
+ * @example
+ * ```tsx
+ * onGetUIState: async () => {
+ * 'use server';
+ *
+ * const currentAIState = getAIState();
+ * const externalAIState = await loadAIStateFromDatabase();
+ *
+ * if (currentAIState === externalAIState) return undefined;
+ *
+ * // Update current AI state and return the new UI state
+ * const state = getMutableAIState()
+ * state.done(externalAIState)
+ *
+ * return <div>...</div>;
+ * }
+ * ```
+ */
+ onGetUIState?: OnGetUIState<UIState>;
  }): AIProvider<AIState, UIState, Actions>;

- export { createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, render };
+ export { createAI, createStreamableUI, createStreamableValue, experimental_streamUI, getAIState, getMutableAIState, render };
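Taken together, the new `CallSettings`, `Prompt`, `RenderTool`, and `RenderText` types describe the options bag accepted by `experimental_streamUI`. A hedged end-to-end sketch; the `@ai-sdk/openai` provider import and the model id are assumptions not shown in this diff, and any `LanguageModelV1` implementation should satisfy the declared `model` field:

```tsx
// Server Action sketch that streams UI from a tool-calling model via experimental_streamUI.
'use server';

import { experimental_streamUI } from 'ai/rsc'; // assumed entry point
import { openai } from '@ai-sdk/openai';        // assumed LanguageModelV1 provider
import { z } from 'zod';

export async function showWeather(city: string) {
  const result = await experimental_streamUI({
    model: openai('gpt-4-turbo'),               // assumed model id
    prompt: `What is the weather in ${city}?`,
    initial: <div>Loading...</div>,
    // RenderText receives { content, delta, done } as declared above.
    text: ({ content, done }) => <p>{done ? content : `${content}...`}</p>,
    tools: {
      getWeather: {
        description: 'Get the weather for a city',
        parameters: z.object({ city: z.string() }),
        // RenderTool.generate receives (args, { toolName, toolCallId }).
        generate: async function* (args, { toolCallId }) {
          yield <div data-tool-call={toolCallId}>Checking the weather for {args.city}...</div>;
          return <div>It is sunny in {args.city}.</div>; // final UI node for this tool call
        },
      },
    },
  });

  // RenderResult.value is the ReactNode to hand back to the client.
  return result.value;
}
```

Since `RenderResult` also spreads `Awaited<ReturnType<LanguageModelV1['doStream']>>`, the resolved object exposes the underlying stream fields alongside `value`.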