ai 3.0.33 → 3.0.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,7 @@ import * as react_jsx_runtime from 'react/jsx-runtime';
2
2
  import { ReactNode } from 'react';
3
3
  import OpenAI from 'openai';
4
4
  import { z } from 'zod';
5
+ import { LanguageModelV1 } from '@ai-sdk/provider';
5
6
 
6
7
  type AIAction<T = any, R = any> = (...args: T[]) => Promise<R>;
7
8
  type AIActions<T = any, R = any> = Record<string, AIAction<T, R>>;
@@ -106,11 +107,17 @@ declare function createStreamableUI(initialValue?: React.ReactNode): {
106
107
  */
107
108
  done(...args: [] | [React.ReactNode]): void;
108
109
  };
110
+ declare const STREAMABLE_VALUE_INTERNAL_LOCK: unique symbol;
109
111
  /**
110
112
  * Create a wrapped, changeable value that can be streamed to the client.
111
113
  * On the client side, the value can be accessed via the readStreamableValue() API.
112
114
  */
113
- declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
115
+ declare function createStreamableValue<T = any, E = any>(initialValue?: T | ReadableStream<T>): {
116
+ /**
117
+ * @internal This is an internal lock to prevent the value from being
118
+ * updated by the user.
119
+ */
120
+ [STREAMABLE_VALUE_INTERNAL_LOCK]: boolean;
114
121
  /**
115
122
  * The value of the streamable. This can be returned from a Server Action and
116
123
  * received by the client. To read the streamed values, use the
@@ -121,15 +128,43 @@ declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
121
128
  * This method updates the current value with a new one.
122
129
  */
123
130
  update(value: T): void;
131
+ /**
132
+ * This method is used to append a delta string to the current value. It
133
+ * requires the current value of the streamable to be a string.
134
+ *
135
+ * @example
136
+ * ```jsx
137
+ * const streamable = createStreamableValue('hello');
138
+ * streamable.append(' world');
139
+ *
140
+ * // The value will be 'hello world'
141
+ * ```
142
+ */
143
+ append(value: T): void;
144
+ /**
145
+ * This method is used to signal that there is an error in the value stream.
146
+ * It will be thrown on the client side when consumed via
147
+ * `readStreamableValue` or `useStreamableValue`.
148
+ */
124
149
  error(error: any): void;
125
- done(...args: [
126
- ] | [T]): void;
150
+ /**
151
+ * This method marks the value as finalized. You can either call it without
152
+ * any parameters or with a new value as the final state.
153
+ * Once called, the value cannot be updated or appended anymore.
154
+ *
155
+ * This method is always **required** to be called, otherwise the response
156
+ * will be stuck in a loading state.
157
+ */
158
+ done(...args: [] | [T]): void;
127
159
  };
128
- type Streamable = ReactNode | Promise<ReactNode>;
129
- type Renderer<T> = (props: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
160
+
161
+ type Streamable$1 = ReactNode | Promise<ReactNode>;
162
+ type Renderer$1<T> = (props: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;
130
163
  /**
131
164
  * `render` is a helper function to create a streamable UI from some LLMs.
132
165
  * Currently, it only supports OpenAI's GPT models with Function Calling and Assistants Tools.
166
+ *
167
+ * @deprecated It's recommended to use the `experimental_streamUI` API for compatibility with the new core APIs.
133
168
  */
134
169
  declare function render<TS extends {
135
170
  [name: string]: z.Schema;
@@ -149,7 +184,7 @@ declare function render<TS extends {
149
184
  */
150
185
  provider: OpenAI;
151
186
  messages: Parameters<typeof OpenAI.prototype.chat.completions.create>[0]['messages'];
152
- text?: Renderer<{
187
+ text?: Renderer$1<{
153
188
  /**
154
189
  * The full text content from the model so far.
155
190
  */
@@ -168,20 +203,263 @@ declare function render<TS extends {
168
203
  [name in keyof TS]: {
169
204
  description?: string;
170
205
  parameters: TS[name];
171
- render: Renderer<z.infer<TS[name]>>;
206
+ render: Renderer$1<z.infer<TS[name]>>;
172
207
  };
173
208
  };
174
209
  functions?: {
175
210
  [name in keyof FS]: {
176
211
  description?: string;
177
212
  parameters: FS[name];
178
- render: Renderer<z.infer<FS[name]>>;
213
+ render: Renderer$1<z.infer<FS[name]>>;
179
214
  };
180
215
  };
181
216
  initial?: ReactNode;
182
217
  temperature?: number;
183
218
  }): ReactNode;
184
219
 
220
+ type CallSettings = {
221
+ /**
222
+ Maximum number of tokens to generate.
223
+ */
224
+ maxTokens?: number;
225
+ /**
226
+ Temperature setting. This is a number between 0 (almost no randomness) and
227
+ 1 (very random).
228
+
229
+ It is recommended to set either `temperature` or `topP`, but not both.
230
+
231
+ @default 0
232
+ */
233
+ temperature?: number;
234
+ /**
235
+ Nucleus sampling. This is a number between 0 and 1.
236
+
237
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
238
+ are considered.
239
+
240
+ It is recommended to set either `temperature` or `topP`, but not both.
241
+ */
242
+ topP?: number;
243
+ /**
244
+ Presence penalty setting. It affects the likelihood of the model to
245
+ repeat information that is already in the prompt.
246
+
247
+ The presence penalty is a number between -1 (increase repetition)
248
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
249
+
250
+ @default 0
251
+ */
252
+ presencePenalty?: number;
253
+ /**
254
+ Frequency penalty setting. It affects the likelihood of the model
255
+ to repeatedly use the same words or phrases.
256
+
257
+ The frequency penalty is a number between -1 (increase repetition)
258
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
259
+
260
+ @default 0
261
+ */
262
+ frequencyPenalty?: number;
263
+ /**
264
+ The seed (integer) to use for random sampling. If set and supported
265
+ by the model, calls will generate deterministic results.
266
+ */
267
+ seed?: number;
268
+ /**
269
+ Maximum number of retries. Set to 0 to disable retries.
270
+
271
+ @default 2
272
+ */
273
+ maxRetries?: number;
274
+ /**
275
+ Abort signal.
276
+ */
277
+ abortSignal?: AbortSignal;
278
+ };
279
+
280
+ /**
281
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
282
+ */
283
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
284
+
285
+ /**
286
+ Text content part of a prompt. It contains a string of text.
287
+ */
288
+ interface TextPart {
289
+ type: 'text';
290
+ /**
291
+ The text content.
292
+ */
293
+ text: string;
294
+ }
295
+ /**
296
+ Image content part of a prompt. It contains an image.
297
+ */
298
+ interface ImagePart {
299
+ type: 'image';
300
+ /**
301
+ Image data. Can either be:
302
+
303
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
304
+ - URL: a URL that points to the image
305
+ */
306
+ image: DataContent | URL;
307
+ /**
308
+ Optional mime type of the image.
309
+ */
310
+ mimeType?: string;
311
+ }
312
+ /**
313
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
314
+ */
315
+ interface ToolCallPart {
316
+ type: 'tool-call';
317
+ /**
318
+ ID of the tool call. This ID is used to match the tool call with the tool result.
319
+ */
320
+ toolCallId: string;
321
+ /**
322
+ Name of the tool that is being called.
323
+ */
324
+ toolName: string;
325
+ /**
326
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
327
+ */
328
+ args: unknown;
329
+ }
330
+ /**
331
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
332
+ */
333
+ interface ToolResultPart {
334
+ type: 'tool-result';
335
+ /**
336
+ ID of the tool call that this result is associated with.
337
+ */
338
+ toolCallId: string;
339
+ /**
340
+ Name of the tool that generated this result.
341
+ */
342
+ toolName: string;
343
+ /**
344
+ Result of the tool call. This is a JSON-serializable object.
345
+ */
346
+ result: unknown;
347
+ /**
348
+ Optional flag if the result is an error or an error message.
349
+ */
350
+ isError?: boolean;
351
+ }
352
+
353
+ /**
354
+ A message that can be used in the `messages` field of a prompt.
355
+ It can be a user message, an assistant message, or a tool message.
356
+ */
357
+ type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
358
+ /**
359
+ A user message. It can contain text or a combination of text and images.
360
+ */
361
+ type ExperimentalUserMessage = {
362
+ role: 'user';
363
+ content: UserContent;
364
+ };
365
+ /**
366
+ Content of a user message. It can be a string or an array of text and image parts.
367
+ */
368
+ type UserContent = string | Array<TextPart | ImagePart>;
369
+ /**
370
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
371
+ */
372
+ type ExperimentalAssistantMessage = {
373
+ role: 'assistant';
374
+ content: AssistantContent;
375
+ };
376
+ /**
377
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
378
+ */
379
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
380
+ /**
381
+ A tool message. It contains the result of one or more tool calls.
382
+ */
383
+ type ExperimentalToolMessage = {
384
+ role: 'tool';
385
+ content: ToolContent;
386
+ };
387
+ /**
388
+ Content of a tool message. It is an array of tool result parts.
389
+ */
390
+ type ToolContent = Array<ToolResultPart>;
391
+
392
+ /**
393
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
394
+ */
395
+ type Prompt = {
396
+ /**
397
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
398
+ */
399
+ system?: string;
400
+ /**
401
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
402
+ */
403
+ prompt?: string;
404
+ /**
405
 + A list of messages. You can either use `prompt` or `messages` but not both.
406
+ */
407
+ messages?: Array<ExperimentalMessage>;
408
+ };
409
+
410
+ type Streamable = ReactNode | Promise<ReactNode>;
411
+ type Renderer<T extends Array<any>> = (...args: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
412
+ type RenderTool<PARAMETERS extends z.ZodTypeAny = any> = {
413
+ description?: string;
414
+ parameters: PARAMETERS;
415
+ generate?: Renderer<[
416
+ z.infer<PARAMETERS>,
417
+ {
418
+ toolName: string;
419
+ toolCallId: string;
420
+ }
421
+ ]>;
422
+ };
423
+ type RenderText = Renderer<[
424
+ {
425
+ /**
426
+ * The full text content from the model so far.
427
+ */
428
+ content: string;
429
+ /**
430
+ * The new appended text content from the model since the last `text` call.
431
+ */
432
+ delta: string;
433
+ /**
434
+ * Whether the model is done generating text.
435
+ * If `true`, the `content` will be the final output and this call will be the last.
436
+ */
437
+ done: boolean;
438
+ }
439
+ ]>;
440
+ type RenderResult = {
441
+ value: ReactNode;
442
+ } & Awaited<ReturnType<LanguageModelV1['doStream']>>;
443
+ /**
444
+ * `experimental_streamUI` is a helper function to create a streamable UI from LLMs.
445
+ */
446
+ declare function experimental_streamUI<TOOLS extends {
447
+ [name: string]: z.ZodTypeAny;
448
+ } = {}>({ model, tools, system, prompt, messages, maxRetries, abortSignal, initial, text, ...settings }: CallSettings & Prompt & {
449
+ /**
450
+ * The language model to use.
451
+ */
452
+ model: LanguageModelV1;
453
+ /**
454
+ * The tools that the model can call. The model needs to support calling tools.
455
+ */
456
+ tools?: {
457
+ [name in keyof TOOLS]: RenderTool<TOOLS[name]>;
458
+ };
459
+ text?: RenderText;
460
+ initial?: ReactNode;
461
+ }): Promise<RenderResult>;
462
+
185
463
  declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
186
464
  actions: Actions;
187
465
  initialAIState?: AIState;
@@ -222,4 +500,4 @@ declare function createAI<AIState = any, UIState = any, Actions extends AIAction
222
500
  onGetUIState?: OnGetUIState<UIState>;
223
501
  }): AIProvider<AIState, UIState, Actions>;
224
502
 
225
- export { createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, render };
503
+ export { createAI, createStreamableUI, createStreamableValue, experimental_streamUI, getAIState, getMutableAIState, render };