ai 3.0.33 → 3.0.35

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "ai",
- "version": "3.0.33",
+ "version": "3.0.35",
  "license": "Apache-2.0",
  "sideEffects": false,
  "main": "./dist/index.js",
@@ -57,8 +57,8 @@
  }
  },
  "dependencies": {
- "@ai-sdk/provider": "0.0.2",
- "@ai-sdk/provider-utils": "0.0.4",
+ "@ai-sdk/provider": "0.0.3",
+ "@ai-sdk/provider-utils": "0.0.5",
  "secure-json-parse": "2.7.0",
  "eventsource-parser": "1.1.2",
  "jsondiffpatch": "0.6.0",
@@ -2,6 +2,7 @@ import * as react_jsx_runtime from 'react/jsx-runtime';
  import { ReactNode } from 'react';
  import OpenAI from 'openai';
  import { z } from 'zod';
+ import { LanguageModelV1 } from '@ai-sdk/provider';

  type AIAction<T = any, R = any> = (...args: T[]) => Promise<R>;
  type AIActions<T = any, R = any> = Record<string, AIAction<T, R>>;
@@ -108,11 +109,17 @@ declare function createStreamableUI(initialValue?: React.ReactNode): {
  */
  done(...args: [] | [React.ReactNode]): void;
  };
+ declare const STREAMABLE_VALUE_INTERNAL_LOCK: unique symbol;
  /**
  * Create a wrapped, changable value that can be streamed to the client.
  * On the client side, the value can be accessed via the readStreamableValue() API.
  */
- declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
+ declare function createStreamableValue<T = any, E = any>(initialValue?: T | ReadableStream<T>): {
+ /**
+ * @internal This is an internal lock to prevent the value from being
+ * updated by the user.
+ */
+ [STREAMABLE_VALUE_INTERNAL_LOCK]: boolean;
  /**
  * The value of the streamable. This can be returned from a Server Action and
  * received by the client. To read the streamed values, use the
@@ -123,15 +130,43 @@ declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
  * This method updates the current value with a new one.
  */
  update(value: T): void;
+ /**
+ * This method is used to append a delta string to the current value. It
+ * requires the current value of the streamable to be a string.
+ *
+ * @example
+ * ```jsx
+ * const streamable = createStreamableValue('hello');
+ * streamable.append(' world');
+ *
+ * // The value will be 'hello world'
+ * ```
+ */
+ append(value: T): void;
+ /**
+ * This method is used to signal that there is an error in the value stream.
+ * It will be thrown on the client side when consumed via
+ * `readStreamableValue` or `useStreamableValue`.
+ */
  error(error: any): void;
- done(...args: [
- ] | [T]): void;
+ /**
+ * This method marks the value as finalized. You can either call it without
+ * any parameters or with a new value as the final state.
+ * Once called, the value cannot be updated or appended anymore.
+ *
+ * This method is always **required** to be called, otherwise the response
+ * will be stuck in a loading state.
+ */
+ done(...args: [] | [T]): void;
  };
- type Streamable = ReactNode | Promise<ReactNode>;
- type Renderer<T> = (props: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
+
+ type Streamable$1 = ReactNode | Promise<ReactNode>;
+ type Renderer$1<T> = (props: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;
  /**
  * `render` is a helper function to create a streamable UI from some LLMs.
  * Currently, it only supports OpenAI's GPT models with Function Calling and Assistants Tools.
+ *
+ * @deprecated It's recommended to use the `experimental_streamUI` API for compatibility with the new core APIs.
  */
  declare function render<TS extends {
  [name: string]: z.Schema;
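The reworked `createStreamableValue` surface above (an initial value or a `ReadableStream`, plus `update`, `append`, `error`, and the now-mandatory `done`) is easiest to see from a Server Action. The sketch below is illustrative only and derived from these declarations; the action name `streamGreeting` is hypothetical.

```tsx
'use server';

import { createStreamableValue } from 'ai/rsc';

export async function streamGreeting() {
  // Starts from a plain string; a ReadableStream<string> would also be accepted now.
  const streamable = createStreamableValue('hello');

  (async () => {
    streamable.append(' world');      // value becomes 'hello world'
    streamable.update('hello there'); // replaces the value entirely
    streamable.done('hello there!');  // required, otherwise the client stays in a loading state
  })();

  // The client consumes this via readStreamableValue(), per the JSDoc above.
  return streamable.value;
}
```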
@@ -151,7 +186,7 @@ declare function render<TS extends {
  */
  provider: OpenAI;
  messages: Parameters<typeof OpenAI.prototype.chat.completions.create>[0]['messages'];
- text?: Renderer<{
+ text?: Renderer$1<{
  /**
  * The full text content from the model so far.
  */
@@ -170,20 +205,263 @@ declare function render<TS extends {
  [name in keyof TS]: {
  description?: string;
  parameters: TS[name];
- render: Renderer<z.infer<TS[name]>>;
+ render: Renderer$1<z.infer<TS[name]>>;
  };
  };
  functions?: {
  [name in keyof FS]: {
  description?: string;
  parameters: FS[name];
- render: Renderer<z.infer<FS[name]>>;
+ render: Renderer$1<z.infer<FS[name]>>;
  };
  };
  initial?: ReactNode;
  temperature?: number;
  }): ReactNode;

+ type CallSettings = {
+ /**
+ Maximum number of tokens to generate.
+ */
+ maxTokens?: number;
+ /**
+ Temperature setting. This is a number between 0 (almost no randomness) and
+ 1 (very random).
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+
+ @default 0
+ */
+ temperature?: number;
+ /**
+ Nucleus sampling. This is a number between 0 and 1.
+
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
+ are considered.
+
+ It is recommended to set either `temperature` or `topP`, but not both.
+ */
+ topP?: number;
+ /**
+ Presence penalty setting. It affects the likelihood of the model to
+ repeat information that is already in the prompt.
+
+ The presence penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
+ */
+ presencePenalty?: number;
+ /**
+ Frequency penalty setting. It affects the likelihood of the model
+ to repeatedly use the same words or phrases.
+
+ The frequency penalty is a number between -1 (increase repetition)
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
+
+ @default 0
+ */
+ frequencyPenalty?: number;
+ /**
+ The seed (integer) to use for random sampling. If set and supported
+ by the model, calls will generate deterministic results.
+ */
+ seed?: number;
+ /**
+ Maximum number of retries. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ };
+
+ /**
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+ */
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+
+ /**
+ Text content part of a prompt. It contains a string of text.
+ */
+ interface TextPart {
+ type: 'text';
+ /**
+ The text content.
+ */
+ text: string;
+ }
+ /**
+ Image content part of a prompt. It contains an image.
+ */
+ interface ImagePart {
+ type: 'image';
+ /**
+ Image data. Can either be:
+
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
+ - URL: a URL that points to the image
+ */
+ image: DataContent | URL;
+ /**
+ Optional mime type of the image.
+ */
+ mimeType?: string;
+ }
+ /**
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
+ */
+ interface ToolCallPart {
+ type: 'tool-call';
+ /**
+ ID of the tool call. This ID is used to match the tool call with the tool result.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that is being called.
+ */
+ toolName: string;
+ /**
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
+ */
+ args: unknown;
+ }
+ /**
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
+ */
+ interface ToolResultPart {
+ type: 'tool-result';
+ /**
+ ID of the tool call that this result is associated with.
+ */
+ toolCallId: string;
+ /**
+ Name of the tool that generated this result.
+ */
+ toolName: string;
+ /**
+ Result of the tool call. This is a JSON-serializable object.
+ */
+ result: unknown;
+ /**
+ Optional flag if the result is an error or an error message.
+ */
+ isError?: boolean;
+ }
+
+ /**
+ A message that can be used in the `messages` field of a prompt.
+ It can be a user message, an assistant message, or a tool message.
+ */
+ type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
+ /**
+ A user message. It can contain text or a combination of text and images.
+ */
+ type ExperimentalUserMessage = {
+ role: 'user';
+ content: UserContent;
+ };
+ /**
+ Content of a user message. It can be a string or an array of text and image parts.
+ */
+ type UserContent = string | Array<TextPart | ImagePart>;
+ /**
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
+ */
+ type ExperimentalAssistantMessage = {
+ role: 'assistant';
+ content: AssistantContent;
+ };
+ /**
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
+ */
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
+ /**
+ A tool message. It contains the result of one or more tool calls.
+ */
+ type ExperimentalToolMessage = {
+ role: 'tool';
+ content: ToolContent;
+ };
+ /**
+ Content of a tool message. It is an array of tool result parts.
+ */
+ type ToolContent = Array<ToolResultPart>;
+
+ /**
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
+ */
+ type Prompt = {
+ /**
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
+ */
+ system?: string;
+ /**
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
+ */
+ prompt?: string;
+ /**
+ A list of messsages. You can either use `prompt` or `messages` but not both.
+ */
+ messages?: Array<ExperimentalMessage>;
+ };
+
+ type Streamable = ReactNode | Promise<ReactNode>;
+ type Renderer<T extends Array<any>> = (...args: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
+ type RenderTool<PARAMETERS extends z.ZodTypeAny = any> = {
+ description?: string;
+ parameters: PARAMETERS;
+ generate?: Renderer<[
+ z.infer<PARAMETERS>,
+ {
+ toolName: string;
+ toolCallId: string;
+ }
+ ]>;
+ };
+ type RenderText = Renderer<[
+ {
+ /**
+ * The full text content from the model so far.
+ */
+ content: string;
+ /**
+ * The new appended text content from the model since the last `text` call.
+ */
+ delta: string;
+ /**
+ * Whether the model is done generating text.
+ * If `true`, the `content` will be the final output and this call will be the last.
+ */
+ done: boolean;
+ }
+ ]>;
+ type RenderResult = {
+ value: ReactNode;
+ } & Awaited<ReturnType<LanguageModelV1['doStream']>>;
+ /**
+ * `experimental_streamUI` is a helper function to create a streamable UI from LLMs.
+ */
+ declare function experimental_streamUI<TOOLS extends {
+ [name: string]: z.ZodTypeAny;
+ } = {}>({ model, tools, system, prompt, messages, maxRetries, abortSignal, initial, text, ...settings }: CallSettings & Prompt & {
+ /**
+ * The language model to use.
+ */
+ model: LanguageModelV1;
+ /**
+ * The tools that the model can call. The model needs to support calling tools.
+ */
+ tools?: {
+ [name in keyof TOOLS]: RenderTool<TOOLS[name]>;
+ };
+ text?: RenderText;
+ initial?: ReactNode;
+ }): Promise<RenderResult>;
+
  declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
  actions: Actions;
  initialAIState?: AIState;
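The prompt types added in this hunk (`ExperimentalMessage`, the text/image content parts, and the tool call/result parts) describe the shape that the `messages` field expects. Whether these types are re-exported is not shown in the diff, so the sketch below uses plain object literals; the tool name, call IDs, and image URL are made up for illustration.

```ts
const messages = [
  {
    role: 'user' as const,
    content: [
      { type: 'text' as const, text: 'What is the weather in this picture?' },
      // image accepts a base64 string, Uint8Array, ArrayBuffer, Buffer, or a URL
      { type: 'image' as const, image: new URL('https://example.com/photo.png') },
    ],
  },
  {
    role: 'assistant' as const,
    content: [
      { type: 'tool-call' as const, toolCallId: 'call_1', toolName: 'getWeather', args: { city: 'Berlin' } },
    ],
  },
  {
    role: 'tool' as const,
    content: [
      { type: 'tool-result' as const, toolCallId: 'call_1', toolName: 'getWeather', result: { tempC: 18 } },
    ],
  },
];
```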
@@ -286,4 +564,4 @@ declare function useAIState<AI extends AIProvider = any>(key: keyof InferAIState
  declare function useActions<AI extends AIProvider = any>(): InferActions<AI, any>;
  declare function useSyncUIState(): () => Promise<void>;

- export { StreamableValue, createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, readStreamableValue, render, useAIState, useActions, useStreamableValue, useSyncUIState, useUIState };
+ export { StreamableValue, createAI, createStreamableUI, createStreamableValue, experimental_streamUI, getAIState, getMutableAIState, readStreamableValue, render, useAIState, useActions, useStreamableValue, useSyncUIState, useUIState };
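The export change above makes `experimental_streamUI` available alongside the deprecated `render`. The sketch below shows how the declared signature composes with the tool and text renderers and the `CallSettings` fields; it is based only on these declarations, and the `getWeather` tool, the `WeatherCard` component, and the `model` placeholder (any `LanguageModelV1` instance from a provider package) are assumptions, not part of this diff.

```tsx
'use server';

import { experimental_streamUI } from 'ai/rsc';
import type { LanguageModelV1 } from '@ai-sdk/provider';
import { z } from 'zod';

// Stand-in for a real provider model instance (e.g. from a provider package).
declare const model: LanguageModelV1;

function WeatherCard({ city, tempC }: { city: string; tempC: number }) {
  return <div>{city}: {tempC}°C</div>;
}

export async function askWeather(question: string) {
  const result = await experimental_streamUI({
    model,
    prompt: question,
    temperature: 0.5, // CallSettings fields are spread into the same options object
    // Plain text deltas are rendered as they stream in.
    text: ({ content, done }) => (done ? <p>{content}</p> : <p>{content}…</p>),
    tools: {
      getWeather: {
        description: 'Show the current weather for a city',
        parameters: z.object({ city: z.string() }),
        // The generate renderer receives the validated arguments (plus { toolName, toolCallId }).
        generate: ({ city }) => <WeatherCard city={city} tempC={18} />,
      },
    },
  });

  // RenderResult.value is the ReactNode to return to the client.
  return result.value;
}
```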