ai 3.0.33 → 3.0.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ai",
3
- "version": "3.0.33",
3
+ "version": "3.0.34",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -2,6 +2,7 @@ import * as react_jsx_runtime from 'react/jsx-runtime';
2
2
  import { ReactNode } from 'react';
3
3
  import OpenAI from 'openai';
4
4
  import { z } from 'zod';
5
+ import { LanguageModelV1 } from '@ai-sdk/provider';
5
6
 
6
7
  type AIAction<T = any, R = any> = (...args: T[]) => Promise<R>;
7
8
  type AIActions<T = any, R = any> = Record<string, AIAction<T, R>>;
@@ -127,11 +128,13 @@ declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
127
128
  done(...args: [
128
129
  ] | [T]): void;
129
130
  };
130
- type Streamable = ReactNode | Promise<ReactNode>;
131
- type Renderer<T> = (props: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
131
+ type Streamable$1 = ReactNode | Promise<ReactNode>;
132
+ type Renderer$1<T> = (props: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;
132
133
  /**
133
134
  * `render` is a helper function to create a streamable UI from some LLMs.
134
135
  * Currently, it only supports OpenAI's GPT models with Function Calling and Assistants Tools.
136
+ *
137
+ * @deprecated It's recommended to use the `experimental_streamUI` API for compatibility with the new core APIs.
135
138
  */
136
139
  declare function render<TS extends {
137
140
  [name: string]: z.Schema;
@@ -151,7 +154,7 @@ declare function render<TS extends {
151
154
  */
152
155
  provider: OpenAI;
153
156
  messages: Parameters<typeof OpenAI.prototype.chat.completions.create>[0]['messages'];
154
- text?: Renderer<{
157
+ text?: Renderer$1<{
155
158
  /**
156
159
  * The full text content from the model so far.
157
160
  */
@@ -170,20 +173,259 @@ declare function render<TS extends {
170
173
  [name in keyof TS]: {
171
174
  description?: string;
172
175
  parameters: TS[name];
173
- render: Renderer<z.infer<TS[name]>>;
176
+ render: Renderer$1<z.infer<TS[name]>>;
174
177
  };
175
178
  };
176
179
  functions?: {
177
180
  [name in keyof FS]: {
178
181
  description?: string;
179
182
  parameters: FS[name];
180
- render: Renderer<z.infer<FS[name]>>;
183
+ render: Renderer$1<z.infer<FS[name]>>;
181
184
  };
182
185
  };
183
186
  initial?: ReactNode;
184
187
  temperature?: number;
185
188
  }): ReactNode;
186
189
 
190
+ type CallSettings = {
191
+ /**
192
+ Maximum number of tokens to generate.
193
+ */
194
+ maxTokens?: number;
195
+ /**
196
+ Temperature setting. This is a number between 0 (almost no randomness) and
197
+ 1 (very random).
198
+
199
+ It is recommended to set either `temperature` or `topP`, but not both.
200
+
201
+ @default 0
202
+ */
203
+ temperature?: number;
204
+ /**
205
+ Nucleus sampling. This is a number between 0 and 1.
206
+
207
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
208
+ are considered.
209
+
210
+ It is recommended to set either `temperature` or `topP`, but not both.
211
+ */
212
+ topP?: number;
213
+ /**
214
+ Presence penalty setting. It affects the likelihood of the model to
215
+ repeat information that is already in the prompt.
216
+
217
+ The presence penalty is a number between -1 (increase repetition)
218
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
219
+
220
+ @default 0
221
+ */
222
+ presencePenalty?: number;
223
+ /**
224
+ Frequency penalty setting. It affects the likelihood of the model
225
+ to repeatedly use the same words or phrases.
226
+
227
+ The frequency penalty is a number between -1 (increase repetition)
228
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
229
+
230
+ @default 0
231
+ */
232
+ frequencyPenalty?: number;
233
+ /**
234
+ The seed (integer) to use for random sampling. If set and supported
235
+ by the model, calls will generate deterministic results.
236
+ */
237
+ seed?: number;
238
+ /**
239
+ Maximum number of retries. Set to 0 to disable retries.
240
+
241
+ @default 2
242
+ */
243
+ maxRetries?: number;
244
+ /**
245
+ Abort signal.
246
+ */
247
+ abortSignal?: AbortSignal;
248
+ };
249
+
250
+ /**
251
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
252
+ */
253
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
254
+
255
+ /**
256
+ Text content part of a prompt. It contains a string of text.
257
+ */
258
+ interface TextPart {
259
+ type: 'text';
260
+ /**
261
+ The text content.
262
+ */
263
+ text: string;
264
+ }
265
+ /**
266
+ Image content part of a prompt. It contains an image.
267
+ */
268
+ interface ImagePart {
269
+ type: 'image';
270
+ /**
271
+ Image data. Can either be:
272
+
273
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
274
+ - URL: a URL that points to the image
275
+ */
276
+ image: DataContent | URL;
277
+ /**
278
+ Optional mime type of the image.
279
+ */
280
+ mimeType?: string;
281
+ }
282
+ /**
283
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
284
+ */
285
+ interface ToolCallPart {
286
+ type: 'tool-call';
287
+ /**
288
+ ID of the tool call. This ID is used to match the tool call with the tool result.
289
+ */
290
+ toolCallId: string;
291
+ /**
292
+ Name of the tool that is being called.
293
+ */
294
+ toolName: string;
295
+ /**
296
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
297
+ */
298
+ args: unknown;
299
+ }
300
+ /**
301
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
302
+ */
303
+ interface ToolResultPart {
304
+ type: 'tool-result';
305
+ /**
306
+ ID of the tool call that this result is associated with.
307
+ */
308
+ toolCallId: string;
309
+ /**
310
+ Name of the tool that generated this result.
311
+ */
312
+ toolName: string;
313
+ /**
314
+ Result of the tool call. This is a JSON-serializable object.
315
+ */
316
+ result: unknown;
317
+ /**
318
+ Optional flag if the result is an error or an error message.
319
+ */
320
+ isError?: boolean;
321
+ }
322
+
323
+ /**
324
+ A message that can be used in the `messages` field of a prompt.
325
+ It can be a user message, an assistant message, or a tool message.
326
+ */
327
+ type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
328
+ /**
329
+ A user message. It can contain text or a combination of text and images.
330
+ */
331
+ type ExperimentalUserMessage = {
332
+ role: 'user';
333
+ content: UserContent;
334
+ };
335
+ /**
336
+ Content of a user message. It can be a string or an array of text and image parts.
337
+ */
338
+ type UserContent = string | Array<TextPart | ImagePart>;
339
+ /**
340
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
341
+ */
342
+ type ExperimentalAssistantMessage = {
343
+ role: 'assistant';
344
+ content: AssistantContent;
345
+ };
346
+ /**
347
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
348
+ */
349
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
350
+ /**
351
+ A tool message. It contains the result of one or more tool calls.
352
+ */
353
+ type ExperimentalToolMessage = {
354
+ role: 'tool';
355
+ content: ToolContent;
356
+ };
357
+ /**
358
+ Content of a tool message. It is an array of tool result parts.
359
+ */
360
+ type ToolContent = Array<ToolResultPart>;
361
+
362
+ /**
363
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
364
+ */
365
+ type Prompt = {
366
+ /**
367
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
368
+ */
369
+ system?: string;
370
+ /**
371
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
372
+ */
373
+ prompt?: string;
374
+ /**
375
+ A list of messages. You can either use `prompt` or `messages` but not both.
376
+ */
377
+ messages?: Array<ExperimentalMessage>;
378
+ };
379
+
380
+ type Streamable = ReactNode | Promise<ReactNode>;
381
+ type Renderer<T extends Array<any>> = (...args: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
382
+ type RenderTool<PARAMETERS extends z.ZodTypeAny = any> = {
383
+ description?: string;
384
+ parameters: PARAMETERS;
385
+ generate?: Renderer<[
386
+ z.infer<PARAMETERS>,
387
+ {
388
+ toolName: string;
389
+ toolCallId: string;
390
+ }
391
+ ]>;
392
+ };
393
+ type RenderText = Renderer<[
394
+ {
395
+ /**
396
+ * The full text content from the model so far.
397
+ */
398
+ content: string;
399
+ /**
400
+ * The new appended text content from the model since the last `text` call.
401
+ */
402
+ delta: string;
403
+ /**
404
+ * Whether the model is done generating text.
405
+ * If `true`, the `content` will be the final output and this call will be the last.
406
+ */
407
+ done: boolean;
408
+ }
409
+ ]>;
410
+ type RenderResult = {
411
+ value: ReactNode;
412
+ } & Awaited<ReturnType<LanguageModelV1['doStream']>>;
413
+ /**
414
+ * `experimental_streamUI` is a helper function to create a streamable UI from LLMs.
415
+ */
416
+ declare function experimental_streamUI<TOOLS extends Record<string, RenderTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, initial, text, ...settings }: CallSettings & Prompt & {
417
+ /**
418
+ * The language model to use.
419
+ */
420
+ model: LanguageModelV1;
421
+ /**
422
+ * The tools that the model can call. The model needs to support calling tools.
423
+ */
424
+ tools?: TOOLS;
425
+ text?: RenderText;
426
+ initial?: ReactNode;
427
+ }): Promise<RenderResult>;
428
+
187
429
  declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
188
430
  actions: Actions;
189
431
  initialAIState?: AIState;
@@ -286,4 +528,4 @@ declare function useAIState<AI extends AIProvider = any>(key: keyof InferAIState
286
528
  declare function useActions<AI extends AIProvider = any>(): InferActions<AI, any>;
287
529
  declare function useSyncUIState(): () => Promise<void>;
288
530
 
289
- export { StreamableValue, createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, readStreamableValue, render, useAIState, useActions, useStreamableValue, useSyncUIState, useUIState };
531
+ export { StreamableValue, createAI, createStreamableUI, createStreamableValue, experimental_streamUI, getAIState, getMutableAIState, readStreamableValue, render, useAIState, useActions, useStreamableValue, useSyncUIState, useUIState };
@@ -2,6 +2,7 @@ import * as react_jsx_runtime from 'react/jsx-runtime';
2
2
  import { ReactNode } from 'react';
3
3
  import OpenAI from 'openai';
4
4
  import { z } from 'zod';
5
+ import { LanguageModelV1 } from '@ai-sdk/provider';
5
6
 
6
7
  type AIAction<T = any, R = any> = (...args: T[]) => Promise<R>;
7
8
  type AIActions<T = any, R = any> = Record<string, AIAction<T, R>>;
@@ -125,11 +126,13 @@ declare function createStreamableValue<T = any, E = any>(initialValue?: T): {
125
126
  done(...args: [
126
127
  ] | [T]): void;
127
128
  };
128
- type Streamable = ReactNode | Promise<ReactNode>;
129
- type Renderer<T> = (props: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
129
+ type Streamable$1 = ReactNode | Promise<ReactNode>;
130
+ type Renderer$1<T> = (props: T) => Streamable$1 | Generator<Streamable$1, Streamable$1, void> | AsyncGenerator<Streamable$1, Streamable$1, void>;
130
131
  /**
131
132
  * `render` is a helper function to create a streamable UI from some LLMs.
132
133
  * Currently, it only supports OpenAI's GPT models with Function Calling and Assistants Tools.
134
+ *
135
+ * @deprecated It's recommended to use the `experimental_streamUI` API for compatibility with the new core APIs.
133
136
  */
134
137
  declare function render<TS extends {
135
138
  [name: string]: z.Schema;
@@ -149,7 +152,7 @@ declare function render<TS extends {
149
152
  */
150
153
  provider: OpenAI;
151
154
  messages: Parameters<typeof OpenAI.prototype.chat.completions.create>[0]['messages'];
152
- text?: Renderer<{
155
+ text?: Renderer$1<{
153
156
  /**
154
157
  * The full text content from the model so far.
155
158
  */
@@ -168,20 +171,259 @@ declare function render<TS extends {
168
171
  [name in keyof TS]: {
169
172
  description?: string;
170
173
  parameters: TS[name];
171
- render: Renderer<z.infer<TS[name]>>;
174
+ render: Renderer$1<z.infer<TS[name]>>;
172
175
  };
173
176
  };
174
177
  functions?: {
175
178
  [name in keyof FS]: {
176
179
  description?: string;
177
180
  parameters: FS[name];
178
- render: Renderer<z.infer<FS[name]>>;
181
+ render: Renderer$1<z.infer<FS[name]>>;
179
182
  };
180
183
  };
181
184
  initial?: ReactNode;
182
185
  temperature?: number;
183
186
  }): ReactNode;
184
187
 
188
+ type CallSettings = {
189
+ /**
190
+ Maximum number of tokens to generate.
191
+ */
192
+ maxTokens?: number;
193
+ /**
194
+ Temperature setting. This is a number between 0 (almost no randomness) and
195
+ 1 (very random).
196
+
197
+ It is recommended to set either `temperature` or `topP`, but not both.
198
+
199
+ @default 0
200
+ */
201
+ temperature?: number;
202
+ /**
203
+ Nucleus sampling. This is a number between 0 and 1.
204
+
205
+ E.g. 0.1 would mean that only tokens with the top 10% probability mass
206
+ are considered.
207
+
208
+ It is recommended to set either `temperature` or `topP`, but not both.
209
+ */
210
+ topP?: number;
211
+ /**
212
+ Presence penalty setting. It affects the likelihood of the model to
213
+ repeat information that is already in the prompt.
214
+
215
+ The presence penalty is a number between -1 (increase repetition)
216
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
217
+
218
+ @default 0
219
+ */
220
+ presencePenalty?: number;
221
+ /**
222
+ Frequency penalty setting. It affects the likelihood of the model
223
+ to repeatedly use the same words or phrases.
224
+
225
+ The frequency penalty is a number between -1 (increase repetition)
226
+ and 1 (maximum penalty, decrease repetition). 0 means no penalty.
227
+
228
+ @default 0
229
+ */
230
+ frequencyPenalty?: number;
231
+ /**
232
+ The seed (integer) to use for random sampling. If set and supported
233
+ by the model, calls will generate deterministic results.
234
+ */
235
+ seed?: number;
236
+ /**
237
+ Maximum number of retries. Set to 0 to disable retries.
238
+
239
+ @default 2
240
+ */
241
+ maxRetries?: number;
242
+ /**
243
+ Abort signal.
244
+ */
245
+ abortSignal?: AbortSignal;
246
+ };
247
+
248
+ /**
249
+ Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
250
+ */
251
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
252
+
253
+ /**
254
+ Text content part of a prompt. It contains a string of text.
255
+ */
256
+ interface TextPart {
257
+ type: 'text';
258
+ /**
259
+ The text content.
260
+ */
261
+ text: string;
262
+ }
263
+ /**
264
+ Image content part of a prompt. It contains an image.
265
+ */
266
+ interface ImagePart {
267
+ type: 'image';
268
+ /**
269
+ Image data. Can either be:
270
+
271
+ - data: a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer
272
+ - URL: a URL that points to the image
273
+ */
274
+ image: DataContent | URL;
275
+ /**
276
+ Optional mime type of the image.
277
+ */
278
+ mimeType?: string;
279
+ }
280
+ /**
281
+ Tool call content part of a prompt. It contains a tool call (usually generated by the AI model).
282
+ */
283
+ interface ToolCallPart {
284
+ type: 'tool-call';
285
+ /**
286
+ ID of the tool call. This ID is used to match the tool call with the tool result.
287
+ */
288
+ toolCallId: string;
289
+ /**
290
+ Name of the tool that is being called.
291
+ */
292
+ toolName: string;
293
+ /**
294
+ Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.
295
+ */
296
+ args: unknown;
297
+ }
298
+ /**
299
+ Tool result content part of a prompt. It contains the result of the tool call with the matching ID.
300
+ */
301
+ interface ToolResultPart {
302
+ type: 'tool-result';
303
+ /**
304
+ ID of the tool call that this result is associated with.
305
+ */
306
+ toolCallId: string;
307
+ /**
308
+ Name of the tool that generated this result.
309
+ */
310
+ toolName: string;
311
+ /**
312
+ Result of the tool call. This is a JSON-serializable object.
313
+ */
314
+ result: unknown;
315
+ /**
316
+ Optional flag if the result is an error or an error message.
317
+ */
318
+ isError?: boolean;
319
+ }
320
+
321
+ /**
322
+ A message that can be used in the `messages` field of a prompt.
323
+ It can be a user message, an assistant message, or a tool message.
324
+ */
325
+ type ExperimentalMessage = ExperimentalUserMessage | ExperimentalAssistantMessage | ExperimentalToolMessage;
326
+ /**
327
+ A user message. It can contain text or a combination of text and images.
328
+ */
329
+ type ExperimentalUserMessage = {
330
+ role: 'user';
331
+ content: UserContent;
332
+ };
333
+ /**
334
+ Content of a user message. It can be a string or an array of text and image parts.
335
+ */
336
+ type UserContent = string | Array<TextPart | ImagePart>;
337
+ /**
338
+ An assistant message. It can contain text, tool calls, or a combination of text and tool calls.
339
+ */
340
+ type ExperimentalAssistantMessage = {
341
+ role: 'assistant';
342
+ content: AssistantContent;
343
+ };
344
+ /**
345
+ Content of an assistant message. It can be a string or an array of text and tool call parts.
346
+ */
347
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
348
+ /**
349
+ A tool message. It contains the result of one or more tool calls.
350
+ */
351
+ type ExperimentalToolMessage = {
352
+ role: 'tool';
353
+ content: ToolContent;
354
+ };
355
+ /**
356
+ Content of a tool message. It is an array of tool result parts.
357
+ */
358
+ type ToolContent = Array<ToolResultPart>;
359
+
360
+ /**
361
+ Prompt part of the AI function options. It contains a system message, a simple text prompt, or a list of messages.
362
+ */
363
+ type Prompt = {
364
+ /**
365
+ System message to include in the prompt. Can be used with `prompt` or `messages`.
366
+ */
367
+ system?: string;
368
+ /**
369
+ A simple text prompt. You can either use `prompt` or `messages` but not both.
370
+ */
371
+ prompt?: string;
372
+ /**
373
+ A list of messages. You can either use `prompt` or `messages` but not both.
374
+ */
375
+ messages?: Array<ExperimentalMessage>;
376
+ };
377
+
378
+ type Streamable = ReactNode | Promise<ReactNode>;
379
+ type Renderer<T extends Array<any>> = (...args: T) => Streamable | Generator<Streamable, Streamable, void> | AsyncGenerator<Streamable, Streamable, void>;
380
+ type RenderTool<PARAMETERS extends z.ZodTypeAny = any> = {
381
+ description?: string;
382
+ parameters: PARAMETERS;
383
+ generate?: Renderer<[
384
+ z.infer<PARAMETERS>,
385
+ {
386
+ toolName: string;
387
+ toolCallId: string;
388
+ }
389
+ ]>;
390
+ };
391
+ type RenderText = Renderer<[
392
+ {
393
+ /**
394
+ * The full text content from the model so far.
395
+ */
396
+ content: string;
397
+ /**
398
+ * The new appended text content from the model since the last `text` call.
399
+ */
400
+ delta: string;
401
+ /**
402
+ * Whether the model is done generating text.
403
+ * If `true`, the `content` will be the final output and this call will be the last.
404
+ */
405
+ done: boolean;
406
+ }
407
+ ]>;
408
+ type RenderResult = {
409
+ value: ReactNode;
410
+ } & Awaited<ReturnType<LanguageModelV1['doStream']>>;
411
+ /**
412
+ * `experimental_streamUI` is a helper function to create a streamable UI from LLMs.
413
+ */
414
+ declare function experimental_streamUI<TOOLS extends Record<string, RenderTool>>({ model, tools, system, prompt, messages, maxRetries, abortSignal, initial, text, ...settings }: CallSettings & Prompt & {
415
+ /**
416
+ * The language model to use.
417
+ */
418
+ model: LanguageModelV1;
419
+ /**
420
+ * The tools that the model can call. The model needs to support calling tools.
421
+ */
422
+ tools?: TOOLS;
423
+ text?: RenderText;
424
+ initial?: ReactNode;
425
+ }): Promise<RenderResult>;
426
+
185
427
  declare function createAI<AIState = any, UIState = any, Actions extends AIActions = {}>({ actions, initialAIState, initialUIState, onSetAIState, onGetUIState, }: {
186
428
  actions: Actions;
187
429
  initialAIState?: AIState;
@@ -222,4 +464,4 @@ declare function createAI<AIState = any, UIState = any, Actions extends AIAction
222
464
  onGetUIState?: OnGetUIState<UIState>;
223
465
  }): AIProvider<AIState, UIState, Actions>;
224
466
 
225
- export { createAI, createStreamableUI, createStreamableValue, getAIState, getMutableAIState, render };
467
+ export { createAI, createStreamableUI, createStreamableValue, experimental_streamUI, getAIState, getMutableAIState, render };