ai 3.0.8 → 3.1.0-canary.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +10 -4
  2. package/core/dist/index.d.mts +480 -0
  3. package/core/dist/index.d.ts +480 -0
  4. package/core/dist/index.js +1528 -0
  5. package/core/dist/index.js.map +1 -0
  6. package/core/dist/index.mjs +1479 -0
  7. package/core/dist/index.mjs.map +1 -0
  8. package/package.json +20 -3
  9. package/provider/dist/chunk-3DTRVHCT.mjs +5046 -0
  10. package/provider/dist/chunk-3DTRVHCT.mjs.map +1 -0
  11. package/provider/dist/chunk-4OUDS3CP.mjs +30 -0
  12. package/provider/dist/chunk-4OUDS3CP.mjs.map +1 -0
  13. package/provider/dist/chunk-5IYCPJBV.mjs +56 -0
  14. package/provider/dist/chunk-5IYCPJBV.mjs.map +1 -0
  15. package/provider/dist/chunk-VB2TCVQ4.mjs +6746 -0
  16. package/provider/dist/chunk-VB2TCVQ4.mjs.map +1 -0
  17. package/provider/dist/chunk-VYIXVZ6L.mjs +317 -0
  18. package/provider/dist/chunk-VYIXVZ6L.mjs.map +1 -0
  19. package/provider/dist/chunk-WTOUHN6A.mjs +2251 -0
  20. package/provider/dist/chunk-WTOUHN6A.mjs.map +1 -0
  21. package/provider/dist/client-22WAAXR7.mjs +10 -0
  22. package/provider/dist/client-22WAAXR7.mjs.map +1 -0
  23. package/provider/dist/fileFromPath-23RINPB2.mjs +115 -0
  24. package/provider/dist/fileFromPath-23RINPB2.mjs.map +1 -0
  25. package/provider/dist/index.d.mts +387 -0
  26. package/provider/dist/index.d.ts +387 -0
  27. package/provider/dist/index.js +26487 -0
  28. package/provider/dist/index.js.map +1 -0
  29. package/provider/dist/index.mjs +8087 -0
  30. package/provider/dist/index.mjs.map +1 -0
  31. package/provider/dist/lib-BZMMM4HX.mjs +20 -0
  32. package/provider/dist/lib-BZMMM4HX.mjs.map +1 -0
  33. package/provider/dist/openai-3YL4AWLI.mjs +3451 -0
  34. package/provider/dist/openai-3YL4AWLI.mjs.map +1 -0
  35. package/rsc/dist/index.d.ts +4 -3
  36. package/rsc/dist/rsc-server.d.mts +3 -2
  37. package/rsc/dist/rsc-server.mjs.map +1 -1
package/README.md CHANGED
@@ -4,11 +4,11 @@ The Vercel AI SDK is **a library for building AI-powered streaming text and chat

 ## Features

- - React Server Components API for streaming Generative UI
- - [SWR](https://swr.vercel.app)-powered React, Svelte, Vue and Solid helpers for streaming text responses and building chat and completion UIs
- - First-class support for [LangChain](https://js.langchain.com/docs) and [OpenAI](https://openai.com), [Anthropic](https://www.anthropic.com), [Cohere](https://cohere.com), [Hugging Face](https://huggingface.co), [Fireworks](https://app.fireworks.ai) and [Replicate](https://replicate.com)
+ - React, Svelte, Vue and Solid helpers for streaming text responses and building chat and completion UIs
+ - React Server Components API for streaming [Generative UI](https://vercel.com/blog/ai-sdk-3-generative-ui)
+ - First-class support for [OpenAI](https://openai.com), [Anthropic](https://www.anthropic.com), [Mistral](https://mistral.ai), [Perplexity](https://perplexity.ai), [AWS Bedrock](https://aws.amazon.com/bedrock/), [Azure](https://ai.azure.com), [Google Gemini](https://ai.google.dev), [Hugging Face](https://huggingface.co), [Fireworks](https://app.fireworks.ai), [Cohere](https://cohere.com), [LangChain](https://js.langchain.com/docs), [Replicate](https://replicate.com), Ollama, and more.
  - Node.js, Serverless, and [Edge Runtime](https://edge-runtime.vercel.app/) support
- - Callbacks for saving completed streaming responses to a database (in the same request)
+ - Lifecycle callbacks for saving completed streaming responses to a database (in the same request)

 ## Installation

@@ -29,3 +29,9 @@ This library is created by [Vercel](https://vercel.com) and [Next.js](https://ne
 - Justin Ridgewell ([@jridgewell](https://github.com/jridgewell)) - [Vercel](https://vercel.com)

 [Contributors](https://github.com/vercel/ai/graphs/contributors)
+
+ ## Related: Deploy your own Next.js AI Chatbot
+
+ If you're looking for a full AI Chatbot application to jumpstart your AI journey, you should check out [our sister OSS AI Chatbot project](https://github.com/vercel/ai-chatbot) or click the button below to deploy your own to [Vercel](https://vercel.com).
+
+ [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/templates/Next.js/nextjs-ai-chatbot)
package/core/dist/index.d.mts ADDED
@@ -0,0 +1,480 @@
+ import { z } from 'zod';
+ import { PartialDeep, ValueOf } from 'type-fest';
+
+ type LanguageModelV1CallSettings = {
+     /**
+      * Maximum number of tokens to generate.
+      */
+     maxTokens?: number;
+     /**
+      * Temperature setting. This is a number between 0 (almost no randomness) and
+      * 1 (very random).
+      *
+      * Different LLM providers have different temperature
+      * scales, so they'd need to map it (without mapping, the same temperature has
+      * different effects on different models). The provider can also choose to map
+      * this to topP, potentially even using a custom setting on their model.
+      *
+      * Note: This is an example of a setting that requires a clear specification of
+      * the semantics.
+      */
+     temperature?: number;
+     /**
+      * Nucleus sampling. This is a number between 0 and 1.
+      *
+      * E.g. 0.1 would mean that only tokens with the top 10% probability mass
+      * are considered.
+      *
+      * It is recommended to set either `temperature` or `topP`, but not both.
+      */
+     topP?: number;
+     /**
+      * Presence penalty setting. This is a number between 0 (no penalty)
+      * and 1 (maximum penalty). It affects the likelihood of the model to repeat
+      * information that is already in the prompt.
+      */
+     presencePenalty?: number;
+     /**
+      * Frequency penalty setting. This is a number between 0 (no penalty)
+      * and 1 (maximum penalty). It affects the likelihood of the model to repeatedly
+      * use the same words or phrases.
+      */
+     frequencyPenalty?: number;
+     /**
+      * The seed to use for random sampling. If set and supported by the model,
+      * calls will generate deterministic results.
+      */
+     seed?: number;
+ };
+
+ /**
+  * Warning from the model provider for this call. The call will proceed, but e.g.
+  * some settings might not be supported, which can lead to suboptimal results.
+  */
+ type LanguageModelV1CallWarning = {
+     type: 'unsupported-setting';
+     setting: keyof LanguageModelV1CallSettings;
+ } | {
+     type: 'other';
+     message: string;
+ };
+
+ type JsonSchema = Record<string, unknown>;
+
+ /**
+  * A tool has a name, a description, and a set of parameters.
+  *
+  * Note: this is **not** the user-facing tool definition. The AI SDK methods will
+  * map the user-facing tool definitions to this format.
+  */
+ type LanguageModelV1FunctionTool = {
+     /**
+      * The type of the tool. Only functions for now, but this gives us room to
+      * add more specific tool types in the future and use a discriminated union.
+      */
+     type: 'function';
+     /**
+      * The name of the tool. Unique within this model call.
+      */
+     name: string;
+     description?: string;
+     parameters: JsonSchema;
+ };
+
+ /**
+  * A prompt is a list of messages.
+  *
+  * Note: Not all models and prompt formats support multi-modal inputs and
+  * tool calls. The validation happens at runtime.
+  *
+  * Note: This is not a user-facing prompt. The AI SDK methods will map the
+  * user-facing prompt types such as chat or instruction prompts to this format.
+  */
+ type LanguageModelV1Prompt = Array<LanguageModelV1Message>;
+ type LanguageModelV1Message = {
+     role: 'system';
+     content: string;
+ } | {
+     role: 'user';
+     content: Array<LanguageModelV1TextPart | LanguageModelV1ImagePart>;
+ } | {
+     role: 'assistant';
+     content: Array<LanguageModelV1TextPart | LanguageModelV1ToolCallPart>;
+ } | {
+     role: 'tool';
+     content: Array<LanguageModelV1ToolResultPart>;
+ };
+ interface LanguageModelV1TextPart {
+     type: 'text';
+     /**
+      * The text content.
+      */
+     text: string;
+ }
+ interface LanguageModelV1ImagePart {
+     type: 'image';
+     /**
+      * Image data as a Uint8Array.
+      */
+     image: Uint8Array;
+     /**
+      * Optional mime type of the image.
+      */
+     mimeType?: string;
+ }
+ interface LanguageModelV1ToolCallPart {
+     type: 'tool-call';
+     toolCallId: string;
+     toolName: string;
+     args: unknown;
+ }
+ interface LanguageModelV1ToolResultPart {
+     type: 'tool-result';
+     toolCallId: string;
+     toolName: string;
+     result: unknown;
+ }
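A sketch (not part of the diff) of a `LanguageModelV1Prompt` value that exercises the four roles and part types above; all ids, values, and the tool name are illustrative:

// Sketch: a multi-modal prompt with a tool round-trip.
const prompt: LanguageModelV1Prompt = [
    { role: 'system', content: 'You are a helpful assistant.' },
    {
        role: 'user',
        content: [
            { type: 'text', text: 'What is in this image?' },
            { type: 'image', image: new Uint8Array([/* raw image bytes */]), mimeType: 'image/png' },
        ],
    },
    {
        role: 'assistant',
        content: [{ type: 'tool-call', toolCallId: 'call-1', toolName: 'getWeather', args: { city: 'Berlin' } }],
    },
    {
        role: 'tool',
        content: [{ type: 'tool-result', toolCallId: 'call-1', toolName: 'getWeather', result: { temperatureC: 21 } }],
    },
];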
+
+ type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
+     /**
+      * Whether the user provided the input as messages or as
+      * a prompt. This can help guide non-chat models in the
+      * expansion, because different expansions can be needed for
+      * chat/non-chat use cases.
+      */
+     inputFormat: 'messages' | 'prompt';
+     /**
+      * The mode affects the behavior of the language model. It is required to
+      * support provider-independent streaming and generation of structured objects.
+      * The model can take this information and e.g. configure json mode, the correct
+      * low level grammar, etc. It can also be used to optimize the efficiency of the
+      * streaming, e.g. tool-delta stream parts are only needed in the
+      * object-tool mode.
+      */
+     mode: {
+         type: 'regular';
+         tools?: Array<LanguageModelV1FunctionTool>;
+     } | {
+         type: 'object-json';
+     } | {
+         type: 'object-grammar';
+         schema: JsonSchema;
+     } | {
+         type: 'object-tool';
+         tool: LanguageModelV1FunctionTool;
+     };
+     /**
+      * A language model prompt is a standardized prompt type.
+      *
+      * Note: This is **not** the user-facing prompt. The AI SDK methods will map the
+      * user-facing prompt types such as chat or instruction prompts to this format.
+      * That approach allows us to evolve the user facing prompts without breaking
+      * the language model interface.
+      */
+     prompt: LanguageModelV1Prompt;
+ };
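A sketch (not part of the diff) of a complete `LanguageModelV1CallOptions` value in the regular, tool-calling mode; it reuses the hypothetical `weatherTool` and `prompt` from the sketches above, and the setting values are illustrative:

// Sketch: provider call options for a regular (tool-enabled) generation.
declare const weatherTool: LanguageModelV1FunctionTool; // from the earlier sketch
declare const prompt: LanguageModelV1Prompt;            // from the earlier sketch

const options: LanguageModelV1CallOptions = {
    inputFormat: 'messages',
    mode: { type: 'regular', tools: [weatherTool] },
    prompt,
    temperature: 0.7,
    maxTokens: 512,
};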
+
+ interface LanguageModel {
+     /**
+      * Default object generation mode that should be used with this model when
+      * no mode is specified. Should be the mode with the best results for this
+      * model. `undefined` can be returned if object generation is not supported.
+      *
+      * This is needed to generate the best objects possible without requiring the
+      * user to explicitly specify the object generation mode.
+      */
+     readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
+     doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
+         text?: string;
+         toolCalls?: Array<LanguageModelToolCall>;
+         warnings: LanguageModelV1CallWarning[];
+     }>;
+     doStream(options: LanguageModelV1CallOptions): PromiseLike<{
+         stream: ReadableStream<LanguageModelStreamPart>;
+         warnings: LanguageModelV1CallWarning[];
+     }>;
+ }
+ type ErrorStreamPart = {
+     type: 'error';
+     error: unknown;
+ };
+ type LanguageModelToolCall = {
+     toolCallId: string;
+     toolName: string;
+     args: string;
+ };
+ type ToolCallStreamPart = {
+     type: 'tool-call';
+ } & LanguageModelToolCall;
+ type ToolCallDeltaStreamPart = {
+     type: 'tool-call-delta';
+     toolCallId: string;
+     toolName: string;
+     argsTextDelta: string;
+ };
+ type TextDeltaStreamPart = {
+     type: 'text-delta';
+     textDelta: string;
+ };
+ type LanguageModelStreamPart = TextDeltaStreamPart | ToolCallDeltaStreamPart | ToolCallStreamPart | ErrorStreamPart;
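A sketch (not part of the diff) of how a caller might consume the `doStream` result, switching on the stream part types declared above; `model` stands for any `LanguageModel` implementation:

// Sketch: reading the provider stream and dispatching on part type.
async function readStream(model: LanguageModel, options: LanguageModelV1CallOptions) {
    const { stream, warnings } = await model.doStream(options);
    const reader = stream.getReader();
    let text = '';
    while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        switch (value.type) {
            case 'text-delta':
                text += value.textDelta; // accumulate streamed text
                break;
            case 'tool-call':
                console.log('tool call:', value.toolName, value.args);
                break;
            case 'tool-call-delta':
                // incremental tool-call arguments (only emitted when needed)
                break;
            case 'error':
                console.error(value.error);
                break;
        }
    }
    return { text, warnings };
}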
+
+ type CallSettings = {
+     /**
+      * Maximum number of tokens to generate.
+      */
+     maxTokens?: number;
+     /**
+      * Temperature setting. This is a number between 0 (almost no randomness) and
+      * 1 (very random).
+      *
+      * It is recommended to set either `temperature` or `topP`, but not both.
+      */
+     temperature?: number;
+     /**
+      * Nucleus sampling. This is a number between 0 and 1.
+      *
+      * E.g. 0.1 would mean that only tokens with the top 10% probability mass
+      * are considered.
+      *
+      * It is recommended to set either `temperature` or `topP`, but not both.
+      */
+     topP?: number;
+     /**
+      * Presence penalty setting. This is a number between 0 (no penalty)
+      * and 1 (maximum penalty). It affects the likelihood of the model to repeat
+      * information that is already in the prompt.
+      */
+     presencePenalty?: number;
+     /**
+      * Frequency penalty setting. This is a number between 0 (no penalty)
+      * and 1 (maximum penalty). It affects the likelihood of the model to
+      * repeatedly use the same words or phrases.
+      */
+     frequencyPenalty?: number;
+     /**
+      * The seed to use for random sampling. If set and supported by the model,
+      * calls will generate deterministic results.
+      */
+     seed?: number;
+ };
+
+ /**
+  * Data content. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+  */
+ type DataContent = string | Uint8Array | ArrayBuffer | Buffer;
+ declare function convertDataContentToBase64String(content: DataContent): string;
+ declare function convertDataContentToUint8Array(content: DataContent): Uint8Array;
+
+ interface TextPart {
+     type: 'text';
+     /**
+      * The text content.
+      */
+     text: string;
+ }
+ interface ImagePart {
+     type: 'image';
+     /**
+      * Image data. Can either be a base64-encoded string, a Uint8Array, an ArrayBuffer, or a Buffer.
+      */
+     image: DataContent;
+     /**
+      * Optional mime type of the image.
+      */
+     mimeType?: string;
+ }
+ interface ToolCallPart {
+     type: 'tool-call';
+     toolCallId: string;
+     toolName: string;
+     args: unknown;
+ }
+ interface ToolResultPart {
+     type: 'tool-result';
+     toolCallId: string;
+     toolName: string;
+     result: unknown;
+ }
+
+ type Message = UserMessage | AssistantMessage | ToolMessage;
+ type UserMessage = {
+     role: 'user';
+     content: UserContent;
+ };
+ type AssistantMessage = {
+     role: 'assistant';
+     content: AssistantContent;
+ };
+ type ToolMessage = {
+     role: 'tool';
+     content: ToolContent;
+ };
+ type UserContent = string | Array<TextPart | ImagePart>;
+ type AssistantContent = string | Array<TextPart | ToolCallPart>;
+ type ToolContent = Array<ToolResultPart>;
+
+ type Prompt = {
+     system?: string;
+     prompt?: string;
+     messages?: Array<Message>;
+ };
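A sketch (not part of the diff) of the two user-facing `Prompt` forms: a plain `prompt` string with an optional `system` message, or a `messages` array using the content part types above; all values are illustrative:

// One form: a bare prompt string, optionally with a system message.
const simple: Prompt = {
    system: 'You are a friendly assistant.',
    prompt: 'Write a haiku about TypeScript.',
};

// The other form: a messages array, which may mix text and image parts.
const chat: Prompt = {
    messages: [
        {
            role: 'user',
            content: [
                { type: 'text', text: 'Describe this picture.' },
                { type: 'image', image: new Uint8Array([/* raw image bytes */]), mimeType: 'image/png' },
            ],
        },
    ],
};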
+
+ /**
+  * Generate a structured, typed object using a language model.
+  */
+ declare function generateObject<T>({ model, schema: zodSchema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
+     model: LanguageModel;
+     schema: z.Schema<T>;
+     mode?: 'json' | 'tool' | 'grammar';
+ }): Promise<GenerateObjectResult<T>>;
+ declare class GenerateObjectResult<T> {
+     readonly object: T;
+     constructor(options: {
+         object: T;
+     });
+ }
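A usage sketch (not part of the diff) for `generateObject`, assuming a zod schema and a provider model that implements `LanguageModel`; the exact import path for this canary is not shown in this file:

import { z } from 'zod';

// `model` stands for any provider model implementing LanguageModel;
// the concrete provider is not part of this file.
declare const model: LanguageModel;

async function example() {
    const result = await generateObject({
        model,
        schema: z.object({
            name: z.string(),
            ingredients: z.array(z.string()),
        }),
        prompt: 'Invent a simple pasta recipe.',
    });

    // result.object is typed as { name: string; ingredients: string[] }
    console.log(result.object.name);
}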
+
+ /**
+  * Stream an object as a partial object stream.
+  */
+ declare function streamObject<T>({ model, schema: zodSchema, mode, system, prompt, messages, ...settings }: CallSettings & Prompt & {
+     model: LanguageModel;
+     schema: z.Schema<T>;
+     mode?: 'json' | 'tool' | 'grammar';
+ }): Promise<StreamObjectResult<T>>;
+ declare class StreamObjectResult<T> {
+     readonly objectStream: AsyncIterable<PartialDeep<T, {
+         recurseIntoArrays: true;
+     }>>;
+     constructor(modelStream: ReadableStream<string | ErrorStreamPart>);
+ }
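A usage sketch (not part of the diff) for `streamObject`; each iteration of `objectStream` yields a deep-partial view of the object as it is generated (same assumptions as the `generateObject` sketch):

import { z } from 'zod';

declare const model: LanguageModel; // any LanguageModel implementation

async function example() {
    const { objectStream } = await streamObject({
        model,
        schema: z.object({ title: z.string(), bullets: z.array(z.string()) }),
        prompt: 'Outline a short talk about web streams.',
    });

    for await (const partial of objectStream) {
        // `partial` is a deep-partial view that grows over time,
        // e.g. {} -> { title: 'Web s' } -> { title: 'Web streams', bullets: ['...'] }
        console.log(partial);
    }
}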
+
+ /**
+  * A tool contains the description and the schema of the input that the tool expects.
+  * This enables the language model to generate the input.
+  *
+  * The tool can also contain an optional execute function for the actual execution of the tool.
+  */
+ interface Tool<PARAMETERS extends z.ZodTypeAny = any, RESULT = any> {
+     /**
+      * An optional description of what the tool does. Will be used by the language model to decide whether to use the tool.
+      */
+     description?: string;
+     /**
+      * The schema of the input that the tool expects. The language model will use this to generate the input.
+      * Use descriptions to make the input understandable for the language model.
+      */
+     parameters: PARAMETERS;
+     /**
+      * An optional execute function for the actual execution of the tool.
+      * If not provided, the tool will not be executed automatically.
+      */
+     execute?: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ }
+ /**
+  * Helper function for inferring the execute args of a tool.
+  */
+ declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
+     execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ }): Tool<PARAMETERS, RESULT> & {
+     execute: (args: z.infer<PARAMETERS>) => PromiseLike<RESULT>;
+ };
+ declare function tool<PARAMETERS extends z.ZodTypeAny, RESULT>(tool: Tool<PARAMETERS, RESULT> & {
+     execute?: undefined;
+ }): Tool<PARAMETERS, RESULT> & {
+     execute: undefined;
+ };
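A usage sketch (not part of the diff) for the `tool` helper: because it is generic over the zod `parameters` schema, the `args` of `execute` are inferred; the weather tool and its result are hypothetical:

import { z } from 'zod';

// Sketch: a hypothetical weather tool; name, schema, and result are illustrative.
const weather = tool({
    description: 'Get the current weather for a city.',
    parameters: z.object({ city: z.string() }),
    execute: async ({ city }) => {
        // `city` is inferred as string from the zod schema above.
        return { city, temperatureC: 21 };
    },
});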
+
+ type ToToolCall<TOOLS extends Record<string, Tool>> = ValueOf<{
+     [NAME in keyof TOOLS]: {
+         toolCallId: string;
+         toolName: NAME & string;
+         args: z.infer<TOOLS[NAME]['parameters']>;
+     };
+ }>;
+ type ToToolCallArray<TOOLS extends Record<string, Tool>> = Array<ToToolCall<TOOLS>>;
+
+ type ToToolsWithExecute<TOOLS extends Record<string, Tool>> = {
+     [K in keyof TOOLS as TOOLS[K] extends {
+         execute: any;
+     } ? K : never]: TOOLS[K];
+ };
+ type ToToolsWithDefinedExecute<TOOLS extends Record<string, Tool>> = {
+     [K in keyof TOOLS as TOOLS[K]['execute'] extends undefined ? never : K]: TOOLS[K];
+ };
+ type ToToolResultObject<TOOLS extends Record<string, Tool>> = ValueOf<{
+     [NAME in keyof TOOLS]: {
+         toolCallId: string;
+         toolName: NAME & string;
+         args: z.infer<TOOLS[NAME]['parameters']>;
+         result: Awaited<ReturnType<Exclude<TOOLS[NAME]['execute'], undefined>>>;
+     };
+ }>;
+ type ToToolResult<TOOLS extends Record<string, Tool>> = ToToolResultObject<ToToolsWithDefinedExecute<ToToolsWithExecute<TOOLS>>>;
+ type ToToolResultArray<TOOLS extends Record<string, Tool>> = Array<ToToolResult<TOOLS>>;
+
+ /**
+  * Generate a text and call tools using a language model.
+  */
+ declare function generateText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, ...settings }: CallSettings & Prompt & {
+     model: LanguageModel;
+     tools?: TOOLS;
+ }): Promise<GenerateTextResult<TOOLS>>;
+ declare class GenerateTextResult<TOOLS extends Record<string, Tool>> {
+     readonly text: string;
+     readonly toolCalls: ToToolCallArray<TOOLS>;
+     readonly toolResults: ToToolResultArray<TOOLS>;
+     constructor(options: {
+         text: string;
+         toolCalls: ToToolCallArray<TOOLS>;
+         toolResults: ToToolResultArray<TOOLS>;
+     });
+ }
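A usage sketch (not part of the diff) for `generateText` with a hypothetical weather tool: the record key becomes the tool name, and `toolCalls`/`toolResults` are typed per tool via the `ToToolCall`/`ToToolResult` helpers above; the setting values are illustrative:

import { z } from 'zod';

declare const model: LanguageModel; // any LanguageModel implementation

async function example() {
    const result = await generateText({
        model,
        // the record key ('weather') becomes the tool name the model sees
        tools: {
            weather: tool({
                description: 'Get the current weather for a city.',
                parameters: z.object({ city: z.string() }),
                execute: async ({ city }) => ({ city, temperatureC: 21 }),
            }),
        },
        maxTokens: 512,
        temperature: 0.7,
        prompt: 'What is the weather in Berlin?',
    });

    console.log(result.text);
    for (const call of result.toolCalls) {
        console.log(call.toolName, call.args);  // args: { city: string }
    }
    for (const toolResult of result.toolResults) {
        console.log(toolResult.result);         // { city: string; temperatureC: number }
    }
}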
+
+ declare class StreamTextHttpResponse extends Response {
+     constructor(messageStream: ReadableStream<TextStreamPart<any>>);
+ }
+
+ /**
+  * Stream text generated by a language model.
+  */
+ declare function streamText<TOOLS extends Record<string, Tool>>({ model, tools, system, prompt, messages, ...settings }: CallSettings & Prompt & {
+     model: LanguageModel;
+     tools?: TOOLS;
+ }): Promise<StreamTextResult<TOOLS>>;
+ type TextStreamPart<TOOLS extends Record<string, Tool>> = {
+     type: 'text-delta';
+     textDelta: string;
+ } | ({
+     type: 'tool-call';
+ } & ToToolCall<TOOLS>) | {
+     type: 'error';
+     error: unknown;
+ } | ({
+     type: 'tool-result';
+ } & ToToolResult<TOOLS>);
+ declare class StreamTextResult<TOOLS extends Record<string, Tool>> {
+     private readonly rootStream;
+     readonly textStream: AsyncIterable<string>;
+     readonly fullStream: AsyncIterable<TextStreamPart<TOOLS>>;
+     constructor(stream: ReadableStream<TextStreamPart<TOOLS>>);
+     toResponse(): StreamTextHttpResponse;
+ }
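A usage sketch (not part of the diff) for `streamText` in a hypothetical Next.js-style route handler: the generated text can be consumed via `textStream` (or `fullStream` for tool-call and tool-result parts), or handed to the client with `toResponse()`:

declare const model: LanguageModel; // any LanguageModel implementation

// Sketch: a hypothetical POST route handler.
async function POST(req: Request) {
    const { prompt } = await req.json();

    const result = await streamText({ model, prompt });

    // Alternatively, iterate result.textStream (or result.fullStream) on the server.
    return result.toResponse();
}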
+
+ declare class UnsupportedFunctionalityError extends Error {
+     readonly functionality: string;
+     readonly provider: string;
+     constructor({ provider, functionality, }: {
+         provider: string;
+         functionality: string;
+     });
+     toJSON(): {
+         name: string;
+         message: string;
+         stack: string | undefined;
+         provider: string;
+         functionality: string;
+     };
+ }
+
+ export { AssistantContent, AssistantMessage, DataContent, ErrorStreamPart, GenerateObjectResult, GenerateTextResult, ImagePart, LanguageModel, LanguageModelStreamPart, LanguageModelToolCall, LanguageModelV1ImagePart, LanguageModelV1Message, LanguageModelV1Prompt, LanguageModelV1TextPart, LanguageModelV1ToolCallPart, LanguageModelV1ToolResultPart, Message, StreamObjectResult, StreamTextHttpResponse, StreamTextResult, TextPart, TextStreamPart, Tool, ToolCallPart, ToolContent, ToolMessage, ToolResultPart, UnsupportedFunctionalityError, UserContent, UserMessage, convertDataContentToBase64String, convertDataContentToUint8Array, generateObject, generateText, streamObject, streamText, tool };