@browser-ai/core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,379 @@
1
+ # Built-in AI provider for Vercel AI SDK
2
+
3
+ <div align="center">
4
+ <img src="./hero.png">
5
+ </div>
6
+
7
+ <div align="center">
8
+
9
+ [![NPM Version](https://img.shields.io/npm/v/%40built-in-ai%2Fcore)](https://www.npmjs.com/package/@built-in-ai/core)
10
+ [![NPM Downloads](https://img.shields.io/npm/dm/%40built-in-ai%2Fcore)](https://www.npmjs.com/package/@built-in-ai/core)
11
+
12
+ </div>
13
+
14
+ A TypeScript library that provides access to browser-based AI capabilities with seamless fallback to server-side models via the [Vercel AI SDK](https://ai-sdk.dev/). This library enables you to leverage **Chrome** and **Edge's** built-in AI features ([Prompt API](https://github.com/webmachinelearning/prompt-api)) with the AI SDK.
15
+
16
+ > [!IMPORTANT]
17
+ > This package is under constant development as the Prompt API matures, and may contain errors and incompatible changes.
18
+
19
+ ## Installation
20
+
21
+ ```bash
22
+ npm i @built-in-ai/core
23
+ ```
24
+
25
+ The `@built-in-ai/core` package is the AI SDK provider for your Chrome and Edge browser's built-in AI models. It provides seamless access to both language models and text embeddings through browser-native APIs.
26
+
27
+ ## Browser Requirements
28
+
29
+ > [!IMPORTANT]
30
+ > The Prompt API is currently experimental and might change as it matures. The below enablement guide of the API might also change in the future.
31
+
32
+ 1. You need Chrome (v. 128 or higher) or Edge Dev/Canary (v. 138.0.3309.2 or higher)
33
+
34
+ 2. Enable these experimental flags:
35
+ - If you're using Chrome:
36
+ 1. Go to `chrome://flags/`, search for _'Prompt API for Gemini Nano with Multimodal Input'_ and set it to Enabled
37
+ 2. Go to `chrome://components` and click Check for Update on Optimization Guide On Device Model
38
+ - If you're using Edge:
39
+ 1. Go to `edge://flags/#prompt-api-for-phi-mini` and set it to Enabled
40
+
41
+ For more information, check out [this guide](https://developer.chrome.com/docs/extensions/ai/prompt-api)
42
+
43
+ ## Usage
44
+
45
+ ### Basic Usage (chat)
46
+
47
+ ```typescript
48
+ import { streamText } from "ai";
49
+ import { builtInAI } from "@built-in-ai/core";
50
+
51
+ const result = streamText({
52
+ // or generateText
53
+ model: builtInAI(),
54
+ messages: [{ role: "user", content: "Hello, how are you?" }],
55
+ });
56
+
57
+ for await (const chunk of result.textStream) {
58
+ console.log(chunk);
59
+ }
60
+ ```
61
+
62
+ ### Language Models
63
+
64
+ ```typescript
65
+ import { generateText } from "ai";
66
+ import { builtInAI } from "@built-in-ai/core";
67
+
68
+ const model = builtInAI();
69
+
70
+ const result = await generateText({
71
+ model,
72
+ messages: [{ role: "user", content: "Write a short poem about AI" }],
73
+ });
74
+ ```
75
+
76
+ ### Text Embeddings
77
+
78
+ ```typescript
79
+ import { embed, embedMany } from "ai";
80
+ import { builtInAI } from "@built-in-ai/core";
81
+
82
+ // Single embedding
83
+ const result = await embed({
84
+ model: builtInAI.textEmbedding("embedding"),
85
+ value: "Hello, world!",
86
+ });
87
+
88
+ console.log(result.embedding); // [0.1, 0.2, 0.3, ...]
89
+
90
+ // Multiple embeddings
91
+ const results = await embedMany({
92
+ model: builtInAI.textEmbedding("embedding"),
93
+ values: ["Hello", "World", "AI"],
94
+ });
95
+
96
+ console.log(results.embeddings); // [[...], [...], [...]]
97
+ ```
98
+
99
+ ## Download Progress Tracking
100
+
101
+ When using the built-in AI models in Chrome & Edge for the first time, the model needs to be downloaded first.
102
+
103
+ You'll probably want to show download progress in your applications to improve UX.
104
+
105
+ ### Basic Progress Monitoring
106
+
107
+ ```typescript
108
+ import { streamText } from "ai";
109
+ import { builtInAI } from "@built-in-ai/core";
110
+
111
+ const model = builtInAI();
112
+ const availability = await model.availability();
113
+
114
+ if (availability === "unavailable") {
115
+ console.log("Browser doesn't support built-in AI");
116
+ return;
117
+ }
118
+
119
+ if (availability === "downloadable") {
120
+ await model.createSessionWithProgress((progress) => {
121
+ console.log(`Download progress: ${Math.round(progress * 100)}%`);
122
+ });
123
+ }
124
+
125
+ // Model is ready
126
+ const result = streamText({
127
+ model,
128
+ messages: [{ role: "user", content: "Hello!" }],
129
+ });
130
+ ```
131
+
132
+ ## Integration with useChat Hook
133
+
134
+ When using this library with the `useChat` hook, you'll need to create a [custom transport](https://v5.ai-sdk.dev/docs/ai-sdk-ui/transport#transport) implementation to handle client-side AI with download progress. You can do this by importing `BuiltInAIUIMessage` from `@built-in-ai/core` that extends `UIMessage` to include [data parts](https://v5.ai-sdk.dev/docs/ai-sdk-ui/streaming-data) such as download progress.
135
+
136
+ See the complete working example: **[`/examples/next-hybrid/app/(core)/util/client-side-chat-transport.ts`](<../../examples/next-hybrid/app/(core)/util/client-side-chat-transport.ts>)** and the **[`/examples/next-hybrid/app/(core)/page.tsx`](<../../examples/next-hybrid/app/(core)/page.tsx>)** components.
137
+
138
+ This example includes:
139
+
140
+ - Download progress with UI progress bar and status message updates
141
+ - Hybrid client/server architecture with fallback
142
+ - Error handling and notifications
143
+ - Full integration with `useChat` hook
144
+
145
+ ## Multimodal Support
146
+
147
+ The Prompt API supports both images and audio files:
148
+
149
+ ```typescript
150
+ import { streamText } from "ai";
151
+ import { builtInAI } from "@built-in-ai/core";
152
+
153
+ const result = streamText({
154
+ model: builtInAI(),
155
+ messages: [
156
+ {
157
+ role: "user",
158
+ content: [
159
+ { type: "text", text: "What's in this image?" },
160
+ { type: "file", mediaType: "image/png", data: base64ImageData },
161
+ ],
162
+ },
163
+ {
164
+ role: "user",
165
+ content: [{ type: "file", mediaType: "audio/mp3", data: audioData }],
166
+ },
167
+ ],
168
+ });
169
+
170
+ for await (const chunk of result.textStream) {
171
+ console.log(chunk);
172
+ }
173
+ ```
174
+
175
+ ## Tool Calling (with support for multiple steps)
176
+
177
+ The `builtInAI` model supports tool calling, allowing the AI to use external functions and APIs. This is particularly useful for building AI agents that can perform actions or retrieve data:
178
+
179
+ ```typescript
180
+ import { streamText, stepCountIs } from "ai";
181
+ import { builtInAI } from "@built-in-ai/core";
182
+ import { z } from "zod";
183
+
184
+ const result = streamText({
185
+ model: builtInAI(),
186
+ messages: [{ role: "user", content: "What's the weather in San Francisco?" }],
187
+ tools: {
188
+ search: {
189
+ description: "Search the web for information",
190
+       inputSchema: z.object({
191
+ query: z.string(),
192
+ }),
193
+ execute: async ({ query }) => {
194
+ // Search implementation
195
+ return { results: [{ title: "...", url: "..." }] };
196
+ },
197
+ },
198
+ fetchContent: {
199
+ description: "Fetch the content of a URL",
200
+       inputSchema: z.object({
201
+ url: z.string(),
202
+ }),
203
+ execute: async ({ url }) => {
204
+ // Fetch implementation
205
+ return { content: "Article content..." };
206
+ },
207
+ },
208
+ },
209
+ stopWhen: stepCountIs(5), // allow multiple steps
210
+ });
211
+ ```
212
+
213
+ ## Generating Structured Data
214
+
215
+ The `builtInAI` model also allows using the AI SDK `generateObject` and `streamObject`:
216
+
217
+ ### streamObject
218
+
219
+ ```typescript
220
+ import { streamObject } from "ai";
221
+ import { builtInAI } from "@built-in-ai/core";
222
+
223
+ const { partialObjectStream } = streamObject({
224
+ model: builtInAI(),
225
+ schema: z.object({
226
+ recipe: z.object({
227
+ name: z.string(),
228
+ ingredients: z.array(z.object({ name: z.string(), amount: z.string() })),
229
+ steps: z.array(z.string()),
230
+ }),
231
+ }),
232
+ prompt: "Generate a lasagna recipe.",
233
+ });
234
+ ```
235
+
236
+ ### generateObject
237
+
238
+ ```typescript
239
+ const { object } = await generateObject({
240
+ model: builtInAI(),
241
+ schema: z.object({
242
+ recipe: z.object({
243
+ name: z.string(),
244
+ ingredients: z.array(z.object({ name: z.string(), amount: z.string() })),
245
+ steps: z.array(z.string()),
246
+ }),
247
+ }),
248
+ prompt: "Generate a lasagna recipe.",
249
+ });
250
+ ```
251
+
252
+ ## Features
253
+
254
+ ### Supported
255
+
256
+ - [x] **Text generation** (`generateText()`)
257
+ - [x] **Streaming responses** (`streamText()`)
258
+ - [x] **Download progress streaming** - Real-time progress updates during model downloads
259
+ - [x] **Multimodal functionality** (image and audio support)\*
260
+ - [x] **Temperature control**
261
+ - [x] **Response format constraints** (JSON `generateObject()/streamObject()`)
262
+ - [x] **Tool calling** - Full support for function calling with JSON format
263
+ - [x] **Abort signals**
264
+
265
+ ### Planned (when implemented in the Prompt API)
266
+
267
+ - [ ] **Token counting**
268
+ - [ ] **Custom stop sequences**
269
+ - [ ] **Presence/frequency penalties**
270
+
271
+ > \*Multimodal functionality is currently only available in Chrome's Prompt API implementation
272
+
273
+ ## API Reference
274
+
275
+ ### `builtInAI(modelId?, settings?)`
276
+
277
+ Creates a browser AI model instance for chat or embeddings.
278
+
279
+ **For Chat Models:**
280
+
281
+ - `modelId` (optional): The model identifier, defaults to 'text'
282
+ - `settings` (optional): Configuration options for the chat model
283
+ - `temperature?: number` - Controls randomness (0-1)
284
+ - `topK?: number` - Limits vocabulary selection
285
+
286
+ **Returns:** `BuiltInAIChatLanguageModel` instance
287
+
288
+ **For Embedding Models:**
289
+
290
+ - `modelId`: Must be 'embedding'
291
+ - `settings` (optional): Configuration options for the embedding model
292
+ - `wasmLoaderPath?: string` - Path to WASM loader (default: CDN hosted)
293
+ - `wasmBinaryPath?: string` - Path to WASM binary (default: CDN hosted)
294
+ - `modelAssetPath?: string` - Path to model asset file (default: CDN hosted)
295
+ - `l2Normalize?: boolean` - Whether to normalize with L2 norm (default: false)
296
+ - `quantize?: boolean` - Whether to quantize embeddings to bytes (default: false)
297
+ - `delegate?: 'CPU' | 'GPU'` - Backend to use for inference
298
+
299
+ **Returns:** `BuiltInAIEmbeddingModel` instance
300
+
301
+ ### `doesBrowserSupportBuiltInAI(): boolean`
302
+
303
+ Quick check if the browser supports the built-in AI API. Useful for component-level decisions and feature flags.
304
+
305
+ **Returns:** `boolean` - `true` if browser supports the Prompt API, `false` otherwise
306
+
307
+ **Example:**
308
+
309
+ ```typescript
310
+ import { doesBrowserSupportBuiltInAI } from "@built-in-ai/core";
311
+
312
+ if (doesBrowserSupportBuiltInAI()) {
313
+ // Show built-in AI option in UI
314
+ } else {
315
+ // Show server-side option only
316
+ }
317
+ ```
318
+
319
+ ### `BuiltInAIUIMessage`
320
+
321
+ Extended UI message type for use with the `useChat` hook that includes custom data parts for built-in AI functionality.
322
+
323
+ **Type Definition:**
324
+
325
+ ```typescript
326
+ type BuiltInAIUIMessage = UIMessage<
327
+ never,
328
+ {
329
+ modelDownloadProgress: {
330
+ status: "downloading" | "complete" | "error";
331
+ progress?: number;
332
+ message: string;
333
+ };
334
+ notification: {
335
+ message: string;
336
+ level: "info" | "warning" | "error";
337
+ };
338
+ }
339
+ >;
340
+ ```
341
+
342
+ **Data Parts:**
343
+
344
+ - `modelDownloadProgress` - Tracks browser AI model download status and progress
345
+ - `notification` - Displays temporary messages and alerts to users
346
+
347
+ ### `BuiltInAIChatLanguageModel.createSessionWithProgress(onDownloadProgress?)`
348
+
349
+ Creates a language model session with optional download progress monitoring.
350
+
351
+ **Parameters:**
352
+
353
+ - `onDownloadProgress?: (progress: number) => void` - Optional callback that receives progress values from 0 to 1 during model download
354
+
355
+ **Returns:** `Promise<LanguageModel>` - The configured language model session
356
+
357
+ **Example:**
358
+
359
+ ```typescript
360
+ const model = builtInAI();
361
+ await model.createSessionWithProgress((progress) => {
362
+ console.log(`Download: ${Math.round(progress * 100)}%`);
363
+ });
364
+ ```
365
+
366
+ ### `BuiltInAIChatLanguageModel.availability()`
367
+
368
+ Checks the current availability status of the built-in AI model.
369
+
370
+ **Returns:** `Promise<"unavailable" | "downloadable" | "downloading" | "available">`
371
+
372
+ - `"unavailable"` - Model is not supported in the browser
373
+ - `"downloadable"` - Model is supported but needs to be downloaded first
374
+ - `"downloading"` - Model is currently being downloaded
375
+ - `"available"` - Model is ready to use
376
+
377
+ ## Author
378
+
379
+ 2025 © Jakob Hoeg Mørk
@@ -0,0 +1,243 @@
1
+ import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Content, LanguageModelV2FinishReason, LanguageModelV2CallWarning, LanguageModelV2StreamPart, EmbeddingModelV2, EmbeddingModelV2Embedding, ProviderV2 } from '@ai-sdk/provider';
2
+ import { TextEmbedder } from '@mediapipe/tasks-text';
3
+ import { UIMessage } from 'ai';
4
+
5
+ type BuiltInAIChatModelId = "text";
6
+ interface BuiltInAIChatSettings extends LanguageModelCreateOptions {
7
+ /**
8
+ * Expected input types for the session, for multimodal inputs.
9
+ */
10
+ expectedInputs?: Array<{
11
+ type: "text" | "image" | "audio";
12
+ languages?: string[];
13
+ }>;
14
+ }
15
+ /**
16
+ * Check if the browser supports the built-in AI API
17
+ * @returns true if the browser supports the built-in AI API, false otherwise
18
+ */
19
+ declare function doesBrowserSupportBuiltInAI(): boolean;
20
+ /**
21
+ * Check if the Prompt API is available
22
+ * @deprecated Use `doesBrowserSupportBuiltInAI()` instead for clearer naming
23
+ * @returns true if the browser supports the built-in AI API, false otherwise
24
+ */
25
+ declare function isBuiltInAIModelAvailable(): boolean;
26
+ declare class BuiltInAIChatLanguageModel implements LanguageModelV2 {
27
+ readonly specificationVersion = "v2";
28
+ readonly modelId: BuiltInAIChatModelId;
29
+ readonly provider = "browser-ai";
30
+ private readonly config;
31
+ private readonly sessionManager;
32
+ constructor(modelId: BuiltInAIChatModelId, options?: BuiltInAIChatSettings);
33
+ readonly supportedUrls: Record<string, RegExp[]>;
34
+ /**
35
+ * Gets a session with the specified options
36
+ * Delegates to SessionManager for all session lifecycle management
37
+ * @private
38
+ */
39
+ private getSession;
40
+ private getArgs;
41
+ /**
42
+ * Generates a complete text response using the browser's built-in Prompt API
43
+ * @param options
44
+ * @returns Promise resolving to the generated content with finish reason, usage stats, and any warnings
45
+ * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
46
+ * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
47
+ */
48
+ doGenerate(options: LanguageModelV2CallOptions): Promise<{
49
+ content: LanguageModelV2Content[];
50
+ finishReason: LanguageModelV2FinishReason;
51
+ usage: {
52
+ inputTokens: undefined;
53
+ outputTokens: undefined;
54
+ totalTokens: undefined;
55
+ };
56
+ request: {
57
+ body: {
58
+ messages: LanguageModelMessage[];
59
+ options: LanguageModelPromptOptions & LanguageModelCreateCoreOptions;
60
+ };
61
+ };
62
+ warnings: LanguageModelV2CallWarning[];
63
+ }>;
64
+ /**
65
+ * Check the availability of the built-in AI model
66
+  * @returns Promise resolving to "unavailable", "downloadable", "downloading", or "available"
67
+ */
68
+ availability(): Promise<Availability>;
69
+ /**
70
+ * Creates a session with download progress monitoring.
71
+ *
72
+ * @example
73
+ * ```typescript
74
+ * const session = await model.createSessionWithProgress(
75
+ * (progress) => {
76
+ * console.log(`Download progress: ${Math.round(progress * 100)}%`);
77
+ * }
78
+ * );
79
+ * ```
80
+ *
81
+ * @param onDownloadProgress Optional callback receiving progress values 0-1 during model download
82
+ * @returns Promise resolving to a configured LanguageModel session
83
+ * @throws {LoadSettingError} When the Prompt API is not available or model is unavailable
84
+ */
85
+ createSessionWithProgress(onDownloadProgress?: (progress: number) => void): Promise<LanguageModel>;
86
+ /**
87
+ * Generates a streaming text response using the browser's built-in Prompt API
88
+ * @param options
89
+ * @returns Promise resolving to a readable stream of text chunks and request metadata
90
+ * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
91
+ * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
92
+ */
93
+ doStream(options: LanguageModelV2CallOptions): Promise<{
94
+ stream: ReadableStream<LanguageModelV2StreamPart>;
95
+ request: {
96
+ body: {
97
+ messages: LanguageModelMessage[];
98
+ options: LanguageModelPromptOptions & LanguageModelCreateCoreOptions;
99
+ };
100
+ };
101
+ }>;
102
+ }
103
+
104
+ interface BuiltInAIEmbeddingModelSettings {
105
+ /**
106
+ * An optional base path to specify the directory the Wasm files should be loaded from.
107
+ * @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.js'
108
+ */
109
+ wasmLoaderPath?: string;
110
+ /**
111
+ * It's about 6mb before gzip.
112
+ * @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.wasm'
113
+ */
114
+ wasmBinaryPath?: string;
115
+ /**
116
+ * The model path to the model asset file.
117
+ * It's about 6.1mb before gzip.
118
+ * @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/universal_sentence_encoder.tflite'
119
+ */
120
+ modelAssetPath?: string;
121
+ /**
122
+ * Whether to normalize the returned feature vector with L2 norm. Use this
123
+ * option only if the model does not already contain a native L2_NORMALIZATION
124
+ * TF Lite Op. In most cases, this is already the case and L2 norm is thus
125
+ * achieved through TF Lite inference.
126
+ * @default false
127
+ */
128
+ l2Normalize?: boolean;
129
+ /**
130
+ * Whether the returned embedding should be quantized to bytes via scalar
131
+ * quantization. Embeddings are implicitly assumed to be unit-norm and
132
+ * therefore any dimension is guaranteed to have a value in [-1.0, 1.0]. Use
133
+ * the l2_normalize option if this is not the case.
134
+ * @default false
135
+ */
136
+ quantize?: boolean;
137
+ /**
138
+ * Overrides the default backend to use for the provided model.
139
+ */
140
+ delegate?: "CPU" | "GPU";
141
+ }
142
+ declare class BuiltInAIEmbeddingModel implements EmbeddingModelV2<string> {
143
+ readonly specificationVersion = "v2";
144
+ readonly provider = "google-mediapipe";
145
+ readonly modelId: string;
146
+ readonly supportsParallelCalls = true;
147
+ readonly maxEmbeddingsPerCall: undefined;
148
+ private settings;
149
+ private modelAssetBuffer;
150
+ private textEmbedder;
151
+ constructor(settings?: BuiltInAIEmbeddingModelSettings);
152
+ protected getTextEmbedder: () => Promise<TextEmbedder>;
153
+ doEmbed: (options: {
154
+ values: string[];
155
+ abortSignal?: AbortSignal;
156
+ }) => Promise<{
157
+ embeddings: Array<EmbeddingModelV2Embedding>;
158
+ rawResponse?: Record<PropertyKey, any>;
159
+ }>;
160
+ }
161
+
162
+ interface BuiltInAIProvider extends ProviderV2 {
163
+ (modelId?: BuiltInAIChatModelId, settings?: BuiltInAIChatSettings): BuiltInAIChatLanguageModel;
164
+ /**
165
+ * Creates a model for text generation.
166
+ */
167
+ languageModel(modelId: BuiltInAIChatModelId, settings?: BuiltInAIChatSettings): BuiltInAIChatLanguageModel;
168
+ /**
169
+ * Creates a model for text generation.
170
+ */
171
+ chat(modelId: BuiltInAIChatModelId, settings?: BuiltInAIChatSettings): BuiltInAIChatLanguageModel;
172
+ textEmbedding(modelId: "embedding", settings?: BuiltInAIEmbeddingModelSettings): EmbeddingModelV2<string>;
173
+ textEmbeddingModel: (modelId: "embedding", settings?: BuiltInAIEmbeddingModelSettings) => EmbeddingModelV2<string>;
174
+ imageModel(modelId: string): never;
175
+ speechModel(modelId: string): never;
176
+ transcriptionModel(modelId: string): never;
177
+ }
178
+ interface BuiltInAIProviderSettings {
179
+ }
180
+ /**
181
+ * Create a BuiltInAI provider instance.
182
+ */
183
+ declare function createBuiltInAI(options?: BuiltInAIProviderSettings): BuiltInAIProvider;
184
+ /**
185
+ * Default BuiltInAI provider instance.
186
+ */
187
+ declare const builtInAI: BuiltInAIProvider;
188
+
189
+ /**
190
+ * UI message type for built-in AI features with custom data parts.
191
+ *
192
+ * Extends base UIMessage to include specific data part schemas
193
+ * for built-in AI functionality such as model download progress tracking
194
+ *
195
+ * @example
196
+ * // Import and use with useChat hook from @ai-sdk/react
197
+ * ```typescript
198
+ * import { useChat } from "@ai-sdk/react";
199
+ * import { BuiltInAIUIMessage } from "@built-in-ai/core";
200
+ *
201
+ * const { messages, sendMessage } = useChat<BuiltInAIUIMessage>({
202
+ * onData: (dataPart) => {
203
+ * if (dataPart.type === 'data-modelDownloadProgress') {
204
+ * console.log(`Download: ${dataPart.data.progress}%`);
205
+ * }
206
+ * if (dataPart.type === 'data-notification') {
207
+ * console.log(`${dataPart.data.level}: ${dataPart.data.message}`);
208
+ * }
209
+ * }
210
+ * });
211
+ * ```
212
+ *
213
+ * @see {@link https://v5.ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat | useChat hook documentation}
214
+ */
215
+ type BuiltInAIUIMessage = UIMessage<
216
+ never, // No custom metadata type
217
+ {
218
+ /**
219
+ * Model download progress data part for tracking browser AI model download status.
220
+ * Used to display download progress bars and status messages to users.
221
+ */
222
+ modelDownloadProgress: {
223
+ /** Current download/initialization status */
224
+ status: "downloading" | "complete" | "error";
225
+ /** Download progress percentage (0-100), undefined for non-downloading states */
226
+ progress?: number;
227
+ /** Human-readable status message to display to users */
228
+ message: string;
229
+ };
230
+ /**
231
+ * User notification data part for displaying temporary messages and alerts.
232
+ * These are typically transient and not persisted in message history.
233
+ */
234
+ notification: {
235
+ /** The notification message text */
236
+ message: string;
237
+ /** Notification severity level for styling and priority */
238
+ level: "info" | "warning" | "error";
239
+ };
240
+ }
241
+ >;
242
+
243
+ export { BuiltInAIChatLanguageModel, type BuiltInAIChatSettings, BuiltInAIEmbeddingModel, type BuiltInAIEmbeddingModelSettings, type BuiltInAIProvider, type BuiltInAIProviderSettings, type BuiltInAIUIMessage, builtInAI, createBuiltInAI, doesBrowserSupportBuiltInAI, isBuiltInAIModelAvailable };