@browser-ai/core 1.0.0 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -379
- package/dist/index.d.mts +41 -70
- package/dist/index.d.ts +41 -70
- package/dist/index.js +98 -62
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +93 -56
- package/dist/index.mjs.map +1 -1
- package/package.json +72 -72
package/README.md
CHANGED
@@ -1,379 +1,32 @@
-#
-
-<div align="center">
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-2. Enable these experimental flags:
-   - If you're using Chrome:
-     1. Go to `chrome://flags/`, search for _'Prompt API for Gemini Nano with Multimodal Input'_ and set it to Enabled
-     2. Go to `chrome://components` and click Check for Update on Optimization Guide On Device Model
-   - If you're using Edge:
-     1. Go to `edge://flags/#prompt-api-for-phi-mini` and set it to Enabled
-
-For more information, check out [this guide](https://developer.chrome.com/docs/extensions/ai/prompt-api)
-
-## Usage
-
-### Basic Usage (chat)
-
-```typescript
-import { streamText } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-
-const result = streamText({
-  // or generateText
-  model: builtInAI(),
-  messages: [{ role: "user", content: "Hello, how are you?" }],
-});
-
-for await (const chunk of result.textStream) {
-  console.log(chunk);
-}
-```
-
-### Language Models
-
-```typescript
-import { generateText } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-
-const model = builtInAI();
-
-const result = await generateText({
-  model,
-  messages: [{ role: "user", content: "Write a short poem about AI" }],
-});
-```
-
-### Text Embeddings
-
-```typescript
-import { embed, embedMany } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-
-// Single embedding
-const result = await embed({
-  model: builtInAI.textEmbedding("embedding"),
-  value: "Hello, world!",
-});
-
-console.log(result.embedding); // [0.1, 0.2, 0.3, ...]
-
-// Multiple embeddings
-const results = await embedMany({
-  model: builtInAI.textEmbedding("embedding"),
-  values: ["Hello", "World", "AI"],
-});
-
-console.log(results.embeddings); // [[...], [...], [...]]
-```
-
-## Download Progress Tracking
-
-When using the built-in AI models in Chrome & Edge for the first time, the model needs to be downloaded first.
-
-You'll probably want to show download progress in your applications to improve UX.
-
-### Basic Progress Monitoring
-
-```typescript
-import { streamText } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-
-const model = builtInAI();
-const availability = await model.availability();
-
-if (availability === "unavailable") {
-  console.log("Browser doesn't support built-in AI");
-  return;
-}
-
-if (availability === "downloadable") {
-  await model.createSessionWithProgress((progress) => {
-    console.log(`Download progress: ${Math.round(progress * 100)}%`);
-  });
-}
-
-// Model is ready
-const result = streamText({
-  model,
-  messages: [{ role: "user", content: "Hello!" }],
-});
-```
-
-## Integration with useChat Hook
-
-When using this library with the `useChat` hook, you'll need to create a [custom transport](https://v5.ai-sdk.dev/docs/ai-sdk-ui/transport#transport) implementation to handle client-side AI with download progress. You can do this by importing `BuiltInAIUIMessage` from `@built-in-ai/core` that extends `UIMessage` to include [data parts](https://v5.ai-sdk.dev/docs/ai-sdk-ui/streaming-data) such as download progress.
-
-See the complete working example: **[`/examples/next-hybrid/app/(core)/util/client-side-chat-transport.ts`](<../../examples/next-hybrid/app/(core)/util/client-side-chat-transport.ts>)** and the **[`/examples/next-hybrid/app/page.tsx`](<../../examples/next-hybrid/app/(core)/page.tsx>)** components.
-
-This example includes:
-
-- Download progress with UI progress bar and status message updates
-- Hybrid client/server architecture with fallback
-- Error handling and notifications
-- Full integration with `useChat` hook
-
-## Multimodal Support
-
-The Prompt API supports both images and audio files:
-
-```typescript
-import { streamText } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-
-const result = streamText({
-  model: builtInAI(),
-  messages: [
-    {
-      role: "user",
-      content: [
-        { type: "text", text: "What's in this image?" },
-        { type: "file", mediaType: "image/png", data: base64ImageData },
-      ],
-    },
-    {
-      role: "user",
-      content: [{ type: "file", mediaType: "audio/mp3", data: audioData }],
-    },
-  ],
-});
-
-for await (const chunk of result.textStream) {
-  console.log(chunk);
-}
-```
-
-## Tool Calling (with support for multiple steps)
-
-The `builtInAI` model supports tool calling, allowing the AI to use external functions and APIs. This is particularly useful for building AI agents that can perform actions or retrieve data:
-
-```typescript
-import { streamText, stepCountIs } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-import { z } from "zod";
-
-const result = await streamText({
-  model: builtInAI(),
-  messages: [{ role: "user", content: "What's the weather in San Francisco?" }],
-  tools: {
-    search: {
-      description: "Search the web for information",
-      parameters: z.object({
-        query: z.string(),
-      }),
-      execute: async ({ query }) => {
-        // Search implementation
-        return { results: [{ title: "...", url: "..." }] };
-      },
-    },
-    fetchContent: {
-      description: "Fetch the content of a URL",
-      parameters: z.object({
-        url: z.string(),
-      }),
-      execute: async ({ url }) => {
-        // Fetch implementation
-        return { content: "Article content..." };
-      },
-    },
-  },
-  stopWhen: stepCountIs(5), // allow multiple steps
-});
-```
-
-## Generating Structured Data
-
-The `builtInAI` model also allows using the AI SDK `generateObject` and `streamObject`:
-
-### streamObject
-
-```typescript
-import { streamObject } from "ai";
-import { builtInAI } from "@built-in-ai/core";
-
-const { object } = await streamObject({
-  model: builtInAI(),
-  schema: z.object({
-    recipe: z.object({
-      name: z.string(),
-      ingredients: z.array(z.object({ name: z.string(), amount: z.string() })),
-      steps: z.array(z.string()),
-    }),
-  }),
-  prompt: "Generate a lasagna recipe.",
-});
-```
-
-### generateObject
-
-```typescript
-const { object } = await generateObject({
-  model: builtInAI(),
-  schema: z.object({
-    recipe: z.object({
-      name: z.string(),
-      ingredients: z.array(z.object({ name: z.string(), amount: z.string() })),
-      steps: z.array(z.string()),
-    }),
-  }),
-  prompt: "Generate a lasagna recipe.",
-});
-```
-
-## Features
-
-### Supported
-
-- [x] **Text generation** (`generateText()`)
-- [x] **Streaming responses** (`streamText()`)
-- [x] **Download progress streaming** - Real-time progress updates during model downloads
-- [x] **Multimodal functionality** (image and audio support)\*
-- [x] **Temperature control**
-- [x] **Response format constraints** (JSON `generateObject()/streamObject()`)
-- [x] **Tool calling** - Full support for function calling with JSON format
-- [x] **Abort signals**
-
-### Planned (when implemented in the Prompt API)
-
-- [ ] **Token counting**
-- [ ] **Custom stop sequences**
-- [ ] **Presence/frequency penalties**
-
-> \*Multimodal functionality is currently only available in Chrome's Prompt API implementation
-
-## API Reference
-
-### `builtInAI(modelId?, settings?)`
-
-Creates a browser AI model instance for chat or embeddings.
-
-**For Chat Models:**
-
-- `modelId` (optional): The model identifier, defaults to 'text'
-- `settings` (optional): Configuration options for the chat model
-  - `temperature?: number` - Controls randomness (0-1)
-  - `topK?: number` - Limits vocabulary selection
-
-**Returns:** `BuiltInAIChatLanguageModel` instance
-
-**For Embedding Models:**
-
-- `modelId`: Must be 'embedding'
-- `settings` (optional): Configuration options for the embedding model
-  - `wasmLoaderPath?: string` - Path to WASM loader (default: CDN hosted)
-  - `wasmBinaryPath?: string` - Path to WASM binary (default: CDN hosted)
-  - `modelAssetPath?: string` - Path to model asset file (default: CDN hosted)
-  - `l2Normalize?: boolean` - Whether to normalize with L2 norm (default: false)
-  - `quantize?: boolean` - Whether to quantize embeddings to bytes (default: false)
-  - `delegate?: 'CPU' | 'GPU'` - Backend to use for inference
-
-**Returns:** `BuiltInAIEmbeddingModel` instance
-
-### `doesBrowserSupportBuiltInAI(): boolean`
-
-Quick check if the browser supports the built-in AI API. Useful for component-level decisions and feature flags.
-
-**Returns:** `boolean` - `true` if browser supports the Prompt API, `false` otherwise
-
-**Example:**
-
-```typescript
-import { doesBrowserSupportBuiltInAI } from "@built-in-ai/core";
-
-if (doesBrowserSupportBuiltInAI()) {
-  // Show built-in AI option in UI
-} else {
-  // Show server-side option only
-}
-```
-
-### `BuiltInAIUIMessage`
-
-Extended UI message type for use with the `useChat` hook that includes custom data parts for built-in AI functionality.
-
-**Type Definition:**
-
-```typescript
-type BuiltInAIUIMessage = UIMessage<
-  never,
-  {
-    modelDownloadProgress: {
-      status: "downloading" | "complete" | "error";
-      progress?: number;
-      message: string;
-    };
-    notification: {
-      message: string;
-      level: "info" | "warning" | "error";
-    };
-  }
->;
-```
-
-**Data Parts:**
-
-- `modelDownloadProgress` - Tracks browser AI model download status and progress
-- `notification` - Displays temporary messages and alerts to users
-
-### `BuiltInAIChatLanguageModel.createSessionWithProgress(onDownloadProgress?)`
-
-Creates a language model session with optional download progress monitoring.
-
-**Parameters:**
-
-- `onDownloadProgress?: (progress: number) => void` - Optional callback that receives progress values from 0 to 1 during model download
-
-**Returns:** `Promise<LanguageModel>` - The configured language model session
-
-**Example:**
-
-```typescript
-const model = builtInAI();
-await model.createSessionWithProgress((progress) => {
-  console.log(`Download: ${Math.round(progress * 100)}%`);
-});
-```
-
-### `BuiltInAIChatLanguageModel.availability()`
-
-Checks the current availability status of the built-in AI model.
-
-**Returns:** `Promise<"unavailable" | "downloadable" | "downloading" | "available">`
-
-- `"unavailable"` - Model is not supported in the browser
-- `"downloadable"` - Model is supported but needs to be downloaded first
-- `"downloading"` - Model is currently being downloaded
-- `"available"` - Model is ready to use
-
-## Author
-
-2025 © Jakob Hoeg Mørk
+# Browser AI provider for Vercel AI SDK
+
+<div align="center">
+
+
+
+</div>
+
+<div align="center">
+
+[](https://www.npmjs.com/package/@browser-ai/core)
+[](https://www.npmjs.com/package/@browser-ai/core)
+
+</div>
+
+A TypeScript library that provides access to browser-based AI capabilities, with seamless fallback to server-side models, using the [Vercel AI SDK](https://ai-sdk.dev/). This library enables you to leverage **Chrome** and **Edge's** built-in browser AI features ([Prompt API](https://github.com/webmachinelearning/prompt-api)) with the AI SDK.
+
+## Installation
+
+```bash
+npm i @browser-ai/core
+```
+
+The `@browser-ai/core` package is the AI SDK provider for the built-in AI models in your Chrome and Edge browser. It provides seamless access to both language models and text embeddings through browser-native APIs.
+
+## Documentation
+
+For complete documentation, including examples, refer to [this site](https://www.browser-ai.dev/docs/ai-sdk-v6/core).
+
+## Author
+
+2025 © Jakob Hoeg Mørk
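The 2.x README above drops the inline usage examples in favor of the external docs site. As a point of reference for the rename, a minimal chat sketch in the style of the removed 1.x "Basic Usage (chat)" example would look like the following; this assumes the old call pattern carries over and uses only the `browserAI` export declared in the updated type definitions below.

```typescript
// Sketch only: mirrors the removed 1.x example with the renamed package and
// provider export; the runtime behaviour is assumed, not taken from the 2.x docs.
import { streamText } from "ai";
import { browserAI } from "@browser-ai/core"; // was: builtInAI from "@built-in-ai/core"

const result = streamText({
  // browserAI() defaults to the "text" chat model (BrowserAIChatModelId)
  model: browserAI(),
  messages: [{ role: "user", content: "Hello, how are you?" }],
});

for await (const chunk of result.textStream) {
  console.log(chunk);
}
```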
package/dist/index.d.mts
CHANGED
@@ -1,9 +1,9 @@
-import {
+import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, EmbeddingModelV3CallOptions, EmbeddingModelV3Result, ProviderV3 } from '@ai-sdk/provider';
 import { TextEmbedder } from '@mediapipe/tasks-text';
 import { UIMessage } from 'ai';
 
-type
-interface BuiltInAIChatSettings extends LanguageModelCreateOptions {
+type BrowserAIChatModelId = "text";
+interface BrowserAIChatSettings extends LanguageModelCreateOptions {
     /**
      * Expected input types for the session, for multimodal inputs.
      */
@@ -11,25 +11,25 @@ interface BuiltInAIChatSettings extends LanguageModelCreateOptions {
         type: "text" | "image" | "audio";
         languages?: string[];
     }>;
+    /**
+     * Callback invoked when the model quota is exceeded.
+     * @see [Prompt API Quota Overflow](https://github.com/webmachinelearning/prompt-api?tab=readme-ov-file#tokenization-context-window-length-limits-and-overflow)
+     * @param event
+     */
+    onQuotaOverflow?: (event: Event) => void;
 }
 /**
- * Check if the browser supports the
- * @returns true if the browser supports the
- */
-declare function doesBrowserSupportBuiltInAI(): boolean;
-/**
- * Check if the Prompt API is available
- * @deprecated Use `doesBrowserSupportBuiltInAI()` instead for clearer naming
- * @returns true if the browser supports the built-in AI API, false otherwise
+ * Check if the browser supports the browser AI API
+ * @returns true if the browser supports the browser AI API, false otherwise
  */
-declare function
-declare class BuiltInAIChatLanguageModel implements LanguageModelV2 {
-    readonly specificationVersion = "
-    readonly modelId:
+declare function doesBrowserSupportBrowserAI(): boolean;
+declare class BrowserAIChatLanguageModel implements LanguageModelV3 {
+    readonly specificationVersion = "v3";
+    readonly modelId: BrowserAIChatModelId;
     readonly provider = "browser-ai";
     private readonly config;
     private readonly sessionManager;
-    constructor(modelId:
+    constructor(modelId: BrowserAIChatModelId, options?: BrowserAIChatSettings);
     readonly supportedUrls: Record<string, RegExp[]>;
     /**
      * Gets a session with the specified options
@@ -45,24 +45,9 @@ declare class BuiltInAIChatLanguageModel implements LanguageModelV2 {
      * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
      * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
      */
-    doGenerate(options:
-        content: LanguageModelV2Content[];
-        finishReason: LanguageModelV2FinishReason;
-        usage: {
-            inputTokens: undefined;
-            outputTokens: undefined;
-            totalTokens: undefined;
-        };
-        request: {
-            body: {
-                messages: LanguageModelMessage[];
-                options: LanguageModelPromptOptions & LanguageModelCreateCoreOptions;
-            };
-        };
-        warnings: LanguageModelV2CallWarning[];
-    }>;
+    doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
     /**
-     * Check the availability of the
+     * Check the availability of the browser AI model
      * @returns Promise resolving to "unavailable", "available", or "available-after-download"
      */
     availability(): Promise<Availability>;
@@ -90,18 +75,10 @@ declare class BuiltInAIChatLanguageModel implements LanguageModelV2 {
      * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
      * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
      */
-    doStream(options:
-        stream: ReadableStream<LanguageModelV2StreamPart>;
-        request: {
-            body: {
-                messages: LanguageModelMessage[];
-                options: LanguageModelPromptOptions & LanguageModelCreateCoreOptions;
-            };
-        };
-    }>;
+    doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
 }
 
-interface BuiltInAIEmbeddingModelSettings {
+interface BrowserAIEmbeddingModelSettings {
     /**
      * An optional base path to specify the directory the Wasm files should be loaded from.
      * @default 'https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.js'
@@ -139,8 +116,8 @@ interface BuiltInAIEmbeddingModelSettings {
      */
     delegate?: "CPU" | "GPU";
 }
-declare class BuiltInAIEmbeddingModel implements EmbeddingModelV2<string> {
-    readonly specificationVersion = "
+declare class BrowserAIEmbeddingModel implements EmbeddingModelV3 {
+    readonly specificationVersion = "v3";
     readonly provider = "google-mediapipe";
     readonly modelId: string;
     readonly supportsParallelCalls = true;
@@ -148,57 +125,51 @@ declare class BuiltInAIEmbeddingModel implements EmbeddingModelV2<string> {
     private settings;
     private modelAssetBuffer;
     private textEmbedder;
-    constructor(settings?:
+    constructor(settings?: BrowserAIEmbeddingModelSettings);
     protected getTextEmbedder: () => Promise<TextEmbedder>;
-    doEmbed: (options:
-        values: string[];
-        abortSignal?: AbortSignal;
-    }) => Promise<{
-        embeddings: Array<EmbeddingModelV2Embedding>;
-        rawResponse?: Record<PropertyKey, any>;
-    }>;
+    doEmbed: (options: EmbeddingModelV3CallOptions) => Promise<EmbeddingModelV3Result>;
 }
 
-interface
-(modelId?:
+interface BrowserAIProvider extends ProviderV3 {
+    (modelId?: BrowserAIChatModelId, settings?: BrowserAIChatSettings): BrowserAIChatLanguageModel;
     /**
      * Creates a model for text generation.
      */
-    languageModel(modelId:
+    languageModel(modelId: BrowserAIChatModelId, settings?: BrowserAIChatSettings): BrowserAIChatLanguageModel;
     /**
      * Creates a model for text generation.
      */
-    chat(modelId:
-
-
+    chat(modelId: BrowserAIChatModelId, settings?: BrowserAIChatSettings): BrowserAIChatLanguageModel;
+    embedding(modelId: "embedding", settings?: BrowserAIEmbeddingModelSettings): EmbeddingModelV3;
+    embeddingModel: (modelId: "embedding", settings?: BrowserAIEmbeddingModelSettings) => EmbeddingModelV3;
     imageModel(modelId: string): never;
     speechModel(modelId: string): never;
     transcriptionModel(modelId: string): never;
 }
-interface
+interface BrowserAIProviderSettings {
 }
 /**
- * Create a
+ * Create a BrowserAI provider instance.
  */
-declare function
+declare function createBrowserAI(options?: BrowserAIProviderSettings): BrowserAIProvider;
 /**
- * Default
+ * Default BrowserAI provider instance.
  */
-declare const builtInAI: BuiltInAIProvider;
+declare const browserAI: BrowserAIProvider;
 
 /**
- * UI message type for
+ * UI message type for browser AI features with custom data parts.
  *
  * Extends base UIMessage to include specific data part schemas
- * for
+ * for browser AI functionality such as model download progress tracking
  *
  * @example
 * // Import and use with useChat hook from @ai-sdk/react
 * ```typescript
 * import { useChat } from "@ai-sdk/react";
-* import {
+* import { BrowserAIUIMessage } from "@browser-ai/core";
 *
-* const { messages, sendMessage } = useChat<
+* const { messages, sendMessage } = useChat<BrowserAIUIMessage>({
 *   onData: (dataPart) => {
 *     if (dataPart.type === 'data-modelDownloadProgress') {
 *       console.log(`Download: ${dataPart.data.progress}%`);
@@ -212,7 +183,7 @@ declare const builtInAI: BuiltInAIProvider;
 *
 * @see {@link https://v5.ai-sdk.dev/docs/reference/ai-sdk-ui/use-chat | useChat hook documentation}
 */
-type BuiltInAIUIMessage = UIMessage<
+type BrowserAIUIMessage = UIMessage<
     never, // No custom metadata type
     {
         /**
@@ -240,4 +211,4 @@ type BuiltInAIUIMessage = UIMessage<
     }
 >;
 
-export {
+export { BrowserAIChatLanguageModel, type BrowserAIChatSettings, BrowserAIEmbeddingModel, type BrowserAIEmbeddingModelSettings, type BrowserAIProvider, type BrowserAIProviderSettings, type BrowserAIUIMessage, browserAI, createBrowserAI, doesBrowserSupportBrowserAI };
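Taken together, these declarations map the 1.x surface onto the new names (`builtInAI` → `browserAI`, `doesBrowserSupportBuiltInAI` → `doesBrowserSupportBrowserAI`, specification version now `"v3"` with the `V3` types from `@ai-sdk/provider`). A minimal feature-detection sketch using only the signatures above; the surrounding usage is an assumption, not taken from the published 2.x docs.

```typescript
// Sketch based on the renamed exports in index.d.mts; only the names and
// signatures come from the declarations above.
import { browserAI, doesBrowserSupportBrowserAI } from "@browser-ai/core";

async function checkBrowserAI() {
  // Replaces doesBrowserSupportBuiltInAI() from 1.x.
  if (!doesBrowserSupportBrowserAI()) {
    return "unsupported";
  }

  // languageModel("text") returns a BrowserAIChatLanguageModel.
  const model = browserAI.languageModel("text");

  // availability() resolves to the on-device model's download/readiness status.
  return model.availability();
}
```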