prompt-api-polyfill 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +228 -0
- package/async-iterator-polyfill.js +16 -0
- package/dot_env.json +6 -0
- package/json-schema-converter.js +86 -0
- package/multimodal-converter.js +195 -0
- package/package.json +43 -0
- package/prompt-api-polyfill.js +548 -0
package/README.md
ADDED

@@ -0,0 +1,228 @@

# Prompt API Polyfill (Firebase AI Logic backend)

This package provides a browser polyfill for the
[Prompt API `LanguageModel`](https://github.com/webmachinelearning/prompt-api)
backed by **Firebase AI Logic**.

When loaded in the browser, it defines a global:

```js
window.LanguageModel;
```

so you can use the Prompt API shape even in environments where it is not yet
natively available.

- Backend: Firebase AI Logic
- Default model: `gemini-2.5-flash-lite` (configurable via `modelName`)

---

## Installation

Install from npm:

```bash
npm install prompt-api-polyfill
```

## Quick start

1. **Create a Firebase project with Generative AI enabled** (see Configuration
   below).
2. **Provide your Firebase config** on `window.FIREBASE_CONFIG`.
3. **Import the polyfill** so it can attach `window.LanguageModel`.

### Example (using a JSON config file)

Create a `.env.json` file (see
[Configuring `dot_env.json` / `.env.json`](#configuring-dot_envjson--envjson))
and then use it from a browser entry point:

```html
<script type="module">
  import firebaseConfig from './.env.json' with { type: 'json' };

  // Make the config available to the polyfill
  window.FIREBASE_CONFIG = firebaseConfig;

  // Only load the polyfill if LanguageModel is not available natively
  if (!('LanguageModel' in window)) {
    await import('prompt-api-polyfill');
  }

  const session = await LanguageModel.create();
  const text = await session.prompt('Say hello from the polyfill!');
  console.log(text);
</script>
```

> **Note**: The polyfill attaches `LanguageModel` to `window` as a side effect.
> There are no named exports.

### Example based on `index.html` in this repo

The included `index.html` demonstrates the full surface area of the polyfill,
including:

- `LanguageModel.create()` with options
- `prompt()` and `promptStreaming()`
- Multimodal inputs (text, image, audio)
- `append()` and `measureInputUsage()`
- Quota handling via `onquotaoverflow`
- `clone()` and `destroy()`

A simplified version of how it is wired up:

```html
<script type="module">
  import firebaseConfig from './.env.json' with { type: 'json' };
  window.FIREBASE_CONFIG = firebaseConfig;

  // Load the polyfill only when necessary
  if (!('LanguageModel' in window)) {
    await import('prompt-api-polyfill');
  }

  const controller = new AbortController();
  const session = await LanguageModel.create();

  try {
    const stream = session.promptStreaming('Write me a very long poem', {
      signal: controller.signal,
    });

    for await (const chunk of stream) {
      console.log(chunk);
    }
  } catch (error) {
    console.error(error);
  }
</script>
```

---

## Configuring `dot_env.json` / `.env.json`

This repo ships with a template file:

```jsonc
// dot_env.json
{
  "apiKey": "",
  "projectId": "",
  "appId": "",
  "modelName": "",
}
```

You should treat `dot_env.json` as a **template** and create a real `.env.json`,
holding your secrets, that is **not committed** to source control.

### 1. Create `.env.json`

Copy the template:

```bash
cp dot_env.json .env.json
```

Then open `.env.json` and fill in the values from your Firebase project:

```json
{
  "apiKey": "YOUR_FIREBASE_WEB_API_KEY",
  "projectId": "your-gcp-project-id",
  "appId": "YOUR_FIREBASE_APP_ID",
  "modelName": "gemini-2.5-flash-lite"
}
```

### 2. Field-by-field explanation

- `apiKey`: Your **Firebase Web API key**. You can find this in the Firebase
  Console under _Project settings → General → Your apps → Web app_.

- `projectId`: The **GCP / Firebase project ID**, e.g. `my-ai-project`.

- `appId`: The **Firebase Web app ID**, e.g. `1:1234567890:web:abcdef123456`.

- `modelName` (optional): The Gemini model ID to use. If omitted, the polyfill
  defaults to:

  ```json
  "modelName": "gemini-2.5-flash-lite"
  ```

  You can substitute another supported Gemini model here if desired.

These fields are passed directly to:

- `initializeApp(firebaseConfig)` from Firebase
- `getAI(app, { backend: new GoogleAIBackend() })` from the Firebase AI SDK

and `modelName` is used to select which Gemini model to call, as sketched below.
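
For orientation, this is roughly what the polyfill does with these fields when it
loads (mirroring the initialization code in `prompt-api-polyfill.js` further
below, including its `esm.run` CDN imports):

```js
import { initializeApp } from 'https://esm.run/firebase/app';
import { getAI, GoogleAIBackend } from 'https://esm.run/firebase/ai';

// The polyfill reads the config object you placed on the window...
const firebaseConfig = window.FIREBASE_CONFIG;

// ...initializes Firebase and the AI Logic backend with it...
const app = initializeApp(firebaseConfig);
const ai = getAI(app, { backend: new GoogleAIBackend() });

// ...and falls back to the default model when `modelName` is omitted.
const modelName = firebaseConfig.modelName || 'gemini-2.5-flash-lite';
```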

> **Important:** Do **not** commit a real `.env.json` with production
> credentials to source control. Use `dot_env.json` as the committed template
> and keep `.env.json` local.

### 3. Wiring the config into the polyfill

Once `.env.json` is filled out, you can import it and expose it to the polyfill
exactly like in `index.html`:

```js
import firebaseConfig from './.env.json' with { type: 'json' };

window.FIREBASE_CONFIG = firebaseConfig;

if (!('LanguageModel' in window)) {
  await import('prompt-api-polyfill');
}
```

From this point on, `LanguageModel.create()` will use your Firebase
configuration.

---

## API surface

Once the polyfill is loaded and `window.LanguageModel` is available, you can use
it as described in the
[Prompt API documentation](https://developer.chrome.com/docs/ai/prompt-api).

For a complete, end-to-end example, see the `index.html` file in this directory.
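
One capability worth calling out: `prompt()` and `promptStreaming()` accept a
`responseConstraint` option holding a standard JSON Schema, which the polyfill
converts to a Firebase AI `Schema` (see `json-schema-converter.js` below) and
uses to request a JSON response. A minimal sketch (the schema and prompt are
illustrative):

```js
const session = await LanguageModel.create();

// The polyfill converts this JSON Schema into a Firebase AI `Schema`
// and asks the model for a JSON response that matches it.
const result = await session.prompt('Rate this sentence from 1 to 5: "Hi!"', {
  responseConstraint: {
    type: 'object',
    properties: {
      rating: { type: 'integer' },
      reason: { type: 'string' },
    },
    required: ['rating'],
  },
});

// The response text is a JSON string matching the schema.
console.log(JSON.parse(result));
```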

---

## Running the demo locally

1. Install dependencies and this package (if using the npm-installed version in
   another project):

   ```bash
   npm install
   ```

2. Copy and fill in your config:

   ```bash
   cp dot_env.json .env.json
   # then edit .env.json with your Firebase and model settings
   ```

3. Serve `index.html`:

   ```bash
   npm start
   ```

You should see network requests to the Vertex AI / Firebase AI backend and
streaming responses logged in the console.

## License

Apache 2.0

package/async-iterator-polyfill.js
ADDED

@@ -0,0 +1,16 @@

if (!ReadableStream.prototype[Symbol.asyncIterator]) {
  ReadableStream.prototype[Symbol.asyncIterator] = async function* () {
    const reader = this.getReader();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) {
          return;
        }
        yield value;
      }
    } finally {
      reader.releaseLock();
    }
  };
}
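
This shim matters because `promptStreaming()` returns a `ReadableStream`, and
not every engine ships `ReadableStream[Symbol.asyncIterator]` yet (the main
polyfill's import comment calls out Safari). With the shim in place, the usual
consumption pattern works everywhere:

```js
// A sketch of what the shim enables: `for await` over any ReadableStream,
// including the one returned by session.promptStreaming().
const session = await LanguageModel.create();
for await (const chunk of session.promptStreaming('Tell me a story')) {
  console.log(chunk);
}
```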
package/dot_env.json
ADDED

@@ -0,0 +1,6 @@

{
  "apiKey": "",
  "projectId": "",
  "appId": "",
  "modelName": "",
}

package/json-schema-converter.js
ADDED

@@ -0,0 +1,86 @@

import { Schema } from 'https://esm.run/firebase/ai';

/**
 * Converts a standard JSON Schema object into a Firebase Vertex AI Schema class instance.
 *
 * @param {Object} jsonSchema - The standard JSON Schema object.
 * @returns {Schema} - The Firebase Vertex AI Schema instance.
 */
export function convertJsonSchemaToVertexSchema(jsonSchema) {
  if (!jsonSchema) return undefined;

  // Extract common base parameters supported by all Schema types
  const baseParams = {
    description: jsonSchema.description,
    nullable: jsonSchema.nullable || false,
    format: jsonSchema.format,
  };

  // Handle "type": ["string", "null"] pattern common in JSON Schema
  if (Array.isArray(jsonSchema.type) && jsonSchema.type.includes('null')) {
    baseParams.nullable = true;
    jsonSchema.type = jsonSchema.type.find((t) => t !== 'null');
  }

  // SWITCH based on schema type
  switch (jsonSchema.type) {
    case 'string':
      // Check for Enums
      if (jsonSchema.enum && Array.isArray(jsonSchema.enum)) {
        return Schema.enumString({
          ...baseParams,
          enum: jsonSchema.enum,
        });
      }
      return Schema.string(baseParams);

    case 'number':
      return Schema.number(baseParams);

    case 'integer':
      return Schema.integer(baseParams);

    case 'boolean':
      return Schema.boolean(baseParams);

    case 'array':
      return Schema.array({
        ...baseParams,
        // Recursively convert the 'items' schema
        items: convertJsonSchemaToVertexSchema(jsonSchema.items),
      });

    case 'object': {
      const properties = {};
      const allPropertyKeys = jsonSchema.properties
        ? Object.keys(jsonSchema.properties)
        : [];

      // Recursively convert each property
      allPropertyKeys.forEach((key) => {
        properties[key] = convertJsonSchemaToVertexSchema(
          jsonSchema.properties[key]
        );
      });

      // Calculate optionalProperties
      // JSON Schema uses "required" (allowlist), Vertex SDK uses "optionalProperties" (blocklist)
      const required = jsonSchema.required || [];
      const optionalProperties = allPropertyKeys.filter(
        (key) => !required.includes(key)
      );

      return Schema.object({
        ...baseParams,
        properties: properties,
        optionalProperties: optionalProperties,
      });
    }

    default:
      // Fallback for unknown types or complex types not fully supported (like oneOf);
      // defaulting to string usually prevents crashes, but use with caution.
      console.warn(
        `Unsupported type: ${jsonSchema.type}, defaulting to string.`
      );
      return Schema.string(baseParams);
  }
}
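
As an illustration (the schema below is hypothetical), note the two mappings the
converter performs: JSON Schema's `required` allowlist becomes the SDK's
`optionalProperties` blocklist, and a `['integer', 'null']` type union becomes a
nullable integer:

```js
import { convertJsonSchemaToVertexSchema } from './json-schema-converter.js';

// `required: ['name']` leaves `age` in `optionalProperties`, and the
// ['integer', 'null'] union is converted to a nullable integer schema.
const vertexSchema = convertJsonSchemaToVertexSchema({
  type: 'object',
  properties: {
    name: { type: 'string' },
    age: { type: ['integer', 'null'] },
  },
  required: ['name'],
});
```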

package/multimodal-converter.js
ADDED

@@ -0,0 +1,195 @@

export default class MultimodalConverter {
  static async convert(type, value) {
    if (type === 'image') return this.processImage(value);
    if (type === 'audio') return this.processAudio(value);
    throw new DOMException(
      `Unsupported media type: ${type}`,
      'NotSupportedError'
    );
  }

  static async processImage(source) {
    // Blob
    if (source instanceof Blob) {
      return this.blobToInlineData(source);
    }

    // BufferSource (ArrayBuffer/View) -> Sniff or Default
    if (ArrayBuffer.isView(source) || source instanceof ArrayBuffer) {
      // For typed-array views, slice out just the viewed region instead of
      // taking the whole underlying buffer.
      const buffer =
        source instanceof ArrayBuffer
          ? source
          : source.buffer.slice(
              source.byteOffset,
              source.byteOffset + source.byteLength
            );
      const base64 = this.arrayBufferToBase64(buffer);
      // Basic sniffing for PNG/JPEG magic bytes
      const u8 = new Uint8Array(buffer);
      let mimeType = 'image/png'; // Default
      if (u8[0] === 0xff && u8[1] === 0xd8) mimeType = 'image/jpeg';
      else if (u8[0] === 0x89 && u8[1] === 0x50) mimeType = 'image/png';

      return { inlineData: { data: base64, mimeType } };
    }

    // ImageBitmapSource (Canvas, Image, VideoFrame, etc.)
    // We draw to a canvas to standardize to PNG
    return this.canvasSourceToInlineData(source);
  }

  static async processAudio(source) {
    // Blob
    if (source instanceof Blob) {
      return this.blobToInlineData(source);
    }

    // AudioBuffer -> WAV
    if (source instanceof AudioBuffer) {
      const wavBuffer = this.audioBufferToWav(source);
      const base64 = this.arrayBufferToBase64(wavBuffer);
      return { inlineData: { data: base64, mimeType: 'audio/wav' } };
    }

    // BufferSource -> Assume it's already an audio file (mp3/wav)
    if (ArrayBuffer.isView(source) || source instanceof ArrayBuffer) {
      // Same view-slicing caveat as in processImage above.
      const buffer =
        source instanceof ArrayBuffer
          ? source
          : source.buffer.slice(
              source.byteOffset,
              source.byteOffset + source.byteLength
            );
      return {
        inlineData: {
          data: this.arrayBufferToBase64(buffer),
          mimeType: 'audio/wav', // Fallback assumption
        },
      };
    }

    throw new DOMException('Unsupported audio source', 'NotSupportedError');
  }

  // Low Level Converters

  static blobToInlineData(blob) {
    return new Promise((resolve, reject) => {
      const reader = new FileReader();
      reader.onloadend = () => {
        if (reader.error) reject(reader.error);
        else
          resolve({
            inlineData: {
              data: reader.result.split(',')[1],
              mimeType: blob.type,
            },
          });
      };
      reader.readAsDataURL(blob);
    });
  }

  static canvasSourceToInlineData(source) {
    const canvas = document.createElement('canvas');
    const w = source.naturalWidth || source.videoWidth || source.width;
    const h = source.naturalHeight || source.videoHeight || source.height;

    canvas.width = w;
    canvas.height = h;

    const ctx = canvas.getContext('2d');
    ctx.drawImage(source, 0, 0);

    const dataUrl = canvas.toDataURL('image/png');
    return {
      inlineData: {
        data: dataUrl.split(',')[1],
        mimeType: 'image/png',
      },
    };
  }

  static arrayBufferToBase64(buffer) {
    let binary = '';
    const bytes = new Uint8Array(buffer);
    const len = bytes.byteLength;
    for (let i = 0; i < len; i++) {
      binary += String.fromCharCode(bytes[i]);
    }
    return window.btoa(binary);
  }

  // Simple WAV Encoder for AudioBuffer
  static audioBufferToWav(buffer) {
    const numChannels = buffer.numberOfChannels;
    const sampleRate = buffer.sampleRate;
    const format = 1; // PCM
    const bitDepth = 16;

    let result;
    if (numChannels === 2) {
      result = this.interleave(
        buffer.getChannelData(0),
        buffer.getChannelData(1)
      );
    } else {
      result = buffer.getChannelData(0);
    }

    return this.encodeWAV(result, format, sampleRate, numChannels, bitDepth);
  }

  static interleave(inputL, inputR) {
    const length = inputL.length + inputR.length;
    const result = new Float32Array(length);
    let index = 0;
    let inputIndex = 0;
    while (index < length) {
      result[index++] = inputL[inputIndex];
      result[index++] = inputR[inputIndex];
      inputIndex++;
    }
    return result;
  }

  static encodeWAV(samples, format, sampleRate, numChannels, bitDepth) {
    const bytesPerSample = bitDepth / 8;
    const blockAlign = numChannels * bytesPerSample;

    const buffer = new ArrayBuffer(44 + samples.length * bytesPerSample);
    const view = new DataView(buffer);

    /* RIFF identifier */
    this.writeString(view, 0, 'RIFF');
    /* RIFF chunk length */
    view.setUint32(4, 36 + samples.length * bytesPerSample, true);
    /* RIFF type */
    this.writeString(view, 8, 'WAVE');
    /* format chunk identifier */
    this.writeString(view, 12, 'fmt ');
    /* format chunk length */
    view.setUint32(16, 16, true);
    /* sample format (raw) */
    view.setUint16(20, format, true);
    /* channel count */
    view.setUint16(22, numChannels, true);
    /* sample rate */
    view.setUint32(24, sampleRate, true);
    /* byte rate (sample rate * block align) */
    view.setUint32(28, sampleRate * blockAlign, true);
    /* block align (channel count * bytes per sample) */
    view.setUint16(32, blockAlign, true);
    /* bits per sample */
    view.setUint16(34, bitDepth, true);
    /* data chunk identifier */
    this.writeString(view, 36, 'data');
    /* data chunk length */
    view.setUint32(40, samples.length * bytesPerSample, true);

    this.floatTo16BitPCM(view, 44, samples);

    return buffer;
  }

  static floatTo16BitPCM(output, offset, input) {
    for (let i = 0; i < input.length; i++, offset += 2) {
      const s = Math.max(-1, Math.min(1, input[i]));
      output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
    }
  }

  static writeString(view, offset, string) {
    for (let i = 0; i < string.length; i++) {
      view.setUint8(offset + i, string.charCodeAt(i));
    }
  }
}
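
A quick sketch of the converter in use (the canvas selector and `audioBuffer`
variable are hypothetical): canvas-like image sources are rasterized to PNG, and
`AudioBuffer`s are re-encoded as 16-bit PCM WAV, both returned as base64
`inlineData` parts:

```js
import MultimodalConverter from './multimodal-converter.js';

// A canvas is drawn to a PNG data URL and returned as base64 inlineData.
const imagePart = await MultimodalConverter.convert(
  'image',
  document.querySelector('canvas')
);

// An AudioBuffer (e.g. from the Web Audio API) is encoded as PCM WAV first.
const audioPart = await MultimodalConverter.convert('audio', audioBuffer);
```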
package/package.json
ADDED

@@ -0,0 +1,43 @@

{
  "name": "prompt-api-polyfill",
  "version": "0.1.0",
  "description": "Polyfill for the Prompt API (`LanguageModel`) backed by Firebase AI Logic.",
  "type": "module",
  "main": "./prompt-api-polyfill.js",
  "module": "./prompt-api-polyfill.js",
  "browser": "./prompt-api-polyfill.js",
  "exports": {
    ".": "./prompt-api-polyfill.js"
  },
  "files": [
    "async-iterator-polyfill.js",
    "json-schema-converter.js",
    "multimodal-converter.js",
    "prompt-api-polyfill.js",
    "dot_env.json"
  ],
  "sideEffects": true,
  "keywords": [
    "prompt-api",
    "language-model",
    "polyfill",
    "firebase",
    "web-ai"
  ],
  "repository": {
    "type": "git",
    "url": "git+https://github.com/GoogleChromeLabs/web-ai-demos.git",
    "directory": "prompt-api-polyfill"
  },
  "bugs": {
    "url": "https://github.com/GoogleChromeLabs/web-ai-demos/issues"
  },
  "homepage": "https://github.com/GoogleChromeLabs/web-ai-demos/tree/main/prompt-api-polyfill/README.md",
  "license": "Apache-2.0",
  "scripts": {
    "start": "npx http-server"
  },
  "devDependencies": {
    "http-server": "^14.1.1"
  }
}
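
One wrinkle worth noting: the README's browser examples import the bare
specifier `prompt-api-polyfill`, which a plain `<script type="module">` page can
only resolve through a bundler or an import map. A hypothetical import map
pointing at a CDN copy of this package (the URL is illustrative, not something
the package documents):

```html
<script type="importmap">
  {
    "imports": {
      "prompt-api-polyfill": "https://unpkg.com/prompt-api-polyfill@0.1.0/prompt-api-polyfill.js"
    }
  }
</script>
```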

package/prompt-api-polyfill.js
ADDED

@@ -0,0 +1,548 @@

/**
 * Polyfill for the Prompt API (`LanguageModel`)
 * Backend: Firebase AI Logic
 * Spec: https://github.com/webmachinelearning/prompt-api/blob/main/README.md
 *
 * Instructions:
 * 1. Include this script in your HTML with type="module".
 * 2. Define window.FIREBASE_CONFIG with your Firebase configuration object BEFORE importing this.
 */

import { initializeApp } from 'https://esm.run/firebase/app';
import {
  getAI,
  getGenerativeModel,
  GoogleAIBackend,
  InferenceMode,
} from 'https://esm.run/firebase/ai';

import './async-iterator-polyfill.js'; // Still needed for Safari 26.2.
import MultimodalConverter from './multimodal-converter.js';
import { convertJsonSchemaToVertexSchema } from './json-schema-converter.js';

(() => {
  if ('LanguageModel' in window) {
    return;
  }

  const firebaseConfig = window.FIREBASE_CONFIG;
  if (!firebaseConfig) {
    console.error(
      'Firebase Prompt API Polyfill: Missing configuration. Please set window.FIREBASE_CONFIG.'
    );
    return;
  }

  // Initialize Firebase
  const app = initializeApp(firebaseConfig);
  const ai = getAI(app, { backend: new GoogleAIBackend() });
  const MODEL_NAME = firebaseConfig.modelName || 'gemini-2.5-flash-lite';

  // Helper to convert initial history
  async function convertToFirebaseHistory(prompts) {
    const history = [];
    for (const p of prompts) {
      const role = p.role === 'assistant' ? 'model' : 'user';
      let parts = [];

      if (Array.isArray(p.content)) {
        // Mixed content
        for (const item of p.content) {
          if (item.type === 'text') {
            parts.push({ text: item.value || item.text || '' });
          } else {
            const part = await MultimodalConverter.convert(
              item.type,
              item.value
            );
            parts.push(part);
          }
        }
      } else {
        // Simple string
        parts.push({ text: p.content });
      }
      history.push({ role, parts });
    }
    return history;
  }

  /**
   * Main LanguageModel Class
   */
  class LanguageModel extends EventTarget {
    #model;
    #history;
    #options;
    #inCloudParams;
    #destroyed;
    #inputUsage;
    #topK;
    #temperature;
    #onquotaoverflow;

    constructor(model, initialHistory, options = {}, inCloudParams) {
      super();
      this.#model = model;
      this.#history = initialHistory || [];
      this.#options = options;
      this.#inCloudParams = inCloudParams;
      this.#destroyed = false;
      this.#inputUsage = 0;

      this.#topK = options.topK;
      this.#temperature = options.temperature;
    }

    get inputUsage() {
      return this.#inputUsage;
    }
    get inputQuota() {
      return 1000000;
    }
    get topK() {
      return this.#topK;
    }
    get temperature() {
      return this.#temperature;
    }

    get onquotaoverflow() {
      return this.#onquotaoverflow;
    }

    set onquotaoverflow(handler) {
      if (this.#onquotaoverflow)
        this.removeEventListener('quotaoverflow', this.#onquotaoverflow);
      this.#onquotaoverflow = handler;
      if (typeof handler === 'function')
        this.addEventListener('quotaoverflow', handler);
    }

    static async availability(options = {}) {
      await LanguageModel.#validateOptions(options);
      return 'available';
    }

    static async #validateOptions(options = {}) {
      const { maxTemperature, maxTopK } = await LanguageModel.params();

      const hasTemperature = Object.prototype.hasOwnProperty.call(
        options,
        'temperature'
      );
      const hasTopK = Object.prototype.hasOwnProperty.call(options, 'topK');

      if (hasTemperature !== hasTopK) {
        throw new DOMException(
          'Initializing a new session must either specify both topK and temperature, or neither of them.',
          'NotSupportedError'
        );
      }

      // If neither temperature nor topK are provided, there is nothing to validate.
      if (!hasTemperature && !hasTopK) {
        return;
      }

      const { temperature, topK } = options;

      if (
        typeof temperature !== 'number' ||
        Number.isNaN(temperature) ||
        typeof topK !== 'number' ||
        Number.isNaN(topK)
      ) {
        throw new DOMException(
          'The provided temperature and topK must be numbers.',
          'NotSupportedError'
        );
      }

      if (temperature < 0 || temperature > maxTemperature || topK > maxTopK) {
        throw new DOMException(
          'The provided temperature or topK is outside the supported range.',
          'NotSupportedError'
        );
      }
    }

    static async params() {
      return {
        // Values from https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash-lite#:~:text=%2C%20audio/webm-,Parameter%20defaults,-tune.
        defaultTemperature: 1.0,
        defaultTopK: 64,
        maxTemperature: 2.0,
        maxTopK: 64, // Fixed
      };
    }

    static async create(options = {}) {
      const availability = await LanguageModel.availability(options);
      // This will be relevant when the implementation is backed by a local
      // model that needs downloading and simulates the Prompt API's behavior.
      if (availability === 'downloadable' || availability === 'downloading') {
        throw new DOMException(
          'Requires a user gesture when availability is "downloading" or "downloadable".',
          'NotAllowedError'
        );
      }
      const defaults = {
        temperature: 1.0,
        topK: 3,
      };

      const resolvedOptions = { ...defaults, ...options };

      const inCloudParams = {
        model: MODEL_NAME,
        generationConfig: {
          temperature: resolvedOptions.temperature,
          topK: resolvedOptions.topK,
        },
      };

      let initialHistory = [];

      if (
        resolvedOptions.initialPrompts &&
        Array.isArray(resolvedOptions.initialPrompts)
      ) {
        const systemPrompts = resolvedOptions.initialPrompts.filter(
          (p) => p.role === 'system'
        );
        const conversationPrompts = resolvedOptions.initialPrompts.filter(
          (p) => p.role !== 'system'
        );

        if (systemPrompts.length > 0) {
          inCloudParams.systemInstruction = systemPrompts
            .map((p) => p.content)
            .join('\n');
        }
        // Await the conversion of history items (in case of images in history)
        initialHistory = await convertToFirebaseHistory(conversationPrompts);
      }

      const model = getGenerativeModel(ai, {
        mode: InferenceMode.ONLY_IN_CLOUD,
        inCloudParams,
      });

      // If a monitor callback is provided, simulate simple downloadprogress events
      if (typeof resolvedOptions.monitor === 'function') {
        const monitorTarget = new EventTarget();

        // Let the caller attach listeners
        try {
          resolvedOptions.monitor(monitorTarget);
        } catch (e) {
          console.error('Error in monitor callback:', e);
        }

        // Fire two fake downloadprogress events: first with loaded = 0, then loaded = 1
        try {
          const startEvent = new ProgressEvent('downloadprogress', {
            loaded: 0,
            total: 1,
          });
          const endEvent = new ProgressEvent('downloadprogress', {
            loaded: 1,
            total: 1,
          });
          // The `ProgressEvent`'s `currentTarget`, `srcElement` and `target`
          // properties are `EventTarget`, not `CreateMonitor`, when using the
          // polyfill. Hopefully developers won't rely on these properties.
          monitorTarget.dispatchEvent(startEvent);
          monitorTarget.dispatchEvent(endEvent);
        } catch (e) {
          console.error('Error dispatching downloadprogress events:', e);
        }
      }

      return new LanguageModel(
        model,
        initialHistory,
        resolvedOptions,
        inCloudParams
      );
    }

    // Instance Methods

    async clone(options = {}) {
      if (this.#destroyed)
        throw new DOMException('Session is destroyed', 'InvalidStateError');
      // Clone private history
      const historyCopy = JSON.parse(JSON.stringify(this.#history));
      return new LanguageModel(
        this.#model,
        historyCopy,
        {
          ...this.#options,
          ...options,
        },
        this.#inCloudParams
      );
    }

    destroy() {
      this.#destroyed = true;
      this.#history = null;
    }

    async prompt(input, options = {}) {
      if (this.#destroyed)
        throw new DOMException('Session is destroyed', 'InvalidStateError');
      if (options.signal?.aborted)
        throw new DOMException('Aborted', 'AbortError');

      if (options.responseConstraint) {
        const vertexSchema = convertJsonSchemaToVertexSchema(
          options.responseConstraint
        );
        this.#inCloudParams.generationConfig.responseMimeType =
          'application/json';
        this.#inCloudParams.generationConfig.responseSchema = vertexSchema;
        this.#model = getGenerativeModel(ai, {
          mode: InferenceMode.ONLY_IN_CLOUD,
          inCloudParams: this.#inCloudParams,
        });
      }

      // Process input (async conversion of Blob/Canvas/AudioBuffer)
      const parts = await this.#processInput(input);
      const userContent = { role: 'user', parts: parts };

      try {
        // Estimate usage before request to fire quota events if needed
        const { totalTokens } = await this.#model.countTokens({
          contents: [{ role: 'user', parts }],
        });
        if (this.#inputUsage + totalTokens > this.inputQuota)
          this.dispatchEvent(new Event('quotaoverflow'));

        const requestContents = [...this.#history, userContent];

        const result = await this.#model.generateContent({
          contents: requestContents,
        });

        // Exact usage update from the backend response
        if (result.response.usageMetadata?.totalTokenCount) {
          this.#inputUsage = result.response.usageMetadata.totalTokenCount;
        }

        const responseText = result.response.text();

        this.#history.push(userContent);
        this.#history.push({ role: 'model', parts: [{ text: responseText }] });

        return responseText;
      } catch (error) {
        console.error('Firebase AI Logic Error:', error);
        throw error;
      }
    }

    promptStreaming(input, options = {}) {
      if (this.#destroyed)
        throw new DOMException('Session is destroyed', 'InvalidStateError');
      if (options.signal?.aborted)
        throw new DOMException('Aborted', 'AbortError');

      const _this = this; // Capture 'this' to access private fields in callback

      if (options.responseConstraint) {
        const vertexSchema = convertJsonSchemaToVertexSchema(
          options.responseConstraint
        );
        this.#inCloudParams.generationConfig.responseMimeType =
          'application/json';
        this.#inCloudParams.generationConfig.responseSchema = vertexSchema;
        this.#model = getGenerativeModel(ai, {
          mode: InferenceMode.ONLY_IN_CLOUD,
          inCloudParams: this.#inCloudParams,
        });
      }

      const signal = options.signal;

      return new ReadableStream({
        async start(controller) {
          const abortError = new DOMException('Aborted', 'AbortError');

          // If already aborted before the stream starts, error the stream.
          if (signal?.aborted) {
            controller.error(abortError);
            return;
          }

          let aborted = false;
          const onAbort = () => {
            aborted = true;
            try {
              controller.error(abortError);
            } catch {
              // Controller might already be closed/errored; ignore.
            }
          };

          if (signal) {
            signal.addEventListener('abort', onAbort);
          }

          try {
            // Access private methods/fields via captured _this; inside
            // start(), `this` is the underlying source, not the session.
            const parts = await _this.#processInput(input);
            const userContent = { role: 'user', parts: parts };

            // Estimate usage before request to fire quota events if needed
            const { totalTokens } = await _this.#model.countTokens({
              contents: [{ role: 'user', parts }],
            });
            if (_this.#inputUsage + totalTokens > _this.inputQuota)
              _this.dispatchEvent(new Event('quotaoverflow'));

            const requestContents = [..._this.#history, userContent];

            const result = await _this.#model.generateContentStream({
              contents: requestContents,
            });

            let fullResponseText = '';

            for await (const chunk of result.stream) {
              if (aborted) {
                // Try to cancel the underlying iterator; ignore any abort-related errors.
                if (typeof result.stream.return === 'function') {
                  try {
                    await result.stream.return();
                  } catch (e) {
                    // Ignore cancellation errors (including AbortError).
                  }
                }
                return;
              }
              if (chunk.usageMetadata?.totalTokenCount) {
                _this.#inputUsage += chunk.usageMetadata.totalTokenCount;
              }
              const chunkText = chunk.text();
              fullResponseText += chunkText;
              controller.enqueue(chunkText);
            }

            if (!aborted) {
              _this.#history.push(userContent);
              _this.#history.push({
                role: 'model',
                parts: [{ text: fullResponseText }],
              });

              controller.close();
            }
          } catch (error) {
            // If we aborted, we've already signaled an AbortError; otherwise surface the error.
            if (!aborted) {
              controller.error(error);
            }
          } finally {
            if (signal) {
              signal.removeEventListener('abort', onAbort);
            }
          }
        },
      });
    }

    async append(input, options = {}) {
      if (this.#destroyed)
        throw new DOMException('Session is destroyed', 'InvalidStateError');
      if (options.signal?.aborted)
        throw new DOMException('Aborted', 'AbortError');

      const parts = await this.#processInput(input);
      const content = { role: 'user', parts: parts };

      try {
        // Try to get an accurate count first
        const { totalTokens } = await this.#model.countTokens({
          contents: [...this.#history, content],
        });
        this.#inputUsage = totalTokens;
      } catch {
        // Do nothing.
      }

      this.#history.push(content);

      if (this.#inputUsage > this.inputQuota) {
        this.dispatchEvent(new Event('quotaoverflow'));
      }
    }

    async measureInputUsage(input) {
      if (this.#destroyed)
        throw new DOMException('Session is destroyed', 'InvalidStateError');

      try {
        const parts = await this.#processInput(input);
        const { totalTokens } = await this.#model.countTokens({
          contents: [{ role: 'user', parts }],
        });
        return totalTokens;
      } catch (e) {
        // The API can't reject, so just return 0 if we don't know.
        console.warn(
          'The underlying API call failed, so quota usage (0) is not reported accurately.'
        );
        return 0;
      }
    }

    // Private helper to process diverse input types
    async #processInput(input) {
      if (typeof input === 'string') {
        return [{ text: input }];
      }

      if (Array.isArray(input)) {
        if (input.length > 0 && input[0].role) {
          let combinedParts = [];
          for (const msg of input) {
            if (typeof msg.content === 'string') {
              combinedParts.push({ text: msg.content });
              if (msg.prefix) {
                console.warn(
                  "The `prefix` flag isn't supported and was ignored."
                );
              }
            } else if (Array.isArray(msg.content)) {
              for (const c of msg.content) {
                if (c.type === 'text') combinedParts.push({ text: c.value });
                else {
                  const part = await MultimodalConverter.convert(
                    c.type,
                    c.value
                  );
                  combinedParts.push(part);
                }
              }
            }
          }
          return combinedParts;
        }
        return input.map((s) => ({ text: String(s) }));
      }

      return [{ text: JSON.stringify(input) }];
    }
  }

  // Attach to window
  window.LanguageModel = LanguageModel;
  console.log(
    'Polyfill: window.LanguageModel is now backed by Firebase AI Logic.'
  );
})();