prompt-api-polyfill 0.1.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,340 @@
+ import OpenAI from 'https://esm.run/openai';
+ import PolyfillBackend from './base.js';
+ import { DEFAULT_MODELS } from './defaults.js';
+
+ /**
+  * OpenAI API Backend
+  */
+ export default class OpenAIBackend extends PolyfillBackend {
+   #model;
+
+   constructor(config) {
+     super(config.modelName || DEFAULT_MODELS.openai);
+     this.config = config;
+     this.openai = new OpenAI({
+       apiKey: config.apiKey,
+       dangerouslyAllowBrowser: true, // Required for client-side usage
+     });
+   }
+
+   static availability(options = {}) {
+     if (options.expectedInputs) {
+       const hasAudio = options.expectedInputs.some(
+         (input) => input.type === 'audio'
+       );
+       const hasImage = options.expectedInputs.some(
+         (input) => input.type === 'image'
+       );
+       if (hasAudio && hasImage) {
+         return 'unavailable';
+       }
+     }
+     return 'available';
+   }
+
+   createSession(options, inCloudParams) {
+     // OpenAI doesn't have a "session" object like Gemini, so we return a context object
+     // tailored for our generate methods.
+     this.#model = {
+       model: options.modelName || this.modelName,
+       temperature: inCloudParams.generationConfig?.temperature,
+       top_p: 1.0, // Default to 1.0 as topK is not directly supported the same way
+       systemInstruction: inCloudParams.systemInstruction,
+     };
+
+     const config = inCloudParams.generationConfig || {};
+     if (config.responseSchema) {
+       const { schema, wrapped } = this.#fixSchemaForOpenAI(
+         config.responseSchema
+       );
+       this.#model.response_format = {
+         type: 'json_schema',
+         json_schema: {
+           name: 'response',
+           strict: true,
+           schema: schema,
+         },
+       };
+       this.#model.response_wrapped = wrapped;
+     } else if (config.responseMimeType === 'application/json') {
+       this.#model.response_format = { type: 'json_object' };
+     }
+
+     return this.#model;
+   }
+
+   /**
+    * OpenAI Structured Outputs require:
+    * 1. All fields in objects to be marked as 'required'.
+    * 2. Objects to have 'additionalProperties: false'.
+    * 3. The root must be an 'object'.
+    */
+   #fixSchemaForOpenAI(schema) {
+     if (typeof schema !== 'object' || schema === null) {
+       return { schema, wrapped: false };
+     }
+
+     const processNode = (node) => {
+       if (node.type === 'object') {
+         if (node.properties) {
+           node.additionalProperties = false;
+           node.required = Object.keys(node.properties);
+           for (const key in node.properties) {
+             processNode(node.properties[key]);
+           }
+         } else {
+           node.additionalProperties = false;
+           node.required = [];
+         }
+       } else if (node.type === 'array' && node.items) {
+         processNode(node.items);
+       }
+       return node;
+     };
+
+     // Deep clone to avoid side effects
+     const cloned = JSON.parse(JSON.stringify(schema));
+
+     if (cloned.type !== 'object') {
+       // Wrap in object as OpenAI requires object root
+       return {
+         wrapped: true,
+         schema: {
+           type: 'object',
+           properties: { value: cloned },
+           required: ['value'],
+           additionalProperties: false,
+         },
+       };
+     }
+
+     return {
+       wrapped: false,
+       schema: processNode(cloned),
+     };
+   }
+
+   #validateContent(messages) {
+     let hasImage = false;
+     let hasAudio = false;
+
+     for (const msg of messages) {
+       if (Array.isArray(msg.content)) {
+         for (const part of msg.content) {
+           if (part.type === 'image_url') {
+             hasImage = true;
+           }
+           if (part.type === 'input_audio') {
+             hasAudio = true;
+           }
+         }
+       }
+     }
+
+     if (hasImage && hasAudio) {
+       throw new Error(
+         'OpenAI backend does not support mixing images and audio in the same session. Please start a new session.'
+       );
+     }
+
+     return { hasImage, hasAudio };
+   }
+
+   #routeModel(hasAudio) {
+     // If the user explicitly provided a model in the session options, respect it.
+     // Otherwise, pick based on content.
+     if (this.#model.model !== this.modelName) {
+       return this.#model.model;
+     }
+
+     return hasAudio ? `${this.modelName}-audio-preview` : this.modelName;
+   }
+
+   async generateContent(contents) {
+     const { messages } = this.#convertContentsToInput(
+       contents,
+       this.#model.systemInstruction
+     );
+     const { hasAudio } = this.#validateContent(messages);
+     const model = this.#routeModel(hasAudio);
+
+     if (
+       model === `${this.modelName}-audio-preview` &&
+       this.#model.response_format
+     ) {
+       throw new DOMException(
+         `OpenAI audio model ('${model}') does not support structured outputs (responseConstraint).`,
+         'NotSupportedError'
+       );
+     }
+
+     const options = {
+       model: model,
+       messages: messages,
+     };
+
+     if (this.#model.temperature > 0) {
+       options.temperature = this.#model.temperature;
+     }
+
+     if (this.#model.response_format) {
+       options.response_format = this.#model.response_format;
+     }
+
+     try {
+       const response = await this.openai.chat.completions.create(options);
+
+       const choice = response.choices[0];
+       let text = choice.message.content;
+
+       if (this.#model.response_wrapped && text) {
+         try {
+           const parsed = JSON.parse(text);
+           if (parsed && typeof parsed === 'object' && 'value' in parsed) {
+             text = JSON.stringify(parsed.value);
+           }
+         } catch {
+           // Ignore parsing error, return raw text
+         }
+       }
+
+       const usage = response.usage?.prompt_tokens || 0;
+
+       return { text, usage };
+     } catch (error) {
+       console.error('OpenAI Generate Content Error:', error);
+       throw error;
+     }
+   }
+
+   async generateContentStream(contents) {
+     const { messages } = this.#convertContentsToInput(
+       contents,
+       this.#model.systemInstruction
+     );
+     const { hasAudio } = this.#validateContent(messages);
+     const model = this.#routeModel(hasAudio);
+
+     if (
+       model === `${this.modelName}-audio-preview` &&
+       this.#model.response_format
+     ) {
+       throw new DOMException(
+         `OpenAI audio model ('${model}') does not support structured outputs (responseConstraint).`,
+         'NotSupportedError'
+       );
+     }
+
+     const options = {
+       model: model,
+       messages: messages,
+       stream: true,
+     };
+
+     if (this.#model.temperature > 0) {
+       options.temperature = this.#model.temperature;
+     }
+
+     if (this.#model.response_format) {
+       options.response_format = this.#model.response_format;
+     }
+
+     try {
+       const stream = await this.openai.chat.completions.create(options);
+
+       // Convert OpenAI stream to an AsyncIterable that yields chunks
+       return (async function* () {
+         let firstChunk = true;
+         for await (const chunk of stream) {
+           let text = chunk.choices[0]?.delta?.content;
+           if (text) {
+             // Note: Unwrapping a wrapped object in a stream is complex.
+             // For now, streaming wrapped results will yield the full JSON including the wrapper.
+             yield {
+               text: () => text,
+               usageMetadata: { totalTokenCount: 0 },
+             };
+           }
+         }
+       })();
+     } catch (error) {
+       console.error('OpenAI Generate Content Stream Error:', error);
+       throw error;
+     }
+   }
+
+   async countTokens(contents) {
+     // OpenAI does not provide a public API endpoint for counting tokens before generation.
+     // Implementing countTokens strictly requires a tokenizer like `tiktoken`.
+     // For this initial implementation, we use a character-based approximation (e.g., text.length / 4)
+     // to avoid adding heavy WASM dependencies (`tiktoken`) to the polyfill.
+     let totalText = '';
+     if (this.#model && this.#model.systemInstruction) {
+       totalText += this.#model.systemInstruction;
+     }
+
+     if (Array.isArray(contents)) {
+       for (const content of contents) {
+         if (!content.parts) {
+           continue;
+         }
+         for (const part of content.parts) {
+           if (part.text) {
+             totalText += part.text;
+           } else if (part.inlineData) {
+             // Approximate image token cost (e.g., ~1000 chars worth)
+             totalText += ' '.repeat(1000);
+           }
+         }
+       }
+     }
+
+     return Math.ceil(totalText.length / 4);
+   }
+
+   #convertContentsToInput(contents, systemInstruction) {
+     const messages = [];
+
+     // System instructions
+     if (systemInstruction) {
+       messages.push({
+         role: 'system',
+         content: systemInstruction,
+       });
+     }
+
+     for (const content of contents) {
+       const role = content.role === 'model' ? 'assistant' : 'user';
+       const contentParts = [];
+
+       for (const part of content.parts) {
+         if (part.text) {
+           contentParts.push({ type: 'text', text: part.text });
+         } else if (part.inlineData) {
+           const { data, mimeType } = part.inlineData;
+           if (mimeType.startsWith('image/')) {
+             contentParts.push({
+               type: 'image_url',
+               image_url: { url: `data:${mimeType};base64,${data}` },
+             });
+           } else if (mimeType.startsWith('audio/')) {
+             contentParts.push({
+               type: 'input_audio',
+               input_audio: {
+                 data: data,
+                 format: mimeType.split('/')[1] === 'mpeg' ? 'mp3' : 'wav',
+               },
+             });
+           }
+         }
+       }
+
+       // Simplification: if only one text part, just send string content for better compatibility
+       // but multimodal models usually prefer the array format.
+       // We'll keep the array format for consistency with multimodal inputs.
+       messages.push({ role, content: contentParts });
+     }
+
+     return { messages };
+   }
+ }
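For illustration only (hypothetical input, not part of the published file): given the Structured Outputs rules described in the #fixSchemaForOpenAI JSDoc above, a non-object root schema is wrapped roughly as follows.

    // Hypothetical before/after pair for the schema rewrite sketched above.
    const input = { type: 'string' };
    const rewritten = {
      type: 'object',
      properties: { value: { type: 'string' } },
      required: ['value'],
      additionalProperties: false,
    };
    // #fixSchemaForOpenAI(input) returns { wrapped: true, schema: rewritten },
    // and generateContent() later unwraps the 'value' property from the model's JSON reply.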
@@ -0,0 +1,106 @@
+ import { pipeline, TextStreamer } from 'https://esm.run/@huggingface/transformers';
+ import PolyfillBackend from './base.js';
+ import { DEFAULT_MODELS } from './defaults.js';
+
+ /**
+  * Transformers.js (ONNX Runtime) Backend
+  */
+ export default class TransformersBackend extends PolyfillBackend {
+   #generator;
+   #tokenizer;
+
+   constructor(config) {
+     super(config.modelName || DEFAULT_MODELS.transformers);
+   }
+
+   async #ensureGenerator() {
+     if (!this.#generator) {
+       console.log(`[Transformers.js] Loading model: ${this.modelName}`);
+       this.#generator = await pipeline('text-generation', this.modelName, {
+         device: 'webgpu',
+       });
+       this.#tokenizer = this.#generator.tokenizer;
+     }
+     return this.#generator;
+   }
+
+   async createSession(options, inCloudParams) {
+     // Initializing the generator can be slow, so we do it lazily or here.
+     // For now, let's trigger the loading.
+     await this.#ensureGenerator();
+
+     // We don't really have "sessions" in the same way Gemini does,
+     // but we can store the generation config.
+     this.generationConfig = {
+       max_new_tokens: 512, // Default limit
+       temperature: inCloudParams.generationConfig?.temperature || 1.0,
+       top_p: 1.0,
+       do_sample: inCloudParams.generationConfig?.temperature > 0,
+     };
+
+     return this.#generator;
+   }
+
+   async generateContent(contents) {
+     const generator = await this.#ensureGenerator();
+     const prompt = this.#convertContentsToPrompt(contents);
+
+     const output = await generator(prompt, this.generationConfig);
+     const text = output[0].generated_text.slice(prompt.length);
+
+     // Approximate usage
+     const usage = await this.countTokens(contents);
+
+     return { text, usage };
+   }
+
+   async generateContentStream(contents) {
+     const generator = await this.#ensureGenerator();
+     const prompt = this.#convertContentsToPrompt(contents);
+
+     const streamer = new TextStreamer(this.#tokenizer, {
+       skip_prompt: true,
+       skip_special_tokens: true,
+     });
+
+     // Run generation in the background (don't await)
+     generator(prompt, {
+       ...this.generationConfig,
+       streamer,
+     });
+
+     // streamer is an AsyncIterable in Transformers.js v3
+     return (async function* () {
+       for await (const newText of streamer) {
+         yield {
+           text: () => newText,
+           usageMetadata: { totalTokenCount: 0 },
+         };
+       }
+     })();
+   }
+
+   async countTokens(contents) {
+     await this.#ensureGenerator();
+     const text = this.#convertContentsToPrompt(contents);
+     const { input_ids } = await this.#tokenizer(text);
+     return input_ids.size;
+   }
+
+   #convertContentsToPrompt(contents) {
+     // Simple ChatML-like format for Qwen/Llama
+     let prompt = '';
+     for (const content of contents) {
+       const role = content.role === 'model' ? 'assistant' : 'user';
+       prompt += `<|im_start|>${role}\n`;
+       for (const part of content.parts) {
+         if (part.text) {
+           prompt += part.text;
+         }
+       }
+       prompt += '<|im_end|>\n';
+     }
+     prompt += '<|im_start|>assistant\n';
+     return prompt;
+   }
+ }
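For reference (illustrative only, not part of the diff): the ChatML-style prompt built by #convertContentsToPrompt above renders a single user turn roughly like this.

    // Hypothetical Prompt API-style contents:
    const contents = [{ role: 'user', parts: [{ text: 'Hello!' }] }];
    // #convertContentsToPrompt(contents) produces:
    // '<|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant\n'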
@@ -6,7 +6,9 @@ import { Schema } from 'https://esm.run/firebase/ai';
   * @returns {Schema} - The Firebase Vertex AI Schema instance.
   */
  export function convertJsonSchemaToVertexSchema(jsonSchema) {
-   if (!jsonSchema) return undefined;
+   if (!jsonSchema) {
+     return undefined;
+   }
 
    // Extract common base parameters supported by all Schema types
    const baseParams = {
@@ -1,7 +1,11 @@
  export default class MultimodalConverter {
    static async convert(type, value) {
-     if (type === 'image') return this.processImage(value);
-     if (type === 'audio') return this.processAudio(value);
+     if (type === 'image') {
+       return this.processImage(value);
+     }
+     if (type === 'audio') {
+       return this.processAudio(value);
+     }
      throw new DOMException(
        `Unsupported media type: ${type}`,
        'NotSupportedError'
@@ -16,13 +20,16 @@ export default class MultimodalConverter {
 
      // BufferSource (ArrayBuffer/View) -> Sniff or Default
      if (ArrayBuffer.isView(source) || source instanceof ArrayBuffer) {
-       const buffer = source instanceof ArrayBuffer ? source : source.buffer;
+       const u8 =
+         source instanceof ArrayBuffer
+           ? new Uint8Array(source)
+           : new Uint8Array(source.buffer, source.byteOffset, source.byteLength);
+       const buffer = u8.buffer.slice(
+         u8.byteOffset,
+         u8.byteOffset + u8.byteLength
+       );
        const base64 = this.arrayBufferToBase64(buffer);
-       // Basic sniffing for PNG/JPEG magic bytes
-       const u8 = new Uint8Array(buffer);
-       let mimeType = 'image/png'; // Default
-       if (u8[0] === 0xff && u8[1] === 0xd8) mimeType = 'image/jpeg';
-       else if (u8[0] === 0x89 && u8[1] === 0x50) mimeType = 'image/png';
+       const mimeType = this.#sniffImageMimeType(u8) || 'image/png';
 
        return { inlineData: { data: base64, mimeType } };
      }
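The byteOffset/byteLength handling added above matters when the caller passes a typed-array view that does not cover its whole backing buffer; a minimal illustration with hypothetical values (not from the package):

    // A view over the middle of a larger buffer:
    const backing = new ArrayBuffer(16);
    const view = new Uint8Array(backing, 4, 8);
    // The old code encoded view.buffer, i.e. all 16 bytes of the backing buffer.
    // Slicing by byteOffset/byteLength encodes only the 8 bytes the view covers.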
@@ -32,6 +39,111 @@ export default class MultimodalConverter {
      return this.canvasSourceToInlineData(source);
    }
 
+   static #sniffImageMimeType(u8) {
+     const len = u8.length;
+     if (len < 4) {
+       return null;
+     }
+
+     // JPEG: FF D8 FF
+     if (u8[0] === 0xff && u8[1] === 0xd8 && u8[2] === 0xff) {
+       return 'image/jpeg';
+     }
+
+     // PNG: 89 50 4E 47 0D 0A 1A 0A
+     if (
+       u8[0] === 0x89 &&
+       u8[1] === 0x50 &&
+       u8[2] === 0x4e &&
+       u8[3] === 0x47 &&
+       u8[4] === 0x0d &&
+       u8[5] === 0x0a &&
+       u8[6] === 0x1a &&
+       u8[7] === 0x0a
+     ) {
+       return 'image/png';
+     }
+
+     // GIF: GIF87a / GIF89a
+     if (u8[0] === 0x47 && u8[1] === 0x49 && u8[2] === 0x46 && u8[3] === 0x38) {
+       return 'image/gif';
+     }
+
+     // WebP: RIFF (offset 0) + WEBP (offset 8)
+     if (
+       u8[0] === 0x52 &&
+       u8[1] === 0x49 &&
+       u8[2] === 0x46 &&
+       u8[3] === 0x46 &&
+       u8[8] === 0x57 &&
+       u8[9] === 0x45 &&
+       u8[10] === 0x42 &&
+       u8[11] === 0x50
+     ) {
+       return 'image/webp';
+     }
+
+     // BMP: BM
+     if (u8[0] === 0x42 && u8[1] === 0x4d) {
+       return 'image/bmp';
+     }
+
+     // ICO: 00 00 01 00
+     if (u8[0] === 0x00 && u8[1] === 0x00 && u8[2] === 0x01 && u8[3] === 0x00) {
+       return 'image/x-icon';
+     }
+
+     // TIFF: II* (LE) / MM* (BE)
+     if (
+       (u8[0] === 0x49 && u8[1] === 0x49 && u8[2] === 0x2a) ||
+       (u8[0] === 0x4d && u8[1] === 0x4d && u8[2] === 0x2a)
+     ) {
+       return 'image/tiff';
+     }
+
+     // ISOBMFF (AVIF / HEIC / HEIF)
+     // "ftyp" at offset 4
+     if (u8[4] === 0x66 && u8[5] === 0x74 && u8[6] === 0x79 && u8[7] === 0x70) {
+       const type = String.fromCharCode(u8[8], u8[9], u8[10], u8[11]);
+       if (type === 'avif' || type === 'avis') {
+         return 'image/avif';
+       }
+       if (
+         type === 'heic' ||
+         type === 'heix' ||
+         type === 'hevc' ||
+         type === 'hevx'
+       ) {
+         return 'image/heic';
+       }
+       if (type === 'mif1' || type === 'msf1') {
+         return 'image/heif';
+       }
+     }
+
+     // JPEG XL: FF 0A or container bits
+     if (u8[0] === 0xff && u8[1] === 0x0a) {
+       return 'image/jxl';
+     }
+     // Container: 00 00 00 0c 4a 58 4c 20 0d 0a 87 0a (JXL )
+     if (u8[0] === 0x00 && u8[4] === 0x4a && u8[5] === 0x58 && u8[6] === 0x4c) {
+       return 'image/jxl';
+     }
+
+     // JPEG 2000
+     if (u8[0] === 0x00 && u8[4] === 0x6a && u8[5] === 0x50 && u8[6] === 0x20) {
+       return 'image/jp2';
+     }
+
+     // SVG: Check for <svg or <?xml (heuristics)
+     const preview = String.fromCharCode(...u8.slice(0, 100)).toLowerCase();
+     if (preview.includes('<svg') || preview.includes('<?xml')) {
+       return 'image/svg+xml';
+     }
+
+     return null;
+   }
+
    static async processAudio(source) {
      // Blob
      if (source instanceof Blob) {
@@ -46,8 +158,20 @@ export default class MultimodalConverter {
      }
 
      // BufferSource -> Assume it's already an audio file (mp3/wav)
-     if (ArrayBuffer.isView(source) || source instanceof ArrayBuffer) {
-       const buffer = source instanceof ArrayBuffer ? source : source.buffer;
+     const isArrayBuffer =
+       source instanceof ArrayBuffer ||
+       (source &&
+         source.constructor &&
+         source.constructor.name === 'ArrayBuffer');
+     const isView =
+       ArrayBuffer.isView(source) ||
+       (source &&
+         source.buffer &&
+         (source.buffer instanceof ArrayBuffer ||
+           source.buffer.constructor.name === 'ArrayBuffer'));
+
+     if (isArrayBuffer || isView) {
+       const buffer = isArrayBuffer ? source : source.buffer;
        return {
          inlineData: {
            data: this.arrayBufferToBase64(buffer),
@@ -65,14 +189,16 @@ export default class MultimodalConverter {
      return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onloadend = () => {
-         if (reader.error) reject(reader.error);
-         else
+         if (reader.error) {
+           reject(reader.error);
+         } else {
            resolve({
              inlineData: {
                data: reader.result.split(',')[1],
                mimeType: blob.type,
              },
            });
+         }
        };
        reader.readAsDataURL(blob);
      });
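A rough usage sketch of the converter changes (assumes the rest of multimodal-converter.js as published; the byte values are just the PNG signature):

    // Hypothetical call; raw bytes are now MIME-sniffed via #sniffImageMimeType.
    const pngBytes = new Uint8Array([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]);
    const part = await MultimodalConverter.convert('image', pngBytes);
    // part -> { inlineData: { data: '<base64>', mimeType: 'image/png' } }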
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "prompt-api-polyfill",
-   "version": "0.1.0",
-   "description": "Polyfill for the Prompt API (`LanguageModel`) backed by Firebase AI Logic.",
+   "version": "0.3.0",
+   "description": "Polyfill for the Prompt API (`LanguageModel`) backed by Firebase AI Logic, Gemini API, or OpenAI API.",
    "type": "module",
    "main": "./prompt-api-polyfill.js",
    "module": "./prompt-api-polyfill.js",
@@ -14,7 +14,8 @@
    "json-schema-converter.js",
    "multimodal-converter.js",
    "prompt-api-polyfill.js",
-   "dot_env.json"
+   "dot_env.json",
+   "backends/"
  ],
  "sideEffects": true,
  "keywords": [
@@ -22,6 +23,8 @@
    "language-model",
    "polyfill",
    "firebase",
+   "gemini",
+   "openai",
    "web-ai"
  ],
  "repository": {
@@ -35,9 +38,22 @@
  "homepage": "https://github.com/GoogleChromeLabs/web-ai-demos/tree/main/prompt-api-polyfill/README.md",
  "license": "Apache-2.0",
  "scripts": {
-   "start": "npx http-server"
+   "start": "npx http-server",
+   "test:browser": "node scripts/list-backends.js && vitest run -c vitest.browser.config.js .browser.test.js",
+   "fix": "npx prettier --write ."
  },
  "devDependencies": {
-   "http-server": "^14.1.1"
+   "@firebase/ai": "^2.6.1",
+   "@google/generative-ai": "^0.24.1",
+   "@vitest/browser": "^4.0.17",
+   "@vitest/browser-playwright": "^4.0.17",
+   "ajv": "^8.17.1",
+   "firebase": "^12.7.0",
+   "http-server": "^14.1.1",
+   "jsdom": "^27.4.0",
+   "openai": "^6.16.0",
+   "playwright": "^1.57.0",
+   "prettier-plugin-curly": "^0.4.1",
+   "vitest": "^4.0.17"
  }
 }