@firebase/ai 2.1.0-canary.9b63cd60e → 2.1.0-canary.cbef6c6e5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-public.d.ts +115 -1
- package/dist/ai.d.ts +153 -1
- package/dist/esm/index.esm.js +374 -362
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/index.d.ts +2 -1
- package/dist/esm/src/methods/chrome-adapter.d.ts +7 -3
- package/dist/esm/src/service.d.ts +4 -2
- package/dist/esm/src/types/imagen/internal.d.ts +10 -0
- package/dist/index.cjs.js +374 -362
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +12 -295
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +12 -295
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/index.d.ts +2 -1
- package/dist/src/methods/chrome-adapter.d.ts +7 -3
- package/dist/src/service.d.ts +4 -2
- package/dist/src/types/imagen/internal.d.ts +10 -0
- package/package.json +10 -8
package/dist/index.node.mjs
CHANGED
@@ -4,7 +4,7 @@ import { FirebaseError, getModularInstance } from '@firebase/util';
 import { Logger } from '@firebase/logger';
 
 var name = "@firebase/ai";
-var version = "2.1.0-canary.9b63cd60e";
+var version = "2.1.0-canary.cbef6c6e5";
 
 /**
  * @license
@@ -636,9 +636,10 @@ class VertexAIBackend extends Backend {
  * limitations under the License.
  */
 class AIService {
-    constructor(app, backend, authProvider, appCheckProvider) {
+    constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) {
         this.app = app;
         this.backend = backend;
+        this.chromeAdapterFactory = chromeAdapterFactory;
         const appCheck = appCheckProvider?.getImmediate({ optional: true });
         const auth = authProvider?.getImmediate({ optional: true });
         this.auth = auth || null;
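
The constructor change above threads an adapter factory into AIService instead of having the service build a Chrome adapter itself; getGenerativeModel later invokes it as ai.chromeAdapterFactory?.(mode, window, onDeviceParams) (see the last hunk). Below is a minimal TypeScript sketch of what such a factory could look like; ChromeAdapterFactory, FakeChromeAdapter and browserFactory are illustrative names inferred from this diff, not the SDK's real internal types.

    // Sketch only: the shape is inferred from how the factory is invoked in
    // getGenerativeModel below: factory(mode, window-or-undefined, onDeviceParams).
    interface FakeChromeAdapter {
      isAvailable(request: unknown): Promise<boolean>;
    }

    type ChromeAdapterFactory = (
      mode?: string,
      win?: Window,
      onDeviceParams?: unknown
    ) => FakeChromeAdapter | undefined;

    // A browser bundle could register something like this factory...
    const browserFactory: ChromeAdapterFactory = (mode, win) =>
      win ? { isAvailable: async () => mode !== 'only_in_cloud' } : undefined;

    // ...while the Node bundle registers no factory at all, so
    // ai.chromeAdapterFactory?.(...) simply evaluates to undefined.
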
@@ -1324,8 +1325,9 @@ async function handlePredictResponse(response) {
                 gcsURI: prediction.gcsUri
             });
         }
+        else if (prediction.safetyAttributes) ;
         else {
-            throw new AIError(AIErrorCode.RESPONSE_ERROR, `
+            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`);
         }
     }
     return { images, filteredReason };
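
With includeSafetyAttributes now being sent (next hunk), the Imagen backend can return predictions that carry only safetyAttributes; the added branch above silently skips those instead of throwing RESPONSE_ERROR. Here is an illustrative TypeScript reduction of that branching: only the two fields visible in this hunk (gcsUri, safetyAttributes) are modeled, and the real function also handles other prediction shapes not shown here.

    // Reduced model of the per-prediction handling in handlePredictResponse.
    interface RawPrediction {
      gcsUri?: string;
      safetyAttributes?: Record<string, unknown>;
    }

    function classify(prediction: RawPrediction): 'image' | 'skipped' | 'error' {
      if (prediction.gcsUri) {
        return 'image';   // collected into the images array
      } else if (prediction.safetyAttributes) {
        return 'skipped'; // new: ignored rather than treated as an unexpected element
      }
      return 'error';     // still throws "Unexpected element in 'predictions' array..."
    }

    console.log(classify({ safetyAttributes: { blocked: false } })); // 'skipped'
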
@@ -1866,7 +1868,8 @@ function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, n
             addWatermark,
             safetyFilterLevel,
             personGeneration: personFilterLevel,
-            includeRaiReason: true
+            includeRaiReason: true,
+            includeSafetyAttributes: true
         }
     };
     return body;
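
The request-body change above means every Imagen predict call now asks the backend for safety attributes alongside RAI reasons. A hedged sketch of the parameters block this function builds follows; the example values stand in for fields whose actual values come from the caller's generation config and are not shown in this diff.

    // Illustrative only: example values, not SDK defaults.
    const parameters = {
      addWatermark: true,                        // example value
      safetyFilterLevel: 'block_low_and_above',  // example value
      personGeneration: 'allow_adult',           // example value for personFilterLevel
      includeRaiReason: true,
      includeSafetyAttributes: true              // newly sent; enables the safetyAttributes predictions handled above
    };
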
@@ -2345,292 +2348,6 @@ class ImagenModel extends AIModel {
     }
 }
 
-/**
- * @internal
- */
-var Availability;
-(function (Availability) {
-    Availability["UNAVAILABLE"] = "unavailable";
-    Availability["DOWNLOADABLE"] = "downloadable";
-    Availability["DOWNLOADING"] = "downloading";
-    Availability["AVAILABLE"] = "available";
-})(Availability || (Availability = {}));
-
-/**
- * @license
- * Copyright 2025 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * Defines an inference "backend" that uses Chrome's on-device model,
- * and encapsulates logic for detecting when on-device inference is
- * possible.
- */
-class ChromeAdapterImpl {
-    constructor(languageModelProvider, mode, onDeviceParams = {
-        createOptions: {
-            // Defaults to support image inputs for convenience.
-            expectedInputs: [{ type: 'image' }]
-        }
-    }) {
-        this.languageModelProvider = languageModelProvider;
-        this.mode = mode;
-        this.onDeviceParams = onDeviceParams;
-        this.isDownloading = false;
-    }
-    /**
-     * Checks if a given request can be made on-device.
-     *
-     * Encapsulates a few concerns:
-     *   the mode
-     *   API existence
-     *   prompt formatting
-     *   model availability, including triggering download if necessary
-     *
-     *
-     * Pros: callers needn't be concerned with details of on-device availability.
-     * Cons: this method spans a few concerns and splits request validation from usage.
-     * If instance variables weren't already part of the API, we could consider a better
-     * separation of concerns.
-     */
-    async isAvailable(request) {
-        if (!this.mode) {
-            logger.debug(`On-device inference unavailable because mode is undefined.`);
-            return false;
-        }
-        if (this.mode === InferenceMode.ONLY_IN_CLOUD) {
-            logger.debug(`On-device inference unavailable because mode is "only_in_cloud".`);
-            return false;
-        }
-        // Triggers out-of-band download so model will eventually become available.
-        const availability = await this.downloadIfAvailable();
-        if (this.mode === InferenceMode.ONLY_ON_DEVICE) {
-            // If it will never be available due to API inavailability, throw.
-            if (availability === Availability.UNAVAILABLE) {
-                throw new AIError(AIErrorCode.API_NOT_ENABLED, 'Local LanguageModel API not available in this environment.');
-            }
-            else if (availability === Availability.DOWNLOADABLE ||
-                availability === Availability.DOWNLOADING) {
-                // TODO(chholland): Better user experience during download - progress?
-                logger.debug(`Waiting for download of LanguageModel to complete.`);
-                await this.downloadPromise;
-                return true;
-            }
-            return true;
-        }
-        // Applies prefer_on_device logic.
-        if (availability !== Availability.AVAILABLE) {
-            logger.debug(`On-device inference unavailable because availability is "${availability}".`);
-            return false;
-        }
-        if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {
-            logger.debug(`On-device inference unavailable because request is incompatible.`);
-            return false;
-        }
-        return true;
-    }
-    /**
-     * Generates content on device.
-     *
-     * @remarks
-     * This is comparable to {@link GenerativeModel.generateContent} for generating content in
-     * Cloud.
-     * @param request - a standard Firebase AI {@link GenerateContentRequest}
-     * @returns {@link Response}, so we can reuse common response formatting.
-     */
-    async generateContent(request) {
-        const session = await this.createSession();
-        const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
-        const text = await session.prompt(contents, this.onDeviceParams.promptOptions);
-        return ChromeAdapterImpl.toResponse(text);
-    }
-    /**
-     * Generates content stream on device.
-     *
-     * @remarks
-     * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
-     * Cloud.
-     * @param request - a standard Firebase AI {@link GenerateContentRequest}
-     * @returns {@link Response}, so we can reuse common response formatting.
-     */
-    async generateContentStream(request) {
-        const session = await this.createSession();
-        const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
-        const stream = session.promptStreaming(contents, this.onDeviceParams.promptOptions);
-        return ChromeAdapterImpl.toStreamResponse(stream);
-    }
-    async countTokens(_request) {
-        throw new AIError(AIErrorCode.REQUEST_ERROR, 'Count Tokens is not yet available for on-device model.');
-    }
-    /**
-     * Asserts inference for the given request can be performed by an on-device model.
-     */
-    static isOnDeviceRequest(request) {
-        // Returns false if the prompt is empty.
-        if (request.contents.length === 0) {
-            logger.debug('Empty prompt rejected for on-device inference.');
-            return false;
-        }
-        for (const content of request.contents) {
-            if (content.role === 'function') {
-                logger.debug(`"Function" role rejected for on-device inference.`);
-                return false;
-            }
-            // Returns false if request contains an image with an unsupported mime type.
-            for (const part of content.parts) {
-                if (part.inlineData &&
-                    ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(part.inlineData.mimeType) === -1) {
-                    logger.debug(`Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`);
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-    /**
-     * Encapsulates logic to get availability and download a model if one is downloadable.
-     */
-    async downloadIfAvailable() {
-        const availability = await this.languageModelProvider?.availability(this.onDeviceParams.createOptions);
-        if (availability === Availability.DOWNLOADABLE) {
-            this.download();
-        }
-        return availability;
-    }
-    /**
-     * Triggers out-of-band download of an on-device model.
-     *
-     * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
-     * LanguageModel.create.
-     *
-     * Since Chrome manages the download, the SDK can only avoid redundant download requests by
-     * tracking if a download has previously been requested.
-     */
-    download() {
-        if (this.isDownloading) {
-            return;
-        }
-        this.isDownloading = true;
-        this.downloadPromise = this.languageModelProvider
-            ?.create(this.onDeviceParams.createOptions)
-            .finally(() => {
-            this.isDownloading = false;
-        });
-    }
-    /**
-     * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
-     */
-    static async toLanguageModelMessage(content) {
-        const languageModelMessageContents = await Promise.all(content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent));
-        return {
-            role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),
-            content: languageModelMessageContents
-        };
-    }
-    /**
-     * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
-     */
-    static async toLanguageModelMessageContent(part) {
-        if (part.text) {
-            return {
-                type: 'text',
-                value: part.text
-            };
-        }
-        else if (part.inlineData) {
-            const formattedImageContent = await fetch(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`);
-            const imageBlob = await formattedImageContent.blob();
-            const imageBitmap = await createImageBitmap(imageBlob);
-            return {
-                type: 'image',
-                value: imageBitmap
-            };
-        }
-        throw new AIError(AIErrorCode.REQUEST_ERROR, `Processing of this Part type is not currently supported.`);
-    }
-    /**
-     * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
-     */
-    static toLanguageModelMessageRole(role) {
-        // Assumes 'function' rule has been filtered by isOnDeviceRequest
-        return role === 'model' ? 'assistant' : 'user';
-    }
-    /**
-     * Abstracts Chrome session creation.
-     *
-     * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
-     * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
-     * inference.
-     *
-     * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
-     * new session is created before an old session is destroyed.
-     */
-    async createSession() {
-        if (!this.languageModelProvider) {
-            throw new AIError(AIErrorCode.UNSUPPORTED, 'Chrome AI requested for unsupported browser version.');
-        }
-        const newSession = await this.languageModelProvider.create(this.onDeviceParams.createOptions);
-        if (this.oldSession) {
-            this.oldSession.destroy();
-        }
-        // Holds session reference, so model isn't unloaded from memory.
-        this.oldSession = newSession;
-        return newSession;
-    }
-    /**
-     * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
-     */
-    static toResponse(text) {
-        return {
-            json: async () => ({
-                candidates: [
-                    {
-                        content: {
-                            parts: [{ text }]
-                        }
-                    }
-                ]
-            })
-        };
-    }
-    /**
-     * Formats string stream returned by Chrome as SSE returned by Firebase AI.
-     */
-    static toStreamResponse(stream) {
-        const encoder = new TextEncoder();
-        return {
-            body: stream.pipeThrough(new TransformStream({
-                transform(chunk, controller) {
-                    const json = JSON.stringify({
-                        candidates: [
-                            {
-                                content: {
-                                    role: 'model',
-                                    parts: [{ text: chunk }]
-                                }
-                            }
-                        ]
-                    });
-                    controller.enqueue(encoder.encode(`data: ${json}\n\n`));
-                }
-            }))
-        };
-    }
-}
-// Visible for testing
-ChromeAdapterImpl.SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];
-
 /**
  * @license
  * Copyright 2024 Google LLC
@@ -3018,11 +2735,11 @@ function getGenerativeModel(ai, modelParams, requestOptions) {
     if (!inCloudParams.model) {
         throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`);
     }
-
-
-
-
-
+    /**
+     * An AIService registered by index.node.ts will not have a
+     * chromeAdapterFactory() method.
+     */
+    const chromeAdapter = ai.chromeAdapterFactory?.(hybridParams.mode, typeof window === 'undefined' ? undefined : window, hybridParams.onDeviceParams);
     return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);
 }
 /**
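
Taken together with the AIService change, the hunk above is what lets the Node entry point drop ChromeAdapterImpl entirely: the adapter is resolved through an optional factory, and when no factory was registered the optional call yields undefined, so all inference goes to the cloud. A hedged usage sketch from the application side follows; the hybrid option names (mode, inCloudParams) appear in this diff, while the mode string and model name are illustrative and should be checked against ai-public.d.ts.

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel } from '@firebase/ai';

    const app = initializeApp({ /* Firebase config */ });
    const ai = getAI(app);

    // In a browser bundle, ai.chromeAdapterFactory can build an on-device adapter
    // for this mode; in the Node bundle no factory is registered, so the optional
    // call returns undefined and GenerativeModel receives no chromeAdapter.
    const model = getGenerativeModel(ai, {
      mode: 'prefer_on_device',                    // illustrative InferenceMode value
      inCloudParams: { model: 'gemini-2.0-flash' } // illustrative model name
    });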