@firebase/ai 2.5.0 → 2.6.0-20251113021847
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-public.d.ts +102 -0
- package/dist/ai.d.ts +129 -0
- package/dist/esm/index.esm.js +324 -105
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/api.d.ts +23 -1
- package/dist/esm/src/backend.d.ts +24 -0
- package/dist/esm/src/constants.d.ts +1 -0
- package/dist/esm/src/factory-node.d.ts +19 -0
- package/dist/esm/src/methods/generate-content.d.ts +2 -0
- package/dist/esm/src/models/template-generative-model.d.ts +64 -0
- package/dist/esm/src/models/template-imagen-model.d.ts +51 -0
- package/dist/esm/src/models/utils.d.ts +26 -0
- package/dist/esm/src/requests/request.d.ts +33 -15
- package/dist/esm/src/service.d.ts +3 -4
- package/dist/esm/src/types/chrome-adapter.d.ts +5 -0
- package/dist/index.cjs.js +327 -104
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +429 -188
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +426 -189
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/api.d.ts +23 -1
- package/dist/src/backend.d.ts +24 -0
- package/dist/src/constants.d.ts +1 -0
- package/dist/src/factory-node.d.ts +19 -0
- package/dist/src/methods/generate-content.d.ts +2 -0
- package/dist/src/models/template-generative-model.d.ts +64 -0
- package/dist/src/models/template-imagen-model.d.ts +51 -0
- package/dist/src/models/utils.d.ts +26 -0
- package/dist/src/requests/request.d.ts +33 -15
- package/dist/src/service.d.ts +3 -4
- package/dist/src/types/chrome-adapter.d.ts +5 -0
- package/package.json +1 -1
package/dist/esm/index.esm.js
CHANGED
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
 import { Logger } from '@firebase/logger';
 
 var name = "@firebase/ai";
-var version = "2.5.0";
+var version = "2.6.0-20251113021847";
 
 /**
  * @license
@@ -766,6 +766,18 @@ class GoogleAIBackend extends Backend {
     constructor() {
         super(BackendType.GOOGLE_AI);
     }
+    /**
+     * @internal
+     */
+    _getModelPath(project, model) {
+        return `/${DEFAULT_API_VERSION}/projects/${project}/${model}`;
+    }
+    /**
+     * @internal
+     */
+    _getTemplatePath(project, templateId) {
+        return `/${DEFAULT_API_VERSION}/projects/${project}/templates/${templateId}`;
+    }
 }
 /**
  * Configuration class for the Vertex AI Gemini API.
@@ -792,6 +804,18 @@ class VertexAIBackend extends Backend {
             this.location = location;
         }
     }
+    /**
+     * @internal
+     */
+    _getModelPath(project, model) {
+        return `/${DEFAULT_API_VERSION}/projects/${project}/locations/${this.location}/${model}`;
+    }
+    /**
+     * @internal
+     */
+    _getTemplatePath(project, templateId) {
+        return `/${DEFAULT_API_VERSION}/projects/${project}/locations/${this.location}/templates/${templateId}`;
+    }
 }
 
 /**
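For orientation, here is a small sketch (not from the package) of the path shapes the new `_getModelPath` and `_getTemplatePath` helpers return. The `v1beta` API version and the project, location, model, and template IDs below are assumptions for illustration only.

```ts
// Sketch only: assumed DEFAULT_API_VERSION of 'v1beta' and made-up identifiers.
const apiVersion = 'v1beta';

// GoogleAIBackend: no location segment in the path.
const googleAIModelPath = `/${apiVersion}/projects/my-project/models/gemini-2.0-flash`;
const googleAITemplatePath = `/${apiVersion}/projects/my-project/templates/my-template`;

// VertexAIBackend: the backend's location is inserted between project and resource.
const vertexModelPath = `/${apiVersion}/projects/my-project/locations/us-central1/models/gemini-2.0-flash`;
const vertexTemplatePath = `/${apiVersion}/projects/my-project/locations/us-central1/templates/my-template`;
```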
@@ -1249,6 +1273,67 @@ function factory(container, { instanceIdentifier }) {
     return new AIService(app, backend, auth, appCheckProvider, chromeAdapterFactory);
 }
 
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Initializes an {@link ApiSettings} object from an {@link AI} instance.
+ *
+ * If this is a Server App, the {@link ApiSettings} object's `getAppCheckToken()` will resolve
+ * with the `FirebaseServerAppSettings.appCheckToken`, instead of requiring that an App Check
+ * instance is initialized.
+ */
+function initApiSettings(ai) {
+    if (!ai.app?.options?.apiKey) {
+        throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`);
+    }
+    else if (!ai.app?.options?.projectId) {
+        throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`);
+    }
+    else if (!ai.app?.options?.appId) {
+        throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`);
+    }
+    const apiSettings = {
+        apiKey: ai.app.options.apiKey,
+        project: ai.app.options.projectId,
+        appId: ai.app.options.appId,
+        automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,
+        location: ai.location,
+        backend: ai.backend
+    };
+    if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {
+        const token = ai.app.settings.appCheckToken;
+        apiSettings.getAppCheckToken = () => {
+            return Promise.resolve({ token });
+        };
+    }
+    else if (ai.appCheck) {
+        if (ai.options?.useLimitedUseAppCheckTokens) {
+            apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken();
+        }
+        else {
+            apiSettings.getAppCheckToken = () => ai.appCheck.getToken();
+        }
+    }
+    if (ai.auth) {
+        apiSettings.getAuthToken = () => ai.auth.getToken();
+    }
+    return apiSettings;
+}
+
 /**
  * @license
  * Copyright 2025 Google LLC
@@ -1292,43 +1377,8 @@ class AIModel {
      * @internal
      */
     constructor(ai, modelName) {
-        if (!ai.app?.options?.apiKey) {
-            throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`);
-        }
-        else if (!ai.app?.options?.projectId) {
-            throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`);
-        }
-        else if (!ai.app?.options?.appId) {
-            throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`);
-        }
-        else {
-            this._apiSettings = {
-                apiKey: ai.app.options.apiKey,
-                project: ai.app.options.projectId,
-                appId: ai.app.options.appId,
-                automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,
-                location: ai.location,
-                backend: ai.backend
-            };
-            if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {
-                const token = ai.app.settings.appCheckToken;
-                this._apiSettings.getAppCheckToken = () => {
-                    return Promise.resolve({ token });
-                };
-            }
-            else if (ai.appCheck) {
-                if (ai.options?.useLimitedUseAppCheckTokens) {
-                    this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken();
-                }
-                else {
-                    this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken();
-                }
-            }
-            if (ai.auth) {
-                this._apiSettings.getAuthToken = () => ai.auth.getToken();
-            }
-            this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType);
-        }
+        this._apiSettings = initApiSettings(ai);
+        this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType);
     }
     /**
      * Normalizes the given model name to a fully qualified model resource name.
@@ -1377,7 +1427,7 @@ class AIModel {
 
 /**
  * @license
- * Copyright
+ * Copyright 2025 Google LLC
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -1391,47 +1441,33 @@ class AIModel {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-var Task;
-(function (Task) {
-    Task["GENERATE_CONTENT"] = "generateContent";
-    Task["STREAM_GENERATE_CONTENT"] = "streamGenerateContent";
-    Task["COUNT_TOKENS"] = "countTokens";
-    Task["PREDICT"] = "predict";
-})(Task || (Task = {}));
-class RequestUrl {
-    constructor(model, task, apiSettings, stream, requestOptions) {
-        this.model = model;
-        this.task = task;
-        this.apiSettings = apiSettings;
-        this.stream = stream;
-        this.requestOptions = requestOptions;
+class RequestURL {
+    constructor(params) {
+        this.params = params;
     }
     toString() {
         const url = new URL(this.baseUrl); // Throws if the URL is invalid
-        url.pathname =
+        url.pathname = this.pathname;
         url.search = this.queryParams.toString();
         return url.toString();
     }
-    get
-
-
-
-
-
-    get modelPath() {
-        if (this.apiSettings.backend instanceof GoogleAIBackend) {
-            return `projects/${this.apiSettings.project}/${this.model}`;
-        }
-        else if (this.apiSettings.backend instanceof VertexAIBackend) {
-            return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`;
+    get pathname() {
+        // We need to construct a different URL if the request is for server side prompt templates,
+        // since the URL patterns are different. Server side prompt templates expect a templateId
+        // instead of a model name.
+        if (this.params.templateId) {
+            return `${this.params.apiSettings.backend._getTemplatePath(this.params.apiSettings.project, this.params.templateId)}:${this.params.task}`;
         }
         else {
-
+            return `${this.params.apiSettings.backend._getModelPath(this.params.apiSettings.project, this.params.model)}:${this.params.task}`;
         }
     }
+    get baseUrl() {
+        return this.params.requestOptions?.baseUrl ?? `https://${DEFAULT_DOMAIN}`;
+    }
     get queryParams() {
         const params = new URLSearchParams();
-        if (this.stream) {
+        if (this.params.stream) {
             params.set('alt', 'sse');
         }
         return params;
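The positional `RequestUrl` constructor is replaced by `RequestURL`, which wraps a single params object and delegates path construction to the backend's `_getTemplatePath` or `_getModelPath` helper depending on whether a `templateId` is present. Below is a rough sketch of the resulting URL composition; the `firebasevertexai.googleapis.com` domain, the `v1beta` version, and the project, model, and template IDs are assumptions, not taken from this diff.

```ts
// Sketch only: mirrors how RequestURL composes the final URL; domain, version, and IDs are assumed.
function buildUrl(path: string, task: string, stream: boolean): string {
  const url = new URL('https://firebasevertexai.googleapis.com');
  url.pathname = `${path}:${task}`;     // e.g. ".../models/gemini-2.0-flash:generateContent"
  if (stream) {
    url.searchParams.set('alt', 'sse'); // streaming requests ask for server-sent events
  }
  return url.toString();
}

// Model-based request:
buildUrl('/v1beta/projects/my-project/models/gemini-2.0-flash', 'streamGenerateContent', true);
// -> https://firebasevertexai.googleapis.com/v1beta/projects/my-project/models/gemini-2.0-flash:streamGenerateContent?alt=sse

// Template-based request (a templateId replaces the model segment):
buildUrl('/v1beta/projects/my-project/templates/my-template', 'templateGenerateContent', false);
// -> https://firebasevertexai.googleapis.com/v1beta/projects/my-project/templates/my-template:templateGenerateContent
```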
@@ -1471,12 +1507,12 @@ async function getHeaders(url) {
     const headers = new Headers();
     headers.append('Content-Type', 'application/json');
     headers.append('x-goog-api-client', getClientHeaders());
-    headers.append('x-goog-api-key', url.apiSettings.apiKey);
-    if (url.apiSettings.automaticDataCollectionEnabled) {
-        headers.append('X-Firebase-Appid', url.apiSettings.appId);
+    headers.append('x-goog-api-key', url.params.apiSettings.apiKey);
+    if (url.params.apiSettings.automaticDataCollectionEnabled) {
+        headers.append('X-Firebase-Appid', url.params.apiSettings.appId);
     }
-    if (url.apiSettings.getAppCheckToken) {
-        const appCheckToken = await url.apiSettings.getAppCheckToken();
+    if (url.params.apiSettings.getAppCheckToken) {
+        const appCheckToken = await url.params.apiSettings.getAppCheckToken();
         if (appCheckToken) {
             headers.append('X-Firebase-AppCheck', appCheckToken.token);
             if (appCheckToken.error) {
@@ -1484,39 +1520,33 @@ async function getHeaders(url) {
             }
         }
     }
-    if (url.apiSettings.getAuthToken) {
-        const authToken = await url.apiSettings.getAuthToken();
+    if (url.params.apiSettings.getAuthToken) {
+        const authToken = await url.params.apiSettings.getAuthToken();
         if (authToken) {
             headers.append('Authorization', `Firebase ${authToken.accessToken}`);
         }
     }
     return headers;
 }
-async function
-const url = new
-    return {
-        url: url.toString(),
-        fetchOptions: {
-            method: 'POST',
-            headers: await getHeaders(url),
-            body
-        }
-    };
-}
-async function makeRequest(model, task, apiSettings, stream, body, requestOptions) {
-    const url = new RequestUrl(model, task, apiSettings, stream, requestOptions);
+async function makeRequest(requestUrlParams, body) {
+    const url = new RequestURL(requestUrlParams);
     let response;
     let fetchTimeoutId;
     try {
-        const
-
-
-
+        const fetchOptions = {
+            method: 'POST',
+            headers: await getHeaders(url),
+            body
+        };
+        // Timeout is 180s by default.
+        const timeoutMillis = requestUrlParams.requestOptions?.timeout != null &&
+            requestUrlParams.requestOptions.timeout >= 0
+            ? requestUrlParams.requestOptions.timeout
             : DEFAULT_FETCH_TIMEOUT_MS;
         const abortController = new AbortController();
         fetchTimeoutId = setTimeout(() => abortController.abort(), timeoutMillis);
-
-        response = await fetch(
+        fetchOptions.signal = abortController.signal;
+        response = await fetch(url.toString(), fetchOptions);
         if (!response.ok) {
             let message = '';
             let errorDetails;
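`makeRequest` now takes the `RequestURL` params object plus a body, builds the fetch options inline, and aborts the request through an `AbortController` once the configured timeout elapses (180 seconds by default, per the comment in the hunk above). A minimal, self-contained sketch of that timeout pattern follows; the function name and call site are illustrative and not part of the package.

```ts
// Sketch only: the fetch-with-timeout pattern used by the refactored makeRequest.
async function fetchWithTimeout(
  url: string,
  body: string,
  timeoutMillis = 180_000 // assumed default, mirroring "Timeout is 180s by default"
): Promise<Response> {
  const abortController = new AbortController();
  // Abort the in-flight request once the deadline passes.
  const timeoutId = setTimeout(() => abortController.abort(), timeoutMillis);
  try {
    return await fetch(url, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body,
      signal: abortController.signal
    });
  } finally {
    clearTimeout(timeoutId); // clear the timer whether the request completed or was aborted
  }
}
```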
@@ -1538,7 +1568,7 @@ async function makeRequest(model, task, apiSettings, stream, body, requestOptions) {
             throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` +
                 `API ('firebasevertexai.googleapis.com') to be enabled in your ` +
                 `Firebase project. Enable this API by visiting the Firebase Console ` +
-                `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` +
+                `at https://console.firebase.google.com/project/${url.params.apiSettings.project}/genai/ ` +
                 `and clicking "Get started". If you enabled this API recently, ` +
                 `wait a few minutes for the action to propagate to our systems and ` +
                 `then retry.`, {
@@ -2279,8 +2309,13 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) {
     if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
         params = mapGenerateContentRequest(params);
     }
-    return makeRequest(
-
+    return makeRequest({
+        task: "streamGenerateContent" /* Task.STREAM_GENERATE_CONTENT */,
+        model,
+        apiSettings,
+        stream: true,
+        requestOptions
+    }, JSON.stringify(params));
 }
 async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
     const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
@@ -2290,8 +2325,37 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
     if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
         params = mapGenerateContentRequest(params);
     }
-    return makeRequest(
-
+    return makeRequest({
+        model,
+        task: "generateContent" /* Task.GENERATE_CONTENT */,
+        apiSettings,
+        stream: false,
+        requestOptions
+    }, JSON.stringify(params));
+}
+async function templateGenerateContent(apiSettings, templateId, templateParams, requestOptions) {
+    const response = await makeRequest({
+        task: "templateGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_GENERATE_CONTENT */,
+        templateId,
+        apiSettings,
+        stream: false,
+        requestOptions
+    }, JSON.stringify(templateParams));
+    const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
+    const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
+    return {
+        response: enhancedResponse
+    };
+}
+async function templateGenerateContentStream(apiSettings, templateId, templateParams, requestOptions) {
+    const response = await makeRequest({
+        task: "templateStreamGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_STREAM_GENERATE_CONTENT */,
+        templateId,
+        apiSettings,
+        stream: true,
+        requestOptions
+    }, JSON.stringify(templateParams));
+    return processStream(response, apiSettings);
 }
 async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
     const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
@@ -2703,7 +2767,13 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
     else {
         body = JSON.stringify(params);
     }
-    const response = await makeRequest(
+    const response = await makeRequest({
+        model,
+        task: "countTokens" /* Task.COUNT_TOKENS */,
+        apiSettings,
+        stream: false,
+        requestOptions
+    }, body);
     return response.json();
 }
 async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
@@ -3256,8 +3326,13 @@ class ImagenModel extends AIModel {
             ...this.generationConfig,
             ...this.safetySettings
         });
-        const response = await makeRequest(
-
+        const response = await makeRequest({
+            task: "predict" /* Task.PREDICT */,
+            model: this.model,
+            apiSettings: this._apiSettings,
+            stream: false,
+            requestOptions: this.requestOptions
+        }, JSON.stringify(body));
         return handlePredictResponse(response);
     }
     /**
@@ -3285,8 +3360,13 @@ class ImagenModel extends AIModel {
             ...this.generationConfig,
             ...this.safetySettings
         });
-        const response = await makeRequest(
-
+        const response = await makeRequest({
+            task: "predict" /* Task.PREDICT */,
+            model: this.model,
+            apiSettings: this._apiSettings,
+            stream: false,
+            requestOptions: this.requestOptions
+        }, JSON.stringify(body));
         return handlePredictResponse(response);
     }
 }
@@ -3438,6 +3518,121 @@ class WebSocketHandlerImpl {
     }
 }
 
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * {@link GenerativeModel} APIs that execute on a server-side template.
+ *
+ * This class should only be instantiated with {@link getTemplateGenerativeModel}.
+ *
+ * @beta
+ */
+class TemplateGenerativeModel {
+    /**
+     * @hideconstructor
+     */
+    constructor(ai, requestOptions) {
+        this.requestOptions = requestOptions || {};
+        this._apiSettings = initApiSettings(ai);
+    }
+    /**
+     * Makes a single non-streaming call to the model and returns an object
+     * containing a single {@link GenerateContentResponse}.
+     *
+     * @param templateId - The ID of the server-side template to execute.
+     * @param templateVariables - A key-value map of variables to populate the
+     * template with.
+     *
+     * @beta
+     */
+    async generateContent(templateId, templateVariables // anything!
+    ) {
+        return templateGenerateContent(this._apiSettings, templateId, { inputs: templateVariables }, this.requestOptions);
+    }
+    /**
+     * Makes a single streaming call to the model and returns an object
+     * containing an iterable stream that iterates over all chunks in the
+     * streaming response as well as a promise that returns the final aggregated
+     * response.
+     *
+     * @param templateId - The ID of the server-side template to execute.
+     * @param templateVariables - A key-value map of variables to populate the
+     * template with.
+     *
+     * @beta
+     */
+    async generateContentStream(templateId, templateVariables) {
+        return templateGenerateContentStream(this._apiSettings, templateId, { inputs: templateVariables }, this.requestOptions);
+    }
+}
+
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Class for Imagen model APIs that execute on a server-side template.
+ *
+ * This class should only be instantiated with {@link getTemplateImagenModel}.
+ *
+ * @beta
+ */
+class TemplateImagenModel {
+    /**
+     * @hideconstructor
+     */
+    constructor(ai, requestOptions) {
+        this.requestOptions = requestOptions || {};
+        this._apiSettings = initApiSettings(ai);
+    }
+    /**
+     * Makes a single call to the model and returns an object containing a single
+     * {@link ImagenGenerationResponse}.
+     *
+     * @param templateId - The ID of the server-side template to execute.
+     * @param templateVariables - A key-value map of variables to populate the
+     * template with.
+     *
+     * @beta
+     */
+    async generateImages(templateId, templateVariables) {
+        const response = await makeRequest({
+            task: "templatePredict" /* ServerPromptTemplateTask.TEMPLATE_PREDICT */,
+            templateId,
+            apiSettings: this._apiSettings,
+            stream: false,
+            requestOptions: this.requestOptions
+        }, JSON.stringify({ inputs: templateVariables }));
+        return handlePredictResponse(response);
+    }
+}
+
 /**
  * @license
  * Copyright 2024 Google LLC
@@ -4229,6 +4424,30 @@ function getLiveGenerativeModel(ai, modelParams) {
     const webSocketHandler = new WebSocketHandlerImpl();
     return new LiveGenerativeModel(ai, modelParams, webSocketHandler);
 }
+/**
+ * Returns a {@link TemplateGenerativeModel} class for executing server-side
+ * templates.
+ *
+ * @param ai - An {@link AI} instance.
+ * @param requestOptions - Additional options to use when making requests.
+ *
+ * @beta
+ */
+function getTemplateGenerativeModel(ai, requestOptions) {
+    return new TemplateGenerativeModel(ai, requestOptions);
+}
+/**
+ * Returns a {@link TemplateImagenModel} class for executing server-side
+ * Imagen templates.
+ *
+ * @param ai - An {@link AI} instance.
+ * @param requestOptions - Additional options to use when making requests.
+ *
+ * @beta
+ */
+function getTemplateImagenModel(ai, requestOptions) {
+    return new TemplateImagenModel(ai, requestOptions);
+}
 
 /**
  * The Firebase AI Web SDK.
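A hedged usage sketch of the new beta entry points added above (`getTemplateGenerativeModel`, `getTemplateImagenModel`). The Firebase config, template IDs, variables, and timeout are placeholders; the call shapes follow the JSDoc in this diff rather than official documentation.

```ts
// Sketch only: illustrative use of the new server-side template APIs in this build.
import { initializeApp } from 'firebase/app';
import { getAI, getTemplateGenerativeModel, getTemplateImagenModel } from '@firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app);

// Execute a server-side prompt template, substituting the given variables.
const templateModel = getTemplateGenerativeModel(ai, { timeout: 30_000 });
const { response } = await templateModel.generateContent('my-template', { name: 'Ada' });
console.log(response.text());

// Execute a server-side Imagen template.
const imagenTemplateModel = getTemplateImagenModel(ai);
const { images } = await imagenTemplateModel.generateImages('my-imagen-template', { subject: 'a lamp' });
console.log(images.length);
```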
@@ -4243,5 +4462,5 @@ function registerAI() {
 }
 registerAI();
 
-export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, TemplateGenerativeModel, TemplateImagenModel, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, getTemplateGenerativeModel, getTemplateImagenModel, startAudioConversation };
 //# sourceMappingURL=index.esm.js.map