@firebase/ai 2.2.1 → 2.3.0-canary.cb3bdd812

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,7 +8,7 @@ var util = require('@firebase/util');
  var logger$1 = require('@firebase/logger');
 
  var name = "@firebase/ai";
- var version = "2.2.1";
+ var version = "2.3.0-canary.cb3bdd812";
 
  /**
  * @license
@@ -301,14 +301,51 @@ const ResponseModality = {
  AUDIO: 'AUDIO'
  };
  /**
- * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
- * @public
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
+ * @beta
  */
  const InferenceMode = {
  'PREFER_ON_DEVICE': 'prefer_on_device',
  'ONLY_ON_DEVICE': 'only_on_device',
- 'ONLY_IN_CLOUD': 'only_in_cloud'
+ 'ONLY_IN_CLOUD': 'only_in_cloud',
+ 'PREFER_IN_CLOUD': 'prefer_in_cloud'
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ const Outcome = {
+ UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
+ OK: 'OUTCOME_OK',
+ FAILED: 'OUTCOME_FAILED',
+ DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ const Language = {
+ UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
+ PYTHON: 'PYTHON'
  };
 
  /**
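The new PREFER_IN_CLOUD mode is the mirror image of PREFER_ON_DEVICE: the SDK calls the cloud-hosted model first and falls back to the on-device model only when the cloud call fails with one of the fallback error codes (see the callCloudOrDevice helper added below). A minimal usage sketch via the umbrella firebase package, assuming the hybrid options object accepts a mode field plus inCloudParams; the model name is illustrative only:

    import { initializeApp } from 'firebase/app';
    import { getAI, getGenerativeModel, InferenceMode } from 'firebase/ai';

    async function run() {
      const app = initializeApp({ /* your firebaseConfig */ });
      const ai = getAI(app);

      // Assumed hybrid-params shape: { mode, inCloudParams, onDeviceParams? }.
      const model = getGenerativeModel(ai, {
        mode: InferenceMode.PREFER_IN_CLOUD,
        inCloudParams: { model: 'gemini-2.0-flash' }
      });

      const result = await model.generateContent('Summarize this changelog.');
      console.log(result.response.text());
    }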
@@ -1738,6 +1775,72 @@ function aggregateResponses(responses) {
  return aggregatedResponse;
  }
 
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const errorsCausingFallback = [
+ // most network errors
+ AIErrorCode.FETCH_ERROR,
+ // fallback code for all other errors in makeRequest
+ AIErrorCode.ERROR,
+ // error due to API not being enabled in project
+ AIErrorCode.API_NOT_ENABLED
+ ];
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+ if (!chromeAdapter) {
+ return inCloudCall();
+ }
+ switch (chromeAdapter.mode) {
+ case InferenceMode.ONLY_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+ case InferenceMode.ONLY_IN_CLOUD:
+ return inCloudCall();
+ case InferenceMode.PREFER_IN_CLOUD:
+ try {
+ return await inCloudCall();
+ }
+ catch (e) {
+ if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+ return onDeviceCall();
+ }
+ throw e;
+ }
+ case InferenceMode.PREFER_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ return inCloudCall();
+ default:
+ throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
+ }
+ }
+
  /**
  * @license
  * Copyright 2024 Google LLC
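callCloudOrDevice is internal and not exported, but its dispatch rules can be exercised with a hand-rolled stub. A rough sketch that assumes direct access to the function from inside the module; stubAdapter below stands in for the SDK's real ChromeAdapter:

    // Minimal stub of the on-device adapter, for illustration only.
    const stubAdapter = {
      mode: InferenceMode.PREFER_IN_CLOUD,
      isAvailable: async () => true
    };

    const onDeviceCall = async () => 'served on-device';
    const inCloudCall = async () => {
      // Simulate a network failure so PREFER_IN_CLOUD triggers the fallback.
      throw new AIError(AIErrorCode.FETCH_ERROR, 'network unreachable');
    };

    // The cloud call runs first; because it throws FETCH_ERROR (one of
    // errorsCausingFallback), the request is retried on-device.
    const result = await callCloudOrDevice({ contents: [] }, stubAdapter, onDeviceCall, inCloudCall);
    console.log(result); // 'served on-device'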
@@ -1762,13 +1865,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
  /* stream */ true, JSON.stringify(params), requestOptions);
  }
  async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContentStream(params);
- }
- else {
- response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
  return processStream(response, apiSettings); // TODO: Map streaming responses
  }
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -1779,13 +1876,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
  /* stream */ false, JSON.stringify(params), requestOptions);
  }
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContent(params);
- }
- else {
- response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
  return {
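Both entry points now delegate backend selection to callCloudOrDevice, so nothing changes from the caller's side: generateContent and generateContentStream behave the same regardless of which backend served the request. A short streaming sketch against the public API, reusing the model from the earlier sketch:

    const streamResult = await model.generateContentStream('Write a haiku about semver.');
    for await (const chunk of streamResult.stream) {
      // Each chunk is a partial response; text() returns the new text delta.
      console.log(chunk.text());
    }
    // The aggregated response resolves once the stream has finished.
    const full = await streamResult.response;
    console.log(full.text());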
@@ -1995,7 +2086,9 @@ function validateChatHistory(history) {
  functionCall: 0,
  functionResponse: 0,
  thought: 0,
- thoughtSignature: 0
+ thoughtSignature: 0,
+ executableCode: 0,
+ codeExecutionResult: 0
  };
  for (const part of parts) {
  for (const key of VALID_PART_FIELDS) {
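executableCode and codeExecutionResult are now counted as valid chat-history part fields, matching the Outcome and Language enums added above. A sketch of reading those parts off a response, assuming the part shapes mirror the Gemini code-execution API (executableCode: { language, code } and codeExecutionResult: { outcome, output }), with result as in the earlier sketch:

    import { Outcome } from 'firebase/ai';

    const parts = result.response.candidates?.[0]?.content.parts ?? [];
    for (const part of parts) {
      if (part.executableCode) {
        // language should be one of the Language enum values, e.g. 'PYTHON'.
        console.log(`model ran ${part.executableCode.language}:`, part.executableCode.code);
      }
      if (part.codeExecutionResult) {
        if (part.codeExecutionResult.outcome === Outcome.OK) {
          console.log('execution output:', part.codeExecutionResult.output);
        } else {
          console.warn('code execution failed with outcome', part.codeExecutionResult.outcome);
        }
      }
    }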
@@ -2196,8 +2289,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
  return response.json();
  }
  async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- return (await chromeAdapter.countTokens(params)).json();
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
  }
  return countTokensOnCloud(apiSettings, model, params, requestOptions);
  }
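countTokens previously routed to the on-device model when one was available; it now always counts tokens in the cloud and throws for models created with ONLY_ON_DEVICE. Callers pinned to on-device inference can guard for that, roughly as follows (AIError and AIErrorCode are assumed to be imported from 'firebase/ai'):

    try {
      const { totalTokens } = await model.countTokens('How many tokens is this?');
      console.log('total tokens:', totalTokens);
    } catch (e) {
      // Thrown when the model was created with mode ONLY_ON_DEVICE.
      if (e instanceof AIError && e.code === AIErrorCode.UNSUPPORTED) {
        console.warn('countTokens() is cloud-only; skipping for on-device-only models.');
      } else {
        throw e;
      }
    }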
@@ -3659,12 +3752,14 @@ exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel;
  exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel;
  exports.InferenceMode = InferenceMode;
  exports.IntegerSchema = IntegerSchema;
+ exports.Language = Language;
  exports.LiveGenerativeModel = LiveGenerativeModel;
  exports.LiveResponseType = LiveResponseType;
  exports.LiveSession = LiveSession;
  exports.Modality = Modality;
  exports.NumberSchema = NumberSchema;
  exports.ObjectSchema = ObjectSchema;
+ exports.Outcome = Outcome;
  exports.POSSIBLE_ROLES = POSSIBLE_ROLES;
  exports.ResponseModality = ResponseModality;
  exports.Schema = Schema;
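Language and Outcome join the public export surface, so both enums can be imported alongside the existing symbols, either from @firebase/ai directly or via the umbrella firebase package:

    import { InferenceMode, Language, Outcome } from 'firebase/ai';

    console.log(InferenceMode.PREFER_IN_CLOUD); // 'prefer_in_cloud'
    console.log(Language.PYTHON);               // 'PYTHON'
    console.log(Outcome.OK);                    // 'OUTCOME_OK'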