@firebase/ai 2.2.0 → 2.2.1-canary.06ab5c4f9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
  import { Logger } from '@firebase/logger';
 
  var name = "@firebase/ai";
- var version = "2.2.0";
+ var version = "2.2.1-canary.06ab5c4f9";
 
  /**
  * @license
@@ -299,12 +299,30 @@ const ResponseModality = {
  /**
  * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
  * @public
  */
  const InferenceMode = {
      'PREFER_ON_DEVICE': 'prefer_on_device',
      'ONLY_ON_DEVICE': 'only_on_device',
-     'ONLY_IN_CLOUD': 'only_in_cloud'
+     'ONLY_IN_CLOUD': 'only_in_cloud',
+     'PREFER_IN_CLOUD': 'prefer_in_cloud'
  };
 
  /**
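
Note: the new PREFER_IN_CLOUD mode inverts the existing PREFER_ON_DEVICE behavior: the cloud-hosted model is tried first, and the on-device model is used only when the cloud call fails with one of a small set of error codes (see the dispatcher added further down in this diff). A consumer-side sketch is below; the HybridParams option names (mode, inCloudParams) and the model name are assumptions for illustration, not confirmed by this diff, so check the released typings.

// Sketch only: { mode, inCloudParams } and 'gemini-2.0-flash' are assumed names.
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, InferenceMode } from 'firebase/ai';

const app = initializeApp({ /* Firebase config */ });
const ai = getAI(app);

// New in this canary: prefer the cloud-hosted model, falling back to the
// on-device model only when the cloud call fails with a fallback-eligible error.
const model = getGenerativeModel(ai, {
  mode: InferenceMode.PREFER_IN_CLOUD,
  inCloudParams: { model: 'gemini-2.0-flash' }
});

const result = await model.generateContent('Summarize this page in one sentence.');
console.log(result.response.text());
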
@@ -1734,6 +1752,72 @@ function aggregateResponses(responses) {
      return aggregatedResponse;
  }
 
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const errorsCausingFallback = [
+     // most network errors
+     AIErrorCode.FETCH_ERROR,
+     // fallback code for all other errors in makeRequest
+     AIErrorCode.ERROR,
+     // error due to API not being enabled in project
+     AIErrorCode.API_NOT_ENABLED
+ ];
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+     if (!chromeAdapter) {
+         return inCloudCall();
+     }
+     switch (chromeAdapter.mode) {
+         case InferenceMode.ONLY_ON_DEVICE:
+             if (await chromeAdapter.isAvailable(request)) {
+                 return onDeviceCall();
+             }
+             throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+         case InferenceMode.ONLY_IN_CLOUD:
+             return inCloudCall();
+         case InferenceMode.PREFER_IN_CLOUD:
+             try {
+                 return await inCloudCall();
+             }
+             catch (e) {
+                 if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+                     return onDeviceCall();
+                 }
+                 throw e;
+             }
+         case InferenceMode.PREFER_ON_DEVICE:
+             if (await chromeAdapter.isAvailable(request)) {
+                 return onDeviceCall();
+             }
+             return inCloudCall();
+         default:
+             throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
+     }
+ }
+
  /**
  * @license
  * Copyright 2024 Google LLC
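
Note: callCloudOrDevice centralizes dispatch logic that was previously duplicated per method: the ONLY_* modes never fall back, PREFER_ON_DEVICE falls back to the cloud when the on-device model is unavailable, and PREFER_IN_CLOUD falls back to the device only when the cloud call rejects with a code listed in errorsCausingFallback (the `return await` inside the try is what lets that rejection be caught). A rough illustration of the PREFER_IN_CLOUD path; callCloudOrDevice, AIError, and AIErrorCode are module-internal, and fakeAdapter is a made-up stand-in for the real ChromeAdapter:

// Illustrative only: fakeAdapter and request are hypothetical test fixtures.
const request = { contents: [] };
const fakeAdapter = {
  mode: InferenceMode.PREFER_IN_CLOUD,
  isAvailable: async () => true,
  generateContent: async () => new Response('{"candidates": []}')
};

const response = await callCloudOrDevice(
  request,
  fakeAdapter,
  () => fakeAdapter.generateContent(request),                              // on-device thunk
  () => { throw new AIError(AIErrorCode.FETCH_ERROR, 'network down'); }    // in-cloud thunk
);
// FETCH_ERROR is in errorsCausingFallback, so the dispatcher falls back to the
// on-device thunk; an error with any code outside that list is re-thrown instead.
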
@@ -1758,13 +1842,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) {
      /* stream */ true, JSON.stringify(params), requestOptions);
  }
  async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
-     let response;
-     if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
-         response = await chromeAdapter.generateContentStream(params);
-     }
-     else {
-         response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
-     }
+     const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
      return processStream(response, apiSettings); // TODO: Map streaming responses
  }
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -1775,13 +1853,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
      /* stream */ false, JSON.stringify(params), requestOptions);
  }
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
-     let response;
-     if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
-         response = await chromeAdapter.generateContent(params);
-     }
-     else {
-         response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
-     }
+     const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
      const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
      const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
      return {
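
Note: both generateContentStream and generateContent now hand their backend calls to callCloudOrDevice as zero-argument closures rather than resolved values, so only the branch the dispatcher selects ever issues a request. A stripped-down sketch of that thunk pattern; pickBackend and its arguments are made-up names, not part of the SDK:

// Hypothetical helper, shown only to illustrate why closures are passed.
async function pickBackend(useCloud, onDeviceCall, inCloudCall) {
  // Each backend call is wrapped in a closure, so the branch that is not
  // selected never runs and never contacts its backend.
  return useCloud ? inCloudCall() : onDeviceCall();
}

const answer = await pickBackend(
  true,
  async () => 'on-device result',  // never executed in this call
  async () => 'in-cloud result'    // executed
);
console.log(answer); // "in-cloud result"
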
@@ -2192,8 +2264,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
      return response.json();
  }
  async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
-     if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
-         return (await chromeAdapter.countTokens(params)).json();
+     if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+         throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
      }
      return countTokensOnCloud(apiSettings, model, params, requestOptions);
  }
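
Note: countTokens no longer routes to the on-device model at all: every call goes to the cloud endpoint, and under ONLY_ON_DEVICE it throws an UNSUPPORTED AIError instead of silently using the cloud. Callers that pin ONLY_ON_DEVICE should guard the call; a hedged sketch, reusing the (assumed) model setup from the earlier example:

// Assumes `model` was created with mode: InferenceMode.ONLY_ON_DEVICE.
try {
  const { totalTokens } = await model.countTokens('How many tokens is this?');
  console.log('total tokens:', totalTokens);
} catch (e) {
  // As of this canary, token counting is cloud-only, so ONLY_ON_DEVICE rejects.
  console.warn('countTokens is unavailable for on-device models:', e.message);
}
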