@firebase/ai 2.2.1-canary.55f3f83a7 → 2.2.1-canary.9b8ab02c5

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
@@ -8,7 +8,7 @@ var util = require('@firebase/util');
  var logger$1 = require('@firebase/logger');

  var name = "@firebase/ai";
- var version = "2.2.1-canary.55f3f83a7";
+ var version = "2.2.1-canary.9b8ab02c5";

  /**
  * @license
@@ -303,12 +303,50 @@ const ResponseModality = {
  /**
  * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
  * @public
  */
  const InferenceMode = {
  'PREFER_ON_DEVICE': 'prefer_on_device',
  'ONLY_ON_DEVICE': 'only_on_device',
- 'ONLY_IN_CLOUD': 'only_in_cloud'
+ 'ONLY_IN_CLOUD': 'only_in_cloud',
+ 'PREFER_IN_CLOUD': 'prefer_in_cloud'
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ const Outcome = {
+ UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
+ OK: 'OUTCOME_OK',
+ FAILED: 'OUTCOME_FAILED',
+ DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ const Language = {
+ UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
+ PYTHON: 'PYTHON'
  };

  /**
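For orientation, the new PREFER_IN_CLOUD value slots into the hybrid-inference setup described in the remarks above. A minimal sketch of selecting it, assuming the `mode` field documented for getGenerativeModel's hybrid model params (the Firebase config object is a placeholder):

    const { initializeApp } = require('firebase/app');
    const { getAI, getGenerativeModel, InferenceMode } = require('firebase/ai');

    const app = initializeApp({ /* your Firebase config */ });
    const ai = getAI(app);

    // New in this build: try the cloud-hosted model first and fall back to
    // the on-device model only if the cloud call fails.
    const model = getGenerativeModel(ai, { mode: InferenceMode.PREFER_IN_CLOUD });

The Outcome and Language constants added alongside it describe code-execution results and are exported further down in this diff.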
@@ -1738,6 +1776,72 @@ function aggregateResponses(responses) {
  return aggregatedResponse;
  }

+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const errorsCausingFallback = [
+ // most network errors
+ AIErrorCode.FETCH_ERROR,
+ // fallback code for all other errors in makeRequest
+ AIErrorCode.ERROR,
+ // error due to API not being enabled in project
+ AIErrorCode.API_NOT_ENABLED
+ ];
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+ if (!chromeAdapter) {
+ return inCloudCall();
+ }
+ switch (chromeAdapter.mode) {
+ case InferenceMode.ONLY_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+ case InferenceMode.ONLY_IN_CLOUD:
+ return inCloudCall();
+ case InferenceMode.PREFER_IN_CLOUD:
+ try {
+ return await inCloudCall();
+ }
+ catch (e) {
+ if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+ return onDeviceCall();
+ }
+ throw e;
+ }
+ case InferenceMode.PREFER_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ return inCloudCall();
+ default:
+ throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
+ }
+ }
+
  /**
  * @license
  * Copyright 2024 Google LLC
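From the caller's side, the dispatch rules in callCloudOrDevice above work out roughly as follows; the sketch reuses the assumed getGenerativeModel setup from the earlier example and is not taken from the package itself:

    // PREFER_IN_CLOUD: the cloud call runs first; only a failure whose code is
    // FETCH_ERROR, ERROR, or API_NOT_ENABLED is retried against the on-device
    // model, and any other AIError is rethrown unchanged.
    // ONLY_ON_DEVICE: throws an UNSUPPORTED AIError when no on-device model is
    // available and never contacts the cloud backend.
    async function summarize(ai, text) {
      const hybridModel = getGenerativeModel(ai, { mode: InferenceMode.PREFER_IN_CLOUD });
      const result = await hybridModel.generateContent(`Summarize: ${text}`);
      return result.response.text();
    }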
@@ -1762,13 +1866,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
  /* stream */ true, JSON.stringify(params), requestOptions);
  }
  async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContentStream(params);
- }
- else {
- response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
  return processStream(response, apiSettings); // TODO: Map streaming responses
  }
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -1779,13 +1877,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
  /* stream */ false, JSON.stringify(params), requestOptions);
  }
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContent(params);
- }
- else {
- response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
  return {
@@ -1995,7 +2087,9 @@ function validateChatHistory(history) {
  functionCall: 0,
  functionResponse: 0,
  thought: 0,
- thoughtSignature: 0
+ thoughtSignature: 0,
+ executableCode: 0,
+ codeExecutionResult: 0
  };
  for (const part of parts) {
  for (const key of VALID_PART_FIELDS) {
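For reference, the two newly counted part kinds belong to the code-execution feature behind the Outcome and Language constants above. A hypothetical model-turn history entry, assuming the part shapes used by the Gemini API's code-execution types:

    const { Language, Outcome } = require('firebase/ai');

    const modelTurn = {
      role: 'model',
      parts: [
        // Code the model chose to run, followed by the result of running it.
        { executableCode: { language: Language.PYTHON, code: 'print(1 + 1)' } },
        { codeExecutionResult: { outcome: Outcome.OK, output: '2\n' } }
      ]
    };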
@@ -2196,8 +2290,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
  return response.json();
  }
  async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- return (await chromeAdapter.countTokens(params)).json();
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
  }
  return countTokensOnCloud(apiSettings, model, params, requestOptions);
  }
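The practical effect of this change is that countTokens() always goes to the cloud endpoint, and a model constructed with ONLY_ON_DEVICE now rejects up front instead of delegating to the on-device adapter. A small sketch, reusing the assumed setup from the first example:

    const deviceOnly = getGenerativeModel(ai, { mode: InferenceMode.ONLY_ON_DEVICE });
    deviceOnly.countTokens('Why is the sky blue?')
      // Rejects with an AIError whose code corresponds to UNSUPPORTED.
      .catch((e) => console.error(e.message));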
@@ -3659,12 +3753,14 @@ exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel;
  exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel;
  exports.InferenceMode = InferenceMode;
  exports.IntegerSchema = IntegerSchema;
+ exports.Language = Language;
  exports.LiveGenerativeModel = LiveGenerativeModel;
  exports.LiveResponseType = LiveResponseType;
  exports.LiveSession = LiveSession;
  exports.Modality = Modality;
  exports.NumberSchema = NumberSchema;
  exports.ObjectSchema = ObjectSchema;
+ exports.Outcome = Outcome;
  exports.POSSIBLE_ROLES = POSSIBLE_ROLES;
  exports.ResponseModality = ResponseModality;
  exports.Schema = Schema;