@firebase/ai 2.2.1-canary.55f3f83a7 → 2.2.1-canary.9b8ab02c5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
4
4
  import { Logger } from '@firebase/logger';
5
5
 
6
6
  var name = "@firebase/ai";
7
- var version = "2.2.1-canary.55f3f83a7";
7
+ var version = "2.2.1-canary.9b8ab02c5";
8
8
 
9
9
  /**
10
10
  * @license
@@ -299,12 +299,50 @@ const ResponseModality = {
299
299
/**
 * <b>(EXPERIMENTAL)</b>
 * Determines whether inference happens on-device or in-cloud.
 *
 * @remarks
 * <b>PREFER_ON_DEVICE:</b> Try the on-device model first; if it is not
 * available, fall back to a cloud-hosted model.
 * <br/>
 * <b>ONLY_ON_DEVICE:</b> Use the on-device model exclusively; inference
 * methods throw when it is unavailable (no cloud fallback).
 * <br/>
 * <b>ONLY_IN_CLOUD:</b> Use a cloud-hosted model exclusively (no
 * on-device fallback).
 * <br/>
 * <b>PREFER_IN_CLOUD:</b> Try a cloud-hosted model first; if the call
 * fails, fall back to the on-device model.
 *
 * @public
 */
const InferenceMode = {
    PREFER_ON_DEVICE: 'prefer_on_device',
    ONLY_ON_DEVICE: 'only_on_device',
    ONLY_IN_CLOUD: 'only_in_cloud',
    PREFER_IN_CLOUD: 'prefer_in_cloud'
};
327
/**
 * Represents the result of the code execution.
 *
 * @public
 */
const Outcome = {
    'UNSPECIFIED': 'OUTCOME_UNSPECIFIED',
    'OK': 'OUTCOME_OK',
    'FAILED': 'OUTCOME_FAILED',
    'DEADLINE_EXCEEDED': 'OUTCOME_DEADLINE_EXCEEDED'
};
338
/**
 * The programming language of the code.
 *
 * @public
 */
const Language = {
    'UNSPECIFIED': 'LANGUAGE_UNSPECIFIED',
    'PYTHON': 'PYTHON'
};
309
347
 
310
348
  /**
@@ -1734,6 +1772,72 @@ function aggregateResponses(responses) {
1734
1772
  return aggregatedResponse;
1735
1773
  }
1736
1774
 
1775
/**
 * @license
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Error codes that trigger a fallback from in-cloud to on-device
// inference when the mode is PREFER_IN_CLOUD.
const errorsCausingFallback = [
    // most network errors
    AIErrorCode.FETCH_ERROR,
    // fallback code for all other errors in makeRequest
    AIErrorCode.ERROR,
    // error due to API not being enabled in project
    AIErrorCode.API_NOT_ENABLED
];
/**
 * Dispatches a request to the appropriate backend (on-device or in-cloud)
 * based on the inference mode.
 *
 * @param request - The request to be sent.
 * @param chromeAdapter - The on-device model adapter; when absent, the
 *   request always goes to the cloud.
 * @param onDeviceCall - The function to call for on-device inference.
 * @param inCloudCall - The function to call for in-cloud inference.
 * @returns The response from the backend.
 * @throws AIError with code UNSUPPORTED when the mode is ONLY_ON_DEVICE
 *   but no on-device model is available, or with code ERROR when the
 *   adapter reports an unrecognized inference mode.
 */
async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
    if (!chromeAdapter) {
        return inCloudCall();
    }
    switch (chromeAdapter.mode) {
        case InferenceMode.ONLY_ON_DEVICE:
            if (await chromeAdapter.isAvailable(request)) {
                return onDeviceCall();
            }
            throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
        case InferenceMode.ONLY_IN_CLOUD:
            return inCloudCall();
        case InferenceMode.PREFER_IN_CLOUD:
            try {
                return await inCloudCall();
            }
            catch (e) {
                // Only fall back for failure classes known to be recoverable
                // on-device; rethrow everything else unchanged.
                if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
                    return onDeviceCall();
                }
                throw e;
            }
        case InferenceMode.PREFER_ON_DEVICE:
            if (await chromeAdapter.isAvailable(request)) {
                return onDeviceCall();
            }
            return inCloudCall();
        default:
            // Fixed typo in the original message: "infererence" -> "inference".
            throw new AIError(AIErrorCode.ERROR, `Unexpected inference mode: ${chromeAdapter.mode}`);
    }
}
1840
+
1737
1841
  /**
1738
1842
  * @license
1739
1843
  * Copyright 2024 Google LLC
@@ -1758,13 +1862,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
1758
1862
  /* stream */ true, JSON.stringify(params), requestOptions);
1759
1863
  }
1760
1864
/**
 * Generates a streaming content response, routing the call to the
 * on-device model or the cloud backend via callCloudOrDevice.
 */
async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
    const onDevice = () => chromeAdapter.generateContentStream(params);
    const inCloud = () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
    const response = await callCloudOrDevice(params, chromeAdapter, onDevice, inCloud);
    return processStream(response, apiSettings); // TODO: Map streaming responses
}
1770
1868
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -1775,13 +1873,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
1775
1873
  /* stream */ false, JSON.stringify(params), requestOptions);
1776
1874
  }
1777
1875
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
1778
- let response;
1779
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
1780
- response = await chromeAdapter.generateContent(params);
1781
- }
1782
- else {
1783
- response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
1784
- }
1876
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
1785
1877
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
1786
1878
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
1787
1879
  return {
@@ -1991,7 +2083,9 @@ function validateChatHistory(history) {
1991
2083
  functionCall: 0,
1992
2084
  functionResponse: 0,
1993
2085
  thought: 0,
1994
- thoughtSignature: 0
2086
+ thoughtSignature: 0,
2087
+ executableCode: 0,
2088
+ codeExecutionResult: 0
1995
2089
  };
1996
2090
  for (const part of parts) {
1997
2091
  for (const key of VALID_PART_FIELDS) {
@@ -2192,8 +2286,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
2192
2286
  return response.json();
2193
2287
  }
2194
2288
/**
 * Counts tokens for a request using the cloud backend.
 * Throws when the adapter is configured for on-device-only inference,
 * since token counting has no on-device implementation.
 */
async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
    const onDeviceOnly = chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE;
    if (onDeviceOnly) {
        throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
    }
    return countTokensOnCloud(apiSettings, model, params, requestOptions);
}
@@ -3629,5 +3723,5 @@ function registerAI() {
3629
3723
  }
3630
3724
  registerAI();
3631
3725
 
3632
- export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
3726
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
3633
3727
  //# sourceMappingURL=index.node.mjs.map