@firebase/ai 2.2.1 → 2.3.0-canary.0ffcb26af

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
  import { Logger } from '@firebase/logger';

  var name = "@firebase/ai";
- var version = "2.2.1";
+ var version = "2.3.0-canary.0ffcb26af";

  /**
  * @license
@@ -297,14 +297,51 @@ const ResponseModality = {
  AUDIO: 'AUDIO'
  };
  /**
- * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
- * @public
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
+ * @beta
  */
  const InferenceMode = {
  'PREFER_ON_DEVICE': 'prefer_on_device',
  'ONLY_ON_DEVICE': 'only_on_device',
- 'ONLY_IN_CLOUD': 'only_in_cloud'
+ 'ONLY_IN_CLOUD': 'only_in_cloud',
+ 'PREFER_IN_CLOUD': 'prefer_in_cloud'
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ const Outcome = {
+ UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
+ OK: 'OUTCOME_OK',
+ FAILED: 'OUTCOME_FAILED',
+ DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ const Language = {
+ UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
+ PYTHON: 'PYTHON'
  };

  /**
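The hunk above adds the new PREFER_IN_CLOUD inference mode and the Outcome and Language enums used by code execution. A minimal usage sketch of the new mode, assuming the hybrid-inference options (`mode`, `inCloudParams`) accepted by getGenerativeModel; the model name is illustrative only:

```js
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, GoogleAIBackend, InferenceMode } from '@firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app, { backend: new GoogleAIBackend() });

// PREFER_IN_CLOUD: call the cloud-hosted model first and fall back to the
// on-device model only if the cloud call fails (see callCloudOrDevice below).
const model = getGenerativeModel(ai, {
  mode: InferenceMode.PREFER_IN_CLOUD,         // new in this release
  inCloudParams: { model: 'gemini-2.5-flash' } // assumed option name; illustrative model
});

const { response } = await model.generateContent('Say hello.');
console.log(response.text());
```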
@@ -323,6 +360,45 @@ const InferenceMode = {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+ /**
+ * The status of a URL retrieval.
+ *
+ * @remarks
+ * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
+ * <br/>
+ *
+ * @beta
+ */
+ const URLRetrievalStatus = {
+ /**
+ * Unspecified retrieval status.
+ */
+ URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',
+ /**
+ * The URL retrieval was successful.
+ */
+ URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',
+ /**
+ * The URL retrieval failed.
+ */
+ URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',
+ /**
+ * The URL retrieval failed because the content is behind a paywall.
+ */
+ URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',
+ /**
+ * The URL retrieval failed because the content is unsafe.
+ */
+ URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'
+ };
  /**
  * The types of responses that can be returned by {@link LiveSession.receive}.
  *
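The hunk above introduces URLRetrievalStatus, which reports whether a URL referenced through the URL-context feature could be fetched. A hedged sketch of inspecting those statuses; the `urlContext` tool declaration and the `urlMetadata`/`retrievedUrl`/`urlRetrievalStatus` field names mirror the Gemini API's URL-context feature and are assumptions here (`ai` is obtained as in the previous sketch):

```js
import { getGenerativeModel, URLRetrievalStatus } from '@firebase/ai';

const urlModel = getGenerativeModel(ai, {
  model: 'gemini-2.5-flash',  // illustrative
  tools: [{ urlContext: {} }] // assumed URL-context tool declaration
});

const { response } = await urlModel.generateContent('Summarize https://example.com/post');

// Candidates now carry urlContextMetadata (see the mapping hunk below).
const urlMetadata = response.candidates?.[0]?.urlContextMetadata?.urlMetadata ?? [];
for (const entry of urlMetadata) {
  if (entry.urlRetrievalStatus !== URLRetrievalStatus.URL_RETRIEVAL_STATUS_SUCCESS) {
    console.warn(`Retrieval failed for ${entry.retrievedUrl}: ${entry.urlRetrievalStatus}`);
  }
}
```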
@@ -1526,7 +1602,8 @@ function mapGenerateContentCandidates(candidates) {
  finishMessage: candidate.finishMessage,
  safetyRatings: mappedSafetyRatings,
  citationMetadata,
- groundingMetadata: candidate.groundingMetadata
+ groundingMetadata: candidate.groundingMetadata,
+ urlContextMetadata: candidate.urlContextMetadata
  };
  mappedCandidates.push(mappedCandidate);
  });
@@ -1696,6 +1773,17 @@ function aggregateResponses(responses) {
  candidate.safetyRatings;
  aggregatedResponse.candidates[i].groundingMetadata =
  candidate.groundingMetadata;
+ // The urlContextMetadata object is defined in the first chunk of the response stream.
+ // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to
+ // make sure that we don't overwrite the first value urlContextMetadata object with undefined.
+ // FIXME: What happens if we receive a second, valid urlContextMetadata object?
+ const urlContextMetadata = candidate.urlContextMetadata;
+ if (typeof urlContextMetadata === 'object' &&
+ urlContextMetadata !== null &&
+ Object.keys(urlContextMetadata).length > 0) {
+ aggregatedResponse.candidates[i].urlContextMetadata =
+ urlContextMetadata;
+ }
  /**
  * Candidates should always have content and parts, but this handles
  * possible malformed responses.
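Because urlContextMetadata arrives only in the first stream chunk, the aggregation above copies it once and ignores the undefined values in later chunks. From the caller's side, that means the aggregated response of a streaming call still exposes it. A brief sketch, reusing `urlModel` from the earlier sketch:

```js
const result = await urlModel.generateContentStream('Summarize https://example.com/post');

for await (const chunk of result.stream) {
  process.stdout.write(chunk.text()); // incremental text
}

// The aggregated response keeps the urlContextMetadata captured from the
// first chunk, per the aggregation logic in the hunk above.
const aggregated = await result.response;
console.log(aggregated.candidates?.[0]?.urlContextMetadata);
```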
@@ -1734,6 +1822,72 @@ function aggregateResponses(responses) {
  return aggregatedResponse;
  }

+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const errorsCausingFallback = [
+ // most network errors
+ AIErrorCode.FETCH_ERROR,
+ // fallback code for all other errors in makeRequest
+ AIErrorCode.ERROR,
+ // error due to API not being enabled in project
+ AIErrorCode.API_NOT_ENABLED
+ ];
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+ if (!chromeAdapter) {
+ return inCloudCall();
+ }
+ switch (chromeAdapter.mode) {
+ case InferenceMode.ONLY_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+ case InferenceMode.ONLY_IN_CLOUD:
+ return inCloudCall();
+ case InferenceMode.PREFER_IN_CLOUD:
+ try {
+ return await inCloudCall();
+ }
+ catch (e) {
+ if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+ return onDeviceCall();
+ }
+ throw e;
+ }
+ case InferenceMode.PREFER_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ return inCloudCall();
+ default:
+ throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
+ }
+ }
+
  /**
  * @license
  * Copyright 2024 Google LLC
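callCloudOrDevice centralizes the routing that generateContent, generateContentStream, and countTokens previously handled separately: no adapter means cloud, ONLY_ON_DEVICE throws when the on-device model is missing, and PREFER_IN_CLOUD only falls back to the device on the error codes listed in errorsCausingFallback. From application code, the ONLY_ON_DEVICE failure surfaces as an AIError. A hedged handling sketch (`ai` as in the first sketch):

```js
import { getGenerativeModel, InferenceMode, AIError, AIErrorCode } from '@firebase/ai';

const onDeviceModel = getGenerativeModel(ai, { mode: InferenceMode.ONLY_ON_DEVICE });

try {
  const { response } = await onDeviceModel.generateContent('Hello!');
  console.log(response.text());
} catch (e) {
  // callCloudOrDevice throws UNSUPPORTED when ONLY_ON_DEVICE is requested
  // but the browser's on-device model is unavailable.
  if (e instanceof AIError && e.code === AIErrorCode.UNSUPPORTED) {
    console.warn('On-device inference is not available in this environment.');
  } else {
    throw e;
  }
}
```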
@@ -1758,13 +1912,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
  /* stream */ true, JSON.stringify(params), requestOptions);
  }
  async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContentStream(params);
- }
- else {
- response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
  return processStream(response, apiSettings); // TODO: Map streaming responses
  }
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -1775,13 +1923,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
  /* stream */ false, JSON.stringify(params), requestOptions);
  }
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContent(params);
- }
- else {
- response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
  return {
@@ -1991,7 +2133,9 @@ function validateChatHistory(history) {
  functionCall: 0,
  functionResponse: 0,
  thought: 0,
- thoughtSignature: 0
+ thoughtSignature: 0,
+ executableCode: 0,
+ codeExecutionResult: 0
  };
  for (const part of parts) {
  for (const key of VALID_PART_FIELDS) {
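Accepting executableCode and codeExecutionResult here means chat history can now carry code-execution parts, which is what the new Outcome and Language enums describe. A hedged sketch of enabling code execution and reading those parts; the `codeExecution` tool declaration and the part shapes mirror the Gemini API and are assumptions here (`ai` as before):

```js
import { getGenerativeModel, Outcome, Language } from '@firebase/ai';

const codeModel = getGenerativeModel(ai, {
  model: 'gemini-2.5-flash',     // illustrative
  tools: [{ codeExecution: {} }] // assumed code-execution tool declaration
});

const { response } = await codeModel.generateContent('Compute the 20th Fibonacci number with Python.');

for (const part of response.candidates?.[0]?.content?.parts ?? []) {
  if (part.executableCode) {
    // Model-generated code; `language` is one of the Language enum values.
    console.log(`[${part.executableCode.language ?? Language.PYTHON}]`, part.executableCode.code);
  }
  if (part.codeExecutionResult) {
    // Outcome reports whether the sandboxed execution succeeded.
    const ok = part.codeExecutionResult.outcome === Outcome.OK;
    console.log(ok ? 'output:' : 'failed:', part.codeExecutionResult.output);
  }
}
```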
@@ -2192,8 +2336,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
  return response.json();
  }
  async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- return (await chromeAdapter.countTokens(params)).json();
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
  }
  return countTokensOnCloud(apiSettings, model, params, requestOptions);
  }
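countTokens no longer attempts on-device token counting: with an ONLY_ON_DEVICE model it throws UNSUPPORTED, and in every other mode it always calls the cloud endpoint. A short handling sketch, reusing `model` and the AIError/AIErrorCode imports from the earlier sketches:

```js
try {
  // Always served by the cloud backend after this change.
  const { totalTokens } = await model.countTokens('How many tokens is this?');
  console.log('total tokens:', totalTokens);
} catch (e) {
  if (e instanceof AIError && e.code === AIErrorCode.UNSUPPORTED) {
    console.warn('countTokens() is not supported for on-device-only models.');
  } else {
    throw e;
  }
}
```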
@@ -3629,5 +3773,5 @@ function registerAI() {
  }
  registerAI();

- export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
  //# sourceMappingURL=index.node.mjs.map
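Finally, the export list gains Language, Outcome, and URLRetrievalStatus. They are plain string-valued enum objects and can be imported alongside the existing names:

```js
import { InferenceMode, Language, Outcome, URLRetrievalStatus } from '@firebase/ai';

console.log(InferenceMode.PREFER_IN_CLOUD); // 'prefer_in_cloud'
console.log(Outcome.OK);                    // 'OUTCOME_OK'
console.log(Language.PYTHON);               // 'PYTHON'
console.log(URLRetrievalStatus.URL_RETRIEVAL_STATUS_SUCCESS);
```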