@firebase/ai 2.3.0 → 2.4.0-20251007135320

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/ai-public.d.ts +156 -29
  2. package/dist/ai.d.ts +157 -29
  3. package/dist/esm/index.esm.js +108 -34
  4. package/dist/esm/index.esm.js.map +1 -1
  5. package/dist/esm/src/api.d.ts +1 -1
  6. package/dist/esm/src/methods/live-session-helpers.d.ts +2 -2
  7. package/dist/esm/src/methods/live-session.d.ts +10 -1
  8. package/dist/esm/src/models/imagen-model.d.ts +2 -2
  9. package/dist/esm/src/requests/imagen-image-format.d.ts +3 -3
  10. package/dist/esm/src/types/content.d.ts +4 -4
  11. package/dist/esm/src/types/enums.d.ts +4 -4
  12. package/dist/esm/src/types/googleai.d.ts +2 -1
  13. package/dist/esm/src/types/imagen/requests.d.ts +9 -9
  14. package/dist/esm/src/types/imagen/responses.d.ts +3 -3
  15. package/dist/esm/src/types/live-responses.d.ts +9 -1
  16. package/dist/esm/src/types/requests.d.ts +22 -2
  17. package/dist/esm/src/types/responses.d.ts +92 -0
  18. package/dist/index.cjs.js +108 -33
  19. package/dist/index.cjs.js.map +1 -1
  20. package/dist/index.node.cjs.js +108 -33
  21. package/dist/index.node.cjs.js.map +1 -1
  22. package/dist/index.node.mjs +108 -34
  23. package/dist/index.node.mjs.map +1 -1
  24. package/dist/src/api.d.ts +1 -1
  25. package/dist/src/methods/live-session-helpers.d.ts +2 -2
  26. package/dist/src/methods/live-session.d.ts +10 -1
  27. package/dist/src/models/imagen-model.d.ts +2 -2
  28. package/dist/src/requests/imagen-image-format.d.ts +3 -3
  29. package/dist/src/types/content.d.ts +4 -4
  30. package/dist/src/types/enums.d.ts +4 -4
  31. package/dist/src/types/googleai.d.ts +2 -1
  32. package/dist/src/types/imagen/requests.d.ts +9 -9
  33. package/dist/src/types/imagen/responses.d.ts +3 -3
  34. package/dist/src/types/live-responses.d.ts +9 -1
  35. package/dist/src/types/requests.d.ts +22 -2
  36. package/dist/src/types/responses.d.ts +92 -0
  37. package/package.json +1 -1
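
The hunks below are from the bundled dist/index.node.mjs (per its sourceMappingURL trailer); per the file list, the other bundles receive equivalent changes. Highlights: a new URLRetrievalStatus enum is added and exported, and a urlContextMetadata field is threaded through candidate mapping and stream aggregation; LiveSession gains a sendFunctionResponses() method, which the audio conversation runner now uses in place of LiveSession.send(); streamed chunks carrying no useful candidate data are no longer yielded; aggregation shallow-copies whole parts rather than rebuilding them from text and functionCall; the Imagen surface (ImagenModel, getImagenModel, ImagenImageFormat, and the Imagen enums) is promoted from @beta to @public; and Outcome and Language move from @public to @beta.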
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
 import { Logger } from '@firebase/logger';
 
 var name = "@firebase/ai";
-var version = "2.3.0";
+var version = "2.4.0-20251007135320";
 
 /**
  * @license
@@ -326,7 +326,7 @@ const InferenceMode = {
 /**
  * Represents the result of the code execution.
  *
- * @public
+ * @beta
  */
 const Outcome = {
     UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
@@ -337,7 +337,7 @@ const Outcome = {
 /**
  * The programming language of the code.
  *
- * @public
+ * @beta
  */
 const Language = {
     UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
@@ -360,6 +360,45 @@ const Language = {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+/**
+ * The status of a URL retrieval.
+ *
+ * @remarks
+ * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
+ * <br/>
+ *
+ * @beta
+ */
+const URLRetrievalStatus = {
+    /**
+     * Unspecified retrieval status.
+     */
+    URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',
+    /**
+     * The URL retrieval was successful.
+     */
+    URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',
+    /**
+     * The URL retrieval failed.
+     */
+    URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',
+    /**
+     * The URL retrieval failed because the content is behind a paywall.
+     */
+    URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',
+    /**
+     * The URL retrieval failed because the content is unsafe.
+     */
+    URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'
+};
 /**
  * The types of responses that can be returned by {@link LiveSession.receive}.
  *
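
The new URLRetrievalStatus values surface on responses through the urlContextMetadata field added to candidates later in this diff. A minimal sketch of checking retrieval status per URL; the `urlContext` tool entry and the `urlMetadata`, `retrievedUrl`, and `retrievalStatus` field names follow the Gemini API shape and are assumptions here, not confirmed by this diff:

```js
import { getAI, getGenerativeModel, URLRetrievalStatus } from '@firebase/ai';

// `app` is assumed to be an initialized FirebaseApp.
const ai = getAI(app);
const model = getGenerativeModel(ai, {
  model: 'gemini-2.5-flash', // hypothetical model name
  tools: [{ urlContext: {} }] // tool name assumed from the Gemini API
});

const result = await model.generateContent(
  'Summarize https://example.com/article'
);
const urlContextMetadata = result.response.candidates?.[0]?.urlContextMetadata;
for (const entry of urlContextMetadata?.urlMetadata ?? []) {
  if (entry.retrievalStatus !== URLRetrievalStatus.URL_RETRIEVAL_STATUS_SUCCESS) {
    console.warn(`Retrieval failed for ${entry.retrievedUrl}: ${entry.retrievalStatus}`);
  }
}
```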
@@ -486,7 +525,7 @@ const SchemaType = {
  * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
  * for more details.
  *
- * @beta
+ * @public
  */
 const ImagenSafetyFilterLevel = {
     /**
@@ -515,7 +554,7 @@ const ImagenSafetyFilterLevel = {
  * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
  * documentation for more details.
  *
- * @beta
+ * @public
  */
 const ImagenPersonFilterLevel = {
     /**
@@ -548,7 +587,7 @@ const ImagenPersonFilterLevel = {
  * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
  * for more details and examples of the supported aspect ratios.
  *
- * @beta
+ * @public
  */
 const ImagenAspectRatio = {
     /**
@@ -1553,7 +1592,7 @@ function mapGenerateContentCandidates(candidates) {
         // videoMetadata is not supported.
         // Throw early since developers may send a long video as input and only expect to pay
         // for inference on a small portion of the video.
-        if (candidate.content?.parts.some(part => part?.videoMetadata)) {
+        if (candidate.content?.parts?.some(part => part?.videoMetadata)) {
             throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.');
         }
         const mappedCandidate = {
@@ -1563,7 +1602,8 @@ function mapGenerateContentCandidates(candidates) {
             finishMessage: candidate.finishMessage,
             safetyRatings: mappedSafetyRatings,
             citationMetadata,
-            groundingMetadata: candidate.groundingMetadata
+            groundingMetadata: candidate.groundingMetadata,
+            urlContextMetadata: candidate.urlContextMetadata
         };
         mappedCandidates.push(mappedCandidate);
     });
@@ -1654,6 +1694,14 @@ async function* generateResponseSequence(stream, apiSettings) {
         else {
             enhancedResponse = createEnhancedContentResponse(value);
         }
+        const firstCandidate = enhancedResponse.candidates?.[0];
+        // Don't yield a response with no useful data for the developer.
+        if (!firstCandidate?.content?.parts &&
+            !firstCandidate?.finishReason &&
+            !firstCandidate?.citationMetadata &&
+            !firstCandidate?.urlContextMetadata) {
+            continue;
+        }
         yield enhancedResponse;
     }
 }
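
With this filter in place, every chunk a consumer sees carries at least one of: content parts, a finishReason, citationMetadata, or urlContextMetadata. A minimal consumer sketch, assuming a `model` obtained from getGenerativeModel:

```js
const { stream, response } = await model.generateContentStream('Tell me a story.');
for await (const chunk of stream) {
  // Empty keep-alive chunks are no longer yielded, so there is no need to
  // guard against responses with no useful candidate data.
  process.stdout.write(chunk.text());
}
// The aggregated response is still available after the stream completes.
const aggregated = await response;
```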
@@ -1733,36 +1781,43 @@ function aggregateResponses(responses) {
                 candidate.safetyRatings;
             aggregatedResponse.candidates[i].groundingMetadata =
                 candidate.groundingMetadata;
+            // The urlContextMetadata object is defined in the first chunk of the response stream.
+            // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to
+            // make sure that we don't overwrite the first value urlContextMetadata object with undefined.
+            // FIXME: What happens if we receive a second, valid urlContextMetadata object?
+            const urlContextMetadata = candidate.urlContextMetadata;
+            if (typeof urlContextMetadata === 'object' &&
+                urlContextMetadata !== null &&
+                Object.keys(urlContextMetadata).length > 0) {
+                aggregatedResponse.candidates[i].urlContextMetadata =
+                    urlContextMetadata;
+            }
             /**
              * Candidates should always have content and parts, but this handles
              * possible malformed responses.
              */
-            if (candidate.content && candidate.content.parts) {
+            if (candidate.content) {
+                // Skip a candidate without parts.
+                if (!candidate.content.parts) {
+                    continue;
+                }
                 if (!aggregatedResponse.candidates[i].content) {
                     aggregatedResponse.candidates[i].content = {
                         role: candidate.content.role || 'user',
                         parts: []
                     };
                 }
-                const newPart = {};
                 for (const part of candidate.content.parts) {
-                    if (part.text !== undefined) {
-                        // The backend can send empty text parts. If these are sent back
-                        // (e.g. in chat history), the backend will respond with an error.
-                        // To prevent this, ignore empty text parts.
-                        if (part.text === '') {
-                            continue;
-                        }
-                        newPart.text = part.text;
-                    }
-                    if (part.functionCall) {
-                        newPart.functionCall = part.functionCall;
+                    const newPart = { ...part };
+                    // The backend can send empty text parts. If these are sent back
+                    // (e.g. in chat history), the backend will respond with an error.
+                    // To prevent this, ignore empty text parts.
+                    if (part.text === '') {
+                        continue;
                     }
-                    if (Object.keys(newPart).length === 0) {
-                        throw new AIError(AIErrorCode.INVALID_CONTENT, 'Part should have at least one property, but there are none. This is likely caused ' +
-                            'by a malformed response from the backend.');
+                    if (Object.keys(newPart).length > 0) {
+                        aggregatedResponse.candidates[i].content.parts.push(newPart);
                     }
-                    aggregatedResponse.candidates[i].content.parts.push(newPart);
                 }
             }
         }
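
The `{ ...part }` spread changes what survives aggregation: 2.3.0 rebuilt each part from only `text` and `functionCall` (and threw AIErrorCode.INVALID_CONTENT when neither was present), while 2.4.0 shallow-copies the whole part and silently skips empty ones. A small illustration:

```js
// A part as it might arrive in a stream chunk:
const part = { inlineData: { mimeType: 'image/png', data: '<base64>' } };

// 2.3.0: only `text` and `functionCall` were copied, so this part produced
// an empty object and aggregation threw INVALID_CONTENT.
// 2.4.0: a shallow copy keeps every field intact.
const newPart = { ...part };
```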
@@ -2468,6 +2523,25 @@ class LiveSession {
             this.webSocketHandler.send(JSON.stringify(message));
         });
     }
+    /**
+     * Sends function responses to the server.
+     *
+     * @param functionResponses - The function responses to send.
+     * @throws If this session has been closed.
+     *
+     * @beta
+     */
+    async sendFunctionResponses(functionResponses) {
+        if (this.isClosed) {
+            throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
+        }
+        const message = {
+            toolResponse: {
+                functionResponses
+            }
+        };
+        this.webSocketHandler.send(JSON.stringify(message));
+    }
     /**
      * Sends a stream of {@link GenerativeContentBlob}.
      *
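
A hedged sketch of answering a tool call over a live session with the new method; the FunctionResponse shape (`id`, `name`, `response`) follows the Live API's tool-calling contract and is an assumption here:

```js
// `session` is a LiveSession, e.g. from liveModel.connect();
// `functionCall` is one entry from a received tool-call message.
await session.sendFunctionResponses([
  {
    id: functionCall.id,     // assumed: live function calls carry an id to echo back
    name: functionCall.name,
    response: { temperature: 21, unit: 'celsius' } // example payload
  }
]);
```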
@@ -2682,7 +2756,7 @@ class LiveGenerativeModel extends AIModel {
  * }
  * ```
  *
- * @beta
+ * @public
  */
 class ImagenModel extends AIModel {
     /**
@@ -2718,7 +2792,7 @@ class ImagenModel extends AIModel {
      * returned object will have a `filteredReason` property.
      * If all images are filtered, the `images` array will be empty.
      *
-     * @beta
+     * @public
      */
     async generateImages(prompt) {
         const body = createPredictRequestBody(prompt, {
@@ -3181,7 +3255,7 @@ class AnyOfSchema extends Schema {
  * }
  * ```
  *
- * @beta
+ * @public
  */
 class ImagenImageFormat {
     constructor() {
@@ -3193,7 +3267,7 @@ class ImagenImageFormat {
      * @param compressionQuality - The level of compression (a number between 0 and 100).
      * @returns An {@link ImagenImageFormat} object for a JPEG image.
      *
-     * @beta
+     * @public
      */
     static jpeg(compressionQuality) {
         if (compressionQuality &&
@@ -3207,7 +3281,7 @@ class ImagenImageFormat {
      *
      * @returns An {@link ImagenImageFormat} object for a PNG image.
      *
-     * @beta
+     * @public
      */
     static png() {
         return { mimeType: 'image/png' };
@@ -3449,9 +3523,9 @@ class AudioConversationRunner {
         }
         else {
             try {
-                const resultPart = await this.options.functionCallingHandler(message.functionCalls);
+                const functionResponse = await this.options.functionCallingHandler(message.functionCalls);
                 if (!this.isStopped) {
-                    void this.liveSession.send([resultPart]);
+                    void this.liveSession.sendFunctionResponses([functionResponse]);
                 }
             }
             catch (e) {
@@ -3671,7 +3745,7 @@ function getGenerativeModel(ai, modelParams, requestOptions) {
  * @throws If the `apiKey` or `projectId` fields are missing in your
  * Firebase config.
  *
- * @beta
+ * @public
  */
 function getImagenModel(ai, modelParams, requestOptions) {
     if (!modelParams.model) {
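
With getImagenModel, ImagenModel.generateImages, and ImagenImageFormat promoted to @public, standard usage looks like this sketch (the model name and the generationConfig placement are assumptions):

```js
import { getAI, getImagenModel, ImagenImageFormat } from '@firebase/ai';

const ai = getAI(app); // `app` is an initialized FirebaseApp
const model = getImagenModel(ai, {
  model: 'imagen-3.0-generate-002', // hypothetical model name
  generationConfig: { imageFormat: ImagenImageFormat.jpeg(75) }
});

const { images, filteredReason } = await model.generateImages('A watercolor fox.');
if (filteredReason) {
  console.warn(`Some images were filtered: ${filteredReason}`);
}
```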
@@ -3722,5 +3796,5 @@ function registerAI() {
 }
 registerAI();
 
-export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
 //# sourceMappingURL=index.node.mjs.map