@firebase/ai 2.3.0 → 2.4.0-20251007135320

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/ai-public.d.ts +156 -29
  2. package/dist/ai.d.ts +157 -29
  3. package/dist/esm/index.esm.js +108 -34
  4. package/dist/esm/index.esm.js.map +1 -1
  5. package/dist/esm/src/api.d.ts +1 -1
  6. package/dist/esm/src/methods/live-session-helpers.d.ts +2 -2
  7. package/dist/esm/src/methods/live-session.d.ts +10 -1
  8. package/dist/esm/src/models/imagen-model.d.ts +2 -2
  9. package/dist/esm/src/requests/imagen-image-format.d.ts +3 -3
  10. package/dist/esm/src/types/content.d.ts +4 -4
  11. package/dist/esm/src/types/enums.d.ts +4 -4
  12. package/dist/esm/src/types/googleai.d.ts +2 -1
  13. package/dist/esm/src/types/imagen/requests.d.ts +9 -9
  14. package/dist/esm/src/types/imagen/responses.d.ts +3 -3
  15. package/dist/esm/src/types/live-responses.d.ts +9 -1
  16. package/dist/esm/src/types/requests.d.ts +22 -2
  17. package/dist/esm/src/types/responses.d.ts +92 -0
  18. package/dist/index.cjs.js +108 -33
  19. package/dist/index.cjs.js.map +1 -1
  20. package/dist/index.node.cjs.js +108 -33
  21. package/dist/index.node.cjs.js.map +1 -1
  22. package/dist/index.node.mjs +108 -34
  23. package/dist/index.node.mjs.map +1 -1
  24. package/dist/src/api.d.ts +1 -1
  25. package/dist/src/methods/live-session-helpers.d.ts +2 -2
  26. package/dist/src/methods/live-session.d.ts +10 -1
  27. package/dist/src/models/imagen-model.d.ts +2 -2
  28. package/dist/src/requests/imagen-image-format.d.ts +3 -3
  29. package/dist/src/types/content.d.ts +4 -4
  30. package/dist/src/types/enums.d.ts +4 -4
  31. package/dist/src/types/googleai.d.ts +2 -1
  32. package/dist/src/types/imagen/requests.d.ts +9 -9
  33. package/dist/src/types/imagen/responses.d.ts +3 -3
  34. package/dist/src/types/live-responses.d.ts +9 -1
  35. package/dist/src/types/requests.d.ts +22 -2
  36. package/dist/src/types/responses.d.ts +92 -0
  37. package/package.json +1 -1
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
  import { Logger } from '@firebase/logger';
 
  var name = "@firebase/ai";
- var version = "2.3.0";
+ var version = "2.4.0-20251007135320";
 
  /**
  * @license
@@ -382,7 +382,7 @@ const InferenceMode = {
  /**
  * Represents the result of the code execution.
  *
- * @public
+ * @beta
  */
  const Outcome = {
  UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
@@ -393,7 +393,7 @@ const Outcome = {
  /**
  * The programming language of the code.
  *
- * @public
+ * @beta
  */
  const Language = {
  UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
@@ -416,6 +416,45 @@ const Language = {
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+ /**
+ * The status of a URL retrieval.
+ *
+ * @remarks
+ * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
+ * <br/>
+ * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
+ * <br/>
+ *
+ * @beta
+ */
+ const URLRetrievalStatus = {
+ /**
+ * Unspecified retrieval status.
+ */
+ URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',
+ /**
+ * The URL retrieval was successful.
+ */
+ URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',
+ /**
+ * The URL retrieval failed.
+ */
+ URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',
+ /**
+ * The URL retrieval failed because the content is behind a paywall.
+ */
+ URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',
+ /**
+ * The URL retrieval failed because the content is unsafe.
+ */
+ URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'
+ };
  /**
  * The types of responses that can be returned by {@link LiveSession.receive}.
  *
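
The new `URLRetrievalStatus` values report how each URL in a request's URL context was fetched. A minimal consumer sketch, assuming the `urlMetadata` entry shape (`retrievedUrl`, `retrievalStatus`) that the Gemini API documents for `urlContextMetadata`:

```ts
// A sketch, not from the diff: flag any URL the model could not use.
// The urlMetadata entry fields are assumed from the Gemini API docs.
import { URLRetrievalStatus, type GenerateContentResult } from 'firebase/ai';

function logFailedRetrievals(result: GenerateContentResult): void {
  const metadata = result.response.candidates?.[0]?.urlContextMetadata;
  for (const entry of metadata?.urlMetadata ?? []) {
    if (entry.retrievalStatus !== URLRetrievalStatus.URL_RETRIEVAL_STATUS_SUCCESS) {
      console.warn(`Skipped ${entry.retrievedUrl}: ${entry.retrievalStatus}`);
    }
  }
}
```
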
@@ -542,7 +581,7 @@ const SchemaType = {
  * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
  * for more details.
  *
- * @beta
+ * @public
  */
  const ImagenSafetyFilterLevel = {
  /**
@@ -571,7 +610,7 @@ const ImagenSafetyFilterLevel = {
  * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
  * documentation for more details.
  *
- * @beta
+ * @public
  */
  const ImagenPersonFilterLevel = {
  /**
@@ -604,7 +643,7 @@ const ImagenPersonFilterLevel = {
  * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
  * for more details and examples of the supported aspect ratios.
  *
- * @beta
+ * @public
  */
  const ImagenAspectRatio = {
  /**
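
With the Imagen configuration enums promoted from `@beta` to `@public`, they can now appear in stable code. A hedged configuration sketch; the field names follow the Firebase AI Logic docs for `ImagenModelParams`, and the model name is illustrative:

```ts
// A sketch with assumed field names (generationConfig / safetySettings).
import {
  ImagenAspectRatio,
  ImagenPersonFilterLevel,
  ImagenSafetyFilterLevel,
  type ImagenModelParams,
} from 'firebase/ai';

const params: ImagenModelParams = {
  model: 'imagen-3.0-generate-002', // illustrative model name
  generationConfig: {
    numberOfImages: 2,
    aspectRatio: ImagenAspectRatio.LANDSCAPE_16x9,
  },
  safetySettings: {
    safetyFilterLevel: ImagenSafetyFilterLevel.BLOCK_ONLY_HIGH,
    personFilterLevel: ImagenPersonFilterLevel.ALLOW_ADULT,
  },
};
```
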
@@ -1876,7 +1915,7 @@ function mapGenerateContentCandidates(candidates) {
  // videoMetadata is not supported.
  // Throw early since developers may send a long video as input and only expect to pay
  // for inference on a small portion of the video.
- if (candidate.content?.parts.some(part => part?.videoMetadata)) {
+ if (candidate.content?.parts?.some(part => part?.videoMetadata)) {
  throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.');
  }
  const mappedCandidate = {
@@ -1886,7 +1925,8 @@ function mapGenerateContentCandidates(candidates) {
  finishMessage: candidate.finishMessage,
  safetyRatings: mappedSafetyRatings,
  citationMetadata,
- groundingMetadata: candidate.groundingMetadata
+ groundingMetadata: candidate.groundingMetadata,
+ urlContextMetadata: candidate.urlContextMetadata
  };
  mappedCandidates.push(mappedCandidate);
  });
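
Candidates mapped from the Gemini Developer API now carry `urlContextMetadata` through to the developer. A sketch of a request that would populate it, assuming the `urlContext` tool shape documented for the Gemini API (model name illustrative):

```ts
// A sketch: enable the URL context tool so candidates come back with
// urlContextMetadata attached. The tool shape `{ urlContext: {} }` is an
// assumption taken from the Gemini API docs.
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

async function summarizeUrl(): Promise<void> {
  const ai = getAI(initializeApp({ /* your Firebase config */ }));
  const model = getGenerativeModel(ai, {
    model: 'gemini-2.5-flash', // illustrative model name
    tools: [{ urlContext: {} }],
  });
  const result = await model.generateContent('Summarize https://example.com/post');
  console.log(result.response.candidates?.[0]?.urlContextMetadata);
}
```
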
@@ -1977,6 +2017,14 @@ async function* generateResponseSequence(stream, apiSettings) {
  else {
  enhancedResponse = createEnhancedContentResponse(value);
  }
+ const firstCandidate = enhancedResponse.candidates?.[0];
+ // Don't yield a response with no useful data for the developer.
+ if (!firstCandidate?.content?.parts &&
+ !firstCandidate?.finishReason &&
+ !firstCandidate?.citationMetadata &&
+ !firstCandidate?.urlContextMetadata) {
+ continue;
+ }
  yield enhancedResponse;
  }
  }
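
The added guard drops stream chunks that carry nothing actionable. Restated as a standalone predicate (a sketch; the type name matches the SDK's public surface):

```ts
// A chunk is surfaced only if its first candidate has parts, a finish reason,
// citation metadata, or URL context metadata — mirroring the diff's condition.
import type { GenerateContentResponse } from 'firebase/ai';

function hasUsefulData(response: GenerateContentResponse): boolean {
  const candidate = response.candidates?.[0];
  return Boolean(
    candidate?.content?.parts ||
      candidate?.finishReason ||
      candidate?.citationMetadata ||
      candidate?.urlContextMetadata
  );
}
```
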
@@ -2056,36 +2104,43 @@ function aggregateResponses(responses) {
  candidate.safetyRatings;
  aggregatedResponse.candidates[i].groundingMetadata =
  candidate.groundingMetadata;
+ // The urlContextMetadata object is defined in the first chunk of the response stream.
+ // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to
+ // make sure that we don't overwrite the first urlContextMetadata object with undefined.
+ // FIXME: What happens if we receive a second, valid urlContextMetadata object?
+ const urlContextMetadata = candidate.urlContextMetadata;
+ if (typeof urlContextMetadata === 'object' &&
+ urlContextMetadata !== null &&
+ Object.keys(urlContextMetadata).length > 0) {
+ aggregatedResponse.candidates[i].urlContextMetadata =
+ urlContextMetadata;
+ }
  /**
  * Candidates should always have content and parts, but this handles
  * possible malformed responses.
  */
- if (candidate.content && candidate.content.parts) {
+ if (candidate.content) {
+ // Skip a candidate without parts.
+ if (!candidate.content.parts) {
+ continue;
+ }
  if (!aggregatedResponse.candidates[i].content) {
  aggregatedResponse.candidates[i].content = {
  role: candidate.content.role || 'user',
  parts: []
  };
  }
- const newPart = {};
  for (const part of candidate.content.parts) {
- if (part.text !== undefined) {
- // The backend can send empty text parts. If these are sent back
- // (e.g. in chat history), the backend will respond with an error.
- // To prevent this, ignore empty text parts.
- if (part.text === '') {
- continue;
- }
- newPart.text = part.text;
+ const newPart = { ...part };
+ // The backend can send empty text parts. If these are sent back
+ // (e.g. in chat history), the backend will respond with an error.
+ // To prevent this, ignore empty text parts.
+ if (part.text === '') {
+ continue;
  }
- if (part.functionCall) {
- newPart.functionCall = part.functionCall;
+ if (Object.keys(newPart).length > 0) {
+ aggregatedResponse.candidates[i].content.parts.push(newPart);
  }
- if (Object.keys(newPart).length === 0) {
- throw new AIError(AIErrorCode.INVALID_CONTENT, 'Part should have at least one property, but there are none. This is likely caused ' +
- 'by a malformed response from the backend.');
- }
- aggregatedResponse.candidates[i].content.parts.push(newPart);
  }
  }
  }
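
The aggregation change hinges on a "non-empty object" check, so a later chunk's missing `urlContextMetadata` never clobbers the value captured from the first chunk. The guard, extracted as a sketch:

```ts
// Treat urlContextMetadata as present only when it is a non-empty object;
// undefined or {} in later chunks leaves the aggregated value untouched.
function isNonEmptyObject(value: unknown): value is Record<string, unknown> {
  return (
    typeof value === 'object' && value !== null && Object.keys(value).length > 0
  );
}
```
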
@@ -2791,6 +2846,25 @@ class LiveSession {
  this.webSocketHandler.send(JSON.stringify(message));
  });
  }
+ /**
+ * Sends function responses to the server.
+ *
+ * @param functionResponses - The function responses to send.
+ * @throws If this session has been closed.
+ *
+ * @beta
+ */
+ async sendFunctionResponses(functionResponses) {
+ if (this.isClosed) {
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
+ }
+ const message = {
+ toolResponse: {
+ functionResponses
+ }
+ };
+ this.webSocketHandler.send(JSON.stringify(message));
+ }
  /**
  * Sends a stream of {@link GenerativeContentBlob}.
  *
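
Usage of the new method is straightforward; a minimal sketch, assuming the SDK's public `FunctionResponse` type with its optional `id` field (the id, tool name, and payload here are invented):

```ts
// Answer a server tool call on a live session via the new method.
import type { FunctionResponse, LiveSession } from 'firebase/ai';

async function answerToolCall(session: LiveSession): Promise<void> {
  const functionResponse: FunctionResponse = {
    id: 'call-123', // hypothetical id of the tool call being answered
    name: 'getWeather',
    response: { temperatureCelsius: 21 }, // hypothetical tool result
  };
  await session.sendFunctionResponses([functionResponse]);
}
```
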
@@ -3005,7 +3079,7 @@ class LiveGenerativeModel extends AIModel {
  * }
  * ```
  *
- * @beta
+ * @public
  */
  class ImagenModel extends AIModel {
  /**
@@ -3041,7 +3115,7 @@ class ImagenModel extends AIModel {
  * returned object will have a `filteredReason` property.
  * If all images are filtered, the `images` array will be empty.
  *
- * @beta
+ * @public
  */
  async generateImages(prompt) {
  const body = createPredictRequestBody(prompt, {
@@ -3504,7 +3578,7 @@ class AnyOfSchema extends Schema {
  * }
  * ```
  *
- * @beta
+ * @public
  */
  class ImagenImageFormat {
  constructor() {
@@ -3516,7 +3590,7 @@ class ImagenImageFormat {
  * @param compressionQuality - The level of compression (a number between 0 and 100).
  * @returns An {@link ImagenImageFormat} object for a JPEG image.
  *
- * @beta
+ * @public
  */
  static jpeg(compressionQuality) {
  if (compressionQuality &&
@@ -3530,7 +3604,7 @@ class ImagenImageFormat {
  *
  * @returns An {@link ImagenImageFormat} object for a PNG image.
  *
- * @beta
+ * @public
  */
  static png() {
  return { mimeType: 'image/png' };
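
Both factories return plain objects; a two-line sketch (the PNG shape is read directly from the code above, the JPEG shape from the doc comment):

```ts
import { ImagenImageFormat } from 'firebase/ai';

const jpeg = ImagenImageFormat.jpeg(80); // { mimeType: 'image/jpeg', compressionQuality: 80 }
const png = ImagenImageFormat.png();     // { mimeType: 'image/png' }
```
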
@@ -3772,9 +3846,9 @@ class AudioConversationRunner {
  }
  else {
  try {
- const resultPart = await this.options.functionCallingHandler(message.functionCalls);
+ const functionResponse = await this.options.functionCallingHandler(message.functionCalls);
  if (!this.isStopped) {
- void this.liveSession.send([resultPart]);
+ void this.liveSession.sendFunctionResponses([functionResponse]);
  }
  }
  catch (e) {
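
This changes the contract of `functionCallingHandler`: it now resolves to a `FunctionResponse` that the runner forwards via `sendFunctionResponses`, rather than a `Part` sent through `send`. A hedged handler sketch (the result payload, and the presence of `id` on live function calls, are assumptions for illustration):

```ts
// Return a FunctionResponse from the handler; the runner forwards it with
// sendFunctionResponses under the new contract.
import {
  startAudioConversation,
  type FunctionResponse,
  type LiveSession,
} from 'firebase/ai';

async function startWithTools(session: LiveSession): Promise<void> {
  await startAudioConversation(session, {
    functionCallingHandler: async (calls): Promise<FunctionResponse> => {
      const call = calls?.[0];
      return {
        id: call?.id, // echoes the tool-call id, if the server provided one
        name: call?.name ?? 'unknown',
        response: { ok: true }, // hypothetical tool result
      };
    },
  });
}
```
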
@@ -3994,7 +4068,7 @@ function getGenerativeModel(ai, modelParams, requestOptions) {
  * @throws If the `apiKey` or `projectId` fields are missing in your
  * Firebase config.
  *
- * @beta
+ * @public
  */
  function getImagenModel(ai, modelParams, requestOptions) {
  if (!modelParams.model) {
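
With `getImagenModel` now `@public`, the end-to-end flow can live in stable code. A minimal sketch; the model name is illustrative, while `images` and `filteredReason` match the `generateImages` doc comment above:

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getImagenModel } from 'firebase/ai';

async function makeImages(): Promise<void> {
  const ai = getAI(initializeApp({ /* your Firebase config */ }));
  const model = getImagenModel(ai, { model: 'imagen-3.0-generate-002' }); // illustrative model name
  const { images, filteredReason } = await model.generateImages('A watercolor fox');
  if (filteredReason) {
    console.warn(`Some images were filtered: ${filteredReason}`);
  }
  console.log(`Received ${images.length} image(s)`);
}
```
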
@@ -4035,5 +4109,5 @@ function registerAI() {
  }
  registerAI();
 
- export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
  //# sourceMappingURL=index.esm.js.map