@depup/firebase__ai 2.9.0-depup.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. package/README.md +31 -0
  2. package/changes.json +10 -0
  3. package/dist/ai-public.d.ts +3472 -0
  4. package/dist/ai.d.ts +3712 -0
  5. package/dist/esm/index.esm.js +4765 -0
  6. package/dist/esm/index.esm.js.map +1 -0
  7. package/dist/esm/package.json +1 -0
  8. package/dist/esm/src/api.d.ts +121 -0
  9. package/dist/esm/src/backend.d.ts +98 -0
  10. package/dist/esm/src/constants.d.ts +29 -0
  11. package/dist/esm/src/errors.d.ts +35 -0
  12. package/dist/esm/src/factory-browser.d.ts +19 -0
  13. package/dist/esm/src/factory-node.d.ts +19 -0
  14. package/dist/esm/src/googleai-mappers.d.ts +73 -0
  15. package/dist/esm/src/helpers.d.ts +30 -0
  16. package/dist/esm/src/index.d.ts +13 -0
  17. package/dist/esm/src/index.node.d.ts +7 -0
  18. package/dist/esm/src/logger.d.ts +18 -0
  19. package/dist/esm/src/methods/chat-session-helpers.d.ts +18 -0
  20. package/dist/esm/src/methods/chat-session.d.ts +77 -0
  21. package/dist/esm/src/methods/chrome-adapter.d.ts +124 -0
  22. package/dist/esm/src/methods/count-tokens.d.ts +21 -0
  23. package/dist/esm/src/methods/generate-content.d.ts +25 -0
  24. package/dist/esm/src/methods/live-session-helpers.d.ts +154 -0
  25. package/dist/esm/src/methods/live-session.d.ts +154 -0
  26. package/dist/esm/src/models/ai-model.d.ts +72 -0
  27. package/dist/esm/src/models/generative-model.d.ts +56 -0
  28. package/dist/esm/src/models/imagen-model.d.ts +102 -0
  29. package/dist/esm/src/models/index.d.ts +20 -0
  30. package/dist/esm/src/models/live-generative-model.d.ts +55 -0
  31. package/dist/esm/src/models/template-generative-model.d.ts +64 -0
  32. package/dist/esm/src/models/template-imagen-model.d.ts +51 -0
  33. package/dist/esm/src/models/utils.d.ts +26 -0
  34. package/dist/esm/src/public-types.d.ts +97 -0
  35. package/dist/esm/src/requests/hybrid-helpers.d.ts +33 -0
  36. package/dist/esm/src/requests/imagen-image-format.d.ts +61 -0
  37. package/dist/esm/src/requests/request-helpers.d.ts +28 -0
  38. package/dist/esm/src/requests/request.d.ts +69 -0
  39. package/dist/esm/src/requests/response-helpers.d.ts +57 -0
  40. package/dist/esm/src/requests/schema-builder.d.ts +170 -0
  41. package/dist/esm/src/requests/stream-reader.d.ts +39 -0
  42. package/dist/esm/src/service.d.ts +35 -0
  43. package/dist/esm/src/types/chrome-adapter.d.ts +61 -0
  44. package/dist/esm/src/types/content.d.ts +266 -0
  45. package/dist/esm/src/types/enums.d.ts +419 -0
  46. package/dist/esm/src/types/error.d.ts +89 -0
  47. package/dist/esm/src/types/googleai.d.ts +57 -0
  48. package/dist/esm/src/types/imagen/index.d.ts +18 -0
  49. package/dist/esm/src/types/imagen/internal.d.ts +134 -0
  50. package/dist/esm/src/types/imagen/requests.d.ts +245 -0
  51. package/dist/esm/src/types/imagen/responses.d.ts +79 -0
  52. package/dist/esm/src/types/index.d.ts +26 -0
  53. package/dist/esm/src/types/internal.d.ts +35 -0
  54. package/dist/esm/src/types/language-model.d.ts +107 -0
  55. package/dist/esm/src/types/live-responses.d.ts +79 -0
  56. package/dist/esm/src/types/requests.d.ts +543 -0
  57. package/dist/esm/src/types/responses.d.ts +607 -0
  58. package/dist/esm/src/types/schema.d.ts +139 -0
  59. package/dist/esm/src/websocket.d.ts +67 -0
  60. package/dist/index.cjs.js +4820 -0
  61. package/dist/index.cjs.js.map +1 -0
  62. package/dist/index.node.cjs.js +4512 -0
  63. package/dist/index.node.cjs.js.map +1 -0
  64. package/dist/index.node.mjs +4457 -0
  65. package/dist/index.node.mjs.map +1 -0
  66. package/dist/src/api.d.ts +121 -0
  67. package/dist/src/backend.d.ts +98 -0
  68. package/dist/src/constants.d.ts +29 -0
  69. package/dist/src/errors.d.ts +35 -0
  70. package/dist/src/factory-browser.d.ts +19 -0
  71. package/dist/src/factory-node.d.ts +19 -0
  72. package/dist/src/googleai-mappers.d.ts +73 -0
  73. package/dist/src/helpers.d.ts +30 -0
  74. package/dist/src/index.d.ts +13 -0
  75. package/dist/src/index.node.d.ts +7 -0
  76. package/dist/src/logger.d.ts +18 -0
  77. package/dist/src/methods/chat-session-helpers.d.ts +18 -0
  78. package/dist/src/methods/chat-session.d.ts +77 -0
  79. package/dist/src/methods/chrome-adapter.d.ts +124 -0
  80. package/dist/src/methods/count-tokens.d.ts +21 -0
  81. package/dist/src/methods/generate-content.d.ts +25 -0
  82. package/dist/src/methods/live-session-helpers.d.ts +154 -0
  83. package/dist/src/methods/live-session.d.ts +154 -0
  84. package/dist/src/models/ai-model.d.ts +72 -0
  85. package/dist/src/models/generative-model.d.ts +56 -0
  86. package/dist/src/models/imagen-model.d.ts +102 -0
  87. package/dist/src/models/index.d.ts +20 -0
  88. package/dist/src/models/live-generative-model.d.ts +55 -0
  89. package/dist/src/models/template-generative-model.d.ts +64 -0
  90. package/dist/src/models/template-imagen-model.d.ts +51 -0
  91. package/dist/src/models/utils.d.ts +26 -0
  92. package/dist/src/public-types.d.ts +97 -0
  93. package/dist/src/requests/hybrid-helpers.d.ts +33 -0
  94. package/dist/src/requests/imagen-image-format.d.ts +61 -0
  95. package/dist/src/requests/request-helpers.d.ts +28 -0
  96. package/dist/src/requests/request.d.ts +69 -0
  97. package/dist/src/requests/response-helpers.d.ts +57 -0
  98. package/dist/src/requests/schema-builder.d.ts +170 -0
  99. package/dist/src/requests/stream-reader.d.ts +39 -0
  100. package/dist/src/service.d.ts +35 -0
  101. package/dist/src/tsdoc-metadata.json +11 -0
  102. package/dist/src/types/chrome-adapter.d.ts +61 -0
  103. package/dist/src/types/content.d.ts +266 -0
  104. package/dist/src/types/enums.d.ts +419 -0
  105. package/dist/src/types/error.d.ts +89 -0
  106. package/dist/src/types/googleai.d.ts +57 -0
  107. package/dist/src/types/imagen/index.d.ts +18 -0
  108. package/dist/src/types/imagen/internal.d.ts +134 -0
  109. package/dist/src/types/imagen/requests.d.ts +245 -0
  110. package/dist/src/types/imagen/responses.d.ts +79 -0
  111. package/dist/src/types/index.d.ts +26 -0
  112. package/dist/src/types/internal.d.ts +35 -0
  113. package/dist/src/types/language-model.d.ts +107 -0
  114. package/dist/src/types/live-responses.d.ts +79 -0
  115. package/dist/src/types/requests.d.ts +543 -0
  116. package/dist/src/types/responses.d.ts +607 -0
  117. package/dist/src/types/schema.d.ts +139 -0
  118. package/dist/src/websocket.d.ts +67 -0
  119. package/package.json +106 -0
@@ -0,0 +1,4457 @@
1
+ import { _isFirebaseServerApp, _getProvider, getApp, _registerComponent, registerVersion } from '@firebase/app';
2
+ import { Component } from '@firebase/component';
3
+ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
4
+ import { Logger } from '@firebase/logger';
5
+
6
// Package identity, reported to the Firebase platform via registerVersion().
// `const` instead of `var`: neither binding is reassigned in this module.
const name = "@firebase/ai";
const version = "2.9.0";
8
+
9
+ /**
10
+ * @license
11
+ * Copyright 2024 Google LLC
12
+ *
13
+ * Licensed under the Apache License, Version 2.0 (the "License");
14
+ * you may not use this file except in compliance with the License.
15
+ * You may obtain a copy of the License at
16
+ *
17
+ * http://www.apache.org/licenses/LICENSE-2.0
18
+ *
19
+ * Unless required by applicable law or agreed to in writing, software
20
+ * distributed under the License is distributed on an "AS IS" BASIS,
21
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
+ * See the License for the specific language governing permissions and
23
+ * limitations under the License.
24
+ */
25
// Service identifier used in error codes and component registration.
const AI_TYPE = 'AI';
// Default Vertex AI location when none is supplied by the caller.
const DEFAULT_LOCATION = 'us-central1';
// Default REST endpoint host for the Firebase AI backend.
const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';
// REST API version segment used when building request URLs.
const DEFAULT_API_VERSION = 'v1beta';
// Package version reported in client headers (from the module-level `version`).
const PACKAGE_VERSION = version;
// Client language tag sent with requests.
const LANGUAGE_TAG = 'gl-js';
// Tag appended to client headers when hybrid (on-device) inference is used.
const HYBRID_TAG = 'hybrid';
// Default fetch timeout: 180 seconds, expressed in milliseconds.
const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;
/**
 * Defines the name of the default in-cloud model to use for hybrid inference.
 */
const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.5-flash-lite';
37
+
38
+ /**
39
+ * @license
40
+ * Copyright 2024 Google LLC
41
+ *
42
+ * Licensed under the Apache License, Version 2.0 (the "License");
43
+ * you may not use this file except in compliance with the License.
44
+ * You may obtain a copy of the License at
45
+ *
46
+ * http://www.apache.org/licenses/LICENSE-2.0
47
+ *
48
+ * Unless required by applicable law or agreed to in writing, software
49
+ * distributed under the License is distributed on an "AS IS" BASIS,
50
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
51
+ * See the License for the specific language governing permissions and
52
+ * limitations under the License.
53
+ */
54
/**
 * Error class for the Firebase AI SDK.
 *
 * @public
 */
class AIError extends FirebaseError {
  /**
   * Constructs a new instance of the `AIError` class.
   *
   * @param code - The error code from {@link (AIErrorCode:type)}.
   * @param message - A human-readable message describing the error.
   * @param customErrorData - Optional error data.
   */
  constructor(code, message, customErrorData) {
    // Build the same "<service>: <message> (<service>/<code>)" format that
    // FirebaseError's ErrorFactory would normally produce.
    const serviceName = AI_TYPE;
    const qualifiedCode = `${serviceName}/${code}`;
    const formattedMessage = `${serviceName}: ${message} (${qualifiedCode})`;
    super(code, formattedMessage);

    this.code = code;
    this.customErrorData = customErrorData;

    // FirebaseError captures a stack trace assuming construction via its
    // error factory. Since this constructor bypasses the factory, re-capture
    // the trace here so frames above this constructor are reported.
    // Error.captureStackTrace is a V8-only extension, hence the guard.
    if (Error.captureStackTrace) {
      // Passing AIError as the second argument trims this constructor itself
      // from the top of the captured stack.
      Error.captureStackTrace(this, AIError);
    }

    // Restore the prototype chain so `instanceof AIError` works when the
    // output targets ES5/ES6 (extending built-ins like Error breaks otherwise).
    // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work
    // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget
    // which we can now use since we no longer target ES5.
    Object.setPrototypeOf(this, AIError.prototype);

    // Error is treated as an interface here, so toString is defined manually
    // to return the fully formatted message.
    this.toString = () => formattedMessage;
  }
}
93
+
94
+ /**
95
+ * @license
96
+ * Copyright 2024 Google LLC
97
+ *
98
+ * Licensed under the Apache License, Version 2.0 (the "License");
99
+ * you may not use this file except in compliance with the License.
100
+ * You may obtain a copy of the License at
101
+ *
102
+ * http://www.apache.org/licenses/LICENSE-2.0
103
+ *
104
+ * Unless required by applicable law or agreed to in writing, software
105
+ * distributed under the License is distributed on an "AS IS" BASIS,
106
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
107
+ * See the License for the specific language governing permissions and
108
+ * limitations under the License.
109
+ */
110
/**
 * Possible roles.
 * @public
 */
const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'];

/**
 * Harm categories that would cause prompts or candidates to be blocked.
 * @public
 */
const HarmCategory = {
  HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',
  HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
  HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',
  HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'
};

/**
 * Threshold above which a prompt or candidate will be blocked.
 * @public
 */
const HarmBlockThreshold = {
  /** Content with `NEGLIGIBLE` will be allowed. */
  BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',
  /** Content with `NEGLIGIBLE` and `LOW` will be allowed. */
  BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',
  /** Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed. */
  BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',
  /** All content will be allowed. */
  BLOCK_NONE: 'BLOCK_NONE',
  /**
   * All content will be allowed. Same as `BLOCK_NONE`, except the metadata
   * corresponding to the {@link (HarmCategory:type)} will not be present in
   * the response.
   */
  OFF: 'OFF'
};

/**
 * Method used to compute harm blocking. This property is not supported in the
 * Gemini Developer API ({@link GoogleAIBackend}).
 *
 * @public
 */
const HarmBlockMethod = {
  /** The harm block method uses both probability and severity scores. */
  SEVERITY: 'SEVERITY',
  /** The harm block method uses the probability score. */
  PROBABILITY: 'PROBABILITY'
};
167
/**
 * Probability that a prompt or candidate matches a harm category.
 * @public
 */
const HarmProbability = {
  /** Content has a negligible chance of being unsafe. */
  NEGLIGIBLE: 'NEGLIGIBLE',
  /** Content has a low chance of being unsafe. */
  LOW: 'LOW',
  /** Content has a medium chance of being unsafe. */
  MEDIUM: 'MEDIUM',
  /** Content has a high chance of being unsafe. */
  HIGH: 'HIGH'
};

/**
 * Harm severity levels.
 * @public
 */
const HarmSeverity = {
  /** Negligible level of harm severity. */
  HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',
  /** Low level of harm severity. */
  HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',
  /** Medium level of harm severity. */
  HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',
  /** High level of harm severity. */
  HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',
  /**
   * Harm severity is not supported.
   *
   * @remarks
   * The GoogleAI backend does not support `HarmSeverity`, so this value is
   * used as a fallback.
   */
  HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'
};
218
/**
 * Reason that a prompt was blocked.
 * @public
 */
const BlockReason = {
  /** Content was blocked by safety settings. */
  SAFETY: 'SAFETY',
  /** Content was blocked, but the reason is uncategorized. */
  OTHER: 'OTHER',
  /** Content was blocked because it contained terms from the terminology blocklist. */
  BLOCKLIST: 'BLOCKLIST',
  /** Content was blocked due to prohibited content. */
  PROHIBITED_CONTENT: 'PROHIBITED_CONTENT'
};

/**
 * Reason that a candidate finished.
 * @public
 */
const FinishReason = {
  /** Natural stop point of the model or provided stop sequence. */
  STOP: 'STOP',
  /** The maximum number of tokens as specified in the request was reached. */
  MAX_TOKENS: 'MAX_TOKENS',
  /** The candidate content was flagged for safety reasons. */
  SAFETY: 'SAFETY',
  /** The candidate content was flagged for recitation reasons. */
  RECITATION: 'RECITATION',
  /** Unknown reason. */
  OTHER: 'OTHER',
  /** The candidate content contained forbidden terms. */
  BLOCKLIST: 'BLOCKLIST',
  /** The candidate content potentially contained prohibited content. */
  PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',
  /** The candidate content potentially contained Sensitive Personally Identifiable Information (SPII). */
  SPII: 'SPII',
  /** The function call generated by the model was invalid. */
  MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'
};
282
/**
 * Function-calling behavior modes for the model.
 * @public
 */
const FunctionCallingMode = {
  /**
   * Default model behavior; model decides to predict either a function call
   * or a natural language response.
   */
  AUTO: 'AUTO',
  /**
   * Model is constrained to always predicting a function call only.
   * If `allowed_function_names` is set, the predicted function call will be
   * limited to any one of `allowed_function_names`, else the predicted
   * function call will be any one of the provided `function_declarations`.
   */
  ANY: 'ANY',
  /**
   * Model will not predict any function call. Model behavior is same as when
   * not passing any function declarations.
   */
  NONE: 'NONE'
};

/**
 * Content part modality.
 * @public
 */
const Modality = {
  /** Unspecified modality. */
  MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED',
  /** Plain text. */
  TEXT: 'TEXT',
  /** Image. */
  IMAGE: 'IMAGE',
  /** Video. */
  VIDEO: 'VIDEO',
  /** Audio. */
  AUDIO: 'AUDIO',
  /** Document (for example, PDF). */
  DOCUMENT: 'DOCUMENT'
};
334
/**
 * Generation modalities to be returned in generation responses.
 *
 * @beta
 */
const ResponseModality = {
  /**
   * Text.
   * @beta
   */
  TEXT: 'TEXT',
  /**
   * Image.
   * @beta
   */
  IMAGE: 'IMAGE',
  /**
   * Audio.
   * @beta
   */
  AUDIO: 'AUDIO'
};
356
/**
 * Determines whether inference happens on-device or in-cloud.
 *
 * @remarks
 * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
 * on-device model. If on-device inference is not available, the SDK
 * will fall back to using a cloud-hosted model.
 * <br/>
 * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
 * on-device model. The SDK will not fall back to a cloud-hosted model.
 * If on-device inference is not available, inference methods will throw.
 * <br/>
 * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
 * cloud-hosted model. The SDK will not fall back to an on-device model.
 * <br/>
 * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
 * cloud-hosted model. If not available, the SDK will fall back to an
 * on-device model.
 *
 * @beta
 */
const InferenceMode = {
  PREFER_ON_DEVICE: 'prefer_on_device',
  ONLY_ON_DEVICE: 'only_on_device',
  ONLY_IN_CLOUD: 'only_in_cloud',
  PREFER_IN_CLOUD: 'prefer_in_cloud'
};

/**
 * Indicates whether inference happened on-device or in-cloud.
 *
 * @beta
 */
const InferenceSource = {
  ON_DEVICE: 'on_device',
  IN_CLOUD: 'in_cloud'
};
392
/**
 * Represents the result of the code execution.
 *
 * @public
 */
const Outcome = {
  UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
  OK: 'OUTCOME_OK',
  FAILED: 'OUTCOME_FAILED',
  DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
};

/**
 * The programming language of the code.
 *
 * @public
 */
const Language = {
  UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
  PYTHON: 'PYTHON'
};

/**
 * A preset that controls the model's "thinking" process. Use
 * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
 * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
 *
 * @public
 */
const ThinkingLevel = {
  MINIMAL: 'MINIMAL',
  LOW: 'LOW',
  MEDIUM: 'MEDIUM',
  HIGH: 'HIGH'
};
425
+
426
+ /**
427
+ * @license
428
+ * Copyright 2024 Google LLC
429
+ *
430
+ * Licensed under the Apache License, Version 2.0 (the "License");
431
+ * you may not use this file except in compliance with the License.
432
+ * You may obtain a copy of the License at
433
+ *
434
+ * http://www.apache.org/licenses/LICENSE-2.0
435
+ *
436
+ * Unless required by applicable law or agreed to in writing, software
437
+ * distributed under the License is distributed on an "AS IS" BASIS,
438
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
439
+ * See the License for the specific language governing permissions and
440
+ * limitations under the License.
441
+ */
442
/**
 * The status of a URL retrieval.
 *
 * @public
 */
const URLRetrievalStatus = {
  /** Unspecified retrieval status. */
  URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED',
  /** The URL retrieval was successful. */
  URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',
  /** The URL retrieval failed. */
  URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',
  /** The URL retrieval failed because the content is behind a paywall. */
  URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',
  /** The URL retrieval failed because the content is unsafe. */
  URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'
};

/**
 * The types of responses that can be returned by {@link LiveSession.receive}.
 *
 * @beta
 */
const LiveResponseType = {
  SERVER_CONTENT: 'serverContent',
  TOOL_CALL: 'toolCall',
  TOOL_CALL_CANCELLATION: 'toolCallCancellation',
  GOING_AWAY_NOTICE: 'goingAwayNotice'
};
492
+
493
+ /**
494
+ * @license
495
+ * Copyright 2024 Google LLC
496
+ *
497
+ * Licensed under the Apache License, Version 2.0 (the "License");
498
+ * you may not use this file except in compliance with the License.
499
+ * You may obtain a copy of the License at
500
+ *
501
+ * http://www.apache.org/licenses/LICENSE-2.0
502
+ *
503
+ * Unless required by applicable law or agreed to in writing, software
504
+ * distributed under the License is distributed on an "AS IS" BASIS,
505
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
506
+ * See the License for the specific language governing permissions and
507
+ * limitations under the License.
508
+ */
509
/**
 * Standardized error codes that {@link AIError} can have.
 *
 * @public
 */
const AIErrorCode = {
  /** A generic error occurred. */
  ERROR: 'error',
  /** An error occurred in a request. */
  REQUEST_ERROR: 'request-error',
  /** An error occurred in a response. */
  RESPONSE_ERROR: 'response-error',
  /** An error occurred while performing a fetch. */
  FETCH_ERROR: 'fetch-error',
  /** An error occurred because an operation was attempted on a closed session. */
  SESSION_CLOSED: 'session-closed',
  /** An error associated with a Content object. */
  INVALID_CONTENT: 'invalid-content',
  /** An error due to the Firebase API not being enabled in the Console. */
  API_NOT_ENABLED: 'api-not-enabled',
  /** An error due to invalid Schema input. */
  INVALID_SCHEMA: 'invalid-schema',
  /** An error occurred due to a missing Firebase API key. */
  NO_API_KEY: 'no-api-key',
  /** An error occurred due to a missing Firebase app ID. */
  NO_APP_ID: 'no-app-id',
  /** An error occurred due to a model name not being specified during initialization. */
  NO_MODEL: 'no-model',
  /** An error occurred due to a missing project ID. */
  NO_PROJECT_ID: 'no-project-id',
  /** An error occurred while parsing. */
  PARSE_FAILED: 'parse-failed',
  /** An error occurred due to an attempt to use an unsupported feature. */
  UNSUPPORTED: 'unsupported'
};
544
+
545
+ /**
546
+ * @license
547
+ * Copyright 2024 Google LLC
548
+ *
549
+ * Licensed under the Apache License, Version 2.0 (the "License");
550
+ * you may not use this file except in compliance with the License.
551
+ * You may obtain a copy of the License at
552
+ *
553
+ * http://www.apache.org/licenses/LICENSE-2.0
554
+ *
555
+ * Unless required by applicable law or agreed to in writing, software
556
+ * distributed under the License is distributed on an "AS IS" BASIS,
557
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
558
+ * See the License for the specific language governing permissions and
559
+ * limitations under the License.
560
+ */
561
/**
 * Contains the list of OpenAPI data types
 * as defined by the
 * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
 * @public
 */
const SchemaType = {
  /** String type. */
  STRING: 'string',
  /** Number type. */
  NUMBER: 'number',
  /** Integer type. */
  INTEGER: 'integer',
  /** Boolean type. */
  BOOLEAN: 'boolean',
  /** Array type. */
  ARRAY: 'array',
  /** Object type. */
  OBJECT: 'object'
};
581
+
582
+ /**
583
+ * @license
584
+ * Copyright 2025 Google LLC
585
+ *
586
+ * Licensed under the Apache License, Version 2.0 (the "License");
587
+ * you may not use this file except in compliance with the License.
588
+ * You may obtain a copy of the License at
589
+ *
590
+ * http://www.apache.org/licenses/LICENSE-2.0
591
+ *
592
+ * Unless required by applicable law or agreed to in writing, software
593
+ * distributed under the License is distributed on an "AS IS" BASIS,
594
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
595
+ * See the License for the specific language governing permissions and
596
+ * limitations under the License.
597
+ */
598
+ /**
599
+ * A filter level controlling how aggressively to filter sensitive content.
600
+ *
601
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
602
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
603
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
604
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
605
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
606
+ * for more details.
607
+ *
608
+ * @public
609
+ */
610
+ const ImagenSafetyFilterLevel = {
611
+ /**
612
+ * The most aggressive filtering level; most strict blocking.
613
+ */
614
+ BLOCK_LOW_AND_ABOVE: 'block_low_and_above',
615
+ /**
616
+ * Blocks some sensitive prompts and responses.
617
+ */
618
+ BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',
619
+ /**
620
+ * Blocks few sensitive prompts and responses.
621
+ */
622
+ BLOCK_ONLY_HIGH: 'block_only_high',
623
+ /**
624
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
625
+ *
626
+ * Access to this feature is restricted and may require your case to be reviewed and approved by
627
+ * Cloud support.
628
+ */
629
+ BLOCK_NONE: 'block_none'
630
+ };
631
/**
 * A filter level controlling whether generation of images containing people or faces is allowed.
 *
 * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
 * documentation for more details.
 *
 * @public
 */
const ImagenPersonFilterLevel = {
  /**
   * Disallow generation of images containing people or faces; images of people are filtered out.
   */
  BLOCK_ALL: 'dont_allow',
  /**
   * Allow generation of images containing adults only; images of children are filtered out.
   *
   * Generation of images containing people or faces may require your use case to be
   * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
   * for more details.
   */
  ALLOW_ADULT: 'allow_adult',
  /**
   * Allow generation of images containing people of all ages.
   * (Previous doc comment was a copy-paste of `ALLOW_ADULT`, which contradicted
   * this key's name and its wire value `'allow_all'`.)
   *
   * Generation of images containing people or faces may require your use case to be
   * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
   * for more details.
   */
  ALLOW_ALL: 'allow_all'
};
661
/**
 * Aspect ratios for Imagen images.
 *
 * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
 * {@link ImagenGenerationConfig}.
 *
 * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * for more details and examples of the supported aspect ratios.
 *
 * NOTE(review): the LANDSCAPE_3x4/PORTRAIT_4x3 names look swapped relative to
 * the usual width:height reading of their values ('3:4' is taller than wide).
 * The wire values are part of the public API and are kept as-is — confirm
 * against the Imagen API documentation before renaming anything.
 *
 * @public
 */
const ImagenAspectRatio = {
  /** Square (1:1) aspect ratio. */
  SQUARE: '1:1',
  /** Landscape (3:4) aspect ratio. */
  LANDSCAPE_3x4: '3:4',
  /** Portrait (4:3) aspect ratio. */
  PORTRAIT_4x3: '4:3',
  /** Landscape (16:9) aspect ratio. */
  LANDSCAPE_16x9: '16:9',
  /** Portrait (9:16) aspect ratio. */
  PORTRAIT_9x16: '9:16'
};
694
+
695
+ /**
696
+ * @license
697
+ * Copyright 2024 Google LLC
698
+ *
699
+ * Licensed under the Apache License, Version 2.0 (the "License");
700
+ * you may not use this file except in compliance with the License.
701
+ * You may obtain a copy of the License at
702
+ *
703
+ * http://www.apache.org/licenses/LICENSE-2.0
704
+ *
705
+ * Unless required by applicable law or agreed to in writing, software
706
+ * distributed under the License is distributed on an "AS IS" BASIS,
707
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
708
+ * See the License for the specific language governing permissions and
709
+ * limitations under the License.
710
+ */
711
/**
 * An enum-like object containing constants that represent the supported backends
 * for the Firebase AI SDK.
 * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)
 * the SDK will communicate with.
 *
 * These values are assigned to the `backendType` property within the specific backend
 * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify
 * which service to target.
 *
 * @public
 */
const BackendType = {
  /**
   * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.
   * Use this constant when creating a {@link VertexAIBackend} configuration.
   */
  VERTEX_AI: 'VERTEX_AI',
  /**
   * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).
   * Use this constant when creating a {@link GoogleAIBackend} configuration.
   */
  GOOGLE_AI: 'GOOGLE_AI'
};
735
+
736
+ /**
737
+ * @license
738
+ * Copyright 2025 Google LLC
739
+ *
740
+ * Licensed under the Apache License, Version 2.0 (the "License");
741
+ * you may not use this file except in compliance with the License.
742
+ * You may obtain a copy of the License at
743
+ *
744
+ * http://www.apache.org/licenses/LICENSE-2.0
745
+ *
746
+ * Unless required by applicable law or agreed to in writing, software
747
+ * distributed under the License is distributed on an "AS IS" BASIS,
748
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
749
+ * See the License for the specific language governing permissions and
750
+ * limitations under the License.
751
+ */
752
/**
 * Abstract base class representing the configuration for an AI service backend.
 * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
 * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
 * {@link VertexAIBackend} for the Vertex AI Gemini API.
 *
 * @public
 */
class Backend {
    /**
     * Protected constructor for use by subclasses.
     * @param backendType - The backend type identifier (a {@link BackendType} value).
     */
    constructor(backendType) {
        this.backendType = backendType;
    }
}
769
/**
 * Configuration class for the Gemini Developer API.
 *
 * Use this with {@link AIOptions} when initializing the AI service via
 * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
 *
 * @public
 */
class GoogleAIBackend extends Backend {
    /**
     * Creates a configuration object for the Gemini Developer API backend.
     */
    constructor() {
        super(BackendType.GOOGLE_AI);
    }
    /**
     * Builds the REST path for a model resource under this project.
     * @internal
     */
    _getModelPath(project, model) {
        return ['', DEFAULT_API_VERSION, 'projects', project, model].join('/');
    }
    /**
     * Builds the REST path for a server-side prompt template under this project.
     * @internal
     */
    _getTemplatePath(project, templateId) {
        return ['', DEFAULT_API_VERSION, 'projects', project, 'templates', templateId].join('/');
    }
}
797
/**
 * Configuration class for the Vertex AI Gemini API.
 *
 * Use this with {@link AIOptions} when initializing the AI service via
 * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
 *
 * @public
 */
class VertexAIBackend extends Backend {
    /**
     * Creates a configuration object for the Vertex AI backend.
     *
     * @param location - The region identifier, defaulting to `us-central1`;
     * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    constructor(location = DEFAULT_LOCATION) {
        super(BackendType.VERTEX_AI);
        // Fall back to the default for any falsy value (e.g. an explicit
        // empty string or null), matching the default-parameter behavior.
        this.location = location || DEFAULT_LOCATION;
    }
    /**
     * Builds the REST path for a model resource in this project and location.
     * @internal
     */
    _getModelPath(project, model) {
        return ['', DEFAULT_API_VERSION, 'projects', project, 'locations', this.location, model].join('/');
    }
    /**
     * Builds the REST path for a server-side prompt template in this project and location.
     * @internal
     */
    _getTemplatePath(project, templateId) {
        return ['', DEFAULT_API_VERSION, 'projects', project, 'locations', this.location, 'templates', templateId].join('/');
    }
}
835
+
836
+ /**
837
+ * @license
838
+ * Copyright 2025 Google LLC
839
+ *
840
+ * Licensed under the Apache License, Version 2.0 (the "License");
841
+ * you may not use this file except in compliance with the License.
842
+ * You may obtain a copy of the License at
843
+ *
844
+ * http://www.apache.org/licenses/LICENSE-2.0
845
+ *
846
+ * Unless required by applicable law or agreed to in writing, software
847
+ * distributed under the License is distributed on an "AS IS" BASIS,
848
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
849
+ * See the License for the specific language governing permissions and
850
+ * limitations under the License.
851
+ */
852
/**
 * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}
 * instances by backend type.
 *
 * @param backend - The backend configuration to encode.
 * @returns An identifier of the form `<AI_TYPE>/googleai` or `<AI_TYPE>/vertexai/<location>`.
 * @throws AIError if `backend` is not a recognized backend instance.
 *
 * @internal
 */
function encodeInstanceIdentifier(backend) {
    if (backend instanceof GoogleAIBackend) {
        return `${AI_TYPE}/googleai`;
    }
    else if (backend instanceof VertexAIBackend) {
        return `${AI_TYPE}/vertexai/${backend.location}`;
    }
    else {
        // Use optional chaining so a null/undefined `backend` still surfaces as
        // the intended AIError instead of a TypeError from reading `.backendType`.
        throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(backend?.backendType)}`);
    }
}
869
/**
 * Decodes an instance identifier string into a {@link Backend}.
 *
 * @internal
 */
function decodeInstanceIdentifier(instanceIdentifier) {
    // Identifier shape: '<AI_TYPE>/<backendType>[/<location>]'.
    const [prefix, backendKind, region] = instanceIdentifier.split('/');
    if (prefix !== AI_TYPE) {
        throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${prefix}'`);
    }
    switch (backendKind) {
        case 'vertexai': {
            if (!region) {
                throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'`);
            }
            return new VertexAIBackend(region);
        }
        case 'googleai':
            return new GoogleAIBackend();
        default:
            throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'`);
    }
}
893
+
894
+ /**
895
+ * @license
896
+ * Copyright 2024 Google LLC
897
+ *
898
+ * Licensed under the Apache License, Version 2.0 (the "License");
899
+ * you may not use this file except in compliance with the License.
900
+ * You may obtain a copy of the License at
901
+ *
902
+ * http://www.apache.org/licenses/LICENSE-2.0
903
+ *
904
+ * Unless required by applicable law or agreed to in writing, software
905
+ * distributed under the License is distributed on an "AS IS" BASIS,
906
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
907
+ * See the License for the specific language governing permissions and
908
+ * limitations under the License.
909
+ */
910
/**
 * Internal Firebase component holding the app, the backend configuration, and
 * the optional auth / App Check dependencies for one AI instance.
 */
class AIService {
    constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) {
        this.app = app;
        this.backend = backend;
        this.chromeAdapterFactory = chromeAdapterFactory;
        // Both dependencies are optional: `getImmediate({ optional: true })`
        // yields undefined (rather than throwing) when they are not registered.
        const appCheckInternal = appCheckProvider?.getImmediate({ optional: true });
        const authInternal = authProvider?.getImmediate({ optional: true });
        this.auth = authInternal || null;
        this.appCheck = appCheckInternal || null;
        // Only the Vertex AI backend carries a region; all others use ''.
        this.location = backend instanceof VertexAIBackend ? backend.location : '';
    }
    _delete() {
        return Promise.resolve();
    }
    set options(optionsToSet) {
        this._options = optionsToSet;
    }
    get options() {
        return this._options;
    }
}
936
+
937
+ /**
938
+ * @license
939
+ * Copyright 2025 Google LLC
940
+ *
941
+ * Licensed under the Apache License, Version 2.0 (the "License");
942
+ * you may not use this file except in compliance with the License.
943
+ * You may obtain a copy of the License at
944
+ *
945
+ * http://www.apache.org/licenses/LICENSE-2.0
946
+ *
947
+ * Unless required by applicable law or agreed to in writing, software
948
+ * distributed under the License is distributed on an "AS IS" BASIS,
949
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
950
+ * See the License for the specific language governing permissions and
951
+ * limitations under the License.
952
+ */
953
/**
 * Firebase component factory: decodes the backend from the instance identifier
 * and constructs an AIService wired to the container's optional auth and
 * App Check providers.
 */
function factory(container, { instanceIdentifier }) {
    if (!instanceIdentifier) {
        throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.');
    }
    const backend = decodeInstanceIdentifier(instanceIdentifier);
    // getImmediate for FirebaseApp will always succeed
    const app = container.getProvider('app').getImmediate();
    const authProvider = container.getProvider('auth-internal');
    const appCheckProvider = container.getProvider('app-check-internal');
    return new AIService(app, backend, authProvider, appCheckProvider);
}
964
+
965
+ /**
966
+ * @license
967
+ * Copyright 2025 Google LLC
968
+ *
969
+ * Licensed under the Apache License, Version 2.0 (the "License");
970
+ * you may not use this file except in compliance with the License.
971
+ * You may obtain a copy of the License at
972
+ *
973
+ * http://www.apache.org/licenses/LICENSE-2.0
974
+ *
975
+ * Unless required by applicable law or agreed to in writing, software
976
+ * distributed under the License is distributed on an "AS IS" BASIS,
977
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
978
+ * See the License for the specific language governing permissions and
979
+ * limitations under the License.
980
+ */
981
/**
 * Initializes an {@link ApiSettings} object from an {@link AI} instance.
 *
 * If this is a Server App, the {@link ApiSettings} object's `getAppCheckToken()` will resolve
 * with the `FirebaseServerAppSettings.appCheckToken`, instead of requiring that an App Check
 * instance is initialized.
 *
 * @throws AIError if the Firebase config is missing apiKey, projectId, or appId.
 */
function initApiSettings(ai) {
    const { apiKey, projectId, appId } = ai.app?.options ?? {};
    if (!apiKey) {
        throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`);
    }
    if (!projectId) {
        throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`);
    }
    if (!appId) {
        throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`);
    }
    const apiSettings = {
        apiKey,
        project: projectId,
        appId,
        automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,
        location: ai.location,
        backend: ai.backend
    };
    if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {
        // Server apps carry a pre-minted App Check token in their settings.
        const token = ai.app.settings.appCheckToken;
        apiSettings.getAppCheckToken = () => Promise.resolve({ token });
    }
    else if (ai.appCheck) {
        // Limited-use tokens are single-use; standard tokens may be cached.
        apiSettings.getAppCheckToken = ai.options?.useLimitedUseAppCheckTokens
            ? () => ai.appCheck.getLimitedUseToken()
            : () => ai.appCheck.getToken();
    }
    if (ai.auth) {
        apiSettings.getAuthToken = () => ai.auth.getToken();
    }
    return apiSettings;
}
1025
+
1026
+ /**
1027
+ * @license
1028
+ * Copyright 2025 Google LLC
1029
+ *
1030
+ * Licensed under the Apache License, Version 2.0 (the "License");
1031
+ * you may not use this file except in compliance with the License.
1032
+ * You may obtain a copy of the License at
1033
+ *
1034
+ * http://www.apache.org/licenses/LICENSE-2.0
1035
+ *
1036
+ * Unless required by applicable law or agreed to in writing, software
1037
+ * distributed under the License is distributed on an "AS IS" BASIS,
1038
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1039
+ * See the License for the specific language governing permissions and
1040
+ * limitations under the License.
1041
+ */
1042
/**
 * Base class for Firebase AI model APIs.
 *
 * Instances of this class are associated with a specific Firebase AI {@link Backend}
 * and provide methods for interacting with the configured generative model.
 *
 * @public
 */
class AIModel {
    /**
     * Constructs a new instance of the {@link AIModel} class.
     *
     * This constructor should only be called from subclasses that provide
     * a model API.
     *
     * @param ai - an {@link AI} instance.
     * @param modelName - The name of the model being used. It can be in one of the following formats:
     * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
     * - `models/my-model` (will resolve to `publishers/google/models/my-model`)
     * - `publishers/my-publisher/models/my-model` (fully qualified model name)
     *
     * @throws If the `apiKey` or `projectId` fields are missing in your
     * Firebase config.
     *
     * @internal
     */
    constructor(ai, modelName) {
        this._apiSettings = initApiSettings(ai);
        this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType);
    }
    /**
     * Normalizes the given model name to a fully qualified model resource name
     * for the given backend type.
     *
     * @param modelName - The model name to normalize.
     * @param backendType - Which backend's naming convention to apply.
     * @returns The fully qualified model resource name.
     *
     * @internal
     */
    static normalizeModelName(modelName, backendType) {
        return backendType === BackendType.GOOGLE_AI
            ? AIModel.normalizeGoogleAIModelName(modelName)
            : AIModel.normalizeVertexAIModelName(modelName);
    }
    /**
     * Google AI model resources are always addressed as 'models/<name>'.
     * @internal
     */
    static normalizeGoogleAIModelName(modelName) {
        return `models/${modelName}`;
    }
    /**
     * Vertex AI model resources live under a publisher path; bare names and
     * 'models/...' names are assumed to be Google-published models.
     * @internal
     */
    static normalizeVertexAIModelName(modelName) {
        if (!modelName.includes('/')) {
            // No path given: assume a non-tuned, Google-published model.
            return `publishers/google/models/${modelName}`;
        }
        if (modelName.startsWith('models/')) {
            // Add 'publishers/google' if the user is only passing in 'models/model-name'.
            return `publishers/google/${modelName}`;
        }
        // Any other custom format (e.g. tuned models) must be passed in correctly.
        return modelName;
    }
}
1116
+
1117
+ /**
1118
+ * @license
1119
+ * Copyright 2024 Google LLC
1120
+ *
1121
+ * Licensed under the Apache License, Version 2.0 (the "License");
1122
+ * you may not use this file except in compliance with the License.
1123
+ * You may obtain a copy of the License at
1124
+ *
1125
+ * http://www.apache.org/licenses/LICENSE-2.0
1126
+ *
1127
+ * Unless required by applicable law or agreed to in writing, software
1128
+ * distributed under the License is distributed on an "AS IS" BASIS,
1129
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1130
+ * See the License for the specific language governing permissions and
1131
+ * limitations under the License.
1132
+ */
1133
// Shared logger instance used throughout this module. NOTE(review): the tag is
// '@firebase/vertexai' rather than this package's name — presumably retained
// from an earlier package name; confirm before changing.
const logger = new Logger('@firebase/vertexai');
1134
+
1135
+ /**
1136
+ * @license
1137
+ * Copyright 2025 Google LLC
1138
+ *
1139
+ * Licensed under the Apache License, Version 2.0 (the "License");
1140
+ * you may not use this file except in compliance with the License.
1141
+ * You may obtain a copy of the License at
1142
+ *
1143
+ * http://www.apache.org/licenses/LICENSE-2.0
1144
+ *
1145
+ * Unless required by applicable law or agreed to in writing, software
1146
+ * distributed under the License is distributed on an "AS IS" BASIS,
1147
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1148
+ * See the License for the specific language governing permissions and
1149
+ * limitations under the License.
1150
+ */
1151
// Message attached to the DOMException used when a per-request timeout fires.
const TIMEOUT_EXPIRED_MESSAGE = 'Timeout has expired.';
// DOMException name for abort/timeout errors; matches fetch's 'AbortError'.
const ABORT_ERROR_NAME = 'AbortError';
1153
/**
 * Builds the request URL for a backend call from the request parameters:
 * base URL (overridable per request), resource path, task suffix, and
 * streaming query parameter.
 */
class RequestURL {
    constructor(params) {
        this.params = params;
    }
    toString() {
        // Constructing a URL object validates the base URL (throws if invalid).
        const composed = new URL(this.baseUrl);
        composed.pathname = this.pathname;
        composed.search = this.queryParams.toString();
        return composed.toString();
    }
    get pathname() {
        const { apiSettings, templateId, model, task } = this.params;
        // Server-side prompt templates are addressed by templateId instead of a
        // model name, so the two cases use different path builders.
        const resourcePath = templateId
            ? apiSettings.backend._getTemplatePath(apiSettings.project, templateId)
            : apiSettings.backend._getModelPath(apiSettings.project, model);
        return `${resourcePath}:${task}`;
    }
    get baseUrl() {
        return this.params.singleRequestOptions?.baseUrl ?? `https://${DEFAULT_DOMAIN}`;
    }
    get queryParams() {
        const search = new URLSearchParams();
        if (this.params.stream) {
            search.set('alt', 'sse');
        }
        return search;
    }
}
1185
/**
 * Builds the WebSocket URL for bidirectional (live) generation. The API key is
 * passed as the 'key' query parameter, and the service path depends on the
 * configured backend.
 */
class WebSocketUrl {
    constructor(apiSettings) {
        this.apiSettings = apiSettings;
    }
    toString() {
        const wsUrl = new URL(`wss://${DEFAULT_DOMAIN}`);
        wsUrl.pathname = this.pathname;
        wsUrl.search = new URLSearchParams({ key: this.apiSettings.apiKey }).toString();
        return wsUrl.toString();
    }
    get pathname() {
        const { backend, location } = this.apiSettings;
        return backend.backendType === BackendType.GOOGLE_AI
            ? 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent'
            : `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${location}`;
    }
}
1206
/**
 * Log language and "fire/version" to x-goog-api-client, appending the hybrid
 * tag when a hybrid inference mode is configured.
 */
function getClientHeaders(url) {
    const tags = [`${LANGUAGE_TAG}/${PACKAGE_VERSION}`, `fire/${PACKAGE_VERSION}`];
    const mode = url.params.apiSettings.inferenceMode;
    /**
     * No call would be made if ONLY_ON_DEVICE.
     * ONLY_IN_CLOUD does not indicate an intention to use hybrid.
     */
    if (mode === InferenceMode.PREFER_ON_DEVICE || mode === InferenceMode.PREFER_IN_CLOUD) {
        // The hybrid tag carries no version.
        tags.push(HYBRID_TAG);
    }
    return tags.join(' ');
}
1224
/**
 * Assembles the HTTP headers for a backend request: content type, client tags,
 * API key, and — when available — app ID, App Check token, and auth token.
 */
async function getHeaders(url) {
    const { apiSettings } = url.params;
    const headers = new Headers();
    headers.append('Content-Type', 'application/json');
    headers.append('x-goog-api-client', getClientHeaders(url));
    headers.append('x-goog-api-key', apiSettings.apiKey);
    if (apiSettings.automaticDataCollectionEnabled) {
        headers.append('X-Firebase-Appid', apiSettings.appId);
    }
    if (apiSettings.getAppCheckToken) {
        const appCheckToken = await apiSettings.getAppCheckToken();
        if (appCheckToken) {
            // The token is attached even when it carries an error; the error is
            // only surfaced as a warning.
            headers.append('X-Firebase-AppCheck', appCheckToken.token);
            if (appCheckToken.error) {
                logger.warn(`Unable to obtain a valid App Check token: ${appCheckToken.error.message}`);
            }
        }
    }
    if (apiSettings.getAuthToken) {
        const authToken = await apiSettings.getAuthToken();
        if (authToken) {
            headers.append('Authorization', `Firebase ${authToken.accessToken}`);
        }
    }
    return headers;
}
1249
/**
 * Performs a POST request to the backend with timeout and external-abort
 * support, mapping HTTP error responses to AIError instances.
 *
 * @param requestUrlParams - Parameters used to build the RequestURL and headers;
 *   `singleRequestOptions` may carry a `signal` and a `timeout` (ms).
 * @param body - The request body, passed to `fetch` as-is.
 * @returns The successful `fetch` Response.
 * @throws DOMException (AbortError) if the external signal was already aborted,
 *   AIError (API_NOT_ENABLED / FETCH_ERROR / ERROR) on failed requests.
 */
async function makeRequest(requestUrlParams, body) {
    const url = new RequestURL(requestUrlParams);
    let response;
    const externalSignal = requestUrlParams.singleRequestOptions?.signal;
    // A negative or nullish timeout falls back to the default fetch timeout.
    const timeoutMillis = requestUrlParams.singleRequestOptions?.timeout != null &&
        requestUrlParams.singleRequestOptions.timeout >= 0
        ? requestUrlParams.singleRequestOptions.timeout
        : DEFAULT_FETCH_TIMEOUT_MS;
    const internalAbortController = new AbortController();
    const fetchTimeoutId = setTimeout(() => {
        internalAbortController.abort(new DOMException(TIMEOUT_EXPIRED_MESSAGE, ABORT_ERROR_NAME));
        logger.debug(`Aborting request to ${url} due to timeout (${timeoutMillis}ms)`);
    }, timeoutMillis);
    // Used to abort the fetch if either the user-defined `externalSignal` is aborted, or if the
    // internal signal (triggered by timeouts) is aborted.
    const combinedSignal = AbortSignal.any(externalSignal
        ? [externalSignal, internalAbortController.signal]
        : [internalAbortController.signal]);
    if (externalSignal && externalSignal.aborted) {
        // Already aborted before we even start: clean up the timer and bail out.
        clearTimeout(fetchTimeoutId);
        throw new DOMException(externalSignal.reason ?? 'Aborted externally before fetch', ABORT_ERROR_NAME);
    }
    try {
        const fetchOptions = {
            method: 'POST',
            headers: await getHeaders(url),
            signal: combinedSignal,
            body
        };
        response = await fetch(url.toString(), fetchOptions);
        if (!response.ok) {
            let message = '';
            let errorDetails;
            try {
                // Best-effort parse of the error body; a non-JSON body is ignored.
                const json = await response.json();
                message = json.error.message;
                if (json.error.details) {
                    message += ` ${JSON.stringify(json.error.details)}`;
                    errorDetails = json.error.details;
                }
            }
            catch (e) {
                // ignored
            }
            // A 403 with SERVICE_DISABLED details and an activation link means the
            // Firebase AI API has not been enabled for this project.
            if (response.status === 403 &&
                errorDetails &&
                errorDetails.some((detail) => detail.reason === 'SERVICE_DISABLED') &&
                errorDetails.some((detail) => detail.links?.[0]?.description.includes('Google developers console API activation'))) {
                throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` +
                    `API ('firebasevertexai.googleapis.com') to be enabled in your ` +
                    `Firebase project. Enable this API by visiting the Firebase Console ` +
                    `at https://console.firebase.google.com/project/${url.params.apiSettings.project}/ailogic/ ` +
                    `and clicking "Get started". If you enabled this API recently, ` +
                    `wait a few minutes for the action to propagate to our systems and ` +
                    `then retry.`, {
                    status: response.status,
                    statusText: response.statusText,
                    errorDetails
                });
            }
            throw new AIError(AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, {
                status: response.status,
                statusText: response.statusText,
                errorDetails
            });
        }
    }
    catch (e) {
        let err = e;
        // Wrap unknown errors in an AIError, but pass through our own fetch/API
        // errors and abort errors unchanged (preserving the original stack).
        if (e.code !== AIErrorCode.FETCH_ERROR &&
            e.code !== AIErrorCode.API_NOT_ENABLED &&
            e instanceof Error &&
            e.name !== ABORT_ERROR_NAME) {
            err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`);
            err.stack = e.stack;
        }
        throw err;
    }
    finally {
        // When doing streaming requests, this will clear the timeout once the stream begins.
        // If a timeout is 3000ms, and the stream starts after 300ms and ends after 5000ms, the
        // timeout will be cleared after 300ms, so it won't abort the request.
        clearTimeout(fetchTimeoutId);
    }
    return response;
}
1335
+
1336
+ /**
1337
+ * @license
1338
+ * Copyright 2024 Google LLC
1339
+ *
1340
+ * Licensed under the Apache License, Version 2.0 (the "License");
1341
+ * you may not use this file except in compliance with the License.
1342
+ * You may obtain a copy of the License at
1343
+ *
1344
+ * http://www.apache.org/licenses/LICENSE-2.0
1345
+ *
1346
+ * Unless required by applicable law or agreed to in writing, software
1347
+ * distributed under the License is distributed on an "AS IS" BASIS,
1348
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1349
+ * See the License for the specific language governing permissions and
1350
+ * limitations under the License.
1351
+ */
1352
/**
 * Check that at least one candidate exists and does not have a bad
 * finish reason. Warns if multiple candidates exist; throws an AIError
 * if the first candidate had a blocking finish reason.
 */
function hasValidCandidates(response) {
    const candidates = response.candidates;
    if (!candidates || candidates.length === 0) {
        return false;
    }
    if (candidates.length > 1) {
        logger.warn(`This response had ${candidates.length} ` +
            `candidates. Returning text from the first candidate only. ` +
            `Access response.candidates directly to use the other candidates.`);
    }
    if (hadBadFinishReason(candidates[0])) {
        throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, {
            response
        });
    }
    return true;
}
1374
/**
 * Creates an EnhancedGenerateContentResponse object that has helper functions and
 * other modifications that improve usability.
 *
 * @param response - The raw GenerateContentResponse to enhance (mutated in place).
 * @param inferenceSource - Where inference ran; defaults to IN_CLOUD.
 * @returns The same response object with helper methods and `inferenceSource` attached.
 */
function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) {
    /**
     * The Vertex AI backend omits default values.
     * This causes the `index` property to be omitted from the first candidate in the
     * response, since it has index 0, and 0 is a default value.
     * See: https://github.com/firebase/firebase-js-sdk/issues/8566
     */
    // Guard with `?.[0]`: when `candidates` is an empty array, indexing [0]
    // yields undefined and the previous `hasOwnProperty` call threw a TypeError.
    if (response.candidates?.[0] && !Object.hasOwn(response.candidates[0], 'index')) {
        response.candidates[0].index = 0;
    }
    const responseWithHelpers = addHelpers(response);
    responseWithHelpers.inferenceSource = inferenceSource;
    return responseWithHelpers;
}
1392
/**
 * Adds convenience helper methods to a response object, including stream
 * chunks (as long as each chunk is a complete GenerateContentResponse JSON).
 *
 * Each helper follows the same pattern: extract from a valid candidate,
 * throw an AIError when the prompt was blocked (`promptFeedback` present
 * with no usable candidate), and otherwise return an empty value.
 */
function addHelpers(response) {
    // text(): concatenation of all non-thought text parts of the first candidate.
    response.text = () => {
        if (hasValidCandidates(response)) {
            return getText(response, part => !part.thought);
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return '';
    };
    // thoughtSummary(): concatenation of thought parts; undefined when there are none.
    response.thoughtSummary = () => {
        if (hasValidCandidates(response)) {
            const result = getText(response, part => !!part.thought);
            return result === '' ? undefined : result;
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return undefined;
    };
    // inlineDataParts(): parts of the first candidate carrying inline binary data.
    response.inlineDataParts = () => {
        if (hasValidCandidates(response)) {
            return getInlineDataParts(response);
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Data not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return undefined;
    };
    // functionCalls(): all FunctionCall objects from the first candidate's parts.
    response.functionCalls = () => {
        if (hasValidCandidates(response)) {
            return getFunctionCalls(response);
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return undefined;
    };
    return response;
}
1444
/**
 * Returns all text from the first candidate's parts, filtering by whether
 * `partFilter()` returns true.
 *
 * @param response - The `GenerateContentResponse` from which to extract text.
 * @param partFilter - Only return `Part`s for which this returns true
 * @returns The concatenated text, or '' when there is nothing to extract.
 */
function getText(response, partFilter) {
    // Use `?.` after `[0]` as well: the previous chain threw a TypeError when
    // `candidates` was present but empty (candidates[0] === undefined).
    const parts = response.candidates?.[0]?.content?.parts;
    if (!parts) {
        return '';
    }
    return parts
        .filter(part => part.text && partFilter(part))
        .map(part => part.text)
        .join('');
}
1467
/**
 * Returns every {@link FunctionCall} associated with first candidate,
 * or undefined when there are none (or no response).
 */
function getFunctionCalls(response) {
    if (!response) {
        return undefined;
    }
    const calls = [];
    const parts = response.candidates?.[0].content?.parts;
    if (parts) {
        for (const part of parts) {
            if (part.functionCall) {
                calls.push(part.functionCall);
            }
        }
    }
    return calls.length > 0 ? calls : undefined;
}
1489
/**
 * Returns every {@link InlineDataPart} in the first candidate if present,
 * or undefined when there are none.
 *
 * @internal
 */
function getInlineDataParts(response) {
    const inlineParts = [];
    const parts = response.candidates?.[0].content?.parts;
    if (parts) {
        for (const part of parts) {
            if (part.inlineData) {
                inlineParts.push(part);
            }
        }
    }
    return inlineParts.length > 0 ? inlineParts : undefined;
}
1510
// Finish reasons that indicate the candidate's content was blocked.
const badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];
/** Returns true when the candidate ended with a blocking finish reason. */
function hadBadFinishReason(candidate) {
    return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason);
}
1515
/**
 * Builds a human-readable description of why a response or candidate was
 * blocked; returns '' when nothing was blocked.
 */
function formatBlockErrorMessage(response) {
    const { candidates, promptFeedback } = response;
    // Case 1: the whole prompt was blocked (no candidates, feedback present).
    if ((!candidates || candidates.length === 0) && promptFeedback) {
        let message = 'Response was blocked';
        if (promptFeedback?.blockReason) {
            message += ` due to ${promptFeedback.blockReason}`;
        }
        if (promptFeedback?.blockReasonMessage) {
            message += `: ${promptFeedback.blockReasonMessage}`;
        }
        return message;
    }
    // Case 2: the first candidate itself was blocked by its finish reason.
    if (candidates?.[0]) {
        const firstCandidate = candidates[0];
        if (hadBadFinishReason(firstCandidate)) {
            let message = `Candidate was blocked due to ${firstCandidate.finishReason}`;
            if (firstCandidate.finishMessage) {
                message += `: ${firstCandidate.finishMessage}`;
            }
            return message;
        }
    }
    return '';
}
1538
/**
 * Convert a generic successful fetch response body to an Imagen response object
 * that can be returned to the user. This converts the REST APIs response format to our
 * APIs representation of a response.
 *
 * @internal
 */
async function handlePredictResponse(response) {
    const responseJson = await response.json();
    const images = [];
    let filteredReason;
    // The backend should always send a non-empty array of predictions if the response was successful.
    if (!responseJson.predictions || responseJson.predictions?.length === 0) {
        throw new AIError(AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.');
    }
    for (const prediction of responseJson.predictions) {
        if (prediction.raiFilteredReason) {
            filteredReason = prediction.raiFilteredReason;
        }
        else if (prediction.mimeType && prediction.bytesBase64Encoded) {
            images.push({
                mimeType: prediction.mimeType,
                bytesBase64Encoded: prediction.bytesBase64Encoded
            });
        }
        else if (prediction.mimeType && prediction.gcsUri) {
            images.push({
                mimeType: prediction.mimeType,
                gcsURI: prediction.gcsUri
            });
        }
        else if (prediction.safetyAttributes) {
            // Safety-attribute-only predictions carry no image payload; skip them.
        }
        else {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`);
        }
    }
    return { images, filteredReason };
}
1576
+
1577
+ /**
1578
+ * @license
1579
+ * Copyright 2025 Google LLC
1580
+ *
1581
+ * Licensed under the Apache License, Version 2.0 (the "License");
1582
+ * you may not use this file except in compliance with the License.
1583
+ * You may obtain a copy of the License at
1584
+ *
1585
+ * http://www.apache.org/licenses/LICENSE-2.0
1586
+ *
1587
+ * Unless required by applicable law or agreed to in writing, software
1588
+ * distributed under the License is distributed on an "AS IS" BASIS,
1589
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1590
+ * See the License for the specific language governing permissions and
1591
+ * limitations under the License.
1592
+ */
1593
+ /**
1594
+ * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).
1595
+ * The public API prioritizes the format used by the Vertex AI Gemini API.
1596
+ * We avoid having two sets of types by translating requests and responses between the two API formats.
1597
+ * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
1598
+ * with minimal code changes.
1599
+ *
1600
+ * In here are functions that map requests and responses between the two API formats.
1601
+ * Requests in the Vertex AI format are mapped to the Google AI format before being sent.
1602
+ * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
1603
+ */
1604
/**
 * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
 *
 * @param generateContentRequest The {@link GenerateContentRequest} to map.
 * @returns A {@link GenerateContentResponse} that conforms to the Google AI format.
 *
 * @throws If the request contains properties that are unsupported by Google AI.
 *
 * @internal
 */
function mapGenerateContentRequest(generateContentRequest) {
    generateContentRequest.safetySettings?.forEach(safetySetting => {
        if (safetySetting.method) {
            // SafetySetting.method is a Vertex AI-only property.
            // (Fixed duplicated word "the the" in the error message.)
            throw new AIError(AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the Gemini Developer API. Please remove this property.');
        }
    });
    if (generateContentRequest.generationConfig?.topK) {
        // The Gemini Developer API only accepts integer topK values; round and warn.
        const roundedTopK = Math.round(generateContentRequest.generationConfig.topK);
        if (roundedTopK !== generateContentRequest.generationConfig.topK) {
            logger.warn('topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.');
            generateContentRequest.generationConfig.topK = roundedTopK;
        }
    }
    return generateContentRequest;
}
1629
/**
 * Maps a {@link GenerateContentResponse} from Google AI to the format of the
 * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.
 *
 * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.
 * @returns A {@link GenerateContentResponse} that conforms to the public API's format.
 *
 * @internal
 */
function mapGenerateContentResponse(googleAIResponse) {
    const generateContentResponse = {
        candidates: googleAIResponse.candidates
            ? mapGenerateContentCandidates(googleAIResponse.candidates)
            : undefined,
        // BUG FIX: this was previously assigned to a `prompt` property, which
        // nothing reads — consumers (e.g. aggregateResponses and the blocked-
        // response message builder) read `promptFeedback`, so block reasons
        // were silently dropped for Google AI responses.
        promptFeedback: googleAIResponse.promptFeedback
            ? mapPromptFeedback(googleAIResponse.promptFeedback)
            : undefined,
        usageMetadata: googleAIResponse.usageMetadata
    };
    return generateContentResponse;
}
1650
/**
 * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.
 *
 * Google AI's countTokens endpoint expects the payload nested inside a
 * `generateContentRequest` object that also carries the model name.
 *
 * @param countTokensRequest The {@link CountTokensRequest} to map.
 * @param model The model to count tokens with.
 * @returns A {@link CountTokensRequest} that conforms to the Google AI format.
 *
 * @internal
 */
function mapCountTokensRequest(countTokensRequest, model) {
    return {
        generateContentRequest: {
            model,
            ...countTokensRequest
        }
    };
}
1668
/**
 * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms
 * to the Vertex AI API format.
 *
 * @param candidates The {@link GoogleAIGenerateContentCandidate} to map.
 * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format.
 *
 * @throws If any {@link Part} in the candidates has a `videoMetadata` property.
 *
 * @internal
 */
function mapGenerateContentCandidates(candidates) {
    const mappedCandidates = [];
    // (Removed a tautological `if (mappedCandidates)` guard on the freshly
    // created array literal — it was always true.)
    candidates.forEach(candidate => {
        // Map citationSources to citations.
        let citationMetadata;
        if (candidate.citationMetadata) {
            citationMetadata = {
                citations: candidate.citationMetadata.citationSources
            };
        }
        // Assign missing candidate SafetyRatings properties to their defaults if undefined.
        // BUG FIX: declared per-candidate (previously hoisted outside the loop),
        // so a candidate without safetyRatings no longer inherits the previous
        // candidate's mapped ratings.
        let mappedSafetyRatings;
        if (candidate.safetyRatings) {
            mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => ({
                ...safetyRating,
                severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,
                probabilityScore: safetyRating.probabilityScore ?? 0,
                severityScore: safetyRating.severityScore ?? 0
            }));
        }
        // videoMetadata is not supported.
        // Throw early since developers may send a long video as input and only expect to pay
        // for inference on a small portion of the video.
        if (candidate.content?.parts?.some(part => part?.videoMetadata)) {
            throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.');
        }
        mappedCandidates.push({
            index: candidate.index,
            content: candidate.content,
            finishReason: candidate.finishReason,
            finishMessage: candidate.finishMessage,
            safetyRatings: mappedSafetyRatings,
            citationMetadata,
            groundingMetadata: candidate.groundingMetadata,
            urlContextMetadata: candidate.urlContextMetadata
        });
    });
    return mappedCandidates;
}
1723
/**
 * Maps a Google AI prompt feedback object to the Vertex AI shape, filling in
 * defaults for SafetyRating properties Google AI may omit.
 *
 * @internal
 */
function mapPromptFeedback(promptFeedback) {
    // Assign missing SafetyRating properties to their defaults if undefined.
    const safetyRatings = promptFeedback.safetyRatings.map(rating => ({
        category: rating.category,
        probability: rating.probability,
        severity: rating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,
        probabilityScore: rating.probabilityScore ?? 0,
        severityScore: rating.severityScore ?? 0,
        blocked: rating.blocked
    }));
    return {
        blockReason: promptFeedback.blockReason,
        safetyRatings,
        blockReasonMessage: promptFeedback.blockReasonMessage
    };
}
1743
+
1744
+ /**
1745
+ * @license
1746
+ * Copyright 2024 Google LLC
1747
+ *
1748
+ * Licensed under the Apache License, Version 2.0 (the "License");
1749
+ * you may not use this file except in compliance with the License.
1750
+ * You may obtain a copy of the License at
1751
+ *
1752
+ * http://www.apache.org/licenses/LICENSE-2.0
1753
+ *
1754
+ * Unless required by applicable law or agreed to in writing, software
1755
+ * distributed under the License is distributed on an "AS IS" BASIS,
1756
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1757
+ * See the License for the specific language governing permissions and
1758
+ * limitations under the License.
1759
+ */
1760
// Matches one complete SSE event at the start of the buffer: a "data: " prefix,
// the JSON payload (captured in group 1), and a blank-line terminator in any of
// the newline conventions (\n\n, \r\r, or \r\n\r\n).
const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;
1761
/**
 * Process a response.body stream from the backend and return an
 * iterator that provides one complete GenerateContentResponse at a time
 * and a promise that resolves with a single aggregated
 * GenerateContentResponse.
 *
 * @param response - Response from a fetch call
 */
async function processStream(response, apiSettings, inferenceSource) {
    const decoded = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true }));
    // We split the stream so the user can iterate over partial results (userStream)
    // while we aggregate the full result for history/final response (internalStream).
    const [userStream, internalStream] = getResponseStream(decoded).tee();
    const { response: aggregatedResponse, firstValue } = await processStreamInternal(internalStream, apiSettings, inferenceSource);
    return {
        stream: generateResponseSequence(userStream, apiSettings, inferenceSource),
        response: aggregatedResponse,
        firstValue
    };
}
1782
/**
 * Consumes streams teed from the input stream for internal needs.
 * The streams need to be teed because each stream can only be consumed
 * by one reader.
 *
 * "streamForPeek"
 * This tee is used to peek at the first value for relevant information
 * that we need to evaluate before returning the stream handle to the
 * client. For example, we need to check if the response is a function
 * call that may need to be handled by automatic function calling before
 * returning a response to the client.
 *
 * "streamForAggregation"
 * We iterate through this tee independently from the user and aggregate
 * it into a single response when the stream is complete. We need this
 * aggregate object to add to chat history when using ChatSession. It's
 * also provided to the user if they want it.
 */
async function processStreamInternal(stream, apiSettings, inferenceSource) {
    const [streamForPeek, streamForAggregation] = stream.tee();
    // Read only the first chunk from the peek tee; aggregation consumes its own tee.
    const { value: firstValue } = await streamForPeek.getReader().read();
    return {
        firstValue,
        response: getResponsePromise(streamForAggregation, apiSettings, inferenceSource)
    };
}
1809
/**
 * Drains the given stream of responses, then aggregates all chunks into a single
 * enhanced GenerateContentResponse (mapping Google AI responses to the Vertex
 * format when needed).
 */
async function getResponsePromise(stream, apiSettings, inferenceSource) {
    const collected = [];
    const reader = stream.getReader();
    for (;;) {
        const { done, value } = await reader.read();
        if (done) {
            break;
        }
        collected.push(value);
    }
    let generateContentResponse = aggregateResponses(collected);
    if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
        // Google AI responses must be translated back to the Vertex AI shape.
        generateContentResponse = mapGenerateContentResponse(generateContentResponse);
    }
    return createEnhancedContentResponse(generateContentResponse, inferenceSource);
}
1824
/**
 * Yields enhanced responses for each chunk of the stream, skipping chunks that
 * carry no usable candidate data (no parts, finish reason, citations, or URL
 * context metadata).
 */
async function* generateResponseSequence(stream, apiSettings, inferenceSource) {
    const reader = stream.getReader();
    for (;;) {
        const { value, done } = await reader.read();
        if (done) {
            break;
        }
        const mapped = apiSettings.backend.backendType === BackendType.GOOGLE_AI
            ? mapGenerateContentResponse(value)
            : value;
        const enhancedResponse = createEnhancedContentResponse(mapped, inferenceSource);
        const firstCandidate = enhancedResponse.candidates?.[0];
        // Only surface chunks that contain something the caller can act on.
        const hasUsefulData = firstCandidate?.content?.parts ||
            firstCandidate?.finishReason ||
            firstCandidate?.citationMetadata ||
            firstCandidate?.urlContextMetadata;
        if (hasUsefulData) {
            yield enhancedResponse;
        }
    }
}
1848
/**
 * Reads a raw string stream, buffers incomplete chunks, and yields parsed JSON objects.
 *
 * Errors the output stream with PARSE_FAILED if the input ends with unparsed
 * text or if a matched SSE payload is not valid JSON.
 */
function getResponseStream(inputStream) {
    const reader = inputStream.getReader();
    const stream = new ReadableStream({
        start(controller) {
            let currentText = '';
            return pump();
            function pump() {
                return reader.read().then(({ value, done }) => {
                    if (done) {
                        // Leftover buffered text means the stream ended mid-event.
                        if (currentText.trim()) {
                            controller.error(new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream'));
                            return;
                        }
                        controller.close();
                        return;
                    }
                    currentText += value;
                    // SSE events may span chunk boundaries, so we buffer until we match
                    // the full "data: {json}\n\n" pattern.
                    let match = currentText.match(responseLineRE);
                    let parsedResponse;
                    while (match) {
                        try {
                            parsedResponse = JSON.parse(match[1]);
                        }
                        catch (e) {
                            // BUG FIX: error message previously had an unbalanced quote
                            // (missing the closing `"` after the payload).
                            controller.error(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}"`));
                            return;
                        }
                        controller.enqueue(parsedResponse);
                        // Drop the consumed event and look for another complete one.
                        currentText = currentText.substring(match[0].length);
                        match = currentText.match(responseLineRE);
                    }
                    return pump();
                });
            }
        }
    });
    return stream;
}
1891
/**
 * Aggregates an array of `GenerateContentResponse`s into a single
 * GenerateContentResponse.
 *
 * Candidate metadata is overwritten by later chunks; content parts are
 * concatenated across chunks (empty text parts are dropped).
 */
function aggregateResponses(responses) {
    const lastResponse = responses[responses.length - 1];
    const aggregated = {
        promptFeedback: lastResponse?.promptFeedback
    };
    for (const response of responses) {
        for (const candidate of response.candidates ?? []) {
            // Use 0 if index is undefined (protobuf default value omission).
            const i = candidate.index || 0;
            aggregated.candidates ??= [];
            aggregated.candidates[i] ??= { index: candidate.index };
            const target = aggregated.candidates[i];
            // Overwrite with the latest metadata
            target.citationMetadata = candidate.citationMetadata;
            target.finishReason = candidate.finishReason;
            target.finishMessage = candidate.finishMessage;
            target.safetyRatings = candidate.safetyRatings;
            target.groundingMetadata = candidate.groundingMetadata;
            // The urlContextMetadata object is defined in the first chunk of the response stream.
            // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to
            // make sure that we don't overwrite the first value urlContextMetadata object with undefined.
            // FIXME: What happens if we receive a second, valid urlContextMetadata object?
            const urlContextMetadata = candidate.urlContextMetadata;
            if (typeof urlContextMetadata === 'object' &&
                urlContextMetadata !== null &&
                Object.keys(urlContextMetadata).length > 0) {
                target.urlContextMetadata = urlContextMetadata;
            }
            const content = candidate.content;
            if (!content?.parts) {
                continue;
            }
            target.content ??= { role: content.role || 'user', parts: [] };
            for (const part of content.parts) {
                // The backend can send empty text parts. If these are sent back
                // (e.g. in chat history), the backend will respond with an error.
                // To prevent this, ignore empty text parts.
                if (part.text === '') {
                    continue;
                }
                const copiedPart = { ...part };
                if (Object.keys(copiedPart).length > 0) {
                    target.content.parts.push(copiedPart);
                }
            }
        }
    }
    return aggregated;
}
1962
+
1963
+ /**
1964
+ * @license
1965
+ * Copyright 2025 Google LLC
1966
+ *
1967
+ * Licensed under the Apache License, Version 2.0 (the "License");
1968
+ * you may not use this file except in compliance with the License.
1969
+ * You may obtain a copy of the License at
1970
+ *
1971
+ * http://www.apache.org/licenses/LICENSE-2.0
1972
+ *
1973
+ * Unless required by applicable law or agreed to in writing, software
1974
+ * distributed under the License is distributed on an "AS IS" BASIS,
1975
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1976
+ * See the License for the specific language governing permissions and
1977
+ * limitations under the License.
1978
+ */
1979
// AIError codes that, when thrown by the in-cloud call, trigger a fallback to
// on-device inference (used by callCloudOrDevice in PREFER_IN_CLOUD mode).
const errorsCausingFallback = [
    // most network errors
    AIErrorCode.FETCH_ERROR,
    // fallback code for all other errors in makeRequest
    AIErrorCode.ERROR,
    // error due to API not being enabled in project
    AIErrorCode.API_NOT_ENABLED
];
1987
/**
 * Dispatches a request to the appropriate backend (on-device or in-cloud)
 * based on the inference mode.
 *
 * @param request - The request to be sent.
 * @param chromeAdapter - The on-device model adapter.
 * @param onDeviceCall - The function to call for on-device inference.
 * @param inCloudCall - The function to call for in-cloud inference.
 * @returns The response from the backend, paired with the inference source used.
 * @throws AIError if ONLY_ON_DEVICE is requested but no on-device model is
 *   available, or if the adapter reports an unknown inference mode.
 */
async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
    // No adapter means there is no on-device model; always go to the cloud.
    if (!chromeAdapter) {
        return {
            response: await inCloudCall(),
            inferenceSource: InferenceSource.IN_CLOUD
        };
    }
    switch (chromeAdapter.mode) {
        case InferenceMode.ONLY_ON_DEVICE:
            if (await chromeAdapter.isAvailable(request)) {
                return {
                    response: await onDeviceCall(),
                    inferenceSource: InferenceSource.ON_DEVICE
                };
            }
            throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
        case InferenceMode.ONLY_IN_CLOUD:
            return {
                response: await inCloudCall(),
                inferenceSource: InferenceSource.IN_CLOUD
            };
        case InferenceMode.PREFER_IN_CLOUD:
            try {
                return {
                    response: await inCloudCall(),
                    inferenceSource: InferenceSource.IN_CLOUD
                };
            }
            catch (e) {
                // Only fall back to on-device for error codes that indicate the
                // cloud call itself failed (see errorsCausingFallback).
                if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
                    return {
                        response: await onDeviceCall(),
                        inferenceSource: InferenceSource.ON_DEVICE
                    };
                }
                throw e;
            }
        case InferenceMode.PREFER_ON_DEVICE:
            if (await chromeAdapter.isAvailable(request)) {
                return {
                    response: await onDeviceCall(),
                    inferenceSource: InferenceSource.ON_DEVICE
                };
            }
            return {
                response: await inCloudCall(),
                inferenceSource: InferenceSource.IN_CLOUD
            };
        default:
            // Typo fix: message previously read "infererence".
            throw new AIError(AIErrorCode.ERROR, `Unexpected inference mode: ${chromeAdapter.mode}`);
    }
}
2049
+
2050
+ /**
2051
+ * @license
2052
+ * Copyright 2024 Google LLC
2053
+ *
2054
+ * Licensed under the Apache License, Version 2.0 (the "License");
2055
+ * you may not use this file except in compliance with the License.
2056
+ * You may obtain a copy of the License at
2057
+ *
2058
+ * http://www.apache.org/licenses/LICENSE-2.0
2059
+ *
2060
+ * Unless required by applicable law or agreed to in writing, software
2061
+ * distributed under the License is distributed on an "AS IS" BASIS,
2062
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2063
+ * See the License for the specific language governing permissions and
2064
+ * limitations under the License.
2065
+ */
2066
/**
 * Issues a streaming generateContent request to the cloud backend, translating
 * the payload to the Google AI format first when targeting the Gemini Developer API.
 */
async function generateContentStreamOnCloud(apiSettings, model, params, singleRequestOptions) {
    const requestParams = apiSettings.backend.backendType === BackendType.GOOGLE_AI
        ? mapGenerateContentRequest(params)
        : params;
    return makeRequest({
        task: "streamGenerateContent" /* Task.STREAM_GENERATE_CONTENT */,
        model,
        apiSettings,
        stream: true,
        singleRequestOptions
    }, JSON.stringify(requestParams));
}
2078
/**
 * Routes a streaming generateContent request to the on-device or in-cloud
 * backend, then wraps the raw response as a processed stream.
 */
async function generateContentStream(apiSettings, model, params, chromeAdapter, singleRequestOptions) {
    const { response, inferenceSource } = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, singleRequestOptions));
    return processStream(response, apiSettings, inferenceSource);
}
2082
/**
 * Issues a non-streaming generateContent request to the cloud backend,
 * translating the payload for the Gemini Developer API when necessary.
 */
async function generateContentOnCloud(apiSettings, model, params, singleRequestOptions) {
    const requestParams = apiSettings.backend.backendType === BackendType.GOOGLE_AI
        ? mapGenerateContentRequest(params)
        : params;
    return makeRequest({
        model,
        task: "generateContent" /* Task.GENERATE_CONTENT */,
        apiSettings,
        stream: false,
        singleRequestOptions
    }, JSON.stringify(requestParams));
}
2094
/**
 * Executes a server-side prompt template (non-streaming) and wraps the parsed
 * result as an enhanced response for the caller.
 */
async function templateGenerateContent(apiSettings, templateId, templateParams, singleRequestOptions) {
    const rawResponse = await makeRequest({
        task: "templateGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_GENERATE_CONTENT */,
        templateId,
        apiSettings,
        stream: false,
        singleRequestOptions
    }, JSON.stringify(templateParams));
    const parsed = await processGenerateContentResponse(rawResponse, apiSettings);
    return {
        response: createEnhancedContentResponse(parsed)
    };
}
2108
/**
 * Executes a server-side prompt template in streaming mode and returns the
 * processed stream.
 */
async function templateGenerateContentStream(apiSettings, templateId, templateParams, singleRequestOptions) {
    const rawResponse = await makeRequest({
        task: "templateStreamGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_STREAM_GENERATE_CONTENT */,
        templateId,
        apiSettings,
        stream: true,
        singleRequestOptions
    }, JSON.stringify(templateParams));
    return processStream(rawResponse, apiSettings);
}
2118
/**
 * Routes a non-streaming generateContent request to the on-device or in-cloud
 * backend, then parses and wraps the result as an enhanced response.
 */
async function generateContent(apiSettings, model, params, chromeAdapter, singleRequestOptions) {
    const { response, inferenceSource } = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, singleRequestOptions));
    const parsed = await processGenerateContentResponse(response, apiSettings);
    return {
        response: createEnhancedContentResponse(parsed, inferenceSource)
    };
}
2126
/**
 * Parses a fetch response body as JSON, translating Google AI responses to the
 * Vertex AI format before returning them.
 */
async function processGenerateContentResponse(response, apiSettings) {
    const responseJson = await response.json();
    return apiSettings.backend.backendType === BackendType.GOOGLE_AI
        ? mapGenerateContentResponse(responseJson)
        : responseJson;
}
2135
+
2136
+ /**
2137
+ * @license
2138
+ * Copyright 2024 Google LLC
2139
+ *
2140
+ * Licensed under the Apache License, Version 2.0 (the "License");
2141
+ * you may not use this file except in compliance with the License.
2142
+ * You may obtain a copy of the License at
2143
+ *
2144
+ * http://www.apache.org/licenses/LICENSE-2.0
2145
+ *
2146
+ * Unless required by applicable law or agreed to in writing, software
2147
+ * distributed under the License is distributed on an "AS IS" BASIS,
2148
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2149
+ * See the License for the specific language governing permissions and
2150
+ * limitations under the License.
2151
+ */
2152
/**
 * Normalizes a system instruction (string, Part, or Content) into a Content
 * object with role 'system'. Returns undefined for null/undefined input or
 * input that matches none of the recognized shapes.
 */
function formatSystemInstruction(input) {
    // null or undefined
    if (input == null) {
        return undefined;
    }
    if (typeof input === 'string') {
        // A bare string becomes a single text part.
        return { role: 'system', parts: [{ text: input }] };
    }
    if (input.text) {
        // A single Part is wrapped in a Content object.
        return { role: 'system', parts: [input] };
    }
    if (input.parts) {
        // Already a Content object; default the role to 'system' when missing.
        return input.role ? input : { role: 'system', parts: input.parts };
    }
}
2172
/**
 * Normalizes a request (a string, or an iterable of strings and Parts) into an
 * array of Parts and assigns the appropriate role via
 * assignRoleToPartsAndValidateSendMessageRequest.
 */
function formatNewContent(request) {
    const newParts = typeof request === 'string'
        ? [{ text: request }]
        : Array.from(request, partOrString => typeof partOrString === 'string' ? { text: partOrString } : partOrString);
    return assignRoleToPartsAndValidateSendMessageRequest(newParts);
}
2189
/**
 * When multiple Part types (i.e. FunctionResponsePart and TextPart) are
 * passed in a single Part array, we may need to assign different roles to each
 * part. Currently only FunctionResponsePart requires a role other than 'user'.
 * @private
 * @param parts Array of parts to pass to the model
 * @returns Array of content items
 */
function assignRoleToPartsAndValidateSendMessageRequest(parts) {
    const userContent = { role: 'user', parts: [] };
    const functionContent = { role: 'function', parts: [] };
    for (const part of parts) {
        // FunctionResponse parts belong in a 'function' turn; everything else is 'user'.
        const bucket = 'functionResponse' in part ? functionContent : userContent;
        bucket.parts.push(part);
    }
    const hasUserContent = userContent.parts.length > 0;
    const hasFunctionContent = functionContent.parts.length > 0;
    if (hasUserContent && hasFunctionContent) {
        throw new AIError(AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.');
    }
    if (!hasUserContent && !hasFunctionContent) {
        throw new AIError(AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.');
    }
    return hasUserContent ? userContent : functionContent;
}
2223
/**
 * Normalizes generateContent input into a full request object. A request that
 * already has `contents` is used as-is (and returned by reference); a string or
 * array shorthand is wrapped into a single Content entry.
 */
function formatGenerateContentInput(params) {
    const formattedRequest = params.contents
        ? params
        : { contents: [formatNewContent(params)] };
    if (params.systemInstruction) {
        formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction);
    }
    return formattedRequest;
}
2238
/**
 * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format
 * that is expected from the REST API.
 *
 * @internal
 */
function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, numberOfImages = 1, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }) {
    // Properties that are undefined will be omitted from the JSON string that is sent in the request.
    return {
        instances: [{ prompt }],
        parameters: {
            storageUri: gcsURI,
            negativePrompt,
            sampleCount: numberOfImages,
            aspectRatio,
            outputOptions: imageFormat,
            addWatermark,
            safetyFilterLevel,
            personGeneration: personFilterLevel,
            // Always request filter reasons and safety attributes so filtered
            // results can be reported back to the caller.
            includeRaiReason: true,
            includeSafetyAttributes: true
        }
    };
}
2267
+
2268
+ /**
2269
+ * @license
2270
+ * Copyright 2024 Google LLC
2271
+ *
2272
+ * Licensed under the Apache License, Version 2.0 (the "License");
2273
+ * you may not use this file except in compliance with the License.
2274
+ * You may obtain a copy of the License at
2275
+ *
2276
+ * http://www.apache.org/licenses/LICENSE-2.0
2277
+ *
2278
+ * Unless required by applicable law or agreed to in writing, software
2279
+ * distributed under the License is distributed on an "AS IS" BASIS,
2280
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2281
+ * See the License for the specific language governing permissions and
2282
+ * limitations under the License.
2283
+ */
2284
// https://ai.google.dev/api/rest/v1beta/Content#part
// Part fields that the chat-history validator recognizes and checks.
const VALID_PART_FIELDS = [
    'text',
    'inlineData',
    'functionCall',
    'functionResponse',
    'thought',
    'thoughtSignature'
];
// Which part fields are permitted for each Content role.
const VALID_PARTS_PER_ROLE = {
    user: ['text', 'inlineData'],
    function: ['functionResponse'],
    model: ['text', 'functionCall', 'thought', 'thoughtSignature'],
    // System instructions shouldn't be in history anyway.
    system: ['text']
};
// Which roles may legally precede each role in chat history.
const VALID_PREVIOUS_CONTENT_ROLES = {
    user: ['model'],
    function: ['model'],
    model: ['user', 'function'],
    // System instructions shouldn't be in history.
    system: []
};
2307
/**
 * Validates a chat history array before use: the first turn must be from
 * 'user', roles must be recognized and alternate legally, each Content must
 * have a non-empty parts array, and every part field must be permitted for its
 * role. Throws AIError with INVALID_CONTENT on the first violation found.
 */
function validateChatHistory(history) {
    let prevContent = null;
    for (const currContent of history) {
        const { role, parts } = currContent;
        if (!prevContent && role !== 'user') {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}`);
        }
        if (!POSSIBLE_ROLES.includes(role)) {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`);
        }
        if (!Array.isArray(parts)) {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`);
        }
        if (parts.length === 0) {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`);
        }
        // Reject any part field that appears but is not permitted for this role.
        const allowedFields = VALID_PARTS_PER_ROLE[role];
        for (const field of VALID_PART_FIELDS) {
            if (allowedFields.includes(field)) {
                continue;
            }
            if (parts.some(part => field in part)) {
                throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${field}' part`);
            }
        }
        // Enforce legal turn ordering relative to the previous Content.
        if (prevContent) {
            const allowedPreviousRoles = VALID_PREVIOUS_CONTENT_ROLES[role];
            if (!allowedPreviousRoles.includes(prevContent.role)) {
                throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`);
            }
        }
        prevContent = currContent;
    }
}
2355
+
2356
+ /**
2357
+ * @license
2358
+ * Copyright 2024 Google LLC
2359
+ *
2360
+ * Licensed under the Apache License, Version 2.0 (the "License");
2361
+ * you may not use this file except in compliance with the License.
2362
+ * You may obtain a copy of the License at
2363
+ *
2364
+ * http://www.apache.org/licenses/LICENSE-2.0
2365
+ *
2366
+ * Unless required by applicable law or agreed to in writing, software
2367
+ * distributed under the License is distributed on an "AS IS" BASIS,
2368
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2369
+ * See the License for the specific language governing permissions and
2370
+ * limitations under the License.
2371
+ */
2372
+ /**
2373
+ * Used to break the internal promise chain when an error is already handled
2374
+ * by the user, preventing duplicate console logs.
2375
+ */
2376
+ const SILENT_ERROR = 'SILENT_ERROR';
2377
+ /**
2378
+ * Prevent infinite loop if the model continues to request sequential
2379
+ * function calls during automatic function calling.
2380
+ */
2381
+ const DEFAULT_MAX_SEQUENTIAL_FUNCTION_CALLS = 10;
2382
+ /**
2383
+ * ChatSession class that enables sending chat messages and stores
2384
+ * history of sent and received messages so far.
2385
+ *
2386
+ * @public
2387
+ */
2388
+ class ChatSession {
2389
+ constructor(apiSettings, model, chromeAdapter, params, requestOptions) {
2390
+ this.model = model;
2391
+ this.chromeAdapter = chromeAdapter;
2392
+ this.params = params;
2393
+ this.requestOptions = requestOptions;
2394
+ this._history = [];
2395
+ /**
2396
+ * Ensures sequential execution of chat messages to maintain history order.
2397
+ * Each call waits for the previous one to settle before proceeding.
2398
+ */
2399
+ this._sendPromise = Promise.resolve();
2400
+ this._apiSettings = apiSettings;
2401
+ if (params?.history) {
2402
+ validateChatHistory(params.history);
2403
+ this._history = params.history;
2404
+ }
2405
+ }
2406
+ /**
2407
+ * Gets the chat history so far. Blocked prompts are not added to history.
2408
+ * Neither blocked candidates nor the prompts that generated them are added
2409
+ * to history.
2410
+ */
2411
+ async getHistory() {
2412
+ await this._sendPromise;
2413
+ return this._history;
2414
+ }
2415
+ /**
2416
+ * Format Content into a request for generateContent or
2417
+ * generateContentStream.
2418
+ * @internal
2419
+ */
2420
+ _formatRequest(incomingContent, tempHistory) {
2421
+ return {
2422
+ safetySettings: this.params?.safetySettings,
2423
+ generationConfig: this.params?.generationConfig,
2424
+ tools: this.params?.tools,
2425
+ toolConfig: this.params?.toolConfig,
2426
+ systemInstruction: this.params?.systemInstruction,
2427
+ contents: [...this._history, ...tempHistory, incomingContent]
2428
+ };
2429
+ }
2430
+ /**
2431
+ * Sends a chat message and receives a non-streaming
2432
+ * {@link GenerateContentResult}
2433
+ */
2434
+ async sendMessage(request, singleRequestOptions) {
2435
+ let finalResult = {};
2436
+ await this._sendPromise;
2437
+ /**
2438
+ * Temporarily store multiple turns for cases like automatic function
2439
+ * calling, only writing them to official history when the entire
2440
+ * sequence has completed successfully.
2441
+ */
2442
+ const tempHistory = [];
2443
+ this._sendPromise = this._sendPromise.then(async () => {
2444
+ let functionCalls;
2445
+ let functionCallTurnCount = 0;
2446
+ const functionCallMaxTurns = this.requestOptions?.maxSequentalFunctionCalls ??
2447
+ DEFAULT_MAX_SEQUENTIAL_FUNCTION_CALLS;
2448
+ // Repeats until model returns a response with no function calls
2449
+ // or until `functionCallMaxTurns` is met or exceeded.
2450
+ do {
2451
+ let formattedContent;
2452
+ if (functionCalls) {
2453
+ functionCallTurnCount++;
2454
+ const functionResponseParts = await this._callFunctionsAsNeeded(functionCalls);
2455
+ formattedContent = formatNewContent(functionResponseParts);
2456
+ }
2457
+ else {
2458
+ formattedContent = formatNewContent(request);
2459
+ }
2460
+ const formattedRequest = this._formatRequest(formattedContent, tempHistory);
2461
+ tempHistory.push(formattedContent);
2462
+ const result = await generateContent(this._apiSettings, this.model, formattedRequest, this.chromeAdapter, {
2463
+ ...this.requestOptions,
2464
+ ...singleRequestOptions
2465
+ });
2466
+ if (result) {
2467
+ finalResult = result;
2468
+ functionCalls = this._getCallableFunctionCalls(result.response);
2469
+ if (result.response.candidates &&
2470
+ result.response.candidates.length > 0) {
2471
+ // TODO: Make this update atomic. If creating `responseContent` throws,
2472
+ // history will contain the user message but not the response, causing
2473
+ // validation errors on the next request.
2474
+ const responseContent = {
2475
+ parts: result.response.candidates?.[0].content.parts || [],
2476
+ // Response seems to come back without a role set.
2477
+ role: result.response.candidates?.[0].content.role || 'model'
2478
+ };
2479
+ tempHistory.push(responseContent);
2480
+ }
2481
+ else {
2482
+ const blockErrorMessage = formatBlockErrorMessage(result.response);
2483
+ if (blockErrorMessage) {
2484
+ logger.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`);
2485
+ }
2486
+ }
2487
+ }
2488
+ else {
2489
+ functionCalls = undefined;
2490
+ }
2491
+ } while (functionCalls && functionCallTurnCount < functionCallMaxTurns);
2492
+ if (functionCalls && functionCallTurnCount >= functionCallMaxTurns) {
2493
+ logger.warn(`Automatic function calling exceeded the limit of` +
2494
+ ` ${functionCallMaxTurns} function calls. Returning last model response.`);
2495
+ }
2496
+ });
2497
+ await this._sendPromise;
2498
+ this._history = this._history.concat(tempHistory);
2499
+ return finalResult;
2500
+ }
2501
+ /**
2502
+ * Sends a chat message and receives the response as a
2503
+ * {@link GenerateContentStreamResult} containing an iterable stream
2504
+ * and a response promise.
2505
+ */
2506
+ async sendMessageStream(request, singleRequestOptions) {
2507
+ await this._sendPromise;
2508
+ /**
2509
+ * Temporarily store multiple turns for cases like automatic function
2510
+ * calling, only writing them to official history when the entire
2511
+ * sequence has completed successfully.
2512
+ */
2513
+ const tempHistory = [];
2514
+ const callGenerateContentStream = async () => {
2515
+ let functionCalls;
2516
+ let functionCallTurnCount = 0;
2517
+ const functionCallMaxTurns = this.requestOptions?.maxSequentalFunctionCalls ??
2518
+ DEFAULT_MAX_SEQUENTIAL_FUNCTION_CALLS;
2519
+ let result;
2520
+ // Repeats until model returns a response with no function calls
2521
+ // or until `functionCallMaxTurns` is met or exceeded.
2522
+ do {
2523
+ let formattedContent;
2524
+ if (functionCalls) {
2525
+ functionCallTurnCount++;
2526
+ const functionResponseParts = await this._callFunctionsAsNeeded(functionCalls);
2527
+ formattedContent = formatNewContent(functionResponseParts);
2528
+ }
2529
+ else {
2530
+ formattedContent = formatNewContent(request);
2531
+ }
2532
+ tempHistory.push(formattedContent);
2533
+ const formattedRequest = this._formatRequest(formattedContent, tempHistory);
2534
+ result = await generateContentStream(this._apiSettings, this.model, formattedRequest, this.chromeAdapter, {
2535
+ ...this.requestOptions,
2536
+ ...singleRequestOptions
2537
+ });
2538
+ functionCalls = this._getCallableFunctionCalls(result.firstValue);
2539
+ if (functionCalls &&
2540
+ result.firstValue &&
2541
+ result.firstValue.candidates &&
2542
+ result.firstValue.candidates.length > 0) {
2543
+ const responseContent = {
2544
+ ...result.firstValue.candidates[0].content
2545
+ };
2546
+ if (!responseContent.role) {
2547
+ responseContent.role = 'model';
2548
+ }
2549
+ tempHistory.push(responseContent);
2550
+ }
2551
+ } while (functionCalls && functionCallTurnCount < functionCallMaxTurns);
2552
+ if (functionCalls && functionCallTurnCount >= functionCallMaxTurns) {
2553
+ logger.warn(`Automatic function calling exceeded the limit of` +
2554
+ ` ${functionCallMaxTurns} function calls. Returning last model response.`);
2555
+ }
2556
+ return { stream: result.stream, response: result.response };
2557
+ };
2558
+ const streamPromise = callGenerateContentStream();
2559
+ // Add onto the chain.
2560
+ this._sendPromise = this._sendPromise
2561
+ .then(async () => streamPromise)
2562
+ // This must be handled to avoid unhandled rejection, but jump
2563
+ // to the final catch block with a label to not log this error.
2564
+ .catch(_ignored => {
2565
+ // If the initial fetch fails, the user's `streamPromise` rejects.
2566
+ // We swallow the error here to prevent double logging in the final catch.
2567
+ throw new Error(SILENT_ERROR);
2568
+ })
2569
+ .then(streamResult => streamResult.response)
2570
+ .then(response => {
2571
+ // This runs after the stream completes. Runtime errors here cannot be
2572
+ // caught by the user because their promise has likely already resolved.
2573
+ // TODO: Move response validation logic upstream to `stream-reader` so
2574
+ // errors propagate to the user's `result.response` promise.
2575
+ if (response.candidates && response.candidates.length > 0) {
2576
+ this._history = this._history.concat(tempHistory);
2577
+ // TODO: Validate that `response.candidates[0].content` is not null.
2578
+ const responseContent = { ...response.candidates[0].content };
2579
+ if (!responseContent.role) {
2580
+ responseContent.role = 'model';
2581
+ }
2582
+ this._history.push(responseContent);
2583
+ }
2584
+ else {
2585
+ const blockErrorMessage = formatBlockErrorMessage(response);
2586
+ if (blockErrorMessage) {
2587
+ logger.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`);
2588
+ }
2589
+ }
2590
+ })
2591
+ .catch(e => {
2592
+ // Filter out errors already handled by the user or initiated by them.
2593
+ if (e.message !== SILENT_ERROR && e.name !== 'AbortError') {
2594
+ logger.error(e);
2595
+ }
2596
+ });
2597
+ return streamPromise;
2598
+ }
2599
+ /**
2600
+ * Get function calls that the SDK has references to actually call.
2601
+ * This is all-or-nothing. If the model is requesting multiple
2602
+ * function calls, all of them must have references in order for
2603
+ * automatic function calling to work.
2604
+ *
2605
+ * @internal
2606
+ */
2607
+ _getCallableFunctionCalls(response) {
2608
+ const functionDeclarationsTool = this.params?.tools?.find(tool => tool.functionDeclarations);
2609
+ if (!functionDeclarationsTool?.functionDeclarations) {
2610
+ return;
2611
+ }
2612
+ const functionCalls = getFunctionCalls(response);
2613
+ if (!functionCalls) {
2614
+ return;
2615
+ }
2616
+ for (const functionCall of functionCalls) {
2617
+ const hasFunctionReference = functionDeclarationsTool.functionDeclarations?.some(declaration => declaration.name === functionCall.name &&
2618
+ typeof declaration.functionReference === 'function');
2619
+ if (!hasFunctionReference) {
2620
+ return;
2621
+ }
2622
+ }
2623
+ return functionCalls;
2624
+ }
2625
+ /**
2626
+ * Call user-defined functions if requested by the model, and return
2627
+ * the response that should be sent to the model.
2628
+ * @internal
2629
+ */
2630
+ async _callFunctionsAsNeeded(functionCalls) {
2631
+ const activeCallList = new Map();
2632
+ const promiseList = [];
2633
+ const functionDeclarationsTool = this.params?.tools?.find(tool => tool.functionDeclarations);
2634
+ if (functionDeclarationsTool &&
2635
+ functionDeclarationsTool.functionDeclarations) {
2636
+ for (const functionCall of functionCalls) {
2637
+ const functionDeclaration = functionDeclarationsTool.functionDeclarations.find(declaration => declaration.name === functionCall.name);
2638
+ if (functionDeclaration?.functionReference) {
2639
+ const results = Promise.resolve(functionDeclaration.functionReference(functionCall.args)).catch(e => {
2640
+ const wrappedError = new AIError(AIErrorCode.ERROR, `Error in user-defined function "${functionDeclaration.name}": ${e.message}`);
2641
+ wrappedError.stack = e.stack;
2642
+ throw wrappedError;
2643
+ });
2644
+ activeCallList.set(functionCall.name, {
2645
+ id: functionCall.id,
2646
+ results
2647
+ });
2648
+ promiseList.push(results);
2649
+ }
2650
+ }
2651
+ // Wait for promises to finish.
2652
+ await Promise.all(promiseList);
2653
+ const functionResponseParts = [];
2654
+ for (const [name, callData] of activeCallList) {
2655
+ functionResponseParts.push({
2656
+ functionResponse: {
2657
+ name,
2658
+ response: await callData.results
2659
+ }
2660
+ });
2661
+ }
2662
+ return functionResponseParts;
2663
+ }
2664
+ else {
2665
+ throw new AIError(AIErrorCode.REQUEST_ERROR, `No function declarations were provided in "tools".`);
2666
+ }
2667
+ }
2668
+ }
2669
+
2670
+ /**
2671
+ * @license
2672
+ * Copyright 2024 Google LLC
2673
+ *
2674
+ * Licensed under the Apache License, Version 2.0 (the "License");
2675
+ * you may not use this file except in compliance with the License.
2676
+ * You may obtain a copy of the License at
2677
+ *
2678
+ * http://www.apache.org/licenses/LICENSE-2.0
2679
+ *
2680
+ * Unless required by applicable law or agreed to in writing, software
2681
+ * distributed under the License is distributed on an "AS IS" BASIS,
2682
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2683
+ * See the License for the specific language governing permissions and
2684
+ * limitations under the License.
2685
+ */
2686
+ async function countTokensOnCloud(apiSettings, model, params, singleRequestOptions) {
2687
+ let body = '';
2688
+ if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
2689
+ const mappedParams = mapCountTokensRequest(params, model);
2690
+ body = JSON.stringify(mappedParams);
2691
+ }
2692
+ else {
2693
+ body = JSON.stringify(params);
2694
+ }
2695
+ const response = await makeRequest({
2696
+ model,
2697
+ task: "countTokens" /* Task.COUNT_TOKENS */,
2698
+ apiSettings,
2699
+ stream: false,
2700
+ singleRequestOptions
2701
+ }, body);
2702
+ return response.json();
2703
+ }
2704
+ async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
2705
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
2706
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
2707
+ }
2708
+ return countTokensOnCloud(apiSettings, model, params, requestOptions);
2709
+ }
2710
+
2711
+ /**
2712
+ * @license
2713
+ * Copyright 2024 Google LLC
2714
+ *
2715
+ * Licensed under the Apache License, Version 2.0 (the "License");
2716
+ * you may not use this file except in compliance with the License.
2717
+ * You may obtain a copy of the License at
2718
+ *
2719
+ * http://www.apache.org/licenses/LICENSE-2.0
2720
+ *
2721
+ * Unless required by applicable law or agreed to in writing, software
2722
+ * distributed under the License is distributed on an "AS IS" BASIS,
2723
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2724
+ * See the License for the specific language governing permissions and
2725
+ * limitations under the License.
2726
+ */
2727
+ /**
2728
+ * Class for generative model APIs.
2729
+ * @public
2730
+ */
2731
+ class GenerativeModel extends AIModel {
2732
+ constructor(ai, modelParams, requestOptions, chromeAdapter) {
2733
+ super(ai, modelParams.model);
2734
+ this.chromeAdapter = chromeAdapter;
2735
+ this.generationConfig = modelParams.generationConfig || {};
2736
+ validateGenerationConfig(this.generationConfig);
2737
+ this.safetySettings = modelParams.safetySettings || [];
2738
+ this.tools = modelParams.tools;
2739
+ this.toolConfig = modelParams.toolConfig;
2740
+ this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction);
2741
+ this.requestOptions = requestOptions || {};
2742
+ }
2743
+ /**
2744
+ * Makes a single non-streaming call to the model
2745
+ * and returns an object containing a single {@link GenerateContentResponse}.
2746
+ */
2747
+ async generateContent(request, singleRequestOptions) {
2748
+ const formattedParams = formatGenerateContentInput(request);
2749
+ return generateContent(this._apiSettings, this.model, {
2750
+ generationConfig: this.generationConfig,
2751
+ safetySettings: this.safetySettings,
2752
+ tools: this.tools,
2753
+ toolConfig: this.toolConfig,
2754
+ systemInstruction: this.systemInstruction,
2755
+ ...formattedParams
2756
+ }, this.chromeAdapter,
2757
+ // Merge request options
2758
+ {
2759
+ ...this.requestOptions,
2760
+ ...singleRequestOptions
2761
+ });
2762
+ }
2763
+ /**
2764
+ * Makes a single streaming call to the model
2765
+ * and returns an object containing an iterable stream that iterates
2766
+ * over all chunks in the streaming response as well as
2767
+ * a promise that returns the final aggregated response.
2768
+ */
2769
+ async generateContentStream(request, singleRequestOptions) {
2770
+ const formattedParams = formatGenerateContentInput(request);
2771
+ const { stream, response } = await generateContentStream(this._apiSettings, this.model, {
2772
+ generationConfig: this.generationConfig,
2773
+ safetySettings: this.safetySettings,
2774
+ tools: this.tools,
2775
+ toolConfig: this.toolConfig,
2776
+ systemInstruction: this.systemInstruction,
2777
+ ...formattedParams
2778
+ }, this.chromeAdapter,
2779
+ // Merge request options
2780
+ {
2781
+ ...this.requestOptions,
2782
+ ...singleRequestOptions
2783
+ });
2784
+ return { stream, response };
2785
+ }
2786
+ /**
2787
+ * Gets a new {@link ChatSession} instance which can be used for
2788
+ * multi-turn chats.
2789
+ */
2790
+ startChat(startChatParams) {
2791
+ return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, {
2792
+ tools: this.tools,
2793
+ toolConfig: this.toolConfig,
2794
+ systemInstruction: this.systemInstruction,
2795
+ generationConfig: this.generationConfig,
2796
+ safetySettings: this.safetySettings,
2797
+ /**
2798
+ * Overrides params inherited from GenerativeModel with those explicitly set in the
2799
+ * StartChatParams. For example, if startChatParams.generationConfig is set, it'll override
2800
+ * this.generationConfig.
2801
+ */
2802
+ ...startChatParams
2803
+ }, this.requestOptions);
2804
+ }
2805
+ /**
2806
+ * Counts the tokens in the provided request.
2807
+ */
2808
+ async countTokens(request, singleRequestOptions) {
2809
+ const formattedParams = formatGenerateContentInput(request);
2810
+ return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter,
2811
+ // Merge request options
2812
+ {
2813
+ ...this.requestOptions,
2814
+ ...singleRequestOptions
2815
+ });
2816
+ }
2817
+ }
2818
+ /**
2819
+ * Client-side validation of some common `GenerationConfig` pitfalls, in order
2820
+ * to save the developer a wasted request.
2821
+ */
2822
+ function validateGenerationConfig(generationConfig) {
2823
+ if (
2824
+ // != allows for null and undefined. 0 is considered "set" by the model
2825
+ generationConfig.thinkingConfig?.thinkingBudget != null &&
2826
+ generationConfig.thinkingConfig?.thinkingLevel) {
2827
+ throw new AIError(AIErrorCode.UNSUPPORTED, `Cannot set both thinkingBudget and thinkingLevel in a config.`);
2828
+ }
2829
+ }
2830
+
2831
+ /**
2832
+ * @license
2833
+ * Copyright 2025 Google LLC
2834
+ *
2835
+ * Licensed under the Apache License, Version 2.0 (the "License");
2836
+ * you may not use this file except in compliance with the License.
2837
+ * You may obtain a copy of the License at
2838
+ *
2839
+ * http://www.apache.org/licenses/LICENSE-2.0
2840
+ *
2841
+ * Unless required by applicable law or agreed to in writing, software
2842
+ * distributed under the License is distributed on an "AS IS" BASIS,
2843
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2844
+ * See the License for the specific language governing permissions and
2845
+ * limitations under the License.
2846
+ */
2847
+ /**
2848
+ * Represents an active, real-time, bidirectional conversation with the model.
2849
+ *
2850
+ * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
2851
+ *
2852
+ * @beta
2853
+ */
2854
+ class LiveSession {
2855
+ /**
2856
+ * @internal
2857
+ */
2858
+ constructor(webSocketHandler, serverMessages) {
2859
+ this.webSocketHandler = webSocketHandler;
2860
+ this.serverMessages = serverMessages;
2861
+ /**
2862
+ * Indicates whether this Live session is closed.
2863
+ *
2864
+ * @beta
2865
+ */
2866
+ this.isClosed = false;
2867
+ /**
2868
+ * Indicates whether this Live session is being controlled by an `AudioConversationController`.
2869
+ *
2870
+ * @beta
2871
+ */
2872
+ this.inConversation = false;
2873
+ }
2874
+ /**
2875
+ * Sends content to the server.
2876
+ *
2877
+ * @param request - The message to send to the model.
2878
+ * @param turnComplete - Indicates if the turn is complete. Defaults to false.
2879
+ * @throws If this session has been closed.
2880
+ *
2881
+ * @beta
2882
+ */
2883
+ async send(request, turnComplete = true) {
2884
+ if (this.isClosed) {
2885
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2886
+ }
2887
+ const newContent = formatNewContent(request);
2888
+ const message = {
2889
+ clientContent: {
2890
+ turns: [newContent],
2891
+ turnComplete
2892
+ }
2893
+ };
2894
+ this.webSocketHandler.send(JSON.stringify(message));
2895
+ }
2896
+ /**
2897
+ * Sends text to the server in realtime.
2898
+ *
2899
+ * @example
2900
+ * ```javascript
2901
+ * liveSession.sendTextRealtime("Hello, how are you?");
2902
+ * ```
2903
+ *
2904
+ * @param text - The text data to send.
2905
+ * @throws If this session has been closed.
2906
+ *
2907
+ * @beta
2908
+ */
2909
+ async sendTextRealtime(text) {
2910
+ if (this.isClosed) {
2911
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2912
+ }
2913
+ const message = {
2914
+ realtimeInput: {
2915
+ text
2916
+ }
2917
+ };
2918
+ this.webSocketHandler.send(JSON.stringify(message));
2919
+ }
2920
+ /**
2921
+ * Sends audio data to the server in realtime.
2922
+ *
2923
+ * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
2924
+ * little-endian.
2925
+ *
2926
+ * @example
2927
+ * ```javascript
2928
+ * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
2929
+ * const blob = { mimeType: "audio/pcm", data: pcmData };
2930
+ * liveSession.sendAudioRealtime(blob);
2931
+ * ```
2932
+ *
2933
+ * @param blob - The base64-encoded PCM data to send to the server in realtime.
2934
+ * @throws If this session has been closed.
2935
+ *
2936
+ * @beta
2937
+ */
2938
+ async sendAudioRealtime(blob) {
2939
+ if (this.isClosed) {
2940
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2941
+ }
2942
+ const message = {
2943
+ realtimeInput: {
2944
+ audio: blob
2945
+ }
2946
+ };
2947
+ this.webSocketHandler.send(JSON.stringify(message));
2948
+ }
2949
+ /**
2950
+ * Sends video data to the server in realtime.
2951
+ *
2952
+ * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It
2953
+ * is recommended to set `mimeType` to `image/jpeg`.
2954
+ *
2955
+ * @example
2956
+ * ```javascript
2957
+ * // const videoFrame = ... base64-encoded JPEG data
2958
+ * const blob = { mimeType: "image/jpeg", data: videoFrame };
2959
+ * liveSession.sendVideoRealtime(blob);
2960
+ * ```
2961
+ * @param blob - The base64-encoded video data to send to the server in realtime.
2962
+ * @throws If this session has been closed.
2963
+ *
2964
+ * @beta
2965
+ */
2966
+ async sendVideoRealtime(blob) {
2967
+ if (this.isClosed) {
2968
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2969
+ }
2970
+ const message = {
2971
+ realtimeInput: {
2972
+ video: blob
2973
+ }
2974
+ };
2975
+ this.webSocketHandler.send(JSON.stringify(message));
2976
+ }
2977
+ /**
2978
+ * Sends function responses to the server.
2979
+ *
2980
+ * @param functionResponses - The function responses to send.
2981
+ * @throws If this session has been closed.
2982
+ *
2983
+ * @beta
2984
+ */
2985
+ async sendFunctionResponses(functionResponses) {
2986
+ if (this.isClosed) {
2987
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2988
+ }
2989
+ const message = {
2990
+ toolResponse: {
2991
+ functionResponses
2992
+ }
2993
+ };
2994
+ this.webSocketHandler.send(JSON.stringify(message));
2995
+ }
2996
+ /**
2997
+ * Yields messages received from the server.
2998
+ * This can only be used by one consumer at a time.
2999
+ *
3000
+ * @returns An `AsyncGenerator` that yields server messages as they arrive.
3001
+ * @throws If the session is already closed, or if we receive a response that we don't support.
3002
+ *
3003
+ * @beta
3004
+ */
3005
+ async *receive() {
3006
+ if (this.isClosed) {
3007
+ throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot read from a Live session that is closed. Try starting a new Live session.');
3008
+ }
3009
+ for await (const message of this.serverMessages) {
3010
+ if (message && typeof message === 'object') {
3011
+ if (LiveResponseType.SERVER_CONTENT in message) {
3012
+ yield {
3013
+ type: 'serverContent',
3014
+ ...message
3015
+ .serverContent
3016
+ };
3017
+ }
3018
+ else if (LiveResponseType.TOOL_CALL in message) {
3019
+ yield {
3020
+ type: 'toolCall',
3021
+ ...message
3022
+ .toolCall
3023
+ };
3024
+ }
3025
+ else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {
3026
+ yield {
3027
+ type: 'toolCallCancellation',
3028
+ ...message.toolCallCancellation
3029
+ };
3030
+ }
3031
+ else if ('goAway' in message) {
3032
+ const notice = message.goAway;
3033
+ yield {
3034
+ type: LiveResponseType.GOING_AWAY_NOTICE,
3035
+ timeLeft: parseDuration(notice.timeLeft)
3036
+ };
3037
+ }
3038
+ else {
3039
+ logger.warn(`Received an unknown message type from the server: ${JSON.stringify(message)}`);
3040
+ }
3041
+ }
3042
+ else {
3043
+ logger.warn(`Received an invalid message from the server: ${JSON.stringify(message)}`);
3044
+ }
3045
+ }
3046
+ }
3047
+ /**
3048
+ * Closes this session.
3049
+ * All methods on this session will throw an error once this resolves.
3050
+ *
3051
+ * @beta
3052
+ */
3053
+ async close() {
3054
+ if (!this.isClosed) {
3055
+ this.isClosed = true;
3056
+ await this.webSocketHandler.close(1000, 'Client closed session.');
3057
+ }
3058
+ }
3059
+ /**
3060
+ * Sends realtime input to the server.
3061
+ *
3062
+ * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
3063
+ *
3064
+ * @param mediaChunks - The media chunks to send.
3065
+ * @throws If this session has been closed.
3066
+ *
3067
+ * @beta
3068
+ */
3069
+ async sendMediaChunks(mediaChunks) {
3070
+ if (this.isClosed) {
3071
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
3072
+ }
3073
+ // The backend does not support sending more than one mediaChunk in one message.
3074
+ // Work around this limitation by sending mediaChunks in separate messages.
3075
+ mediaChunks.forEach(mediaChunk => {
3076
+ const message = {
3077
+ realtimeInput: { mediaChunks: [mediaChunk] }
3078
+ };
3079
+ this.webSocketHandler.send(JSON.stringify(message));
3080
+ });
3081
+ }
3082
+ /**
3083
+ * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
3084
+ *
3085
+ * Sends a stream of {@link GenerativeContentBlob}.
3086
+ *
3087
+ * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
3088
+ * @throws If this session has been closed.
3089
+ *
3090
+ * @beta
3091
+ */
3092
+ async sendMediaStream(mediaChunkStream) {
3093
+ if (this.isClosed) {
3094
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
3095
+ }
3096
+ const reader = mediaChunkStream.getReader();
3097
+ while (true) {
3098
+ try {
3099
+ const { done, value } = await reader.read();
3100
+ if (done) {
3101
+ break;
3102
+ }
3103
+ else if (!value) {
3104
+ throw new Error('Missing chunk in reader, but reader is not done.');
3105
+ }
3106
+ await this.sendMediaChunks([value]);
3107
+ }
3108
+ catch (e) {
3109
+ // Re-throw any errors that occur during stream consumption or sending.
3110
+ const message = e instanceof Error ? e.message : 'Error processing media stream.';
3111
+ throw new AIError(AIErrorCode.REQUEST_ERROR, message);
3112
+ }
3113
+ }
3114
+ }
3115
+ }
3116
+ /**
3117
+ * Parses a duration string (e.g. "3.000000001s") into a number of seconds.
3118
+ *
3119
+ * @param duration - The duration string to parse.
3120
+ * @returns The duration in seconds.
3121
+ */
3122
+ function parseDuration(duration) {
3123
+ if (!duration || !duration.endsWith('s')) {
3124
+ return 0;
3125
+ }
3126
+ return Number(duration.slice(0, -1)); // slice removes the trailing 's'.
3127
+ }
3128
+
3129
+ /**
3130
+ * @license
3131
+ * Copyright 2025 Google LLC
3132
+ *
3133
+ * Licensed under the Apache License, Version 2.0 (the "License");
3134
+ * you may not use this file except in compliance with the License.
3135
+ * You may obtain a copy of the License at
3136
+ *
3137
+ * http://www.apache.org/licenses/LICENSE-2.0
3138
+ *
3139
+ * Unless required by applicable law or agreed to in writing, software
3140
+ * distributed under the License is distributed on an "AS IS" BASIS,
3141
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3142
+ * See the License for the specific language governing permissions and
3143
+ * limitations under the License.
3144
+ */
3145
+ /**
3146
+ * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
3147
+ * interactions with Gemini.
3148
+ *
3149
+ * This class should only be instantiated with {@link getLiveGenerativeModel}.
3150
+ *
3151
+ * @beta
3152
+ */
3153
class LiveGenerativeModel extends AIModel {
    /**
     * Constructs a live (bidirectional streaming) model wrapper.
     *
     * @internal
     */
    constructor(ai, modelParams,
    /**
     * Injected transport used for the "Live" WebSocket connection.
     * @internal
     */
    _webSocketHandler) {
        super(ai, modelParams.model);
        this._webSocketHandler = _webSocketHandler;
        // Default to an empty object so destructuring in connect() is always safe.
        this.generationConfig = modelParams.generationConfig || {};
        this.tools = modelParams.tools;
        this.toolConfig = modelParams.toolConfig;
        this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction);
    }
    /**
     * Starts a {@link LiveSession}.
     *
     * Opens the WebSocket, sends the `setup` handshake message, and waits for
     * the server's `setupComplete` acknowledgement before returning a session.
     *
     * @returns A {@link LiveSession}.
     * @throws If the connection failed to be established with the server.
     *
     * @beta
     */
    async connect() {
        const url = new WebSocketUrl(this._apiSettings);
        await this._webSocketHandler.connect(url.toString());
        let fullModelPath;
        // The Google AI backend addresses models without a location segment;
        // the other backend path includes `locations/<location>`.
        if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
            fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;
        }
        else {
            fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;
        }
        // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,
        // but the backend expects them to be in the `setup` message.
        const { inputAudioTranscription, outputAudioTranscription, ...generationConfig } = this.generationConfig;
        const setupMessage = {
            setup: {
                model: fullModelPath,
                generationConfig,
                tools: this.tools,
                toolConfig: this.toolConfig,
                systemInstruction: this.systemInstruction,
                inputAudioTranscription,
                outputAudioTranscription
            }
        };
        try {
            // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'.
            // listen() must be started before send() so the handshake response is not missed.
            const serverMessages = this._webSocketHandler.listen();
            this._webSocketHandler.send(JSON.stringify(setupMessage));
            // Verify we received the handshake response 'setupComplete'
            const firstMessage = (await serverMessages.next()).value;
            if (!firstMessage ||
                !(typeof firstMessage === 'object') ||
                !('setupComplete' in firstMessage)) {
                // 1011 is the "internal error" WebSocket close code.
                await this._webSocketHandler.close(1011, 'Handshake failure');
                throw new AIError(AIErrorCode.RESPONSE_ERROR, 'Server connection handshake failed. The server did not respond with a setupComplete message.');
            }
            return new LiveSession(this._webSocketHandler, serverMessages);
        }
        catch (e) {
            // Ensure connection is closed on any setup error
            await this._webSocketHandler.close();
            throw e;
        }
    }
}
3222
+
3223
+ /**
3224
+ * @license
3225
+ * Copyright 2025 Google LLC
3226
+ *
3227
+ * Licensed under the Apache License, Version 2.0 (the "License");
3228
+ * you may not use this file except in compliance with the License.
3229
+ * You may obtain a copy of the License at
3230
+ *
3231
+ * http://www.apache.org/licenses/LICENSE-2.0
3232
+ *
3233
+ * Unless required by applicable law or agreed to in writing, software
3234
+ * distributed under the License is distributed on an "AS IS" BASIS,
3235
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3236
+ * See the License for the specific language governing permissions and
3237
+ * limitations under the License.
3238
+ */
3239
+ /**
3240
+ * Class for Imagen model APIs.
3241
+ *
3242
+ * This class provides methods for generating images using the Imagen model.
3243
+ *
3244
+ * @example
3245
+ * ```javascript
3246
+ * const imagen = new ImagenModel(
3247
+ * ai,
3248
+ * {
3249
+ * model: 'imagen-3.0-generate-002'
3250
+ * }
3251
+ * );
3252
+ *
3253
+ * const response = await imagen.generateImages('A photo of a cat');
3254
+ * if (response.images.length > 0) {
3255
+ * console.log(response.images[0].bytesBase64Encoded);
3256
+ * }
3257
+ * ```
3258
+ *
3259
+ * @public
3260
+ */
3261
class ImagenModel extends AIModel {
    /**
     * Constructs a new instance of the {@link ImagenModel} class.
     *
     * @param ai - an {@link AI} instance.
     * @param modelParams - Parameters to use when making requests to Imagen.
     * @param requestOptions - Additional options to use when making requests.
     *
     * @throws If the `apiKey` or `projectId` fields are missing in your
     * Firebase config.
     */
    constructor(ai, modelParams, requestOptions) {
        const { model, generationConfig, safetySettings } = modelParams;
        super(ai, model);
        this.requestOptions = requestOptions;
        this.generationConfig = generationConfig;
        this.safetySettings = safetySettings;
    }
    /**
     * Generates images using the Imagen model and returns them as
     * base64-encoded strings.
     *
     * @param prompt - A text prompt describing the image(s) to generate.
     * @returns A promise that resolves to an {@link ImagenGenerationResponse}
     * object containing the generated images.
     *
     * @throws If the request to generate images fails. This happens if the
     * prompt is blocked.
     *
     * @remarks
     * If the prompt was not blocked, but one or more of the generated images were filtered, the
     * returned object will have a `filteredReason` property.
     * If all images are filtered, the `images` array will be empty.
     *
     * @public
     */
    async generateImages(prompt, singleRequestOptions) {
        const requestBody = createPredictRequestBody(prompt, {
            ...this.generationConfig,
            ...this.safetySettings
        });
        // Per-call options take precedence over the model-level request options.
        const mergedRequestOptions = {
            ...this.requestOptions,
            ...singleRequestOptions
        };
        const response = await makeRequest({
            task: "predict" /* Task.PREDICT */,
            model: this.model,
            apiSettings: this._apiSettings,
            stream: false,
            singleRequestOptions: mergedRequestOptions
        }, JSON.stringify(requestBody));
        return handlePredictResponse(response);
    }
    /**
     * Generates images to Cloud Storage for Firebase using the Imagen model.
     *
     * @internal This method is temporarily internal.
     *
     * @param prompt - A text prompt describing the image(s) to generate.
     * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.
     * This should be a directory. For example, `gs://my-bucket/my-directory/`.
     * @returns A promise that resolves to an {@link ImagenGenerationResponse}
     * object containing the URLs of the generated images.
     *
     * @throws If the request fails to generate images. This happens if
     * the prompt is blocked.
     *
     * @remarks
     * If the prompt was not blocked, but one or more of the generated images were filtered, the
     * returned object will have a `filteredReason` property.
     * If all images are filtered, the `images` array will be empty.
     */
    async generateImagesGCS(prompt, gcsURI, singleRequestOptions) {
        const requestBody = createPredictRequestBody(prompt, {
            gcsURI,
            ...this.generationConfig,
            ...this.safetySettings
        });
        // Per-call options take precedence over the model-level request options.
        const mergedRequestOptions = {
            ...this.requestOptions,
            ...singleRequestOptions
        };
        const response = await makeRequest({
            task: "predict" /* Task.PREDICT */,
            model: this.model,
            apiSettings: this._apiSettings,
            stream: false,
            singleRequestOptions: mergedRequestOptions
        }, JSON.stringify(requestBody));
        return handlePredictResponse(response);
    }
}
3354
+
3355
+ /**
3356
+ * @license
3357
+ * Copyright 2025 Google LLC
3358
+ *
3359
+ * Licensed under the Apache License, Version 2.0 (the "License");
3360
+ * you may not use this file except in compliance with the License.
3361
+ * You may obtain a copy of the License at
3362
+ *
3363
+ * http://www.apache.org/licenses/LICENSE-2.0
3364
+ *
3365
+ * Unless required by applicable law or agreed to in writing, software
3366
+ * distributed under the License is distributed on an "AS IS" BASIS,
3367
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3368
+ * See the License for the specific language governing permissions and
3369
+ * limitations under the License.
3370
+ */
3371
+ /**
3372
+ * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.
3373
+ *
3374
+ * @internal
3375
+ */
3376
class WebSocketHandlerImpl {
    /**
     * Fails fast if the environment has no native `WebSocket` implementation
     * (e.g. Node < 22 without a polyfill).
     */
    constructor() {
        if (typeof WebSocket === 'undefined') {
            throw new AIError(AIErrorCode.UNSUPPORTED, 'The WebSocket API is not available in this environment. ' +
                'The "Live" feature is not supported here. It is supported in ' +
                'modern browser windows, Web Workers with WebSocket support, and Node >= 22.');
        }
    }
    /**
     * Opens a WebSocket to `url`. Resolves on the 'open' event, rejects on the
     * first 'error' event. A long-lived 'close' listener logs any server-supplied
     * close reason for diagnostics.
     */
    connect(url) {
        return new Promise((resolve, reject) => {
            this.ws = new WebSocket(url);
            this.ws.binaryType = 'blob'; // Only important to set in Node
            this.ws.addEventListener('open', () => resolve(), { once: true });
            this.ws.addEventListener('error', () => reject(new AIError(AIErrorCode.FETCH_ERROR, `Error event raised on WebSocket`)), { once: true });
            this.ws.addEventListener('close', (closeEvent) => {
                if (closeEvent.reason) {
                    logger.warn(`WebSocket connection closed by server. Reason: '${closeEvent.reason}'`);
                }
            });
        });
    }
    /**
     * Sends raw data over the socket.
     * @throws If the socket is missing or not in the OPEN state.
     */
    send(data) {
        if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {
            throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');
        }
        this.ws.send(data);
    }
    /**
     * Async generator that yields each server message parsed as JSON.
     *
     * Incoming events are buffered into `messageQueue`/`errorQueue` by listeners;
     * the generator body drains the queues, parking on a promise (`resolvePromise`)
     * whenever both are empty. Listeners resolve that promise to wake the loop.
     * The generator ends when the socket closes; queued errors are thrown.
     */
    async *listen() {
        if (!this.ws) {
            throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not connected.');
        }
        const messageQueue = [];
        const errorQueue = [];
        // When non-null, the generator is parked awaiting the next event.
        let resolvePromise = null;
        let isClosed = false;
        const messageListener = async (event) => {
            let data;
            // Node delivers Blobs (binaryType = 'blob'); browsers may deliver strings.
            if (event.data instanceof Blob) {
                data = await event.data.text();
            }
            else if (typeof event.data === 'string') {
                data = event.data;
            }
            else {
                errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Failed to parse WebSocket response. Expected data to be a Blob or string, but was ${typeof event.data}.`));
                if (resolvePromise) {
                    resolvePromise();
                    resolvePromise = null;
                }
                return;
            }
            try {
                const obj = JSON.parse(data);
                messageQueue.push(obj);
            }
            catch (e) {
                const err = e;
                errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing WebSocket message to JSON: ${err.message}`));
            }
            // Wake the generator loop if it is parked.
            if (resolvePromise) {
                resolvePromise();
                resolvePromise = null;
            }
        };
        const errorListener = () => {
            errorQueue.push(new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.'));
            if (resolvePromise) {
                resolvePromise();
                resolvePromise = null;
            }
        };
        const closeListener = (event) => {
            if (event.reason) {
                logger.warn(`WebSocket connection closed by the server with reason: ${event.reason}`);
            }
            isClosed = true;
            if (resolvePromise) {
                resolvePromise();
                resolvePromise = null;
            }
            // Clean up listeners to prevent memory leaks
            this.ws?.removeEventListener('message', messageListener);
            this.ws?.removeEventListener('close', closeListener);
            this.ws?.removeEventListener('error', errorListener);
        };
        this.ws.addEventListener('message', messageListener);
        this.ws.addEventListener('close', closeListener);
        this.ws.addEventListener('error', errorListener);
        while (!isClosed) {
            // Errors take priority over queued messages.
            if (errorQueue.length > 0) {
                const error = errorQueue.shift();
                throw error;
            }
            if (messageQueue.length > 0) {
                yield messageQueue.shift();
            }
            else {
                // Park until a listener resolves this promise.
                await new Promise(resolve => {
                    resolvePromise = resolve;
                });
            }
        }
        // If the loop terminated because isClosed is true, check for any final errors
        if (errorQueue.length > 0) {
            const error = errorQueue.shift();
            throw error;
        }
    }
    /**
     * Closes the socket and resolves once the 'close' event fires (or
     * immediately when closing would be invalid for the current state).
     */
    close(code, reason) {
        return new Promise(resolve => {
            if (!this.ws) {
                return resolve();
            }
            this.ws.addEventListener('close', () => resolve(), { once: true });
            // Calling 'close' during these states results in an error.
            if (this.ws.readyState === WebSocket.CLOSED ||
                this.ws.readyState === WebSocket.CONNECTING) {
                return resolve();
            }
            // Already-CLOSING sockets just wait for the pending 'close' event.
            if (this.ws.readyState !== WebSocket.CLOSING) {
                this.ws.close(code, reason);
            }
        });
    }
}
3501
+
3502
+ /**
3503
+ * @license
3504
+ * Copyright 2025 Google LLC
3505
+ *
3506
+ * Licensed under the Apache License, Version 2.0 (the "License");
3507
+ * you may not use this file except in compliance with the License.
3508
+ * You may obtain a copy of the License at
3509
+ *
3510
+ * http://www.apache.org/licenses/LICENSE-2.0
3511
+ *
3512
+ * Unless required by applicable law or agreed to in writing, software
3513
+ * distributed under the License is distributed on an "AS IS" BASIS,
3514
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3515
+ * See the License for the specific language governing permissions and
3516
+ * limitations under the License.
3517
+ */
3518
+ /**
3519
+ * {@link GenerativeModel} APIs that execute on a server-side template.
3520
+ *
3521
+ * This class should only be instantiated with {@link getTemplateGenerativeModel}.
3522
+ *
3523
+ * @beta
3524
+ */
3525
class TemplateGenerativeModel {
    /**
     * @hideconstructor
     */
    constructor(ai, requestOptions) {
        this.requestOptions = requestOptions || {};
        this._apiSettings = initApiSettings(ai);
    }
    /**
     * Makes a single non-streaming call to the model and returns an object
     * containing a single {@link GenerateContentResponse}.
     *
     * @param templateId - The ID of the server-side template to execute.
     * @param templateVariables - A key-value map of variables to populate the
     * template with.
     *
     * @beta
     */
    async generateContent(templateId, templateVariables, singleRequestOptions) {
        // Per-call options take precedence over the model-level defaults.
        const mergedOptions = {
            ...this.requestOptions,
            ...singleRequestOptions
        };
        return templateGenerateContent(this._apiSettings, templateId, { inputs: templateVariables }, mergedOptions);
    }
    /**
     * Makes a single streaming call to the model and returns an object
     * containing an iterable stream that iterates over all chunks in the
     * streaming response as well as a promise that returns the final aggregated
     * response.
     *
     * @param templateId - The ID of the server-side template to execute.
     * @param templateVariables - A key-value map of variables to populate the
     * template with.
     *
     * @beta
     */
    async generateContentStream(templateId, templateVariables, singleRequestOptions) {
        // Per-call options take precedence over the model-level defaults.
        const mergedOptions = {
            ...this.requestOptions,
            ...singleRequestOptions
        };
        return templateGenerateContentStream(this._apiSettings, templateId, { inputs: templateVariables }, mergedOptions);
    }
}
3568
+
3569
+ /**
3570
+ * @license
3571
+ * Copyright 2025 Google LLC
3572
+ *
3573
+ * Licensed under the Apache License, Version 2.0 (the "License");
3574
+ * you may not use this file except in compliance with the License.
3575
+ * You may obtain a copy of the License at
3576
+ *
3577
+ * http://www.apache.org/licenses/LICENSE-2.0
3578
+ *
3579
+ * Unless required by applicable law or agreed to in writing, software
3580
+ * distributed under the License is distributed on an "AS IS" BASIS,
3581
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3582
+ * See the License for the specific language governing permissions and
3583
+ * limitations under the License.
3584
+ */
3585
+ /**
3586
+ * Class for Imagen model APIs that execute on a server-side template.
3587
+ *
3588
+ * This class should only be instantiated with {@link getTemplateImagenModel}.
3589
+ *
3590
+ * @beta
3591
+ */
3592
class TemplateImagenModel {
    /**
     * @hideconstructor
     */
    constructor(ai, requestOptions) {
        this.requestOptions = requestOptions || {};
        this._apiSettings = initApiSettings(ai);
    }
    /**
     * Makes a single call to the model and returns an object containing a single
     * {@link ImagenGenerationResponse}.
     *
     * @param templateId - The ID of the server-side template to execute.
     * @param templateVariables - A key-value map of variables to populate the
     * template with.
     *
     * @beta
     */
    async generateImages(templateId, templateVariables, singleRequestOptions) {
        // Per-call options take precedence over the model-level defaults.
        const mergedOptions = {
            ...this.requestOptions,
            ...singleRequestOptions
        };
        const payload = JSON.stringify({ inputs: templateVariables });
        const response = await makeRequest({
            task: "templatePredict" /* ServerPromptTemplateTask.TEMPLATE_PREDICT */,
            templateId,
            apiSettings: this._apiSettings,
            stream: false,
            singleRequestOptions: mergedOptions
        }, payload);
        return handlePredictResponse(response);
    }
}
3624
+
3625
+ /**
3626
+ * @license
3627
+ * Copyright 2024 Google LLC
3628
+ *
3629
+ * Licensed under the Apache License, Version 2.0 (the "License");
3630
+ * you may not use this file except in compliance with the License.
3631
+ * You may obtain a copy of the License at
3632
+ *
3633
+ * http://www.apache.org/licenses/LICENSE-2.0
3634
+ *
3635
+ * Unless required by applicable law or agreed to in writing, software
3636
+ * distributed under the License is distributed on an "AS IS" BASIS,
3637
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3638
+ * See the License for the specific language governing permissions and
3639
+ * limitations under the License.
3640
+ */
3641
+ /**
3642
+ * Parent class encompassing all Schema types, with static methods that
3643
+ * allow building specific Schema types. This class can be converted with
3644
+ * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.
3645
+ * (This string conversion is automatically done when calling SDK methods.)
3646
+ * @public
3647
+ */
3648
class Schema {
    /**
     * Builds a schema from the given params. Every provided key is copied onto
     * the instance; `type`, `format`, and `nullable` are then set explicitly.
     *
     * @throws If neither `type` nor `anyOf` is provided.
     */
    constructor(schemaParams) {
        // TODO(dlarocque): Enforce this with union types
        if (!schemaParams.type && !schemaParams.anyOf) {
            throw new AIError(AIErrorCode.INVALID_SCHEMA, "A schema must have either a 'type' or an 'anyOf' array of sub-schemas.");
        }
        // Copy every supplied param onto this instance.
        // eslint-disable-next-line guard-for-in
        for (const key in schemaParams) {
            this[key] = schemaParams[key];
        }
        // Ensure these are explicitly set to avoid TS errors.
        this.type = schemaParams.type;
        this.format = Object.prototype.hasOwnProperty.call(schemaParams, 'format')
            ? schemaParams.format
            : undefined;
        this.nullable = Object.prototype.hasOwnProperty.call(schemaParams, 'nullable')
            ? !!schemaParams.nullable
            : false;
    }
    /**
     * Defines how this Schema should be serialized as JSON.
     * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior
     * @internal
     */
    toJSON() {
        const serialized = { type: this.type };
        for (const [key, value] of Object.entries(this)) {
            if (value === undefined) {
                continue;
            }
            // 'required' is only meaningful on object schemas.
            if (key === 'required' && this.type !== SchemaType.OBJECT) {
                continue;
            }
            serialized[key] = value;
        }
        return serialized;
    }
    static array(arrayParams) {
        return new ArraySchema(arrayParams, arrayParams.items);
    }
    static object(objectParams) {
        return new ObjectSchema(objectParams, objectParams.properties, objectParams.optionalProperties);
    }
    // eslint-disable-next-line id-blacklist
    static string(stringParams) {
        return new StringSchema(stringParams);
    }
    static enumString(stringParams) {
        return new StringSchema(stringParams, stringParams.enum);
    }
    static integer(integerParams) {
        return new IntegerSchema(integerParams);
    }
    // eslint-disable-next-line id-blacklist
    static number(numberParams) {
        return new NumberSchema(numberParams);
    }
    // eslint-disable-next-line id-blacklist
    static boolean(booleanParams) {
        return new BooleanSchema(booleanParams);
    }
    static anyOf(anyOfParams) {
        return new AnyOfSchema(anyOfParams);
    }
}
3713
+ /**
3714
+ * Schema class for "integer" types.
3715
+ * @public
3716
+ */
3717
/**
 * Schema class for "integer" types.
 * @public
 */
class IntegerSchema extends Schema {
    constructor(schemaParams) {
        // Caller params may override nothing here except extras; type is fixed.
        super(Object.assign({ type: SchemaType.INTEGER }, schemaParams));
    }
}
3725
+ /**
3726
+ * Schema class for "number" types.
3727
+ * @public
3728
+ */
3729
/**
 * Schema class for "number" types.
 * @public
 */
class NumberSchema extends Schema {
    constructor(schemaParams) {
        // Caller params may override nothing here except extras; type is fixed.
        super(Object.assign({ type: SchemaType.NUMBER }, schemaParams));
    }
}
3737
+ /**
3738
+ * Schema class for "boolean" types.
3739
+ * @public
3740
+ */
3741
/**
 * Schema class for "boolean" types.
 * @public
 */
class BooleanSchema extends Schema {
    constructor(schemaParams) {
        // Caller params may override nothing here except extras; type is fixed.
        super(Object.assign({ type: SchemaType.BOOLEAN }, schemaParams));
    }
}
3749
+ /**
3750
+ * Schema class for "string" types. Can be used with or without
3751
+ * enum values.
3752
+ * @public
3753
+ */
3754
/**
 * Schema class for "string" types. Can be used with or without
 * enum values.
 * @public
 */
class StringSchema extends Schema {
    constructor(schemaParams, enumValues) {
        super(Object.assign({ type: SchemaType.STRING }, schemaParams));
        this.enum = enumValues;
    }
    /**
     * Adds the enum values (when present) to the serialized form.
     * @internal
     */
    toJSON() {
        const serialized = super.toJSON();
        if (this.enum) {
            serialized['enum'] = this.enum;
        }
        return serialized;
    }
}
3773
+ /**
3774
+ * Schema class for "array" types.
3775
+ * The `items` param should refer to the type of item that can be a member
3776
+ * of the array.
3777
+ * @public
3778
+ */
3779
/**
 * Schema class for "array" types.
 * The `items` param should refer to the type of item that can be a member
 * of the array.
 * @public
 */
class ArraySchema extends Schema {
    constructor(schemaParams, items) {
        super(Object.assign({ type: SchemaType.ARRAY }, schemaParams));
        this.items = items;
    }
    /**
     * Serializes the element schema recursively.
     * @internal
     */
    toJSON() {
        const serialized = super.toJSON();
        serialized.items = this.items.toJSON();
        return serialized;
    }
}
3796
+ /**
3797
+ * Schema class for "object" types.
3798
+ * The `properties` param must be a map of `Schema` objects.
3799
+ * @public
3800
+ */
3801
/**
 * Schema class for "object" types.
 * The `properties` param must be a map of `Schema` objects.
 * @public
 */
class ObjectSchema extends Schema {
    constructor(schemaParams, properties, optionalProperties = []) {
        super(Object.assign({ type: SchemaType.OBJECT }, schemaParams));
        this.properties = properties;
        this.optionalProperties = optionalProperties;
    }
    /**
     * Serializes each property schema, computes the `required` list (every
     * property not listed in `optionalProperties`), and strips the internal
     * `optionalProperties` field from the output.
     *
     * @throws If an optional property name does not exist in `properties`.
     * @internal
     */
    toJSON() {
        const serialized = super.toJSON();
        serialized.properties = { ...this.properties };
        const requiredKeys = [];
        // Validate that every declared optional property actually exists.
        if (this.optionalProperties) {
            for (const optionalKey of this.optionalProperties) {
                if (!this.properties.hasOwnProperty(optionalKey)) {
                    throw new AIError(AIErrorCode.INVALID_SCHEMA, `Property "${optionalKey}" specified in "optionalProperties" does not exist.`);
                }
            }
        }
        for (const propertyKey of Object.keys(this.properties)) {
            serialized.properties[propertyKey] = this.properties[propertyKey].toJSON();
            if (!this.optionalProperties.includes(propertyKey)) {
                requiredKeys.push(propertyKey);
            }
        }
        if (requiredKeys.length > 0) {
            serialized.required = requiredKeys;
        }
        // Internal bookkeeping only — never part of the wire format.
        delete serialized.optionalProperties;
        return serialized;
    }
}
3839
+ /**
3840
+ * Schema class representing a value that can conform to any of the provided sub-schemas. This is
3841
+ * useful when a field can accept multiple distinct types or structures.
3842
+ * @public
3843
+ */
3844
/**
 * Schema class representing a value that can conform to any of the provided sub-schemas. This is
 * useful when a field can accept multiple distinct types or structures.
 * @public
 */
class AnyOfSchema extends Schema {
    constructor(schemaParams) {
        if (schemaParams.anyOf.length === 0) {
            throw new AIError(AIErrorCode.INVALID_SCHEMA, "The 'anyOf' array must not be empty.");
        }
        super({
            ...schemaParams,
            type: undefined // anyOf schemas do not have an explicit type
        });
        this.anyOf = schemaParams.anyOf;
    }
    /**
     * Serializes every sub-schema so the output contains plain SchemaRequest
     * objects rather than Schema instances.
     * @internal
     */
    toJSON() {
        const serialized = super.toJSON();
        if (Array.isArray(this.anyOf)) {
            serialized.anyOf = this.anyOf.map(subSchema => subSchema.toJSON());
        }
        return serialized;
    }
}
3867
+
3868
+ /**
3869
+ * @license
3870
+ * Copyright 2025 Google LLC
3871
+ *
3872
+ * Licensed under the Apache License, Version 2.0 (the "License");
3873
+ * you may not use this file except in compliance with the License.
3874
+ * You may obtain a copy of the License at
3875
+ *
3876
+ * http://www.apache.org/licenses/LICENSE-2.0
3877
+ *
3878
+ * Unless required by applicable law or agreed to in writing, software
3879
+ * distributed under the License is distributed on an "AS IS" BASIS,
3880
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3881
+ * See the License for the specific language governing permissions and
3882
+ * limitations under the License.
3883
+ */
3884
+ /**
3885
+ * Defines the image format for images generated by Imagen.
3886
+ *
3887
+ * Use this class to specify the desired format (JPEG or PNG) and compression quality
3888
+ * for images generated by Imagen. This is typically included as part of
3889
+ * {@link ImagenModelParams}.
3890
+ *
3891
+ * @example
3892
+ * ```javascript
3893
+ * const imagenModelParams = {
3894
+ * // ... other ImagenModelParams
3895
+ * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.
3896
+ * }
3897
+ * ```
3898
+ *
3899
+ * @public
3900
+ */
3901
class ImagenImageFormat {
    constructor() {
        // Default format when none is specified.
        this.mimeType = 'image/png';
    }
    /**
     * Creates an {@link ImagenImageFormat} for a JPEG image.
     *
     * @param compressionQuality - The level of compression (a number between 0 and 100).
     * Out-of-range values are passed through but logged as a warning.
     * @returns An {@link ImagenImageFormat} object for a JPEG image.
     *
     * @public
     */
    static jpeg(compressionQuality) {
        // Use a nullish check (not truthiness) so 0 — a valid quality — is
        // range-checked rather than skipped.
        if (compressionQuality != null &&
            (compressionQuality < 0 || compressionQuality > 100)) {
            logger.warn(`Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`);
        }
        return { mimeType: 'image/jpeg', compressionQuality };
    }
    /**
     * Creates an {@link ImagenImageFormat} for a PNG image.
     *
     * @returns An {@link ImagenImageFormat} object for a PNG image.
     *
     * @public
     */
    static png() {
        return { mimeType: 'image/png' };
    }
}
3931
+
3932
+ /**
3933
+ * @license
3934
+ * Copyright 2025 Google LLC
3935
+ *
3936
+ * Licensed under the Apache License, Version 2.0 (the "License");
3937
+ * you may not use this file except in compliance with the License.
3938
+ * You may obtain a copy of the License at
3939
+ *
3940
+ * http://www.apache.org/licenses/LICENSE-2.0
3941
+ *
3942
+ * Unless required by applicable law or agreed to in writing, software
3943
+ * distributed under the License is distributed on an "AS IS" BASIS,
3944
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3945
+ * See the License for the specific language governing permissions and
3946
+ * limitations under the License.
3947
+ */
3948
// Audio sent to the server must be 16 kHz PCM; audio received back is 24 kHz PCM.
const SERVER_INPUT_SAMPLE_RATE = 16000;
const SERVER_OUTPUT_SAMPLE_RATE = 24000;
// Registration name used for the AudioWorkletProcessor defined in the string below.
const AUDIO_PROCESSOR_NAME = 'audio-processor';
/**
 * The JS for an `AudioWorkletProcessor`.
 * This processor is responsible for taking raw audio from the microphone,
 * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.
 *
 * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor
 *
 * It is defined as a string here so that it can be converted into a `Blob`
 * and loaded at runtime.
 */
const audioProcessorWorkletString = `
  class AudioProcessor extends AudioWorkletProcessor {
    constructor(options) {
      super();
      this.targetSampleRate = options.processorOptions.targetSampleRate;
      // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,
      // representing the native sample rate of the AudioContext.
      this.inputSampleRate = sampleRate;
    }

    /**
     * This method is called by the browser's audio engine for each block of audio data.
     * Input is a single input, with a single channel (input[0][0]).
     */
    process(inputs) {
      const input = inputs[0];
      if (input && input.length > 0 && input[0].length > 0) {
        const pcmData = input[0]; // Float32Array of raw audio samples.

        // Simple linear interpolation for resampling.
        const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate / this.inputSampleRate));
        const ratio = pcmData.length / resampled.length;
        for (let i = 0; i < resampled.length; i++) {
          resampled[i] = pcmData[Math.floor(i * ratio)];
        }

        // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)
        const resampledInt16 = new Int16Array(resampled.length);
        for (let i = 0; i < resampled.length; i++) {
          const sample = Math.max(-1, Math.min(1, resampled[i]));
          if (sample < 0) {
            resampledInt16[i] = sample * 32768;
          } else {
            resampledInt16[i] = sample * 32767;
          }
        }

        this.port.postMessage(resampledInt16);
      }
      // Return true to keep the processor alive and processing the next audio block.
      return true;
    }
  }

  // Register the processor with a name that can be used to instantiate it from the main thread.
  registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);
`;
4008
+ /**
4009
+ * Encapsulates the core logic of an audio conversation.
4010
+ *
4011
+ * @internal
4012
+ */
4013
+ class AudioConversationRunner {
4014
+ constructor(liveSession, options, deps) {
4015
+ this.liveSession = liveSession;
4016
+ this.options = options;
4017
+ this.deps = deps;
4018
+ /** A flag to indicate if the conversation has been stopped. */
4019
+ this.isStopped = false;
4020
+ /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */
4021
+ this.stopDeferred = new Deferred();
4022
+ /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */
4023
+ this.playbackQueue = [];
4024
+ /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */
4025
+ this.scheduledSources = [];
4026
+ /** A high-precision timeline pointer for scheduling gapless audio playback. */
4027
+ this.nextStartTime = 0;
4028
+ /** A mutex to prevent the playback processing loop from running multiple times concurrently. */
4029
+ this.isPlaybackLoopRunning = false;
4030
+ this.liveSession.inConversation = true;
4031
+ // Start listening for messages from the server.
4032
+ this.receiveLoopPromise = this.runReceiveLoop().finally(() => this.cleanup());
4033
+ // Set up the handler for receiving processed audio data from the worklet.
4034
+ // Message data has been resampled to 16kHz 16-bit PCM.
4035
+ this.deps.workletNode.port.onmessage = event => {
4036
+ if (this.isStopped) {
4037
+ return;
4038
+ }
4039
+ const pcm16 = event.data;
4040
+ const base64 = btoa(String.fromCharCode.apply(null, Array.from(new Uint8Array(pcm16.buffer))));
4041
+ const chunk = {
4042
+ mimeType: 'audio/pcm',
4043
+ data: base64
4044
+ };
4045
+ void this.liveSession.sendAudioRealtime(chunk);
4046
+ };
4047
+ }
4048
+ /**
4049
+ * Stops the conversation and unblocks the main receive loop.
4050
+ */
4051
+ async stop() {
4052
+ if (this.isStopped) {
4053
+ return;
4054
+ }
4055
+ this.isStopped = true;
4056
+ this.stopDeferred.resolve(); // Unblock the receive loop
4057
+ await this.receiveLoopPromise; // Wait for the loop and cleanup to finish
4058
+ }
4059
+ /**
4060
+ * Cleans up all audio resources (nodes, stream tracks, context) and marks the
4061
+ * session as no longer in a conversation.
4062
+ */
4063
+ cleanup() {
4064
+ this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.
4065
+ this.deps.workletNode.port.onmessage = null;
4066
+ this.deps.workletNode.disconnect();
4067
+ this.deps.sourceNode.disconnect();
4068
+ this.deps.mediaStream.getTracks().forEach(track => track.stop());
4069
+ if (this.deps.audioContext.state !== 'closed') {
4070
+ void this.deps.audioContext.close();
4071
+ }
4072
+ this.liveSession.inConversation = false;
4073
+ }
4074
+ /**
4075
+ * Adds audio data to the queue and ensures the playback loop is running.
4076
+ */
4077
+ enqueueAndPlay(audioData) {
4078
+ this.playbackQueue.push(audioData);
4079
+ // Will no-op if it's already running.
4080
+ void this.processPlaybackQueue();
4081
+ }
4082
+ /**
4083
+ * Stops all current and pending audio playback and clears the queue. This is
4084
+ * called when the server indicates the model's speech was interrupted with
4085
+ * `LiveServerContent.modelTurn.interrupted`.
4086
+ */
4087
+ interruptPlayback() {
4088
+ // Stop all sources that have been scheduled. The onended event will fire for each,
4089
+ // which will clean up the scheduledSources array.
4090
+ [...this.scheduledSources].forEach(source => source.stop(0));
4091
+ // Clear the internal buffer of unprocessed audio chunks.
4092
+ this.playbackQueue.length = 0;
4093
+ // Reset the playback clock to start fresh.
4094
+ this.nextStartTime = this.deps.audioContext.currentTime;
4095
+ }
4096
/**
 * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.
 *
 * Only one instance of this loop runs at a time (guarded by
 * `isPlaybackLoopRunning`); enqueueAndPlay may invoke it repeatedly while
 * it is already draining. The loop exits early once the conversation has
 * been stopped.
 */
async processPlaybackQueue() {
    if (this.isPlaybackLoopRunning) {
        return;
    }
    this.isPlaybackLoopRunning = true;
    while (this.playbackQueue.length > 0 && !this.isStopped) {
        const pcmRawBuffer = this.playbackQueue.shift();
        try {
            // Server audio is 16-bit mono PCM at SERVER_OUTPUT_SAMPLE_RATE.
            const pcm16 = new Int16Array(pcmRawBuffer);
            const frameCount = pcm16.length;
            const audioBuffer = this.deps.audioContext.createBuffer(1, frameCount, SERVER_OUTPUT_SAMPLE_RATE);
            // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.
            const channelData = audioBuffer.getChannelData(0);
            for (let i = 0; i < frameCount; i++) {
                channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]
            }
            const source = this.deps.audioContext.createBufferSource();
            source.buffer = audioBuffer;
            source.connect(this.deps.audioContext.destination);
            // Track the source and set up a handler to remove it from tracking when it finishes.
            // interruptPlayback() relies on this array to stop in-flight audio.
            this.scheduledSources.push(source);
            source.onended = () => {
                this.scheduledSources = this.scheduledSources.filter(s => s !== source);
            };
            // To prevent gaps, schedule the next chunk to start either now (if we're catching up)
            // or exactly when the previous chunk is scheduled to end.
            this.nextStartTime = Math.max(this.deps.audioContext.currentTime, this.nextStartTime);
            source.start(this.nextStartTime);
            // Update the schedule for the *next* chunk.
            this.nextStartTime += audioBuffer.duration;
        }
        catch (e) {
            // A bad chunk is logged and skipped so later chunks still play.
            logger.error('Error playing audio:', e);
        }
    }
    this.isPlaybackLoopRunning = false;
}
4136
/**
 * The main loop that listens for and processes messages from the server.
 *
 * Runs until stop() is called or the server stream ends. Each iteration
 * races the next server message against the stop signal so stop() can
 * unblock a pending receive immediately.
 */
async runReceiveLoop() {
    const messageGenerator = this.liveSession.receive();
    while (!this.isStopped) {
        // stopDeferred.promise resolves with undefined, so `result` is
        // falsy when stop() won the race.
        const result = await Promise.race([
            messageGenerator.next(),
            this.stopDeferred.promise
        ]);
        if (this.isStopped || !result || result.done) {
            break;
        }
        const message = result.value;
        if (message.type === 'serverContent') {
            const serverContent = message;
            // Server flags an interruption (e.g. user barge-in); drop any
            // audio that is still playing or queued.
            if (serverContent.interrupted) {
                this.interruptPlayback();
            }
            // Only the first audio part of the turn is played here.
            const audioPart = serverContent.modelTurn?.parts.find(part => part.inlineData?.mimeType.startsWith('audio/'));
            if (audioPart?.inlineData) {
                // Inline audio arrives base64-encoded; decode to raw PCM bytes.
                const audioData = Uint8Array.from(atob(audioPart.inlineData.data), c => c.charCodeAt(0)).buffer;
                this.enqueueAndPlay(audioData);
            }
        }
        else if (message.type === 'toolCall') {
            if (!this.options.functionCallingHandler) {
                logger.warn('Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. Ignoring tool call.');
            }
            else {
                try {
                    const functionResponse = await this.options.functionCallingHandler(message.functionCalls);
                    if (!this.isStopped) {
                        // Fire-and-forget; the server replies via the stream.
                        void this.liveSession.sendFunctionResponses([functionResponse]);
                    }
                }
                catch (e) {
                    // NOTE(review): rethrowing rejects receiveLoopPromise and,
                    // via its finally(), triggers cleanup() — the conversation
                    // ends here by design.
                    throw new AIError(AIErrorCode.ERROR, `Function calling handler failed: ${e.message}`);
                }
            }
        }
    }
}
4179
+ }
4180
/**
 * Starts a real-time, bidirectional audio conversation with the model. This helper function manages
 * the complexities of microphone access, audio recording, playback, and interruptions.
 *
 * @remarks Important: This function must be called in response to a user gesture
 * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
 *
 * @example
 * ```javascript
 * const liveSession = await model.connect();
 * let conversationController;
 *
 * // This function must be called from within a click handler.
 * async function startConversation() {
 *   try {
 *     conversationController = await startAudioConversation(liveSession);
 *   } catch (e) {
 *     // Handle AI-specific errors
 *     if (e instanceof AIError) {
 *       console.error("AI Error:", e.message);
 *     }
 *     // Handle microphone permission and hardware errors
 *     else if (e instanceof DOMException) {
 *       console.error("Microphone Error:", e.message);
 *     }
 *     // Handle other unexpected errors
 *     else {
 *       console.error("An unexpected error occurred:", e);
 *     }
 *   }
 * }
 *
 * // Later, to stop the conversation:
 * // if (conversationController) {
 * //   await conversationController.stop();
 * // }
 * ```
 *
 * @param liveSession - An active {@link LiveSession} instance.
 * @param options - Configuration options for the audio conversation.
 * @returns A `Promise` that resolves with an {@link AudioConversationController}.
 * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
 * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
 *
 * @beta
 */
async function startAudioConversation(liveSession, options = {}) {
    if (liveSession.isClosed) {
        throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot start audio conversation on a closed LiveSession.');
    }
    if (liveSession.inConversation) {
        throw new AIError(AIErrorCode.REQUEST_ERROR, 'An audio conversation is already in progress for this session.');
    }
    // Check for necessary Web API support.
    if (typeof AudioWorkletNode === 'undefined' ||
        typeof AudioContext === 'undefined' ||
        typeof navigator === 'undefined' ||
        !navigator.mediaDevices) {
        throw new AIError(AIErrorCode.UNSUPPORTED, 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.');
    }
    let audioContext;
    let mediaStream;
    try {
        // 1. Set up the audio context. This must be in response to a user gesture.
        // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy
        audioContext = new AudioContext();
        if (audioContext.state === 'suspended') {
            await audioContext.resume();
        }
        // 2. Prompt for microphone access and get the media stream.
        // This can throw a variety of permission or hardware-related errors.
        mediaStream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });
        // 3. Load the AudioWorklet processor.
        // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet
        const workletBlob = new Blob([audioProcessorWorkletString], {
            type: 'application/javascript'
        });
        const workletURL = URL.createObjectURL(workletBlob);
        try {
            await audioContext.audioWorklet.addModule(workletURL);
        }
        finally {
            // Fix: revoke the temporary Blob URL once the module has been
            // fetched; otherwise it stays registered for the page's lifetime.
            URL.revokeObjectURL(workletURL);
        }
        // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node
        const sourceNode = audioContext.createMediaStreamSource(mediaStream);
        const workletNode = new AudioWorkletNode(audioContext, AUDIO_PROCESSOR_NAME, {
            processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }
        });
        sourceNode.connect(workletNode);
        // 5. Instantiate and return the runner which manages the conversation.
        const runner = new AudioConversationRunner(liveSession, options, {
            audioContext,
            mediaStream,
            sourceNode,
            workletNode
        });
        return { stop: () => runner.stop() };
    }
    catch (e) {
        // Fix: stop any microphone tracks acquired before the failure.
        // Closing the AudioContext alone does not release the microphone.
        mediaStream?.getTracks().forEach(track => track.stop());
        // Ensure the audio context is closed on any setup error.
        if (audioContext && audioContext.state !== 'closed') {
            void audioContext.close();
        }
        // Re-throw specific, known error types directly. The user may want to handle `DOMException`
        // errors differently (for example, if permission to access audio device was denied).
        if (e instanceof AIError || e instanceof DOMException) {
            throw e;
        }
        // Wrap any other unexpected errors in a standard AIError.
        throw new AIError(AIErrorCode.ERROR, `Failed to initialize audio recording: ${e.message}`);
    }
}
4289
+
4290
+ /**
4291
+ * @license
4292
+ * Copyright 2024 Google LLC
4293
+ *
4294
+ * Licensed under the Apache License, Version 2.0 (the "License");
4295
+ * you may not use this file except in compliance with the License.
4296
+ * You may obtain a copy of the License at
4297
+ *
4298
+ * http://www.apache.org/licenses/LICENSE-2.0
4299
+ *
4300
+ * Unless required by applicable law or agreed to in writing, software
4301
+ * distributed under the License is distributed on an "AS IS" BASIS,
4302
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4303
+ * See the License for the specific language governing permissions and
4304
+ * limitations under the License.
4305
+ */
4306
/**
 * Returns the default {@link AI} instance that is associated with the provided
 * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
 * default settings.
 *
 * @example
 * ```javascript
 * const ai = getAI(app);
 * ```
 *
 * @example
 * ```javascript
 * // Get an AI instance configured to use the Gemini Developer API (via Google AI).
 * const ai = getAI(app, { backend: new GoogleAIBackend() });
 * ```
 *
 * @example
 * ```javascript
 * // Get an AI instance configured to use the Vertex AI Gemini API.
 * const ai = getAI(app, { backend: new VertexAIBackend() });
 * ```
 *
 * @param app - The {@link @firebase/app#FirebaseApp} to use.
 * @param options - {@link AIOptions} that configure the AI instance.
 * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
 *
 * @public
 */
function getAI(app = getApp(), options) {
    const resolvedApp = getModularInstance(app);
    const provider = _getProvider(resolvedApp, AI_TYPE);
    // The Gemini Developer API (Google AI) backend is the default.
    const backend = options?.backend ?? new GoogleAIBackend();
    // Each backend maps to its own component instance via the identifier.
    const identifier = encodeInstanceIdentifier(backend);
    const aiInstance = provider.getImmediate({
        identifier
    });
    aiInstance.options = {
        useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false
    };
    return aiInstance;
}
4349
/**
 * Returns a {@link GenerativeModel} class with methods for inference
 * and other functionality.
 *
 * @public
 */
function getGenerativeModel(ai, modelParams, requestOptions) {
    // The presence of HybridParams.mode distinguishes hybrid (on-device +
    // in-cloud) params from plain ModelParams.
    const hybridParams = modelParams;
    const inCloudParams = hybridParams.mode
        ? hybridParams.inCloudParams || {
            model: DEFAULT_HYBRID_IN_CLOUD_MODEL
        }
        : modelParams;
    if (!inCloudParams.model) {
        throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`);
    }
    /**
     * An AIService registered by index.node.ts will not have a
     * chromeAdapterFactory() method, hence the optional call.
     */
    const chromeAdapter = ai.chromeAdapterFactory?.(
        hybridParams.mode,
        typeof window === 'undefined' ? undefined : window,
        hybridParams.onDeviceParams
    );
    const model = new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);
    model._apiSettings.inferenceMode = hybridParams.mode;
    return model;
}
4379
/**
 * Returns an {@link ImagenModel} class with methods for using Imagen.
 *
 * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
 *
 * @param ai - An {@link AI} instance.
 * @param modelParams - Parameters to use when making Imagen requests.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @throws If the `apiKey` or `projectId` fields are missing in your
 * Firebase config.
 *
 * @public
 */
function getImagenModel(ai, modelParams, requestOptions) {
    const { model } = modelParams;
    // A model name is mandatory; fail fast with a usage example.
    if (!model) {
        throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`);
    }
    return new ImagenModel(ai, modelParams, requestOptions);
}
4399
/**
 * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
 *
 * The Live API is only supported in modern browser windows and Node >= 22.
 *
 * @param ai - An {@link AI} instance.
 * @param modelParams - Parameters to use when setting up a {@link LiveSession}.
 * @throws If the `apiKey` or `projectId` fields are missing in your
 * Firebase config.
 *
 * @beta
 */
function getLiveGenerativeModel(ai, modelParams) {
    // A model name is mandatory; fail fast with a usage example.
    if (!modelParams.model) {
        throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`);
    }
    // Each model gets its own WebSocket handler for the live connection.
    return new LiveGenerativeModel(ai, modelParams, new WebSocketHandlerImpl());
}
4418
/**
 * Returns a {@link TemplateGenerativeModel} class for executing server-side
 * templates.
 *
 * No model name is required here: the template executed on the server
 * determines the model configuration.
 *
 * @param ai - An {@link AI} instance.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @beta
 */
function getTemplateGenerativeModel(ai, requestOptions) {
    return new TemplateGenerativeModel(ai, requestOptions);
}
4430
/**
 * Returns a {@link TemplateImagenModel} class for executing server-side
 * Imagen templates.
 *
 * No model name is required here: the template executed on the server
 * determines the model configuration.
 *
 * @param ai - An {@link AI} instance.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @beta
 */
function getTemplateImagenModel(ai, requestOptions) {
    return new TemplateImagenModel(ai, requestOptions);
}
4442
+
4443
+ /**
4444
+ * The Firebase AI Web SDK.
4445
+ *
4446
+ * @packageDocumentation
4447
+ */
4448
// Registers the AI component with the Firebase component framework and
// records SDK version metadata. Invoked once at module load (below).
function registerAI() {
    // Multiple instances are allowed so each backend (identified by
    // encodeInstanceIdentifier) gets its own AI service instance.
    _registerComponent(new Component(AI_TYPE, factory, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true));
    registerVersion(name, version, 'node');
    // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation
    // NOTE(review): both registerVersion calls appear to be build-time
    // artifacts of the Node ESM bundle target — confirm against the
    // pre-compilation source before changing.
    registerVersion(name, version, 'esm2020');
}
registerAI();
4455
+
4456
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, TemplateGenerativeModel, TemplateImagenModel, ThinkingLevel, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, getTemplateGenerativeModel, getTemplateImagenModel, startAudioConversation };
4457
+ //# sourceMappingURL=index.node.mjs.map