@depup/firebase__ai 2.9.0-depup.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. package/README.md +31 -0
  2. package/changes.json +10 -0
  3. package/dist/ai-public.d.ts +3472 -0
  4. package/dist/ai.d.ts +3712 -0
  5. package/dist/esm/index.esm.js +4765 -0
  6. package/dist/esm/index.esm.js.map +1 -0
  7. package/dist/esm/package.json +1 -0
  8. package/dist/esm/src/api.d.ts +121 -0
  9. package/dist/esm/src/backend.d.ts +98 -0
  10. package/dist/esm/src/constants.d.ts +29 -0
  11. package/dist/esm/src/errors.d.ts +35 -0
  12. package/dist/esm/src/factory-browser.d.ts +19 -0
  13. package/dist/esm/src/factory-node.d.ts +19 -0
  14. package/dist/esm/src/googleai-mappers.d.ts +73 -0
  15. package/dist/esm/src/helpers.d.ts +30 -0
  16. package/dist/esm/src/index.d.ts +13 -0
  17. package/dist/esm/src/index.node.d.ts +7 -0
  18. package/dist/esm/src/logger.d.ts +18 -0
  19. package/dist/esm/src/methods/chat-session-helpers.d.ts +18 -0
  20. package/dist/esm/src/methods/chat-session.d.ts +77 -0
  21. package/dist/esm/src/methods/chrome-adapter.d.ts +124 -0
  22. package/dist/esm/src/methods/count-tokens.d.ts +21 -0
  23. package/dist/esm/src/methods/generate-content.d.ts +25 -0
  24. package/dist/esm/src/methods/live-session-helpers.d.ts +154 -0
  25. package/dist/esm/src/methods/live-session.d.ts +154 -0
  26. package/dist/esm/src/models/ai-model.d.ts +72 -0
  27. package/dist/esm/src/models/generative-model.d.ts +56 -0
  28. package/dist/esm/src/models/imagen-model.d.ts +102 -0
  29. package/dist/esm/src/models/index.d.ts +20 -0
  30. package/dist/esm/src/models/live-generative-model.d.ts +55 -0
  31. package/dist/esm/src/models/template-generative-model.d.ts +64 -0
  32. package/dist/esm/src/models/template-imagen-model.d.ts +51 -0
  33. package/dist/esm/src/models/utils.d.ts +26 -0
  34. package/dist/esm/src/public-types.d.ts +97 -0
  35. package/dist/esm/src/requests/hybrid-helpers.d.ts +33 -0
  36. package/dist/esm/src/requests/imagen-image-format.d.ts +61 -0
  37. package/dist/esm/src/requests/request-helpers.d.ts +28 -0
  38. package/dist/esm/src/requests/request.d.ts +69 -0
  39. package/dist/esm/src/requests/response-helpers.d.ts +57 -0
  40. package/dist/esm/src/requests/schema-builder.d.ts +170 -0
  41. package/dist/esm/src/requests/stream-reader.d.ts +39 -0
  42. package/dist/esm/src/service.d.ts +35 -0
  43. package/dist/esm/src/types/chrome-adapter.d.ts +61 -0
  44. package/dist/esm/src/types/content.d.ts +266 -0
  45. package/dist/esm/src/types/enums.d.ts +419 -0
  46. package/dist/esm/src/types/error.d.ts +89 -0
  47. package/dist/esm/src/types/googleai.d.ts +57 -0
  48. package/dist/esm/src/types/imagen/index.d.ts +18 -0
  49. package/dist/esm/src/types/imagen/internal.d.ts +134 -0
  50. package/dist/esm/src/types/imagen/requests.d.ts +245 -0
  51. package/dist/esm/src/types/imagen/responses.d.ts +79 -0
  52. package/dist/esm/src/types/index.d.ts +26 -0
  53. package/dist/esm/src/types/internal.d.ts +35 -0
  54. package/dist/esm/src/types/language-model.d.ts +107 -0
  55. package/dist/esm/src/types/live-responses.d.ts +79 -0
  56. package/dist/esm/src/types/requests.d.ts +543 -0
  57. package/dist/esm/src/types/responses.d.ts +607 -0
  58. package/dist/esm/src/types/schema.d.ts +139 -0
  59. package/dist/esm/src/websocket.d.ts +67 -0
  60. package/dist/index.cjs.js +4820 -0
  61. package/dist/index.cjs.js.map +1 -0
  62. package/dist/index.node.cjs.js +4512 -0
  63. package/dist/index.node.cjs.js.map +1 -0
  64. package/dist/index.node.mjs +4457 -0
  65. package/dist/index.node.mjs.map +1 -0
  66. package/dist/src/api.d.ts +121 -0
  67. package/dist/src/backend.d.ts +98 -0
  68. package/dist/src/constants.d.ts +29 -0
  69. package/dist/src/errors.d.ts +35 -0
  70. package/dist/src/factory-browser.d.ts +19 -0
  71. package/dist/src/factory-node.d.ts +19 -0
  72. package/dist/src/googleai-mappers.d.ts +73 -0
  73. package/dist/src/helpers.d.ts +30 -0
  74. package/dist/src/index.d.ts +13 -0
  75. package/dist/src/index.node.d.ts +7 -0
  76. package/dist/src/logger.d.ts +18 -0
  77. package/dist/src/methods/chat-session-helpers.d.ts +18 -0
  78. package/dist/src/methods/chat-session.d.ts +77 -0
  79. package/dist/src/methods/chrome-adapter.d.ts +124 -0
  80. package/dist/src/methods/count-tokens.d.ts +21 -0
  81. package/dist/src/methods/generate-content.d.ts +25 -0
  82. package/dist/src/methods/live-session-helpers.d.ts +154 -0
  83. package/dist/src/methods/live-session.d.ts +154 -0
  84. package/dist/src/models/ai-model.d.ts +72 -0
  85. package/dist/src/models/generative-model.d.ts +56 -0
  86. package/dist/src/models/imagen-model.d.ts +102 -0
  87. package/dist/src/models/index.d.ts +20 -0
  88. package/dist/src/models/live-generative-model.d.ts +55 -0
  89. package/dist/src/models/template-generative-model.d.ts +64 -0
  90. package/dist/src/models/template-imagen-model.d.ts +51 -0
  91. package/dist/src/models/utils.d.ts +26 -0
  92. package/dist/src/public-types.d.ts +97 -0
  93. package/dist/src/requests/hybrid-helpers.d.ts +33 -0
  94. package/dist/src/requests/imagen-image-format.d.ts +61 -0
  95. package/dist/src/requests/request-helpers.d.ts +28 -0
  96. package/dist/src/requests/request.d.ts +69 -0
  97. package/dist/src/requests/response-helpers.d.ts +57 -0
  98. package/dist/src/requests/schema-builder.d.ts +170 -0
  99. package/dist/src/requests/stream-reader.d.ts +39 -0
  100. package/dist/src/service.d.ts +35 -0
  101. package/dist/src/tsdoc-metadata.json +11 -0
  102. package/dist/src/types/chrome-adapter.d.ts +61 -0
  103. package/dist/src/types/content.d.ts +266 -0
  104. package/dist/src/types/enums.d.ts +419 -0
  105. package/dist/src/types/error.d.ts +89 -0
  106. package/dist/src/types/googleai.d.ts +57 -0
  107. package/dist/src/types/imagen/index.d.ts +18 -0
  108. package/dist/src/types/imagen/internal.d.ts +134 -0
  109. package/dist/src/types/imagen/requests.d.ts +245 -0
  110. package/dist/src/types/imagen/responses.d.ts +79 -0
  111. package/dist/src/types/index.d.ts +26 -0
  112. package/dist/src/types/internal.d.ts +35 -0
  113. package/dist/src/types/language-model.d.ts +107 -0
  114. package/dist/src/types/live-responses.d.ts +79 -0
  115. package/dist/src/types/requests.d.ts +543 -0
  116. package/dist/src/types/responses.d.ts +607 -0
  117. package/dist/src/types/schema.d.ts +139 -0
  118. package/dist/src/websocket.d.ts +67 -0
  119. package/package.json +106 -0
@@ -0,0 +1,4512 @@
1
'use strict';

Object.defineProperty(exports, '__esModule', { value: true });

// Firebase platform dependencies (CommonJS build output).
var app = require('@firebase/app');
var component = require('@firebase/component');
var util = require('@firebase/util');
var logger$1 = require('@firebase/logger');

// Package identity; `version` is re-exported below as PACKAGE_VERSION.
var name = "@firebase/ai";
var version = "2.9.0";
12
+
13
+ /**
14
+ * @license
15
+ * Copyright 2024 Google LLC
16
+ *
17
+ * Licensed under the Apache License, Version 2.0 (the "License");
18
+ * you may not use this file except in compliance with the License.
19
+ * You may obtain a copy of the License at
20
+ *
21
+ * http://www.apache.org/licenses/LICENSE-2.0
22
+ *
23
+ * Unless required by applicable law or agreed to in writing, software
24
+ * distributed under the License is distributed on an "AS IS" BASIS,
25
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26
+ * See the License for the specific language governing permissions and
27
+ * limitations under the License.
28
+ */
29
// Service identifier; used as the prefix in AIError codes and messages.
const AI_TYPE = 'AI';
// Default region for the Vertex AI backend.
const DEFAULT_LOCATION = 'us-central1';
// Default API endpoint domain for the Vertex AI backend.
const DEFAULT_DOMAIN = 'firebasevertexai.googleapis.com';
// REST API version segment — presumably used in request URLs; confirm at call sites.
const DEFAULT_API_VERSION = 'v1beta';
// SDK version string, taken from the package version above.
const PACKAGE_VERSION = version;
// Client language tag — presumably sent in request headers; confirm at call sites.
const LANGUAGE_TAG = 'gl-js';
// Tag marking hybrid (on-device/in-cloud) inference usage.
const HYBRID_TAG = 'hybrid';
// Default fetch timeout: 180 seconds, expressed in milliseconds.
const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;
/**
 * Defines the name of the default in-cloud model to use for hybrid inference.
 */
const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.5-flash-lite';
41
+
42
+ /**
43
+ * @license
44
+ * Copyright 2024 Google LLC
45
+ *
46
+ * Licensed under the Apache License, Version 2.0 (the "License");
47
+ * you may not use this file except in compliance with the License.
48
+ * You may obtain a copy of the License at
49
+ *
50
+ * http://www.apache.org/licenses/LICENSE-2.0
51
+ *
52
+ * Unless required by applicable law or agreed to in writing, software
53
+ * distributed under the License is distributed on an "AS IS" BASIS,
54
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
55
+ * See the License for the specific language governing permissions and
56
+ * limitations under the License.
57
+ */
58
+ /**
59
+ * Error class for the Firebase AI SDK.
60
+ *
61
+ * @public
62
+ */
63
/**
 * Error class for the Firebase AI SDK.
 *
 * @public
 */
class AIError extends util.FirebaseError {
    /**
     * Constructs a new instance of the `AIError` class.
     *
     * @param code - The error code from {@link (AIErrorCode:type)}.
     * @param message - A human-readable message describing the error.
     * @param customErrorData - Optional error data.
     */
    constructor(code, message, customErrorData) {
        // Mirror the "<service>: <message> (<service>/<code>)" format that
        // FirebaseError instances built via ErrorFactory would produce.
        const formattedMessage = `${AI_TYPE}: ${message} (${AI_TYPE}/${code})`;
        super(code, formattedMessage);
        this.code = code;
        this.customErrorData = customErrorData;
        // FirebaseError initializes its stack trace assuming construction via
        // the error factory; since that assumption is broken here, re-capture
        // the trace so it originates at this constructor. V8-only API, hence
        // the feature check. Passing AIError excludes the constructor frame.
        if (Error.captureStackTrace) {
            Error.captureStackTrace(this, AIError);
        }
        // Restore the prototype chain so `instanceof AIError` works under
        // ES5/ES6 transpilation targets (see the TypeScript wiki entry on
        // extending built-ins). TODO(dlarocque): replace with `new.target`.
        Object.setPrototypeOf(this, AIError.prototype);
        // Error is treated as an interface here, so toString is not inherited;
        // define it explicitly to return the formatted message.
        this.toString = () => formattedMessage;
    }
}
97
+
98
+ /**
99
+ * @license
100
+ * Copyright 2024 Google LLC
101
+ *
102
+ * Licensed under the Apache License, Version 2.0 (the "License");
103
+ * you may not use this file except in compliance with the License.
104
+ * You may obtain a copy of the License at
105
+ *
106
+ * http://www.apache.org/licenses/LICENSE-2.0
107
+ *
108
+ * Unless required by applicable law or agreed to in writing, software
109
+ * distributed under the License is distributed on an "AS IS" BASIS,
110
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
111
+ * See the License for the specific language governing permissions and
112
+ * limitations under the License.
113
+ */
114
/**
 * Possible roles.
 * @public
 */
const POSSIBLE_ROLES = ['user', 'model', 'function', 'system'];
/**
 * Harm categories that would cause prompts or candidates to be blocked.
 * @public
 */
const HarmCategory = {
    HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',
    HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
    HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',
    HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT'
};
/**
 * Threshold above which a prompt or candidate will be blocked.
 * @public
 */
const HarmBlockThreshold = {
    // Content rated NEGLIGIBLE is allowed.
    BLOCK_LOW_AND_ABOVE: 'BLOCK_LOW_AND_ABOVE',
    // Content rated NEGLIGIBLE or LOW is allowed.
    BLOCK_MEDIUM_AND_ABOVE: 'BLOCK_MEDIUM_AND_ABOVE',
    // Content rated NEGLIGIBLE, LOW, or MEDIUM is allowed.
    BLOCK_ONLY_HIGH: 'BLOCK_ONLY_HIGH',
    // All content is allowed.
    BLOCK_NONE: 'BLOCK_NONE',
    // All content is allowed, and the safety metadata for the corresponding
    // {@link (HarmCategory:type)} is omitted from the response.
    OFF: 'OFF'
};
156
/**
 * Specifies how harm blocking is evaluated.
 *
 * This property is not supported in the Gemini Developer API
 * ({@link GoogleAIBackend}).
 *
 * @public
 */
const HarmBlockMethod = {
    // Blocking decisions use both probability and severity scores.
    SEVERITY: 'SEVERITY',
    // Blocking decisions use the probability score only.
    PROBABILITY: 'PROBABILITY'
};
/**
 * Probability that a prompt or candidate matches a harm category.
 * @public
 */
const HarmProbability = {
    NEGLIGIBLE: 'NEGLIGIBLE', // negligible chance of being unsafe
    LOW: 'LOW',               // low chance of being unsafe
    MEDIUM: 'MEDIUM',         // medium chance of being unsafe
    HIGH: 'HIGH'              // high chance of being unsafe
};
/**
 * Harm severity levels.
 * @public
 */
const HarmSeverity = {
    HARM_SEVERITY_NEGLIGIBLE: 'HARM_SEVERITY_NEGLIGIBLE',
    HARM_SEVERITY_LOW: 'HARM_SEVERITY_LOW',
    HARM_SEVERITY_MEDIUM: 'HARM_SEVERITY_MEDIUM',
    HARM_SEVERITY_HIGH: 'HARM_SEVERITY_HIGH',
    // Fallback value: the GoogleAI backend does not support HarmSeverity.
    HARM_SEVERITY_UNSUPPORTED: 'HARM_SEVERITY_UNSUPPORTED'
};
222
/**
 * Reason that a prompt was blocked.
 * @public
 */
const BlockReason = {
    SAFETY: 'SAFETY',                        // blocked by safety settings
    OTHER: 'OTHER',                          // blocked for an uncategorized reason
    BLOCKLIST: 'BLOCKLIST',                  // matched the terminology blocklist
    PROHIBITED_CONTENT: 'PROHIBITED_CONTENT' // contained prohibited content
};
/**
 * Reason that a candidate finished.
 * @public
 */
const FinishReason = {
    STOP: 'STOP',             // natural stop point or provided stop sequence
    MAX_TOKENS: 'MAX_TOKENS', // reached the requested maximum token count
    SAFETY: 'SAFETY',         // candidate flagged for safety reasons
    RECITATION: 'RECITATION', // candidate flagged for recitation reasons
    OTHER: 'OTHER',           // unknown reason
    BLOCKLIST: 'BLOCKLIST',   // candidate contained forbidden terms
    // Candidate potentially contained prohibited content.
    PROHIBITED_CONTENT: 'PROHIBITED_CONTENT',
    // Candidate potentially contained Sensitive Personally Identifiable
    // Information (SPII).
    SPII: 'SPII',
    // The function call generated by the model was invalid.
    MALFORMED_FUNCTION_CALL: 'MALFORMED_FUNCTION_CALL'
};
286
/**
 * Controls how the model may use the provided function declarations.
 * @public
 */
const FunctionCallingMode = {
    // Default behavior: the model decides between a function call and a
    // natural language response.
    AUTO: 'AUTO',
    // The model is constrained to always predict a function call; when
    // `allowed_function_names` is set, the call is limited to that list,
    // otherwise to any of the provided `function_declarations`.
    ANY: 'ANY',
    // The model never predicts a function call — same behavior as passing
    // no function declarations at all.
    NONE: 'NONE'
};
/**
 * Content part modality.
 * @public
 */
const Modality = {
    MODALITY_UNSPECIFIED: 'MODALITY_UNSPECIFIED', // unspecified modality
    TEXT: 'TEXT',        // plain text
    IMAGE: 'IMAGE',      // image
    VIDEO: 'VIDEO',      // video
    AUDIO: 'AUDIO',      // audio
    DOCUMENT: 'DOCUMENT' // document (for example, PDF)
};
/**
 * Generation modalities to be returned in generation responses.
 * @beta
 */
const ResponseModality = {
    TEXT: 'TEXT',   // text (beta)
    IMAGE: 'IMAGE', // image (beta)
    AUDIO: 'AUDIO'  // audio (beta)
};
360
/**
 * Determines whether inference happens on-device or in-cloud.
 *
 * @remarks
 * <b>PREFER_ON_DEVICE:</b> try an on-device model first; if unavailable,
 * fall back to a cloud-hosted model.
 * <br/>
 * <b>ONLY_ON_DEVICE:</b> on-device only, no cloud fallback; inference
 * methods throw when on-device inference is unavailable.
 * <br/>
 * <b>ONLY_IN_CLOUD:</b> cloud-hosted only, no on-device fallback.
 * <br/>
 * <b>PREFER_IN_CLOUD:</b> try a cloud-hosted model first; if unavailable,
 * fall back to an on-device model.
 *
 * @beta
 */
const InferenceMode = {
    PREFER_ON_DEVICE: 'prefer_on_device',
    ONLY_ON_DEVICE: 'only_on_device',
    ONLY_IN_CLOUD: 'only_in_cloud',
    PREFER_IN_CLOUD: 'prefer_in_cloud'
};
/**
 * Indicates whether inference happened on-device or in-cloud.
 * @beta
 */
const InferenceSource = {
    ON_DEVICE: 'on_device',
    IN_CLOUD: 'in_cloud'
};
/**
 * Represents the result of the code execution.
 * @public
 */
const Outcome = {
    UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
    OK: 'OUTCOME_OK',
    FAILED: 'OUTCOME_FAILED',
    DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
};
/**
 * The programming language of the code.
 * @public
 */
const Language = {
    UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
    PYTHON: 'PYTHON'
};
/**
 * A preset that controls the model's "thinking" process. Use
 * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
 * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
 * @public
 */
const ThinkingLevel = {
    MINIMAL: 'MINIMAL',
    LOW: 'LOW',
    MEDIUM: 'MEDIUM',
    HIGH: 'HIGH'
};
429
+
430
+ /**
431
+ * @license
432
+ * Copyright 2024 Google LLC
433
+ *
434
+ * Licensed under the Apache License, Version 2.0 (the "License");
435
+ * you may not use this file except in compliance with the License.
436
+ * You may obtain a copy of the License at
437
+ *
438
+ * http://www.apache.org/licenses/LICENSE-2.0
439
+ *
440
+ * Unless required by applicable law or agreed to in writing, software
441
+ * distributed under the License is distributed on an "AS IS" BASIS,
442
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
443
+ * See the License for the specific language governing permissions and
444
+ * limitations under the License.
445
+ */
446
/**
 * The status of a URL retrieval.
 *
 * @remarks
 * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
 * <br/>
 *
 * @public
 */
const URLRetrievalStatus = {
    URL_RETRIEVAL_STATUS_UNSPECIFIED: 'URL_RETRIEVAL_STATUS_UNSPECIFIED', // unspecified status
    URL_RETRIEVAL_STATUS_SUCCESS: 'URL_RETRIEVAL_STATUS_SUCCESS',         // retrieval succeeded
    URL_RETRIEVAL_STATUS_ERROR: 'URL_RETRIEVAL_STATUS_ERROR',             // retrieval failed
    URL_RETRIEVAL_STATUS_PAYWALL: 'URL_RETRIEVAL_STATUS_PAYWALL',         // content behind a paywall
    URL_RETRIEVAL_STATUS_UNSAFE: 'URL_RETRIEVAL_STATUS_UNSAFE'            // content deemed unsafe
};
/**
 * The types of responses that can be returned by {@link LiveSession.receive}.
 * @beta
 */
const LiveResponseType = {
    SERVER_CONTENT: 'serverContent',
    TOOL_CALL: 'toolCall',
    TOOL_CALL_CANCELLATION: 'toolCallCancellation',
    GOING_AWAY_NOTICE: 'goingAwayNotice'
};
496
+
497
+ /**
498
+ * @license
499
+ * Copyright 2024 Google LLC
500
+ *
501
+ * Licensed under the Apache License, Version 2.0 (the "License");
502
+ * you may not use this file except in compliance with the License.
503
+ * You may obtain a copy of the License at
504
+ *
505
+ * http://www.apache.org/licenses/LICENSE-2.0
506
+ *
507
+ * Unless required by applicable law or agreed to in writing, software
508
+ * distributed under the License is distributed on an "AS IS" BASIS,
509
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
510
+ * See the License for the specific language governing permissions and
511
+ * limitations under the License.
512
+ */
513
/**
 * Standardized error codes that {@link AIError} can have.
 *
 * @public
 */
const AIErrorCode = {
    ERROR: 'error',                     // a generic error occurred
    REQUEST_ERROR: 'request-error',     // an error occurred in a request
    RESPONSE_ERROR: 'response-error',   // an error occurred in a response
    FETCH_ERROR: 'fetch-error',         // an error occurred while performing a fetch
    SESSION_CLOSED: 'session-closed',   // operation attempted on a closed session
    INVALID_CONTENT: 'invalid-content', // an error associated with a Content object
    API_NOT_ENABLED: 'api-not-enabled', // Firebase API not enabled in the Console
    INVALID_SCHEMA: 'invalid-schema',   // invalid Schema input
    NO_API_KEY: 'no-api-key',           // missing Firebase API key
    NO_APP_ID: 'no-app-id',             // missing Firebase app ID
    NO_MODEL: 'no-model',               // model name not specified at initialization
    NO_PROJECT_ID: 'no-project-id',     // missing project ID
    PARSE_FAILED: 'parse-failed',       // an error occurred while parsing
    UNSUPPORTED: 'unsupported'          // attempted to use an unsupported feature
};
548
+
549
+ /**
550
+ * @license
551
+ * Copyright 2024 Google LLC
552
+ *
553
+ * Licensed under the Apache License, Version 2.0 (the "License");
554
+ * you may not use this file except in compliance with the License.
555
+ * You may obtain a copy of the License at
556
+ *
557
+ * http://www.apache.org/licenses/LICENSE-2.0
558
+ *
559
+ * Unless required by applicable law or agreed to in writing, software
560
+ * distributed under the License is distributed on an "AS IS" BASIS,
561
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
562
+ * See the License for the specific language governing permissions and
563
+ * limitations under the License.
564
+ */
565
/**
 * Contains the list of OpenAPI data types as defined by the
 * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
 * @public
 */
const SchemaType = {
    STRING: 'string',   // string type
    NUMBER: 'number',   // number type
    INTEGER: 'integer', // integer type
    BOOLEAN: 'boolean', // boolean type
    ARRAY: 'array',     // array type
    OBJECT: 'object'    // object type
};
585
+
586
+ /**
587
+ * @license
588
+ * Copyright 2025 Google LLC
589
+ *
590
+ * Licensed under the Apache License, Version 2.0 (the "License");
591
+ * you may not use this file except in compliance with the License.
592
+ * You may obtain a copy of the License at
593
+ *
594
+ * http://www.apache.org/licenses/LICENSE-2.0
595
+ *
596
+ * Unless required by applicable law or agreed to in writing, software
597
+ * distributed under the License is distributed on an "AS IS" BASIS,
598
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
599
+ * See the License for the specific language governing permissions and
600
+ * limitations under the License.
601
+ */
602
/**
 * A filter level controlling how aggressively to filter sensitive content.
 *
 * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
 * are assessed against a list of safety filters, which include 'harmful categories' (for example,
 * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
 * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
 * for more details.
 *
 * @public
 */
const ImagenSafetyFilterLevel = {
    /**
     * The most aggressive filtering level; most strict blocking.
     */
    BLOCK_LOW_AND_ABOVE: 'block_low_and_above',
    /**
     * Blocks some sensitive prompts and responses.
     */
    BLOCK_MEDIUM_AND_ABOVE: 'block_medium_and_above',
    /**
     * Blocks few sensitive prompts and responses.
     */
    BLOCK_ONLY_HIGH: 'block_only_high',
    /**
     * The least aggressive filtering level; blocks very few sensitive prompts and responses.
     *
     * Access to this feature is restricted and may require your case to be reviewed and approved by
     * Cloud support.
     */
    BLOCK_NONE: 'block_none'
};
/**
 * A filter level controlling whether generation of images containing people or faces is allowed.
 *
 * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
 * documentation for more details.
 *
 * @public
 */
const ImagenPersonFilterLevel = {
    /**
     * Disallow generation of images containing people or faces; images of people are filtered out.
     */
    BLOCK_ALL: 'dont_allow',
    /**
     * Allow generation of images containing adults only; images of children are filtered out.
     *
     * Generation of images containing people or faces may require your use case to be
     * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
     * for more details.
     */
    ALLOW_ADULT: 'allow_adult',
    /**
     * Allow generation of images containing people of all ages.
     * (The previous doc comment here was an erroneous copy of the `ALLOW_ADULT`
     * description; the `allow_all` value does not filter out images of children.)
     *
     * Generation of images containing people or faces may require your use case to be
     * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
     * for more details.
     */
    ALLOW_ALL: 'allow_all'
};
/**
 * Aspect ratios for Imagen images.
 *
 * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
 * {@link ImagenGenerationConfig}.
 *
 * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * for more details and examples of the supported aspect ratios.
 *
 * NOTE(review): the constant names pair LANDSCAPE with '3:4' and PORTRAIT with
 * '4:3', which is the opposite of the usual orientation terminology; the names
 * are part of the published API surface and cannot be changed here — confirm
 * intended semantics against the Imagen documentation.
 *
 * @public
 */
const ImagenAspectRatio = {
    /** Square (1:1) aspect ratio. */
    SQUARE: '1:1',
    /** 3:4 aspect ratio. */
    LANDSCAPE_3x4: '3:4',
    /** 4:3 aspect ratio. */
    PORTRAIT_4x3: '4:3',
    /** Landscape (16:9) aspect ratio. */
    LANDSCAPE_16x9: '16:9',
    /** Portrait (9:16) aspect ratio. */
    PORTRAIT_9x16: '9:16'
};
698
+
699
+ /**
700
+ * @license
701
+ * Copyright 2024 Google LLC
702
+ *
703
+ * Licensed under the Apache License, Version 2.0 (the "License");
704
+ * you may not use this file except in compliance with the License.
705
+ * You may obtain a copy of the License at
706
+ *
707
+ * http://www.apache.org/licenses/LICENSE-2.0
708
+ *
709
+ * Unless required by applicable law or agreed to in writing, software
710
+ * distributed under the License is distributed on an "AS IS" BASIS,
711
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
712
+ * See the License for the specific language governing permissions and
713
+ * limitations under the License.
714
+ */
715
+ /**
716
+ * An enum-like object containing constants that represent the supported backends
717
+ * for the Firebase AI SDK.
718
+ * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)
719
+ * the SDK will communicate with.
720
+ *
721
+ * These values are assigned to the `backendType` property within the specific backend
722
+ * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify
723
+ * which service to target.
724
+ *
725
+ * @public
726
+ */
727
+ const BackendType = {
728
+ /**
729
+ * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.
730
+ * Use this constant when creating a {@link VertexAIBackend} configuration.
731
+ */
732
+ VERTEX_AI: 'VERTEX_AI',
733
+ /**
734
+ * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).
735
+ * Use this constant when creating a {@link GoogleAIBackend} configuration.
736
+ */
737
+ GOOGLE_AI: 'GOOGLE_AI'
738
+ }; // Using 'as const' makes the string values literal types
739
+
740
+ /**
741
+ * @license
742
+ * Copyright 2025 Google LLC
743
+ *
744
+ * Licensed under the Apache License, Version 2.0 (the "License");
745
+ * you may not use this file except in compliance with the License.
746
+ * You may obtain a copy of the License at
747
+ *
748
+ * http://www.apache.org/licenses/LICENSE-2.0
749
+ *
750
+ * Unless required by applicable law or agreed to in writing, software
751
+ * distributed under the License is distributed on an "AS IS" BASIS,
752
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
753
+ * See the License for the specific language governing permissions and
754
+ * limitations under the License.
755
+ */
756
/**
 * Abstract base class describing which AI service backend the SDK targets.
 *
 * Not intended for direct instantiation — use one of the concrete subclasses:
 * {@link GoogleAIBackend} for the Gemini Developer API
 * (via {@link https://ai.google/ | Google AI}), or {@link VertexAIBackend}
 * for the Vertex AI Gemini API.
 *
 * @public
 */
class Backend {
    /**
     * Protected constructor for use by subclasses.
     * @param type - The backend type (a {@link BackendType} value).
     */
    constructor(type) {
        // Record which service this configuration targets.
        this.backendType = type;
    }
}
773
/**
 * Configuration class for the Gemini Developer API.
 *
 * Pass an instance of this class in {@link AIOptions} when initializing the
 * AI service via {@link getAI | getAI()} to direct the SDK at the Gemini
 * Developer API.
 *
 * @public
 */
class GoogleAIBackend extends Backend {
    /**
     * Creates a configuration object for the Gemini Developer API backend.
     */
    constructor() {
        super(BackendType.GOOGLE_AI);
    }
    /**
     * Builds the REST path for a model request. Unlike the Vertex AI backend,
     * no location segment is included.
     * @internal
     */
    _getModelPath(project, model) {
        return `/${DEFAULT_API_VERSION}/projects/${project}/${model}`;
    }
    /**
     * Builds the REST path for a server-side prompt template request.
     * @internal
     */
    _getTemplatePath(project, templateId) {
        return `/${DEFAULT_API_VERSION}/projects/${project}/templates/${templateId}`;
    }
}
801
/**
 * Configuration class for the Vertex AI Gemini API.
 *
 * Pass an instance of this class in {@link AIOptions} when initializing the
 * AI service via {@link getAI | getAI()} to direct the SDK at the Vertex AI
 * Gemini API.
 *
 * @public
 */
class VertexAIBackend extends Backend {
    /**
     * Creates a configuration object for the Vertex AI backend.
     *
     * @param location - The region identifier, defaulting to `us-central1`;
     * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    constructor(location = DEFAULT_LOCATION) {
        super(BackendType.VERTEX_AI);
        // Explicitly-passed falsy values (e.g. null or '') also fall back to
        // the default region, mirroring the default-parameter behavior above.
        this.location = location || DEFAULT_LOCATION;
    }
    /**
     * Builds the REST path for a model request, including the region segment.
     * @internal
     */
    _getModelPath(project, model) {
        return `/${DEFAULT_API_VERSION}/projects/${project}/locations/${this.location}/${model}`;
    }
    /**
     * Builds the REST path for a server-side prompt template request,
     * including the region segment.
     * @internal
     */
    _getTemplatePath(project, templateId) {
        return `/${DEFAULT_API_VERSION}/projects/${project}/locations/${this.location}/templates/${templateId}`;
    }
}
839
+
840
+ /**
841
+ * @license
842
+ * Copyright 2025 Google LLC
843
+ *
844
+ * Licensed under the Apache License, Version 2.0 (the "License");
845
+ * you may not use this file except in compliance with the License.
846
+ * You may obtain a copy of the License at
847
+ *
848
+ * http://www.apache.org/licenses/LICENSE-2.0
849
+ *
850
+ * Unless required by applicable law or agreed to in writing, software
851
+ * distributed under the License is distributed on an "AS IS" BASIS,
852
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
853
+ * See the License for the specific language governing permissions and
854
+ * limitations under the License.
855
+ */
856
/**
 * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI}
 * instances by backend type.
 *
 * The inverse of {@link decodeInstanceIdentifier}.
 *
 * @internal
 */
function encodeInstanceIdentifier(backend) {
    if (backend instanceof GoogleAIBackend) {
        return `${AI_TYPE}/googleai`;
    }
    if (backend instanceof VertexAIBackend) {
        // Vertex AI identifiers also carry the region, so two instances with
        // different locations get distinct identifiers.
        return `${AI_TYPE}/vertexai/${backend.location}`;
    }
    throw new AIError(AIErrorCode.ERROR, `Invalid backend: ${JSON.stringify(backend.backendType)}`);
}
873
/**
 * Decodes an instance identifier string into a {@link Backend}.
 *
 * The inverse of {@link encodeInstanceIdentifier}.
 *
 * @internal
 */
function decodeInstanceIdentifier(instanceIdentifier) {
    const [prefix, backendKind, location] = instanceIdentifier.split('/');
    if (prefix !== AI_TYPE) {
        throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${prefix}'`);
    }
    if (backendKind === 'vertexai') {
        // Vertex AI identifiers must include a region segment.
        if (!location) {
            throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'`);
        }
        return new VertexAIBackend(location);
    }
    if (backendKind === 'googleai') {
        return new GoogleAIBackend();
    }
    throw new AIError(AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'`);
}
897
+
898
+ /**
899
+ * @license
900
+ * Copyright 2024 Google LLC
901
+ *
902
+ * Licensed under the Apache License, Version 2.0 (the "License");
903
+ * you may not use this file except in compliance with the License.
904
+ * You may obtain a copy of the License at
905
+ *
906
+ * http://www.apache.org/licenses/LICENSE-2.0
907
+ *
908
+ * Unless required by applicable law or agreed to in writing, software
909
+ * distributed under the License is distributed on an "AS IS" BASIS,
910
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
911
+ * See the License for the specific language governing permissions and
912
+ * limitations under the License.
913
+ */
914
/**
 * Internal service object backing an {@link AI} instance. Holds the Firebase
 * app, the selected backend, and — when available — the resolved Auth and
 * App Check instances used to attach tokens to requests.
 */
class AIService {
    constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) {
        this.app = app;
        this.backend = backend;
        this.chromeAdapterFactory = chromeAdapterFactory;
        // Both providers are optional; resolve without throwing and normalize
        // missing instances to null.
        const appCheckInstance = appCheckProvider?.getImmediate({ optional: true }) ?? null;
        const authInstance = authProvider?.getImmediate({ optional: true }) ?? null;
        this.auth = authInstance;
        this.appCheck = appCheckInstance;
        // Only the Vertex AI backend carries a region; other backends use ''.
        this.location = backend instanceof VertexAIBackend ? backend.location : '';
    }
    _delete() {
        // No resources to release when the component is torn down.
        return Promise.resolve();
    }
    set options(optionsToSet) {
        this._options = optionsToSet;
    }
    get options() {
        return this._options;
    }
}
940
+
941
+ /**
942
+ * @license
943
+ * Copyright 2025 Google LLC
944
+ *
945
+ * Licensed under the Apache License, Version 2.0 (the "License");
946
+ * you may not use this file except in compliance with the License.
947
+ * You may obtain a copy of the License at
948
+ *
949
+ * http://www.apache.org/licenses/LICENSE-2.0
950
+ *
951
+ * Unless required by applicable law or agreed to in writing, software
952
+ * distributed under the License is distributed on an "AS IS" BASIS,
953
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
954
+ * See the License for the specific language governing permissions and
955
+ * limitations under the License.
956
+ */
957
/**
 * Component factory that builds an {@link AIService} for a given instance
 * identifier (which encodes the backend to use).
 */
function factory(container, { instanceIdentifier }) {
    if (!instanceIdentifier) {
        throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.');
    }
    const backend = decodeInstanceIdentifier(instanceIdentifier);
    // getImmediate for FirebaseApp will always succeed
    const app = container.getProvider('app').getImmediate();
    const authProvider = container.getProvider('auth-internal');
    const appCheckProvider = container.getProvider('app-check-internal');
    // NOTE(review): no chromeAdapterFactory argument is passed here, so
    // AIService stores `undefined` for it — confirm this shared factory is
    // intentionally adapter-less (platform entry points are not visible here).
    return new AIService(app, backend, authProvider, appCheckProvider);
}
968
+
969
+ /**
970
+ * @license
971
+ * Copyright 2025 Google LLC
972
+ *
973
+ * Licensed under the Apache License, Version 2.0 (the "License");
974
+ * you may not use this file except in compliance with the License.
975
+ * You may obtain a copy of the License at
976
+ *
977
+ * http://www.apache.org/licenses/LICENSE-2.0
978
+ *
979
+ * Unless required by applicable law or agreed to in writing, software
980
+ * distributed under the License is distributed on an "AS IS" BASIS,
981
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
982
+ * See the License for the specific language governing permissions and
983
+ * limitations under the License.
984
+ */
985
/**
 * Initializes an {@link ApiSettings} object from an {@link AI} instance.
 *
 * If this is a Server App, the {@link ApiSettings} object's `getAppCheckToken()` will resolve
 * with the `FirebaseServerAppSettings.appCheckToken`, instead of requiring that an App Check
 * instance is initialized.
 *
 * @throws AIError when the Firebase config is missing the API key, project ID, or app ID.
 */
function initApiSettings(ai) {
    // Validate the Firebase config fields this SDK depends on, one at a time,
    // so each failure surfaces a specific error code.
    const { apiKey, projectId, appId } = ai.app?.options ?? {};
    if (!apiKey) {
        throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`);
    }
    if (!projectId) {
        throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`);
    }
    if (!appId) {
        throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`);
    }
    const apiSettings = {
        apiKey,
        project: projectId,
        appId,
        automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,
        location: ai.location,
        backend: ai.backend
    };
    // Server Apps may carry a pre-minted App Check token; prefer it over a
    // live App Check instance.
    if (app._isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {
        const token = ai.app.settings.appCheckToken;
        apiSettings.getAppCheckToken = () => {
            return Promise.resolve({ token });
        };
    }
    else if (ai.appCheck) {
        apiSettings.getAppCheckToken = ai.options?.useLimitedUseAppCheckTokens
            ? () => ai.appCheck.getLimitedUseToken()
            : () => ai.appCheck.getToken();
    }
    if (ai.auth) {
        apiSettings.getAuthToken = () => ai.auth.getToken();
    }
    return apiSettings;
}
1029
+
1030
+ /**
1031
+ * @license
1032
+ * Copyright 2025 Google LLC
1033
+ *
1034
+ * Licensed under the Apache License, Version 2.0 (the "License");
1035
+ * you may not use this file except in compliance with the License.
1036
+ * You may obtain a copy of the License at
1037
+ *
1038
+ * http://www.apache.org/licenses/LICENSE-2.0
1039
+ *
1040
+ * Unless required by applicable law or agreed to in writing, software
1041
+ * distributed under the License is distributed on an "AS IS" BASIS,
1042
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1043
+ * See the License for the specific language governing permissions and
1044
+ * limitations under the License.
1045
+ */
1046
/**
 * Base class for Firebase AI model APIs.
 *
 * Instances of this class are associated with a specific Firebase AI {@link Backend}
 * and provide methods for interacting with the configured generative model.
 *
 * @public
 */
class AIModel {
    /**
     * Constructs a new instance of the {@link AIModel} class.
     *
     * This constructor should only be called from subclasses that provide
     * a model API.
     *
     * @param ai - an {@link AI} instance.
     * @param modelName - The name of the model being used. It can be in one of the following formats:
     * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
     * - `models/my-model` (will resolve to `publishers/google/models/my-model`)
     * - `publishers/my-publisher/models/my-model` (fully qualified model name)
     *
     * @throws If the `apiKey` or `projectId` fields are missing in your
     * Firebase config.
     *
     * @internal
     */
    constructor(ai, modelName) {
        this._apiSettings = initApiSettings(ai);
        this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType);
    }
    /**
     * Normalizes the given model name to a fully qualified model resource name
     * for whichever backend is in use.
     *
     * @param modelName - The model name to normalize.
     * @returns The fully qualified model resource name.
     *
     * @internal
     */
    static normalizeModelName(modelName, backendType) {
        return backendType === BackendType.GOOGLE_AI
            ? AIModel.normalizeGoogleAIModelName(modelName)
            : AIModel.normalizeVertexAIModelName(modelName);
    }
    /**
     * Google AI model names are unconditionally prefixed with `models/`.
     * @internal
     */
    static normalizeGoogleAIModelName(modelName) {
        return `models/${modelName}`;
    }
    /**
     * Vertex AI model names resolve under `publishers/google` unless a fully
     * qualified path is provided.
     * @internal
     */
    static normalizeVertexAIModelName(modelName) {
        if (!modelName.includes('/')) {
            // If path is not included, assume it's a non-tuned model.
            return `publishers/google/models/${modelName}`;
        }
        if (modelName.startsWith('models/')) {
            // Add 'publishers/google' if the user is only passing in 'models/model-name'.
            return `publishers/google/${modelName}`;
        }
        // Any other custom format (e.g. tuned models) must be passed in correctly.
        return modelName;
    }
}
1120
+
1121
+ /**
1122
+ * @license
1123
+ * Copyright 2024 Google LLC
1124
+ *
1125
+ * Licensed under the Apache License, Version 2.0 (the "License");
1126
+ * you may not use this file except in compliance with the License.
1127
+ * You may obtain a copy of the License at
1128
+ *
1129
+ * http://www.apache.org/licenses/LICENSE-2.0
1130
+ *
1131
+ * Unless required by applicable law or agreed to in writing, software
1132
+ * distributed under the License is distributed on an "AS IS" BASIS,
1133
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1134
+ * See the License for the specific language governing permissions and
1135
+ * limitations under the License.
1136
+ */
1137
// Package-wide logger instance (logger$1 appears to be the Firebase Logger
// import — its binding is not visible in this chunk).
// NOTE(review): the tag still reads '@firebase/vertexai', presumably retained
// from this package's earlier name; it is a runtime string emitted in log
// output, so confirm before changing it.
const logger = new logger$1.Logger('@firebase/vertexai');
1138
+
1139
+ /**
1140
+ * @license
1141
+ * Copyright 2025 Google LLC
1142
+ *
1143
+ * Licensed under the Apache License, Version 2.0 (the "License");
1144
+ * you may not use this file except in compliance with the License.
1145
+ * You may obtain a copy of the License at
1146
+ *
1147
+ * http://www.apache.org/licenses/LICENSE-2.0
1148
+ *
1149
+ * Unless required by applicable law or agreed to in writing, software
1150
+ * distributed under the License is distributed on an "AS IS" BASIS,
1151
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1152
+ * See the License for the specific language governing permissions and
1153
+ * limitations under the License.
1154
+ */
1155
// Message attached to the DOMException raised when the fetch timeout fires.
const TIMEOUT_EXPIRED_MESSAGE = 'Timeout has expired.';
// DOMException name used to signal request abortion (the standard 'AbortError').
const ABORT_ERROR_NAME = 'AbortError';
1157
/**
 * Builds the full request URL for a Firebase AI REST call from request
 * parameters (model or template, task, backend, and per-request options).
 */
class RequestURL {
    constructor(params) {
        this.params = params;
    }
    toString() {
        const url = new URL(this.baseUrl); // Throws if the URL is invalid
        url.pathname = this.pathname;
        url.search = this.queryParams.toString();
        return url.toString();
    }
    get pathname() {
        const { apiSettings, templateId, task, model } = this.params;
        // Server-side prompt templates address a templateId instead of a model
        // name, so they use a different URL pattern than model requests.
        const resourcePath = templateId
            ? apiSettings.backend._getTemplatePath(apiSettings.project, templateId)
            : apiSettings.backend._getModelPath(apiSettings.project, model);
        return `${resourcePath}:${task}`;
    }
    get baseUrl() {
        // A per-request baseUrl override takes precedence over the default domain.
        return this.params.singleRequestOptions?.baseUrl ?? `https://${DEFAULT_DOMAIN}`;
    }
    get queryParams() {
        const searchParams = new URLSearchParams();
        if (this.params.stream) {
            // Streaming responses are delivered as server-sent events.
            searchParams.set('alt', 'sse');
        }
        return searchParams;
    }
}
1189
/**
 * Builds the WebSocket URL for bidirectional (live) generation, keyed by the
 * API key and routed per backend type.
 */
class WebSocketUrl {
    constructor(apiSettings) {
        this.apiSettings = apiSettings;
    }
    toString() {
        const url = new URL(`wss://${DEFAULT_DOMAIN}`);
        url.pathname = this.pathname;
        // The API key is passed as a query parameter on the socket URL.
        const queryParams = new URLSearchParams({ key: this.apiSettings.apiKey });
        url.search = queryParams.toString();
        return url.toString();
    }
    get pathname() {
        const isGoogleAI = this.apiSettings.backend.backendType === BackendType.GOOGLE_AI;
        // The Vertex AI service path additionally carries the region segment.
        return isGoogleAI
            ? 'ws/google.firebase.vertexai.v1beta.GenerativeService/BidiGenerateContent'
            : `ws/google.firebase.vertexai.v1beta.LlmBidiService/BidiGenerateContent/locations/${this.apiSettings.location}`;
    }
}
1210
/**
 * Log language and "fire/version" to x-goog-api-client
 */
function getClientHeaders(url) {
    const tags = [`${LANGUAGE_TAG}/${PACKAGE_VERSION}`, `fire/${PACKAGE_VERSION}`];
    /**
     * No call would be made if ONLY_ON_DEVICE.
     * ONLY_IN_CLOUD does not indicate an intention to use hybrid.
     * Only the two PREFER_* modes are therefore tagged as hybrid (no version).
     */
    const mode = url.params.apiSettings.inferenceMode;
    if (mode === InferenceMode.PREFER_ON_DEVICE ||
        mode === InferenceMode.PREFER_IN_CLOUD) {
        tags.push(HYBRID_TAG);
    }
    return tags.join(' ');
}
1228
/**
 * Builds the HTTP headers for a Firebase AI request: content type, client
 * logging tags, API key, and — when configured — the app ID, App Check
 * token, and Auth token.
 */
async function getHeaders(url) {
    const { apiSettings } = url.params;
    const headers = new Headers({
        'Content-Type': 'application/json',
        'x-goog-api-client': getClientHeaders(url),
        'x-goog-api-key': apiSettings.apiKey
    });
    if (apiSettings.automaticDataCollectionEnabled) {
        headers.append('X-Firebase-Appid', apiSettings.appId);
    }
    if (apiSettings.getAppCheckToken) {
        const appCheckToken = await apiSettings.getAppCheckToken();
        if (appCheckToken) {
            headers.append('X-Firebase-AppCheck', appCheckToken.token);
            // A token may be returned alongside an error; send it but warn.
            if (appCheckToken.error) {
                logger.warn(`Unable to obtain a valid App Check token: ${appCheckToken.error.message}`);
            }
        }
    }
    if (apiSettings.getAuthToken) {
        const authToken = await apiSettings.getAuthToken();
        if (authToken) {
            headers.append('Authorization', `Firebase ${authToken.accessToken}`);
        }
    }
    return headers;
}
1253
/**
 * Sends a POST request to the Firebase AI backend and returns the raw fetch
 * `Response`.
 *
 * Two abort sources are combined: the caller's optional
 * `singleRequestOptions.signal` and an internal timeout signal (defaulting to
 * `DEFAULT_FETCH_TIMEOUT_MS`). Non-OK responses are converted into `AIError`s
 * with status details attached.
 *
 * @param requestUrlParams - Parameters used to build the request URL and headers.
 * @param body - The request body (passed through to `fetch` unchanged).
 * @returns The successful `Response` (possibly streaming).
 * @throws An `AIError` with code FETCH_ERROR, API_NOT_ENABLED, or ERROR, or a
 * DOMException named AbortError when the request is aborted or times out.
 */
async function makeRequest(requestUrlParams, body) {
    const url = new RequestURL(requestUrlParams);
    let response;
    const externalSignal = requestUrlParams.singleRequestOptions?.signal;
    // A missing or negative timeout falls back to the default timeout.
    const timeoutMillis = requestUrlParams.singleRequestOptions?.timeout != null &&
        requestUrlParams.singleRequestOptions.timeout >= 0
        ? requestUrlParams.singleRequestOptions.timeout
        : DEFAULT_FETCH_TIMEOUT_MS;
    const internalAbortController = new AbortController();
    const fetchTimeoutId = setTimeout(() => {
        internalAbortController.abort(new DOMException(TIMEOUT_EXPIRED_MESSAGE, ABORT_ERROR_NAME));
        logger.debug(`Aborting request to ${url} due to timeout (${timeoutMillis}ms)`);
    }, timeoutMillis);
    // Used to abort the fetch if either the user-defined `externalSignal` is aborted, or if the
    // internal signal (triggered by timeouts) is aborted.
    const combinedSignal = AbortSignal.any(externalSignal
        ? [externalSignal, internalAbortController.signal]
        : [internalAbortController.signal]);
    // Fail fast if the caller's signal was already aborted before we started.
    if (externalSignal && externalSignal.aborted) {
        clearTimeout(fetchTimeoutId);
        throw new DOMException(externalSignal.reason ?? 'Aborted externally before fetch', ABORT_ERROR_NAME);
    }
    try {
        const fetchOptions = {
            method: 'POST',
            headers: await getHeaders(url),
            signal: combinedSignal,
            body
        };
        response = await fetch(url.toString(), fetchOptions);
        if (!response.ok) {
            let message = '';
            let errorDetails;
            // Best-effort parse of the error body; a non-JSON body is tolerated.
            try {
                const json = await response.json();
                message = json.error.message;
                if (json.error.details) {
                    message += ` ${JSON.stringify(json.error.details)}`;
                    errorDetails = json.error.details;
                }
            }
            catch (e) {
                // ignored
            }
            // A 403 with SERVICE_DISABLED details pointing at API activation
            // means the Firebase AI API has not been enabled for this project;
            // surface a dedicated, actionable error for that case.
            if (response.status === 403 &&
                errorDetails &&
                errorDetails.some((detail) => detail.reason === 'SERVICE_DISABLED') &&
                errorDetails.some((detail) => detail.links?.[0]?.description.includes('Google developers console API activation'))) {
                throw new AIError(AIErrorCode.API_NOT_ENABLED, `The Firebase AI SDK requires the Firebase AI ` +
                    `API ('firebasevertexai.googleapis.com') to be enabled in your ` +
                    `Firebase project. Enable this API by visiting the Firebase Console ` +
                    `at https://console.firebase.google.com/project/${url.params.apiSettings.project}/ailogic/ ` +
                    `and clicking "Get started". If you enabled this API recently, ` +
                    `wait a few minutes for the action to propagate to our systems and ` +
                    `then retry.`, {
                    status: response.status,
                    statusText: response.statusText,
                    errorDetails
                });
            }
            throw new AIError(AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, {
                status: response.status,
                statusText: response.statusText,
                errorDetails
            });
        }
    }
    catch (e) {
        let err = e;
        // Wrap unexpected errors in an AIError, but pass through errors we
        // already classified (FETCH_ERROR / API_NOT_ENABLED) and aborts.
        if (e.code !== AIErrorCode.FETCH_ERROR &&
            e.code !== AIErrorCode.API_NOT_ENABLED &&
            e instanceof Error &&
            e.name !== ABORT_ERROR_NAME) {
            err = new AIError(AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}`);
            err.stack = e.stack;
        }
        throw err;
    }
    finally {
        // When doing streaming requests, this will clear the timeout once the stream begins.
        // If a timeout is 3000ms, and the stream starts after 300ms and ends after 5000ms, the
        // timeout will be cleared after 300ms, so it won't abort the request.
        clearTimeout(fetchTimeoutId);
    }
    return response;
}
1339
+
1340
+ /**
1341
+ * @license
1342
+ * Copyright 2024 Google LLC
1343
+ *
1344
+ * Licensed under the Apache License, Version 2.0 (the "License");
1345
+ * you may not use this file except in compliance with the License.
1346
+ * You may obtain a copy of the License at
1347
+ *
1348
+ * http://www.apache.org/licenses/LICENSE-2.0
1349
+ *
1350
+ * Unless required by applicable law or agreed to in writing, software
1351
+ * distributed under the License is distributed on an "AS IS" BASIS,
1352
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1353
+ * See the License for the specific language governing permissions and
1354
+ * limitations under the License.
1355
+ */
1356
/**
 * Check that at least one candidate exists and does not have a bad
 * finish reason. Warns if multiple candidates exist.
 *
 * @throws AIError (RESPONSE_ERROR) when the first candidate was blocked.
 */
function hasValidCandidates(response) {
    const candidates = response.candidates;
    if (!candidates || candidates.length === 0) {
        return false;
    }
    if (candidates.length > 1) {
        logger.warn(`This response had ${candidates.length} ` +
            `candidates. Returning text from the first candidate only. ` +
            `Access response.candidates directly to use the other candidates.`);
    }
    if (hadBadFinishReason(candidates[0])) {
        throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, {
            response
        });
    }
    return true;
}
1378
/**
 * Creates an EnhancedGenerateContentResponse object that has helper functions and
 * other modifications that improve usability.
 *
 * @param response - The raw `GenerateContentResponse` to enhance in place.
 * @param inferenceSource - Where the inference ran; defaults to `IN_CLOUD`.
 * @returns The same response object, with helper methods attached and
 * `inferenceSource` set.
 */
function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) {
    /**
     * The Vertex AI backend omits default values.
     * This causes the `index` property to be omitted from the first candidate in the
     * response, since it has index 0, and 0 is a default value.
     * See: https://github.com/firebase/firebase-js-sdk/issues/8566
     */
    // Guard on candidates[0] as well: a present-but-empty `candidates` array
    // previously caused a TypeError ("cannot read hasOwnProperty of undefined").
    if (response.candidates?.[0] && !response.candidates[0].hasOwnProperty('index')) {
        response.candidates[0].index = 0;
    }
    const responseWithHelpers = addHelpers(response);
    responseWithHelpers.inferenceSource = inferenceSource;
    return responseWithHelpers;
}
1396
/**
 * Adds convenience helper methods to a response object, including stream
 * chunks (as long as each chunk is a complete GenerateContentResponse JSON).
 *
 * Each helper follows the same pattern: if the response has a usable first
 * candidate, extract the requested data; if the prompt was blocked
 * (`promptFeedback` present), throw a RESPONSE_ERROR; otherwise return an
 * empty/undefined fallback.
 *
 * @param response - The `GenerateContentResponse` to mutate in place.
 * @returns The same response object with `text`, `thoughtSummary`,
 * `inlineDataParts`, and `functionCalls` helpers attached.
 */
function addHelpers(response) {
    // Concatenated text of all non-thought parts in the first candidate.
    response.text = () => {
        if (hasValidCandidates(response)) {
            return getText(response, part => !part.thought);
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return '';
    };
    // Concatenated text of thought parts only; undefined when there are none.
    response.thoughtSummary = () => {
        if (hasValidCandidates(response)) {
            const result = getText(response, part => !!part.thought);
            return result === '' ? undefined : result;
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return undefined;
    };
    // Inline data parts (e.g. generated media) of the first candidate.
    response.inlineDataParts = () => {
        if (hasValidCandidates(response)) {
            return getInlineDataParts(response);
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Data not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return undefined;
    };
    // Function calls requested by the model in the first candidate.
    response.functionCalls = () => {
        if (hasValidCandidates(response)) {
            return getFunctionCalls(response);
        }
        else if (response.promptFeedback) {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, {
                response
            });
        }
        return undefined;
    };
    return response;
}
1448
/**
 * Returns all text from the first candidate's parts, filtering by whether
 * `partFilter()` returns true.
 *
 * @param response - The `GenerateContentResponse` from which to extract text.
 * @param partFilter - Only return `Part`s for which this returns true.
 * @returns The concatenated text, or '' when no matching parts exist.
 */
function getText(response, partFilter) {
    // `?.` after [0] too: an empty `candidates` array previously caused a
    // TypeError when reading `.content` of undefined.
    const parts = response.candidates?.[0]?.content?.parts;
    if (!parts) {
        return '';
    }
    let text = '';
    for (const part of parts) {
        if (part.text && partFilter(part)) {
            text += part.text;
        }
    }
    return text;
}
1471
/**
 * Returns every {@link FunctionCall} associated with the first candidate.
 *
 * @param response - The `GenerateContentResponse` to inspect (may be falsy).
 * @returns The function calls, or undefined when there are none.
 */
function getFunctionCalls(response) {
    if (!response) {
        return undefined;
    }
    // `?.` after [0] too: an empty `candidates` array previously caused a
    // TypeError when reading `.content` of undefined.
    const parts = response.candidates?.[0]?.content?.parts ?? [];
    const functionCalls = parts
        .filter(part => part.functionCall)
        .map(part => part.functionCall);
    return functionCalls.length > 0 ? functionCalls : undefined;
}
1493
/**
 * Returns every {@link InlineDataPart} in the first candidate if present.
 *
 * @param response - The `GenerateContentResponse` to inspect.
 * @returns The parts carrying inline data, or undefined when there are none.
 *
 * @internal
 */
function getInlineDataParts(response) {
    // `?.` after [0] too: an empty `candidates` array previously caused a
    // TypeError when reading `.content` of undefined.
    const parts = response.candidates?.[0]?.content?.parts;
    if (!parts) {
        return undefined;
    }
    // Note: returns the whole Part objects, not just their inlineData payloads.
    const data = parts.filter(part => part.inlineData);
    return data.length > 0 ? data : undefined;
}
1514
// Finish reasons that indicate the candidate's content was blocked.
const badFinishReasons = [FinishReason.RECITATION, FinishReason.SAFETY];
/**
 * Returns true when the candidate finished for a reason that blocks its
 * content (recitation or safety).
 */
function hadBadFinishReason(candidate) {
    return (!!candidate.finishReason &&
        badFinishReasons.includes(candidate.finishReason));
}
1519
/**
 * Builds a human-readable description of why a response (or its first
 * candidate) was blocked; returns '' when nothing was blocked.
 */
function formatBlockErrorMessage(response) {
    const hasNoCandidates = !response.candidates || response.candidates.length === 0;
    // Case 1: the whole prompt was blocked (no candidates, promptFeedback set).
    if (hasNoCandidates && response.promptFeedback) {
        let message = 'Response was blocked';
        if (response.promptFeedback?.blockReason) {
            message += ` due to ${response.promptFeedback.blockReason}`;
        }
        if (response.promptFeedback?.blockReasonMessage) {
            message += `: ${response.promptFeedback.blockReasonMessage}`;
        }
        return message;
    }
    // Case 2: the first candidate exists but was blocked mid-generation.
    const firstCandidate = response.candidates?.[0];
    if (firstCandidate && hadBadFinishReason(firstCandidate)) {
        let message = `Candidate was blocked due to ${firstCandidate.finishReason}`;
        if (firstCandidate.finishMessage) {
            message += `: ${firstCandidate.finishMessage}`;
        }
        return message;
    }
    return '';
}
1542
/**
 * Convert a generic successful fetch response body to an Imagen response object
 * that can be returned to the user. This converts the REST APIs response format to our
 * APIs representation of a response.
 *
 * @internal
 */
async function handlePredictResponse(response) {
    const responseJson = await response.json();
    // The backend should always send a non-empty array of predictions if the response was successful.
    if (!responseJson.predictions || responseJson.predictions.length === 0) {
        throw new AIError(AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.');
    }
    const images = [];
    let filteredReason;
    for (const prediction of responseJson.predictions) {
        if (prediction.raiFilteredReason) {
            filteredReason = prediction.raiFilteredReason;
        }
        else if (prediction.mimeType && prediction.bytesBase64Encoded) {
            images.push({
                mimeType: prediction.mimeType,
                bytesBase64Encoded: prediction.bytesBase64Encoded
            });
        }
        else if (prediction.mimeType && prediction.gcsUri) {
            images.push({
                mimeType: prediction.mimeType,
                gcsURI: prediction.gcsUri
            });
        }
        else if (prediction.safetyAttributes) {
            // Safety attribute predictions carry no image data; skipped on purpose.
        }
        else {
            throw new AIError(AIErrorCode.RESPONSE_ERROR, `Unexpected element in 'predictions' array in response: '${JSON.stringify(prediction)}'`);
        }
    }
    return { images, filteredReason };
}
1580
+
1581
+ /**
1582
+ * @license
1583
+ * Copyright 2025 Google LLC
1584
+ *
1585
+ * Licensed under the Apache License, Version 2.0 (the "License");
1586
+ * you may not use this file except in compliance with the License.
1587
+ * You may obtain a copy of the License at
1588
+ *
1589
+ * http://www.apache.org/licenses/LICENSE-2.0
1590
+ *
1591
+ * Unless required by applicable law or agreed to in writing, software
1592
+ * distributed under the License is distributed on an "AS IS" BASIS,
1593
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1594
+ * See the License for the specific language governing permissions and
1595
+ * limitations under the License.
1596
+ */
1597
+ /**
1598
+ * This SDK supports both the Vertex AI Gemini API and the Gemini Developer API (using Google AI).
1599
+ * The public API prioritizes the format used by the Vertex AI Gemini API.
1600
+ * We avoid having two sets of types by translating requests and responses between the two API formats.
1601
+ * This translation allows developers to switch between the Vertex AI Gemini API and the Gemini Developer API
1602
+ * with minimal code changes.
1603
+ *
1604
+ * In here are functions that map requests and responses between the two API formats.
1605
+ * Requests in the Vertex AI format are mapped to the Google AI format before being sent.
1606
+ * Responses from the Google AI backend are mapped back to the Vertex AI format before being returned to the user.
1607
+ */
1608
/**
 * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI.
 *
 * Note: mutates `generateContentRequest` in place (`topK` may be rounded) and returns it.
 *
 * @param generateContentRequest The {@link GenerateContentRequest} to map.
 * @returns A {@link GenerateContentRequest} that conforms to the Google AI format.
 *
 * @throws If the request contains properties that are unsupported by Google AI.
 *
 * @internal
 */
function mapGenerateContentRequest(generateContentRequest) {
    generateContentRequest.safetySettings?.forEach(safetySetting => {
        // SafetySetting.method is a Vertex AI-only concept.
        if (safetySetting.method) {
            // Fixed: error message previously read "in the the Gemini Developer API".
            throw new AIError(AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the Gemini Developer API. Please remove this property.');
        }
    });
    if (generateContentRequest.generationConfig?.topK) {
        // The Gemini Developer API only accepts integer topK values.
        const roundedTopK = Math.round(generateContentRequest.generationConfig.topK);
        if (roundedTopK !== generateContentRequest.generationConfig.topK) {
            logger.warn('topK in GenerationConfig has been rounded to the nearest integer to match the format for requests to the Gemini Developer API.');
            generateContentRequest.generationConfig.topK = roundedTopK;
        }
    }
    return generateContentRequest;
}
1633
/**
 * Maps a {@link GenerateContentResponse} from Google AI to the format of the
 * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API.
 *
 * @param googleAIResponse The {@link GenerateContentResponse} from Google AI.
 * @returns A {@link GenerateContentResponse} that conforms to the public API's format.
 *
 * @internal
 */
function mapGenerateContentResponse(googleAIResponse) {
    const generateContentResponse = {
        candidates: googleAIResponse.candidates
            ? mapGenerateContentCandidates(googleAIResponse.candidates)
            : undefined,
        // Fixed: this was previously assigned to a `prompt` property, which nothing
        // in this file reads — consumers (e.g. the blocked-response error formatter
        // and response aggregation) read `promptFeedback`, so the mapped feedback
        // was silently dropped.
        promptFeedback: googleAIResponse.promptFeedback
            ? mapPromptFeedback(googleAIResponse.promptFeedback)
            : undefined,
        usageMetadata: googleAIResponse.usageMetadata
    };
    return generateContentResponse;
}
1654
/**
 * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI.
 *
 * @param countTokensRequest The {@link CountTokensRequest} to map.
 * @param model The model to count tokens with.
 * @returns A {@link CountTokensRequest} that conforms to the Google AI format.
 *
 * @internal
 */
function mapCountTokensRequest(countTokensRequest, model) {
    // Google AI counts tokens against a full GenerateContentRequest, so wrap
    // the request fields together with the model name.
    return {
        generateContentRequest: {
            model,
            ...countTokensRequest
        }
    };
}
1672
/**
 * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms
 * to the Vertex AI API format.
 *
 * @param candidates The {@link GoogleAIGenerateContentCandidate} array to map.
 * @returns A {@link GenerateContentCandidate} array that conforms to the Vertex AI format.
 *
 * @throws If any {@link Part} in the candidates has a `videoMetadata` property.
 *
 * @internal
 */
function mapGenerateContentCandidates(candidates) {
    const mappedCandidates = [];
    candidates.forEach(candidate => {
        // Map citationSources to citations.
        let citationMetadata;
        if (candidate.citationMetadata) {
            citationMetadata = {
                citations: candidate.citationMetadata.citationSources
            };
        }
        // Assign missing candidate SafetyRatings properties to their defaults if undefined.
        // Fixed: declared per candidate — a single variable hoisted outside this loop
        // previously leaked one candidate's mapped ratings into every subsequent
        // candidate that had no safetyRatings of its own.
        let mappedSafetyRatings;
        if (candidate.safetyRatings) {
            mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => {
                return {
                    ...safetyRating,
                    severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,
                    probabilityScore: safetyRating.probabilityScore ?? 0,
                    severityScore: safetyRating.severityScore ?? 0
                };
            });
        }
        // videoMetadata is not supported.
        // Throw early since developers may send a long video as input and only expect to pay
        // for inference on a small portion of the video.
        if (candidate.content?.parts?.some(part => part?.videoMetadata)) {
            throw new AIError(AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in the Gemini Developer API. Please remove this property.');
        }
        const mappedCandidate = {
            index: candidate.index,
            content: candidate.content,
            finishReason: candidate.finishReason,
            finishMessage: candidate.finishMessage,
            safetyRatings: mappedSafetyRatings,
            citationMetadata,
            groundingMetadata: candidate.groundingMetadata,
            urlContextMetadata: candidate.urlContextMetadata
        };
        mappedCandidates.push(mappedCandidate);
    });
    return mappedCandidates;
}
1727
/**
 * Maps a Google AI prompt feedback object to the Vertex AI format, filling in
 * default values for SafetyRating properties that Google AI omits.
 *
 * @internal
 */
function mapPromptFeedback(promptFeedback) {
    // Assign missing SafetyRating properties to their defaults if undefined.
    const mappedSafetyRatings = promptFeedback.safetyRatings.map(safetyRating => ({
        category: safetyRating.category,
        probability: safetyRating.probability,
        severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED,
        probabilityScore: safetyRating.probabilityScore ?? 0,
        severityScore: safetyRating.severityScore ?? 0,
        blocked: safetyRating.blocked
    }));
    return {
        blockReason: promptFeedback.blockReason,
        safetyRatings: mappedSafetyRatings,
        blockReasonMessage: promptFeedback.blockReasonMessage
    };
}
1747
+
1748
+ /**
1749
+ * @license
1750
+ * Copyright 2024 Google LLC
1751
+ *
1752
+ * Licensed under the Apache License, Version 2.0 (the "License");
1753
+ * you may not use this file except in compliance with the License.
1754
+ * You may obtain a copy of the License at
1755
+ *
1756
+ * http://www.apache.org/licenses/LICENSE-2.0
1757
+ *
1758
+ * Unless required by applicable law or agreed to in writing, software
1759
+ * distributed under the License is distributed on an "AS IS" BASIS,
1760
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1761
+ * See the License for the specific language governing permissions and
1762
+ * limitations under the License.
1763
+ */
1764
// Matches one complete SSE event ("data: <json>" terminated by a blank line);
// used to split buffered stream text into individually parseable JSON chunks.
const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;
1765
/**
 * Process a response.body stream from the backend and return an
 * iterator that provides one complete GenerateContentResponse at a time
 * and a promise that resolves with a single aggregated
 * GenerateContentResponse.
 *
 * @param response - Response from a fetch call
 */
async function processStream(response, apiSettings, inferenceSource) {
    // Decode the raw byte stream into text, failing fast on invalid UTF-8.
    const textStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true }));
    // We split the stream so the user can iterate over partial results
    // (userStream) while we aggregate the full result for history/final
    // response (internalStream).
    const [userStream, internalStream] = getResponseStream(textStream).tee();
    const internal = await processStreamInternal(internalStream, apiSettings, inferenceSource);
    return {
        stream: generateResponseSequence(userStream, apiSettings, inferenceSource),
        response: internal.response,
        firstValue: internal.firstValue
    };
}
1786
/**
 * Consumes streams teed from the input stream for internal needs.
 * The streams need to be teed because each stream can only be consumed
 * by one reader.
 *
 * "streamForPeek"
 * This tee is used to peek at the first value for relevant information
 * that we need to evaluate before returning the stream handle to the
 * client. For example, we need to check if the response is a function
 * call that may need to be handled by automatic function calling before
 * returning a response to the client.
 *
 * "streamForAggregation"
 * We iterate through this tee independently from the user and aggregate
 * it into a single response when the stream is complete. We need this
 * aggregate object to add to chat history when using ChatSession. It's
 * also provided to the user if they want it.
 */
async function processStreamInternal(stream, apiSettings, inferenceSource) {
    const [streamForPeek, streamForAggregation] = stream.tee();
    // Peek at the first parsed chunk without consuming the aggregation tee.
    const peekReader = streamForPeek.getReader();
    const firstRead = await peekReader.read();
    return {
        firstValue: firstRead.value,
        // Intentionally not awaited: aggregation continues in the background
        // and the promise resolves once the stream completes.
        response: getResponsePromise(streamForAggregation, apiSettings, inferenceSource)
    };
}
1813
/**
 * Drains a stream of GenerateContentResponse chunks and resolves with a
 * single aggregated (and, for Google AI, mapped) enhanced response.
 */
async function getResponsePromise(stream, apiSettings, inferenceSource) {
    const chunks = [];
    const reader = stream.getReader();
    for (;;) {
        const { done, value } = await reader.read();
        if (!done) {
            chunks.push(value);
            continue;
        }
        let aggregated = aggregateResponses(chunks);
        if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
            aggregated = mapGenerateContentResponse(aggregated);
        }
        return createEnhancedContentResponse(aggregated, inferenceSource);
    }
}
1828
/**
 * Yields enhanced responses from the stream, skipping chunks that carry
 * nothing the caller can act on (no content parts, finish reason, citation
 * metadata, or URL context metadata).
 */
async function* generateResponseSequence(stream, apiSettings, inferenceSource) {
    const reader = stream.getReader();
    for (;;) {
        const { value, done } = await reader.read();
        if (done) {
            return;
        }
        const enhancedResponse = apiSettings.backend.backendType === BackendType.GOOGLE_AI
            ? createEnhancedContentResponse(mapGenerateContentResponse(value), inferenceSource)
            : createEnhancedContentResponse(value, inferenceSource);
        const firstCandidate = enhancedResponse.candidates?.[0];
        const isEmptyChunk = !firstCandidate?.content?.parts &&
            !firstCandidate?.finishReason &&
            !firstCandidate?.citationMetadata &&
            !firstCandidate?.urlContextMetadata;
        if (!isEmptyChunk) {
            yield enhancedResponse;
        }
    }
}
1852
/**
 * Reads a raw string stream, buffers incomplete chunks, and yields parsed JSON objects.
 *
 * @param inputStream - Stream of decoded SSE text chunks.
 * @returns A ReadableStream of parsed JSON response objects.
 */
function getResponseStream(inputStream) {
    const reader = inputStream.getReader();
    const stream = new ReadableStream({
        start(controller) {
            let currentText = '';
            return pump();
            function pump() {
                return reader.read().then(({ value, done }) => {
                    if (done) {
                        // Leftover non-whitespace text means the stream ended
                        // mid-event; surface that as a parse failure.
                        if (currentText.trim()) {
                            controller.error(new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream'));
                            return;
                        }
                        controller.close();
                        return;
                    }
                    currentText += value;
                    // SSE events may span chunk boundaries, so we buffer until we match
                    // the full "data: {json}\n\n" pattern.
                    let match = currentText.match(responseLineRE);
                    let parsedResponse;
                    while (match) {
                        try {
                            parsedResponse = JSON.parse(match[1]);
                        }
                        catch (e) {
                            // Fixed: the template literal previously omitted the
                            // closing quote around the offending payload.
                            controller.error(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}"`));
                            return;
                        }
                        controller.enqueue(parsedResponse);
                        // Drop the consumed event and look for another complete
                        // event in the remaining buffer.
                        currentText = currentText.substring(match[0].length);
                        match = currentText.match(responseLineRE);
                    }
                    return pump();
                });
            }
        }
    });
    return stream;
}
1895
/**
 * Aggregates an array of `GenerateContentResponse`s into a single
 * GenerateContentResponse.
 *
 * Candidates are merged by index: metadata fields take the value from the
 * latest chunk, while content parts are concatenated across chunks.
 */
function aggregateResponses(responses) {
    // Prompt feedback comes from the last chunk in the stream.
    const lastResponse = responses[responses.length - 1];
    const aggregatedResponse = {
        promptFeedback: lastResponse?.promptFeedback
    };
    for (const response of responses) {
        if (response.candidates) {
            for (const candidate of response.candidates) {
                // Use 0 if index is undefined (protobuf default value omission).
                const i = candidate.index || 0;
                if (!aggregatedResponse.candidates) {
                    aggregatedResponse.candidates = [];
                }
                if (!aggregatedResponse.candidates[i]) {
                    aggregatedResponse.candidates[i] = {
                        index: candidate.index
                    };
                }
                // Overwrite with the latest metadata
                aggregatedResponse.candidates[i].citationMetadata =
                    candidate.citationMetadata;
                aggregatedResponse.candidates[i].finishReason = candidate.finishReason;
                aggregatedResponse.candidates[i].finishMessage =
                    candidate.finishMessage;
                aggregatedResponse.candidates[i].safetyRatings =
                    candidate.safetyRatings;
                aggregatedResponse.candidates[i].groundingMetadata =
                    candidate.groundingMetadata;
                // The urlContextMetadata object is defined in the first chunk of the response stream.
                // In all subsequent chunks, the urlContextMetadata object will be undefined. We need to
                // make sure that we don't overwrite the first value urlContextMetadata object with undefined.
                // FIXME: What happens if we receive a second, valid urlContextMetadata object?
                const urlContextMetadata = candidate.urlContextMetadata;
                if (typeof urlContextMetadata === 'object' &&
                    urlContextMetadata !== null &&
                    Object.keys(urlContextMetadata).length > 0) {
                    aggregatedResponse.candidates[i].urlContextMetadata =
                        urlContextMetadata;
                }
                if (candidate.content) {
                    // Chunks may carry content with no parts; nothing to append.
                    if (!candidate.content.parts) {
                        continue;
                    }
                    if (!aggregatedResponse.candidates[i].content) {
                        aggregatedResponse.candidates[i].content = {
                            role: candidate.content.role || 'user',
                            parts: []
                        };
                    }
                    for (const part of candidate.content.parts) {
                        // Copy so later mutation of the chunk doesn't affect the aggregate.
                        const newPart = { ...part };
                        // The backend can send empty text parts. If these are sent back
                        // (e.g. in chat history), the backend will respond with an error.
                        // To prevent this, ignore empty text parts.
                        if (part.text === '') {
                            continue;
                        }
                        if (Object.keys(newPart).length > 0) {
                            aggregatedResponse.candidates[i].content.parts.push(newPart);
                        }
                    }
                }
            }
        }
    }
    return aggregatedResponse;
}
1966
+
1967
+ /**
1968
+ * @license
1969
+ * Copyright 2025 Google LLC
1970
+ *
1971
+ * Licensed under the Apache License, Version 2.0 (the "License");
1972
+ * you may not use this file except in compliance with the License.
1973
+ * You may obtain a copy of the License at
1974
+ *
1975
+ * http://www.apache.org/licenses/LICENSE-2.0
1976
+ *
1977
+ * Unless required by applicable law or agreed to in writing, software
1978
+ * distributed under the License is distributed on an "AS IS" BASIS,
1979
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1980
+ * See the License for the specific language governing permissions and
1981
+ * limitations under the License.
1982
+ */
1983
/**
 * AIError codes for which PREFER_IN_CLOUD inference falls back to the
 * on-device model instead of surfacing the error to the caller.
 */
const errorsCausingFallback = [
    // most network errors
    AIErrorCode.FETCH_ERROR,
    // fallback code for all other errors in makeRequest
    AIErrorCode.ERROR,
    // error due to API not being enabled in project
    AIErrorCode.API_NOT_ENABLED
];
1991
/**
 * Dispatches a request to the appropriate backend (on-device or in-cloud)
 * based on the inference mode.
 *
 * @param request - The request to be sent.
 * @param chromeAdapter - The on-device model adapter; when absent, the request
 *     always goes to the cloud.
 * @param onDeviceCall - The function to call for on-device inference.
 * @param inCloudCall - The function to call for in-cloud inference.
 * @returns The response from the backend, tagged with its inference source.
 * @throws AIError(UNSUPPORTED) when mode is ONLY_ON_DEVICE but the model is
 *     unavailable, or AIError(ERROR) on an unrecognized mode.
 */
async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
    // No adapter configured: cloud is the only option.
    if (!chromeAdapter) {
        return {
            response: await inCloudCall(),
            inferenceSource: InferenceSource.IN_CLOUD
        };
    }
    switch (chromeAdapter.mode) {
        case InferenceMode.ONLY_ON_DEVICE:
            if (await chromeAdapter.isAvailable(request)) {
                return {
                    response: await onDeviceCall(),
                    inferenceSource: InferenceSource.ON_DEVICE
                };
            }
            throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
        case InferenceMode.ONLY_IN_CLOUD:
            return {
                response: await inCloudCall(),
                inferenceSource: InferenceSource.IN_CLOUD
            };
        case InferenceMode.PREFER_IN_CLOUD:
            try {
                return {
                    response: await inCloudCall(),
                    inferenceSource: InferenceSource.IN_CLOUD
                };
            }
            catch (e) {
                // Fall back to the device model only for error codes that
                // indicate the cloud call itself failed; rethrow the rest.
                if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
                    return {
                        response: await onDeviceCall(),
                        inferenceSource: InferenceSource.ON_DEVICE
                    };
                }
                throw e;
            }
        case InferenceMode.PREFER_ON_DEVICE:
            if (await chromeAdapter.isAvailable(request)) {
                return {
                    response: await onDeviceCall(),
                    inferenceSource: InferenceSource.ON_DEVICE
                };
            }
            return {
                response: await inCloudCall(),
                inferenceSource: InferenceSource.IN_CLOUD
            };
        default:
            // Fixed typo: "infererence" -> "inference".
            throw new AIError(AIErrorCode.ERROR, `Unexpected inference mode: ${chromeAdapter.mode}`);
    }
}
2053
+
2054
+ /**
2055
+ * @license
2056
+ * Copyright 2024 Google LLC
2057
+ *
2058
+ * Licensed under the Apache License, Version 2.0 (the "License");
2059
+ * you may not use this file except in compliance with the License.
2060
+ * You may obtain a copy of the License at
2061
+ *
2062
+ * http://www.apache.org/licenses/LICENSE-2.0
2063
+ *
2064
+ * Unless required by applicable law or agreed to in writing, software
2065
+ * distributed under the License is distributed on an "AS IS" BASIS,
2066
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2067
+ * See the License for the specific language governing permissions and
2068
+ * limitations under the License.
2069
+ */
2070
/**
 * Sends a streaming generateContent request to the cloud backend, first
 * mapping the request when targeting the Gemini Developer API.
 */
async function generateContentStreamOnCloud(apiSettings, model, params, singleRequestOptions) {
    const requestParams = apiSettings.backend.backendType === BackendType.GOOGLE_AI
        ? mapGenerateContentRequest(params)
        : params;
    return makeRequest({
        task: "streamGenerateContent" /* Task.STREAM_GENERATE_CONTENT */,
        model,
        apiSettings,
        stream: true,
        singleRequestOptions
    }, JSON.stringify(requestParams));
}
2082
/**
 * Generates streaming content, dispatching to the on-device model or the
 * cloud backend depending on the adapter's inference mode.
 */
async function generateContentStream(apiSettings, model, params, chromeAdapter, singleRequestOptions) {
    const onDevice = () => chromeAdapter.generateContentStream(params);
    const inCloud = () => generateContentStreamOnCloud(apiSettings, model, params, singleRequestOptions);
    const { response, inferenceSource } = await callCloudOrDevice(params, chromeAdapter, onDevice, inCloud);
    return processStream(response, apiSettings, inferenceSource);
}
2086
/**
 * Sends a unary generateContent request to the cloud backend, first mapping
 * the request when targeting the Gemini Developer API.
 */
async function generateContentOnCloud(apiSettings, model, params, singleRequestOptions) {
    const requestParams = apiSettings.backend.backendType === BackendType.GOOGLE_AI
        ? mapGenerateContentRequest(params)
        : params;
    return makeRequest({
        model,
        task: "generateContent" /* Task.GENERATE_CONTENT */,
        apiSettings,
        stream: false,
        singleRequestOptions
    }, JSON.stringify(requestParams));
}
2098
/**
 * Calls a server-side prompt template to generate content (unary) and wraps
 * the parsed result in an enhanced response object.
 */
async function templateGenerateContent(apiSettings, templateId, templateParams, singleRequestOptions) {
    const fetchResponse = await makeRequest({
        task: "templateGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_GENERATE_CONTENT */,
        templateId,
        apiSettings,
        stream: false,
        singleRequestOptions
    }, JSON.stringify(templateParams));
    const parsed = await processGenerateContentResponse(fetchResponse, apiSettings);
    return {
        response: createEnhancedContentResponse(parsed)
    };
}
2112
/**
 * Calls a server-side prompt template to generate content as a stream.
 */
async function templateGenerateContentStream(apiSettings, templateId, templateParams, singleRequestOptions) {
    const fetchResponse = await makeRequest({
        task: "templateStreamGenerateContent" /* ServerPromptTemplateTask.TEMPLATE_STREAM_GENERATE_CONTENT */,
        templateId,
        apiSettings,
        stream: true,
        singleRequestOptions
    }, JSON.stringify(templateParams));
    return processStream(fetchResponse, apiSettings);
}
2122
/**
 * Generates content, dispatching to the on-device model or the cloud backend
 * depending on the adapter's inference mode.
 */
async function generateContent(apiSettings, model, params, chromeAdapter, singleRequestOptions) {
    const onDevice = () => chromeAdapter.generateContent(params);
    const inCloud = () => generateContentOnCloud(apiSettings, model, params, singleRequestOptions);
    const callResult = await callCloudOrDevice(params, chromeAdapter, onDevice, inCloud);
    const parsed = await processGenerateContentResponse(callResult.response, apiSettings);
    return {
        response: createEnhancedContentResponse(parsed, callResult.inferenceSource)
    };
}
2130
/**
 * Parses a fetch response body, mapping it to the public API format when it
 * came from the Gemini Developer API backend.
 */
async function processGenerateContentResponse(response, apiSettings) {
    const responseJson = await response.json();
    return apiSettings.backend.backendType === BackendType.GOOGLE_AI
        ? mapGenerateContentResponse(responseJson)
        : responseJson;
}
2139
+
2140
+ /**
2141
+ * @license
2142
+ * Copyright 2024 Google LLC
2143
+ *
2144
+ * Licensed under the Apache License, Version 2.0 (the "License");
2145
+ * you may not use this file except in compliance with the License.
2146
+ * You may obtain a copy of the License at
2147
+ *
2148
+ * http://www.apache.org/licenses/LICENSE-2.0
2149
+ *
2150
+ * Unless required by applicable law or agreed to in writing, software
2151
+ * distributed under the License is distributed on an "AS IS" BASIS,
2152
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2153
+ * See the License for the specific language governing permissions and
2154
+ * limitations under the License.
2155
+ */
2156
/**
 * Normalizes a system instruction (string, Part, or Content) into a Content
 * object with role 'system'. Returns undefined for null/undefined input.
 */
function formatSystemInstruction(input) {
    // null or undefined
    if (input == null) {
        return undefined;
    }
    if (typeof input === 'string') {
        return { role: 'system', parts: [{ text: input }] };
    }
    if (input.text) {
        // A single Part.
        return { role: 'system', parts: [input] };
    }
    if (input.parts) {
        // Content: default the role to 'system' when absent.
        return input.role ? input : { role: 'system', parts: input.parts };
    }
}
2176
/**
 * Converts a string or an iterable of strings/Parts into Part objects and
 * delegates role assignment and validation to
 * assignRoleToPartsAndValidateSendMessageRequest.
 */
function formatNewContent(request) {
    const newParts = typeof request === 'string'
        ? [{ text: request }]
        : Array.from(request, partOrString => typeof partOrString === 'string' ? { text: partOrString } : partOrString);
    return assignRoleToPartsAndValidateSendMessageRequest(newParts);
}
2193
/**
 * When multiple Part types (i.e. FunctionResponsePart and TextPart) are
 * passed in a single Part array, we may need to assign different roles to each
 * part. Currently only FunctionResponsePart requires a role other than 'user'.
 * @private
 * @param parts Array of parts to pass to the model
 * @returns Array of content items
 */
function assignRoleToPartsAndValidateSendMessageRequest(parts) {
    const userParts = [];
    const functionParts = [];
    for (const part of parts) {
        if ('functionResponse' in part) {
            functionParts.push(part);
        }
        else {
            userParts.push(part);
        }
    }
    if (userParts.length > 0 && functionParts.length > 0) {
        throw new AIError(AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.');
    }
    if (userParts.length === 0 && functionParts.length === 0) {
        throw new AIError(AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.');
    }
    if (userParts.length > 0) {
        return { role: 'user', parts: userParts };
    }
    return { role: 'function', parts: functionParts };
}
2227
/**
 * Normalizes user input (a full request object, a string, or an array of
 * strings/Parts) into a GenerateContentRequest, formatting the system
 * instruction when present.
 */
function formatGenerateContentInput(params) {
    // Either the input is already a request with `contents`, or it's a raw
    // string/array that must be wrapped in a single Content entry.
    const formattedRequest = params.contents
        ? params
        : { contents: [formatNewContent(params)] };
    if (params.systemInstruction) {
        formattedRequest.systemInstruction = formatSystemInstruction(params.systemInstruction);
    }
    return formattedRequest;
}
2242
/**
 * Convert the user-defined parameters in {@link ImagenGenerationParams} to the format
 * that is expected from the REST API.
 *
 * @internal
 */
function createPredictRequestBody(prompt, { gcsURI, imageFormat, addWatermark, numberOfImages = 1, negativePrompt, aspectRatio, safetyFilterLevel, personFilterLevel }) {
    // Properties that are undefined will be omitted from the JSON string that is sent in the request.
    const parameters = {
        storageUri: gcsURI,
        negativePrompt,
        sampleCount: numberOfImages,
        aspectRatio,
        outputOptions: imageFormat,
        addWatermark,
        safetyFilterLevel,
        personGeneration: personFilterLevel,
        includeRaiReason: true,
        includeSafetyAttributes: true
    };
    return {
        instances: [{ prompt }],
        parameters
    };
}
2271
+
2272
+ /**
2273
+ * @license
2274
+ * Copyright 2024 Google LLC
2275
+ *
2276
+ * Licensed under the Apache License, Version 2.0 (the "License");
2277
+ * you may not use this file except in compliance with the License.
2278
+ * You may obtain a copy of the License at
2279
+ *
2280
+ * http://www.apache.org/licenses/LICENSE-2.0
2281
+ *
2282
+ * Unless required by applicable law or agreed to in writing, software
2283
+ * distributed under the License is distributed on an "AS IS" BASIS,
2284
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2285
+ * See the License for the specific language governing permissions and
2286
+ * limitations under the License.
2287
+ */
2288
// https://ai.google.dev/api/rest/v1beta/Content#part
// Part fields that chat-history validation knows how to count.
const VALID_PART_FIELDS = [
    'text',
    'inlineData',
    'functionCall',
    'functionResponse',
    'thought',
    'thoughtSignature'
];
// Which Part fields each Content role is allowed to carry in chat history.
const VALID_PARTS_PER_ROLE = {
    user: ['text', 'inlineData'],
    function: ['functionResponse'],
    model: ['text', 'functionCall', 'thought', 'thoughtSignature'],
    // System instructions shouldn't be in history anyway.
    system: ['text']
};
// Which role(s) may immediately precede each role in chat history.
const VALID_PREVIOUS_CONTENT_ROLES = {
    user: ['model'],
    function: ['model'],
    model: ['user', 'function'],
    // System instructions shouldn't be in history.
    system: []
};
2311
/**
 * Validates that a chat history is well-formed: it starts with a 'user'
 * Content, roles alternate legally, every Content has a non-empty parts
 * array, and each role only carries Part fields permitted for it.
 * Throws AIError(INVALID_CONTENT) on the first violation found.
 */
function validateChatHistory(history) {
    let prevContent = null;
    for (const currContent of history) {
        const { role, parts } = currContent;
        if (!prevContent && role !== 'user') {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}`);
        }
        if (!POSSIBLE_ROLES.includes(role)) {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`);
        }
        if (!Array.isArray(parts)) {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`);
        }
        if (parts.length === 0) {
            throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`);
        }
        // Tally how many of each known Part field appears in this Content.
        const countFields = {
            text: 0,
            inlineData: 0,
            functionCall: 0,
            functionResponse: 0,
            thought: 0,
            thoughtSignature: 0,
            executableCode: 0,
            codeExecutionResult: 0
        };
        for (const part of parts) {
            for (const field of VALID_PART_FIELDS) {
                if (field in part) {
                    countFields[field] += 1;
                }
            }
        }
        // Reject Part fields not permitted for this role.
        const validParts = VALID_PARTS_PER_ROLE[role];
        for (const field of VALID_PART_FIELDS) {
            if (!validParts.includes(field) && countFields[field] > 0) {
                throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${field}' part`);
            }
        }
        // Enforce legal role ordering relative to the previous Content.
        if (prevContent) {
            const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role];
            if (!validPreviousContentRoles.includes(prevContent.role)) {
                throw new AIError(AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't follow '${prevContent.role}'. Valid previous roles: ${JSON.stringify(VALID_PREVIOUS_CONTENT_ROLES)}`);
            }
        }
        prevContent = currContent;
    }
}
2359
+
2360
+ /**
2361
+ * @license
2362
+ * Copyright 2024 Google LLC
2363
+ *
2364
+ * Licensed under the Apache License, Version 2.0 (the "License");
2365
+ * you may not use this file except in compliance with the License.
2366
+ * You may obtain a copy of the License at
2367
+ *
2368
+ * http://www.apache.org/licenses/LICENSE-2.0
2369
+ *
2370
+ * Unless required by applicable law or agreed to in writing, software
2371
+ * distributed under the License is distributed on an "AS IS" BASIS,
2372
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2373
+ * See the License for the specific language governing permissions and
2374
+ * limitations under the License.
2375
+ */
2376
+ /**
2377
+ * Used to break the internal promise chain when an error is already handled
2378
+ * by the user, preventing duplicate console logs.
2379
+ */
2380
+ const SILENT_ERROR = 'SILENT_ERROR';
2381
+ /**
2382
+ * Prevent infinite loop if the model continues to request sequential
2383
+ * function calls during automatic function calling.
2384
+ */
2385
+ const DEFAULT_MAX_SEQUENTIAL_FUNCTION_CALLS = 10;
2386
+ /**
2387
+ * ChatSession class that enables sending chat messages and stores
2388
+ * history of sent and received messages so far.
2389
+ *
2390
+ * @public
2391
+ */
2392
+ class ChatSession {
2393
+ constructor(apiSettings, model, chromeAdapter, params, requestOptions) {
2394
+ this.model = model;
2395
+ this.chromeAdapter = chromeAdapter;
2396
+ this.params = params;
2397
+ this.requestOptions = requestOptions;
2398
+ this._history = [];
2399
+ /**
2400
+ * Ensures sequential execution of chat messages to maintain history order.
2401
+ * Each call waits for the previous one to settle before proceeding.
2402
+ */
2403
+ this._sendPromise = Promise.resolve();
2404
+ this._apiSettings = apiSettings;
2405
+ if (params?.history) {
2406
+ validateChatHistory(params.history);
2407
+ this._history = params.history;
2408
+ }
2409
+ }
2410
+ /**
2411
+ * Gets the chat history so far. Blocked prompts are not added to history.
2412
+ * Neither blocked candidates nor the prompts that generated them are added
2413
+ * to history.
2414
+ */
2415
+ async getHistory() {
2416
+ await this._sendPromise;
2417
+ return this._history;
2418
+ }
2419
+ /**
2420
+ * Format Content into a request for generateContent or
2421
+ * generateContentStream.
2422
+ * @internal
2423
+ */
2424
+ _formatRequest(incomingContent, tempHistory) {
2425
+ return {
2426
+ safetySettings: this.params?.safetySettings,
2427
+ generationConfig: this.params?.generationConfig,
2428
+ tools: this.params?.tools,
2429
+ toolConfig: this.params?.toolConfig,
2430
+ systemInstruction: this.params?.systemInstruction,
2431
+ contents: [...this._history, ...tempHistory, incomingContent]
2432
+ };
2433
+ }
2434
+ /**
2435
+ * Sends a chat message and receives a non-streaming
2436
+ * {@link GenerateContentResult}
2437
+ */
2438
+ async sendMessage(request, singleRequestOptions) {
2439
+ let finalResult = {};
2440
+ await this._sendPromise;
2441
+ /**
2442
+ * Temporarily store multiple turns for cases like automatic function
2443
+ * calling, only writing them to official history when the entire
2444
+ * sequence has completed successfully.
2445
+ */
2446
+ const tempHistory = [];
2447
+ this._sendPromise = this._sendPromise.then(async () => {
2448
+ let functionCalls;
2449
+ let functionCallTurnCount = 0;
2450
+ const functionCallMaxTurns = this.requestOptions?.maxSequentalFunctionCalls ??
2451
+ DEFAULT_MAX_SEQUENTIAL_FUNCTION_CALLS;
2452
+ // Repeats until model returns a response with no function calls
2453
+ // or until `functionCallMaxTurns` is met or exceeded.
2454
+ do {
2455
+ let formattedContent;
2456
+ if (functionCalls) {
2457
+ functionCallTurnCount++;
2458
+ const functionResponseParts = await this._callFunctionsAsNeeded(functionCalls);
2459
+ formattedContent = formatNewContent(functionResponseParts);
2460
+ }
2461
+ else {
2462
+ formattedContent = formatNewContent(request);
2463
+ }
2464
+ const formattedRequest = this._formatRequest(formattedContent, tempHistory);
2465
+ tempHistory.push(formattedContent);
2466
+ const result = await generateContent(this._apiSettings, this.model, formattedRequest, this.chromeAdapter, {
2467
+ ...this.requestOptions,
2468
+ ...singleRequestOptions
2469
+ });
2470
+ if (result) {
2471
+ finalResult = result;
2472
+ functionCalls = this._getCallableFunctionCalls(result.response);
2473
+ if (result.response.candidates &&
2474
+ result.response.candidates.length > 0) {
2475
+ // TODO: Make this update atomic. If creating `responseContent` throws,
2476
+ // history will contain the user message but not the response, causing
2477
+ // validation errors on the next request.
2478
+ const responseContent = {
2479
+ parts: result.response.candidates?.[0].content.parts || [],
2480
+ // Response seems to come back without a role set.
2481
+ role: result.response.candidates?.[0].content.role || 'model'
2482
+ };
2483
+ tempHistory.push(responseContent);
2484
+ }
2485
+ else {
2486
+ const blockErrorMessage = formatBlockErrorMessage(result.response);
2487
+ if (blockErrorMessage) {
2488
+ logger.warn(`sendMessage() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`);
2489
+ }
2490
+ }
2491
+ }
2492
+ else {
2493
+ functionCalls = undefined;
2494
+ }
2495
+ } while (functionCalls && functionCallTurnCount < functionCallMaxTurns);
2496
+ if (functionCalls && functionCallTurnCount >= functionCallMaxTurns) {
2497
+ logger.warn(`Automatic function calling exceeded the limit of` +
2498
+ ` ${functionCallMaxTurns} function calls. Returning last model response.`);
2499
+ }
2500
+ });
2501
+ await this._sendPromise;
2502
+ this._history = this._history.concat(tempHistory);
2503
+ return finalResult;
2504
+ }
2505
+ /**
2506
+ * Sends a chat message and receives the response as a
2507
+ * {@link GenerateContentStreamResult} containing an iterable stream
2508
+ * and a response promise.
2509
+ */
2510
+ async sendMessageStream(request, singleRequestOptions) {
2511
+ await this._sendPromise;
2512
+ /**
2513
+ * Temporarily store multiple turns for cases like automatic function
2514
+ * calling, only writing them to official history when the entire
2515
+ * sequence has completed successfully.
2516
+ */
2517
+ const tempHistory = [];
2518
+ const callGenerateContentStream = async () => {
2519
+ let functionCalls;
2520
+ let functionCallTurnCount = 0;
2521
+ const functionCallMaxTurns = this.requestOptions?.maxSequentalFunctionCalls ??
2522
+ DEFAULT_MAX_SEQUENTIAL_FUNCTION_CALLS;
2523
+ let result;
2524
+ // Repeats until model returns a response with no function calls
2525
+ // or until `functionCallMaxTurns` is met or exceeded.
2526
+ do {
2527
+ let formattedContent;
2528
+ if (functionCalls) {
2529
+ functionCallTurnCount++;
2530
+ const functionResponseParts = await this._callFunctionsAsNeeded(functionCalls);
2531
+ formattedContent = formatNewContent(functionResponseParts);
2532
+ }
2533
+ else {
2534
+ formattedContent = formatNewContent(request);
2535
+ }
2536
+ tempHistory.push(formattedContent);
2537
+ const formattedRequest = this._formatRequest(formattedContent, tempHistory);
2538
+ result = await generateContentStream(this._apiSettings, this.model, formattedRequest, this.chromeAdapter, {
2539
+ ...this.requestOptions,
2540
+ ...singleRequestOptions
2541
+ });
2542
+ functionCalls = this._getCallableFunctionCalls(result.firstValue);
2543
+ if (functionCalls &&
2544
+ result.firstValue &&
2545
+ result.firstValue.candidates &&
2546
+ result.firstValue.candidates.length > 0) {
2547
+ const responseContent = {
2548
+ ...result.firstValue.candidates[0].content
2549
+ };
2550
+ if (!responseContent.role) {
2551
+ responseContent.role = 'model';
2552
+ }
2553
+ tempHistory.push(responseContent);
2554
+ }
2555
+ } while (functionCalls && functionCallTurnCount < functionCallMaxTurns);
2556
+ if (functionCalls && functionCallTurnCount >= functionCallMaxTurns) {
2557
+ logger.warn(`Automatic function calling exceeded the limit of` +
2558
+ ` ${functionCallMaxTurns} function calls. Returning last model response.`);
2559
+ }
2560
+ return { stream: result.stream, response: result.response };
2561
+ };
2562
+ const streamPromise = callGenerateContentStream();
2563
+ // Add onto the chain.
2564
+ this._sendPromise = this._sendPromise
2565
+ .then(async () => streamPromise)
2566
+ // This must be handled to avoid unhandled rejection, but jump
2567
+ // to the final catch block with a label to not log this error.
2568
+ .catch(_ignored => {
2569
+ // If the initial fetch fails, the user's `streamPromise` rejects.
2570
+ // We swallow the error here to prevent double logging in the final catch.
2571
+ throw new Error(SILENT_ERROR);
2572
+ })
2573
+ .then(streamResult => streamResult.response)
2574
+ .then(response => {
2575
+ // This runs after the stream completes. Runtime errors here cannot be
2576
+ // caught by the user because their promise has likely already resolved.
2577
+ // TODO: Move response validation logic upstream to `stream-reader` so
2578
+ // errors propagate to the user's `result.response` promise.
2579
+ if (response.candidates && response.candidates.length > 0) {
2580
+ this._history = this._history.concat(tempHistory);
2581
+ // TODO: Validate that `response.candidates[0].content` is not null.
2582
+ const responseContent = { ...response.candidates[0].content };
2583
+ if (!responseContent.role) {
2584
+ responseContent.role = 'model';
2585
+ }
2586
+ this._history.push(responseContent);
2587
+ }
2588
+ else {
2589
+ const blockErrorMessage = formatBlockErrorMessage(response);
2590
+ if (blockErrorMessage) {
2591
+ logger.warn(`sendMessageStream() was unsuccessful. ${blockErrorMessage}. Inspect response object for details.`);
2592
+ }
2593
+ }
2594
+ })
2595
+ .catch(e => {
2596
+ // Filter out errors already handled by the user or initiated by them.
2597
+ if (e.message !== SILENT_ERROR && e.name !== 'AbortError') {
2598
+ logger.error(e);
2599
+ }
2600
+ });
2601
+ return streamPromise;
2602
+ }
2603
+ /**
2604
+ * Get function calls that the SDK has references to actually call.
2605
+ * This is all-or-nothing. If the model is requesting multiple
2606
+ * function calls, all of them must have references in order for
2607
+ * automatic function calling to work.
2608
+ *
2609
+ * @internal
2610
+ */
2611
+ _getCallableFunctionCalls(response) {
2612
+ const functionDeclarationsTool = this.params?.tools?.find(tool => tool.functionDeclarations);
2613
+ if (!functionDeclarationsTool?.functionDeclarations) {
2614
+ return;
2615
+ }
2616
+ const functionCalls = getFunctionCalls(response);
2617
+ if (!functionCalls) {
2618
+ return;
2619
+ }
2620
+ for (const functionCall of functionCalls) {
2621
+ const hasFunctionReference = functionDeclarationsTool.functionDeclarations?.some(declaration => declaration.name === functionCall.name &&
2622
+ typeof declaration.functionReference === 'function');
2623
+ if (!hasFunctionReference) {
2624
+ return;
2625
+ }
2626
+ }
2627
+ return functionCalls;
2628
+ }
2629
+ /**
2630
+ * Call user-defined functions if requested by the model, and return
2631
+ * the response that should be sent to the model.
2632
+ * @internal
2633
+ */
2634
+ async _callFunctionsAsNeeded(functionCalls) {
2635
+ const activeCallList = new Map();
2636
+ const promiseList = [];
2637
+ const functionDeclarationsTool = this.params?.tools?.find(tool => tool.functionDeclarations);
2638
+ if (functionDeclarationsTool &&
2639
+ functionDeclarationsTool.functionDeclarations) {
2640
+ for (const functionCall of functionCalls) {
2641
+ const functionDeclaration = functionDeclarationsTool.functionDeclarations.find(declaration => declaration.name === functionCall.name);
2642
+ if (functionDeclaration?.functionReference) {
2643
+ const results = Promise.resolve(functionDeclaration.functionReference(functionCall.args)).catch(e => {
2644
+ const wrappedError = new AIError(AIErrorCode.ERROR, `Error in user-defined function "${functionDeclaration.name}": ${e.message}`);
2645
+ wrappedError.stack = e.stack;
2646
+ throw wrappedError;
2647
+ });
2648
+ activeCallList.set(functionCall.name, {
2649
+ id: functionCall.id,
2650
+ results
2651
+ });
2652
+ promiseList.push(results);
2653
+ }
2654
+ }
2655
+ // Wait for promises to finish.
2656
+ await Promise.all(promiseList);
2657
+ const functionResponseParts = [];
2658
+ for (const [name, callData] of activeCallList) {
2659
+ functionResponseParts.push({
2660
+ functionResponse: {
2661
+ name,
2662
+ response: await callData.results
2663
+ }
2664
+ });
2665
+ }
2666
+ return functionResponseParts;
2667
+ }
2668
+ else {
2669
+ throw new AIError(AIErrorCode.REQUEST_ERROR, `No function declarations were provided in "tools".`);
2670
+ }
2671
+ }
2672
+ }
2673
+
2674
+ /**
2675
+ * @license
2676
+ * Copyright 2024 Google LLC
2677
+ *
2678
+ * Licensed under the Apache License, Version 2.0 (the "License");
2679
+ * you may not use this file except in compliance with the License.
2680
+ * You may obtain a copy of the License at
2681
+ *
2682
+ * http://www.apache.org/licenses/LICENSE-2.0
2683
+ *
2684
+ * Unless required by applicable law or agreed to in writing, software
2685
+ * distributed under the License is distributed on an "AS IS" BASIS,
2686
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2687
+ * See the License for the specific language governing permissions and
2688
+ * limitations under the License.
2689
+ */
2690
+ async function countTokensOnCloud(apiSettings, model, params, singleRequestOptions) {
2691
+ let body = '';
2692
+ if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
2693
+ const mappedParams = mapCountTokensRequest(params, model);
2694
+ body = JSON.stringify(mappedParams);
2695
+ }
2696
+ else {
2697
+ body = JSON.stringify(params);
2698
+ }
2699
+ const response = await makeRequest({
2700
+ model,
2701
+ task: "countTokens" /* Task.COUNT_TOKENS */,
2702
+ apiSettings,
2703
+ stream: false,
2704
+ singleRequestOptions
2705
+ }, body);
2706
+ return response.json();
2707
+ }
2708
+ async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
2709
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
2710
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
2711
+ }
2712
+ return countTokensOnCloud(apiSettings, model, params, requestOptions);
2713
+ }
2714
+
2715
+ /**
2716
+ * @license
2717
+ * Copyright 2024 Google LLC
2718
+ *
2719
+ * Licensed under the Apache License, Version 2.0 (the "License");
2720
+ * you may not use this file except in compliance with the License.
2721
+ * You may obtain a copy of the License at
2722
+ *
2723
+ * http://www.apache.org/licenses/LICENSE-2.0
2724
+ *
2725
+ * Unless required by applicable law or agreed to in writing, software
2726
+ * distributed under the License is distributed on an "AS IS" BASIS,
2727
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2728
+ * See the License for the specific language governing permissions and
2729
+ * limitations under the License.
2730
+ */
2731
+ /**
2732
+ * Class for generative model APIs.
2733
+ * @public
2734
+ */
2735
+ class GenerativeModel extends AIModel {
2736
+ constructor(ai, modelParams, requestOptions, chromeAdapter) {
2737
+ super(ai, modelParams.model);
2738
+ this.chromeAdapter = chromeAdapter;
2739
+ this.generationConfig = modelParams.generationConfig || {};
2740
+ validateGenerationConfig(this.generationConfig);
2741
+ this.safetySettings = modelParams.safetySettings || [];
2742
+ this.tools = modelParams.tools;
2743
+ this.toolConfig = modelParams.toolConfig;
2744
+ this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction);
2745
+ this.requestOptions = requestOptions || {};
2746
+ }
2747
+ /**
2748
+ * Makes a single non-streaming call to the model
2749
+ * and returns an object containing a single {@link GenerateContentResponse}.
2750
+ */
2751
+ async generateContent(request, singleRequestOptions) {
2752
+ const formattedParams = formatGenerateContentInput(request);
2753
+ return generateContent(this._apiSettings, this.model, {
2754
+ generationConfig: this.generationConfig,
2755
+ safetySettings: this.safetySettings,
2756
+ tools: this.tools,
2757
+ toolConfig: this.toolConfig,
2758
+ systemInstruction: this.systemInstruction,
2759
+ ...formattedParams
2760
+ }, this.chromeAdapter,
2761
+ // Merge request options
2762
+ {
2763
+ ...this.requestOptions,
2764
+ ...singleRequestOptions
2765
+ });
2766
+ }
2767
+ /**
2768
+ * Makes a single streaming call to the model
2769
+ * and returns an object containing an iterable stream that iterates
2770
+ * over all chunks in the streaming response as well as
2771
+ * a promise that returns the final aggregated response.
2772
+ */
2773
+ async generateContentStream(request, singleRequestOptions) {
2774
+ const formattedParams = formatGenerateContentInput(request);
2775
+ const { stream, response } = await generateContentStream(this._apiSettings, this.model, {
2776
+ generationConfig: this.generationConfig,
2777
+ safetySettings: this.safetySettings,
2778
+ tools: this.tools,
2779
+ toolConfig: this.toolConfig,
2780
+ systemInstruction: this.systemInstruction,
2781
+ ...formattedParams
2782
+ }, this.chromeAdapter,
2783
+ // Merge request options
2784
+ {
2785
+ ...this.requestOptions,
2786
+ ...singleRequestOptions
2787
+ });
2788
+ return { stream, response };
2789
+ }
2790
+ /**
2791
+ * Gets a new {@link ChatSession} instance which can be used for
2792
+ * multi-turn chats.
2793
+ */
2794
+ startChat(startChatParams) {
2795
+ return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, {
2796
+ tools: this.tools,
2797
+ toolConfig: this.toolConfig,
2798
+ systemInstruction: this.systemInstruction,
2799
+ generationConfig: this.generationConfig,
2800
+ safetySettings: this.safetySettings,
2801
+ /**
2802
+ * Overrides params inherited from GenerativeModel with those explicitly set in the
2803
+ * StartChatParams. For example, if startChatParams.generationConfig is set, it'll override
2804
+ * this.generationConfig.
2805
+ */
2806
+ ...startChatParams
2807
+ }, this.requestOptions);
2808
+ }
2809
+ /**
2810
+ * Counts the tokens in the provided request.
2811
+ */
2812
+ async countTokens(request, singleRequestOptions) {
2813
+ const formattedParams = formatGenerateContentInput(request);
2814
+ return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter,
2815
+ // Merge request options
2816
+ {
2817
+ ...this.requestOptions,
2818
+ ...singleRequestOptions
2819
+ });
2820
+ }
2821
+ }
2822
+ /**
2823
+ * Client-side validation of some common `GenerationConfig` pitfalls, in order
2824
+ * to save the developer a wasted request.
2825
+ */
2826
+ function validateGenerationConfig(generationConfig) {
2827
+ if (
2828
+ // != allows for null and undefined. 0 is considered "set" by the model
2829
+ generationConfig.thinkingConfig?.thinkingBudget != null &&
2830
+ generationConfig.thinkingConfig?.thinkingLevel) {
2831
+ throw new AIError(AIErrorCode.UNSUPPORTED, `Cannot set both thinkingBudget and thinkingLevel in a config.`);
2832
+ }
2833
+ }
2834
+
2835
+ /**
2836
+ * @license
2837
+ * Copyright 2025 Google LLC
2838
+ *
2839
+ * Licensed under the Apache License, Version 2.0 (the "License");
2840
+ * you may not use this file except in compliance with the License.
2841
+ * You may obtain a copy of the License at
2842
+ *
2843
+ * http://www.apache.org/licenses/LICENSE-2.0
2844
+ *
2845
+ * Unless required by applicable law or agreed to in writing, software
2846
+ * distributed under the License is distributed on an "AS IS" BASIS,
2847
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2848
+ * See the License for the specific language governing permissions and
2849
+ * limitations under the License.
2850
+ */
2851
+ /**
2852
+ * Represents an active, real-time, bidirectional conversation with the model.
2853
+ *
2854
+ * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
2855
+ *
2856
+ * @beta
2857
+ */
2858
+ class LiveSession {
2859
+ /**
2860
+ * @internal
2861
+ */
2862
+ constructor(webSocketHandler, serverMessages) {
2863
+ this.webSocketHandler = webSocketHandler;
2864
+ this.serverMessages = serverMessages;
2865
+ /**
2866
+ * Indicates whether this Live session is closed.
2867
+ *
2868
+ * @beta
2869
+ */
2870
+ this.isClosed = false;
2871
+ /**
2872
+ * Indicates whether this Live session is being controlled by an `AudioConversationController`.
2873
+ *
2874
+ * @beta
2875
+ */
2876
+ this.inConversation = false;
2877
+ }
2878
+ /**
2879
+ * Sends content to the server.
2880
+ *
2881
+ * @param request - The message to send to the model.
2882
+ * @param turnComplete - Indicates if the turn is complete. Defaults to false.
2883
+ * @throws If this session has been closed.
2884
+ *
2885
+ * @beta
2886
+ */
2887
+ async send(request, turnComplete = true) {
2888
+ if (this.isClosed) {
2889
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2890
+ }
2891
+ const newContent = formatNewContent(request);
2892
+ const message = {
2893
+ clientContent: {
2894
+ turns: [newContent],
2895
+ turnComplete
2896
+ }
2897
+ };
2898
+ this.webSocketHandler.send(JSON.stringify(message));
2899
+ }
2900
+ /**
2901
+ * Sends text to the server in realtime.
2902
+ *
2903
+ * @example
2904
+ * ```javascript
2905
+ * liveSession.sendTextRealtime("Hello, how are you?");
2906
+ * ```
2907
+ *
2908
+ * @param text - The text data to send.
2909
+ * @throws If this session has been closed.
2910
+ *
2911
+ * @beta
2912
+ */
2913
+ async sendTextRealtime(text) {
2914
+ if (this.isClosed) {
2915
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2916
+ }
2917
+ const message = {
2918
+ realtimeInput: {
2919
+ text
2920
+ }
2921
+ };
2922
+ this.webSocketHandler.send(JSON.stringify(message));
2923
+ }
2924
+ /**
2925
+ * Sends audio data to the server in realtime.
2926
+ *
2927
+ * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
2928
+ * little-endian.
2929
+ *
2930
+ * @example
2931
+ * ```javascript
2932
+ * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
2933
+ * const blob = { mimeType: "audio/pcm", data: pcmData };
2934
+ * liveSession.sendAudioRealtime(blob);
2935
+ * ```
2936
+ *
2937
+ * @param blob - The base64-encoded PCM data to send to the server in realtime.
2938
+ * @throws If this session has been closed.
2939
+ *
2940
+ * @beta
2941
+ */
2942
+ async sendAudioRealtime(blob) {
2943
+ if (this.isClosed) {
2944
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2945
+ }
2946
+ const message = {
2947
+ realtimeInput: {
2948
+ audio: blob
2949
+ }
2950
+ };
2951
+ this.webSocketHandler.send(JSON.stringify(message));
2952
+ }
2953
+ /**
2954
+ * Sends video data to the server in realtime.
2955
+ *
2956
+ * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It
2957
+ * is recommended to set `mimeType` to `image/jpeg`.
2958
+ *
2959
+ * @example
2960
+ * ```javascript
2961
+ * // const videoFrame = ... base64-encoded JPEG data
2962
+ * const blob = { mimeType: "image/jpeg", data: videoFrame };
2963
+ * liveSession.sendVideoRealtime(blob);
2964
+ * ```
2965
+ * @param blob - The base64-encoded video data to send to the server in realtime.
2966
+ * @throws If this session has been closed.
2967
+ *
2968
+ * @beta
2969
+ */
2970
+ async sendVideoRealtime(blob) {
2971
+ if (this.isClosed) {
2972
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2973
+ }
2974
+ const message = {
2975
+ realtimeInput: {
2976
+ video: blob
2977
+ }
2978
+ };
2979
+ this.webSocketHandler.send(JSON.stringify(message));
2980
+ }
2981
+ /**
2982
+ * Sends function responses to the server.
2983
+ *
2984
+ * @param functionResponses - The function responses to send.
2985
+ * @throws If this session has been closed.
2986
+ *
2987
+ * @beta
2988
+ */
2989
+ async sendFunctionResponses(functionResponses) {
2990
+ if (this.isClosed) {
2991
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
2992
+ }
2993
+ const message = {
2994
+ toolResponse: {
2995
+ functionResponses
2996
+ }
2997
+ };
2998
+ this.webSocketHandler.send(JSON.stringify(message));
2999
+ }
3000
+ /**
3001
+ * Yields messages received from the server.
3002
+ * This can only be used by one consumer at a time.
3003
+ *
3004
+ * @returns An `AsyncGenerator` that yields server messages as they arrive.
3005
+ * @throws If the session is already closed, or if we receive a response that we don't support.
3006
+ *
3007
+ * @beta
3008
+ */
3009
+ async *receive() {
3010
+ if (this.isClosed) {
3011
+ throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot read from a Live session that is closed. Try starting a new Live session.');
3012
+ }
3013
+ for await (const message of this.serverMessages) {
3014
+ if (message && typeof message === 'object') {
3015
+ if (LiveResponseType.SERVER_CONTENT in message) {
3016
+ yield {
3017
+ type: 'serverContent',
3018
+ ...message
3019
+ .serverContent
3020
+ };
3021
+ }
3022
+ else if (LiveResponseType.TOOL_CALL in message) {
3023
+ yield {
3024
+ type: 'toolCall',
3025
+ ...message
3026
+ .toolCall
3027
+ };
3028
+ }
3029
+ else if (LiveResponseType.TOOL_CALL_CANCELLATION in message) {
3030
+ yield {
3031
+ type: 'toolCallCancellation',
3032
+ ...message.toolCallCancellation
3033
+ };
3034
+ }
3035
+ else if ('goAway' in message) {
3036
+ const notice = message.goAway;
3037
+ yield {
3038
+ type: LiveResponseType.GOING_AWAY_NOTICE,
3039
+ timeLeft: parseDuration(notice.timeLeft)
3040
+ };
3041
+ }
3042
+ else {
3043
+ logger.warn(`Received an unknown message type from the server: ${JSON.stringify(message)}`);
3044
+ }
3045
+ }
3046
+ else {
3047
+ logger.warn(`Received an invalid message from the server: ${JSON.stringify(message)}`);
3048
+ }
3049
+ }
3050
+ }
3051
+ /**
3052
+ * Closes this session.
3053
+ * All methods on this session will throw an error once this resolves.
3054
+ *
3055
+ * @beta
3056
+ */
3057
+ async close() {
3058
+ if (!this.isClosed) {
3059
+ this.isClosed = true;
3060
+ await this.webSocketHandler.close(1000, 'Client closed session.');
3061
+ }
3062
+ }
3063
+ /**
3064
+ * Sends realtime input to the server.
3065
+ *
3066
+ * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
3067
+ *
3068
+ * @param mediaChunks - The media chunks to send.
3069
+ * @throws If this session has been closed.
3070
+ *
3071
+ * @beta
3072
+ */
3073
+ async sendMediaChunks(mediaChunks) {
3074
+ if (this.isClosed) {
3075
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
3076
+ }
3077
+ // The backend does not support sending more than one mediaChunk in one message.
3078
+ // Work around this limitation by sending mediaChunks in separate messages.
3079
+ mediaChunks.forEach(mediaChunk => {
3080
+ const message = {
3081
+ realtimeInput: { mediaChunks: [mediaChunk] }
3082
+ };
3083
+ this.webSocketHandler.send(JSON.stringify(message));
3084
+ });
3085
+ }
3086
+ /**
3087
+ * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
3088
+ *
3089
+ * Sends a stream of {@link GenerativeContentBlob}.
3090
+ *
3091
+ * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
3092
+ * @throws If this session has been closed.
3093
+ *
3094
+ * @beta
3095
+ */
3096
+ async sendMediaStream(mediaChunkStream) {
3097
+ if (this.isClosed) {
3098
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'This LiveSession has been closed and cannot be used.');
3099
+ }
3100
+ const reader = mediaChunkStream.getReader();
3101
+ while (true) {
3102
+ try {
3103
+ const { done, value } = await reader.read();
3104
+ if (done) {
3105
+ break;
3106
+ }
3107
+ else if (!value) {
3108
+ throw new Error('Missing chunk in reader, but reader is not done.');
3109
+ }
3110
+ await this.sendMediaChunks([value]);
3111
+ }
3112
+ catch (e) {
3113
+ // Re-throw any errors that occur during stream consumption or sending.
3114
+ const message = e instanceof Error ? e.message : 'Error processing media stream.';
3115
+ throw new AIError(AIErrorCode.REQUEST_ERROR, message);
3116
+ }
3117
+ }
3118
+ }
3119
+ }
3120
+ /**
3121
+ * Parses a duration string (e.g. "3.000000001s") into a number of seconds.
3122
+ *
3123
+ * @param duration - The duration string to parse.
3124
+ * @returns The duration in seconds.
3125
+ */
3126
+ function parseDuration(duration) {
3127
+ if (!duration || !duration.endsWith('s')) {
3128
+ return 0;
3129
+ }
3130
+ return Number(duration.slice(0, -1)); // slice removes the trailing 's'.
3131
+ }
3132
+
3133
+ /**
3134
+ * @license
3135
+ * Copyright 2025 Google LLC
3136
+ *
3137
+ * Licensed under the Apache License, Version 2.0 (the "License");
3138
+ * you may not use this file except in compliance with the License.
3139
+ * You may obtain a copy of the License at
3140
+ *
3141
+ * http://www.apache.org/licenses/LICENSE-2.0
3142
+ *
3143
+ * Unless required by applicable law or agreed to in writing, software
3144
+ * distributed under the License is distributed on an "AS IS" BASIS,
3145
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3146
+ * See the License for the specific language governing permissions and
3147
+ * limitations under the License.
3148
+ */
3149
+ /**
3150
+ * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
3151
+ * interactions with Gemini.
3152
+ *
3153
+ * This class should only be instantiated with {@link getLiveGenerativeModel}.
3154
+ *
3155
+ * @beta
3156
+ */
3157
class LiveGenerativeModel extends AIModel {
    /**
     * Hidden constructor — obtain instances via {@link getLiveGenerativeModel}.
     *
     * @internal
     */
    constructor(ai, modelParams,
    /**
     * Transport used for the bidirectional Live connection.
     * @internal
     */
    _webSocketHandler) {
        super(ai, modelParams.model);
        this._webSocketHandler = _webSocketHandler;
        // Default to an empty object so connect() can destructure it safely.
        this.generationConfig = modelParams.generationConfig || {};
        this.tools = modelParams.tools;
        this.toolConfig = modelParams.toolConfig;
        // Normalized via formatSystemInstruction (accepts shorthand forms — see its definition).
        this.systemInstruction = formatSystemInstruction(modelParams.systemInstruction);
    }
    /**
     * Starts a {@link LiveSession}.
     *
     * Opens the WebSocket, sends the `setup` handshake message, and waits for
     * the server's `setupComplete` acknowledgement before handing the open
     * connection to a new {@link LiveSession}.
     *
     * @returns A {@link LiveSession}.
     * @throws If the connection failed to be established with the server.
     *
     * @beta
     */
    async connect() {
        const url = new WebSocketUrl(this._apiSettings);
        await this._webSocketHandler.connect(url.toString());
        // The fully-qualified model resource path differs per backend:
        // the Google AI backend has no `locations/` segment, Vertex AI does.
        let fullModelPath;
        if (this._apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
            fullModelPath = `projects/${this._apiSettings.project}/${this.model}`;
        }
        else {
            fullModelPath = `projects/${this._apiSettings.project}/locations/${this._apiSettings.location}/${this.model}`;
        }
        // inputAudioTranscription and outputAudioTranscription are on the generation config in the public API,
        // but the backend expects them to be in the `setup` message.
        const { inputAudioTranscription, outputAudioTranscription, ...generationConfig } = this.generationConfig;
        const setupMessage = {
            setup: {
                model: fullModelPath,
                generationConfig,
                tools: this.tools,
                toolConfig: this.toolConfig,
                systemInstruction: this.systemInstruction,
                inputAudioTranscription,
                outputAudioTranscription
            }
        };
        try {
            // Begin listening for server messages, and begin the handshake by sending the 'setupMessage'.
            // listen() must be called before send() so the first server reply is not missed.
            const serverMessages = this._webSocketHandler.listen();
            this._webSocketHandler.send(JSON.stringify(setupMessage));
            // Verify we received the handshake response 'setupComplete'
            const firstMessage = (await serverMessages.next()).value;
            if (!firstMessage ||
                !(typeof firstMessage === 'object') ||
                !('setupComplete' in firstMessage)) {
                // 1011 is the standard "internal error" WebSocket close code.
                await this._webSocketHandler.close(1011, 'Handshake failure');
                throw new AIError(AIErrorCode.RESPONSE_ERROR, 'Server connection handshake failed. The server did not respond with a setupComplete message.');
            }
            return new LiveSession(this._webSocketHandler, serverMessages);
        }
        catch (e) {
            // Ensure connection is closed on any setup error
            await this._webSocketHandler.close();
            throw e;
        }
    }
}
3226
+
3227
+ /**
3228
+ * @license
3229
+ * Copyright 2025 Google LLC
3230
+ *
3231
+ * Licensed under the Apache License, Version 2.0 (the "License");
3232
+ * you may not use this file except in compliance with the License.
3233
+ * You may obtain a copy of the License at
3234
+ *
3235
+ * http://www.apache.org/licenses/LICENSE-2.0
3236
+ *
3237
+ * Unless required by applicable law or agreed to in writing, software
3238
+ * distributed under the License is distributed on an "AS IS" BASIS,
3239
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3240
+ * See the License for the specific language governing permissions and
3241
+ * limitations under the License.
3242
+ */
3243
+ /**
3244
+ * Class for Imagen model APIs.
3245
+ *
3246
+ * This class provides methods for generating images using the Imagen model.
3247
+ *
3248
+ * @example
3249
+ * ```javascript
3250
+ * const imagen = new ImagenModel(
3251
+ * ai,
3252
+ * {
3253
+ * model: 'imagen-3.0-generate-002'
3254
+ * }
3255
+ * );
3256
+ *
3257
+ * const response = await imagen.generateImages('A photo of a cat');
3258
+ * if (response.images.length > 0) {
3259
+ * console.log(response.images[0].bytesBase64Encoded);
3260
+ * }
3261
+ * ```
3262
+ *
3263
+ * @public
3264
+ */
3265
class ImagenModel extends AIModel {
    /**
     * Constructs a new instance of the {@link ImagenModel} class.
     *
     * @param ai - an {@link AI} instance.
     * @param modelParams - Parameters to use when making requests to Imagen.
     * @param requestOptions - Additional options to use when making requests.
     *
     * @throws If the `apiKey` or `projectId` fields are missing in your
     * Firebase config.
     */
    constructor(ai, modelParams, requestOptions) {
        const { model, generationConfig, safetySettings } = modelParams;
        super(ai, model);
        this.requestOptions = requestOptions;
        this.generationConfig = generationConfig;
        this.safetySettings = safetySettings;
    }
    /**
     * Generates images using the Imagen model and returns them as
     * base64-encoded strings.
     *
     * @param prompt - A text prompt describing the image(s) to generate.
     * @param singleRequestOptions - Options applied to this request only;
     * they override the model-level `requestOptions` on conflict.
     * @returns A promise that resolves to an {@link ImagenGenerationResponse}
     * object containing the generated images.
     *
     * @throws If the request to generate images fails. This happens if the
     * prompt is blocked.
     *
     * @remarks
     * If the prompt was not blocked, but one or more of the generated images were filtered, the
     * returned object will have a `filteredReason` property.
     * If all images are filtered, the `images` array will be empty.
     *
     * @public
     */
    async generateImages(prompt, singleRequestOptions) {
        // The predict body takes a single flat parameters object, so the
        // generation config and safety settings are merged together here.
        const body = createPredictRequestBody(prompt, {
            ...this.generationConfig,
            ...this.safetySettings
        });
        const response = await makeRequest({
            task: "predict" /* Task.PREDICT */,
            model: this.model,
            apiSettings: this._apiSettings,
            stream: false,
            // Merge request options. Single request options overwrite the model's request options.
            singleRequestOptions: {
                ...this.requestOptions,
                ...singleRequestOptions
            }
        }, JSON.stringify(body));
        return handlePredictResponse(response);
    }
    /**
     * Generates images to Cloud Storage for Firebase using the Imagen model.
     *
     * @internal This method is temporarily internal.
     *
     * @param prompt - A text prompt describing the image(s) to generate.
     * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.
     * This should be a directory. For example, `gs://my-bucket/my-directory/`.
     * @param singleRequestOptions - Options applied to this request only;
     * they override the model-level `requestOptions` on conflict.
     * @returns A promise that resolves to an {@link ImagenGenerationResponse}
     * object containing the URLs of the generated images.
     *
     * @throws If the request to generate images fails. This happens if
     * the prompt is blocked.
     *
     * @remarks
     * If the prompt was not blocked, but one or more of the generated images were filtered, the
     * returned object will have a `filteredReason` property.
     * If all images are filtered, the `images` array will be empty.
     */
    async generateImagesGCS(prompt, gcsURI, singleRequestOptions) {
        // Identical to generateImages() except the destination `gcsURI` is
        // included in the predict body, directing output to Cloud Storage.
        const body = createPredictRequestBody(prompt, {
            gcsURI,
            ...this.generationConfig,
            ...this.safetySettings
        });
        const response = await makeRequest({
            task: "predict" /* Task.PREDICT */,
            model: this.model,
            apiSettings: this._apiSettings,
            stream: false,
            // Merge request options. Single request options overwrite the model's request options.
            singleRequestOptions: {
                ...this.requestOptions,
                ...singleRequestOptions
            }
        }, JSON.stringify(body));
        return handlePredictResponse(response);
    }
}
3358
+
3359
+ /**
3360
+ * @license
3361
+ * Copyright 2025 Google LLC
3362
+ *
3363
+ * Licensed under the Apache License, Version 2.0 (the "License");
3364
+ * you may not use this file except in compliance with the License.
3365
+ * You may obtain a copy of the License at
3366
+ *
3367
+ * http://www.apache.org/licenses/LICENSE-2.0
3368
+ *
3369
+ * Unless required by applicable law or agreed to in writing, software
3370
+ * distributed under the License is distributed on an "AS IS" BASIS,
3371
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3372
+ * See the License for the specific language governing permissions and
3373
+ * limitations under the License.
3374
+ */
3375
+ /**
3376
+ * A wrapper for the native `WebSocket` available in both Browsers and Node >= 22.
3377
+ *
3378
+ * @internal
3379
+ */
3380
class WebSocketHandlerImpl {
    constructor() {
        // Fail fast in environments without a global WebSocket (e.g. Node < 22).
        if (typeof WebSocket === 'undefined') {
            throw new AIError(AIErrorCode.UNSUPPORTED, 'The WebSocket API is not available in this environment. ' +
                'The "Live" feature is not supported here. It is supported in ' +
                'modern browser windows, Web Workers with WebSocket support, and Node >= 22.');
        }
    }
    /**
     * Opens a WebSocket connection to `url`. Resolves on the first 'open'
     * event; rejects with an AIError on the first 'error' event.
     */
    connect(url) {
        return new Promise((resolve, reject) => {
            this.ws = new WebSocket(url);
            this.ws.binaryType = 'blob'; // Only important to set in Node
            this.ws.addEventListener('open', () => resolve(), { once: true });
            this.ws.addEventListener('error', () => reject(new AIError(AIErrorCode.FETCH_ERROR, `Error event raised on WebSocket`)), { once: true });
            // Not `once`: log a server-initiated close whenever it happens.
            this.ws.addEventListener('close', (closeEvent) => {
                if (closeEvent.reason) {
                    logger.warn(`WebSocket connection closed by server. Reason: '${closeEvent.reason}'`);
                }
            });
        });
    }
    /**
     * Sends `data` over the socket.
     * @throws AIError(REQUEST_ERROR) if the socket is absent or not OPEN.
     */
    send(data) {
        if (!this.ws || this.ws.readyState !== WebSocket.OPEN) {
            throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not open.');
        }
        this.ws.send(data);
    }
    /**
     * Async generator that yields each JSON-parsed server message until the
     * socket closes.
     *
     * Mechanism: event listeners push into `messageQueue`/`errorQueue` and
     * wake the consumer loop by resolving `resolvePromise`. The loop drains
     * errors first (throwing), then messages (yielding), and otherwise parks
     * on a fresh promise until the next event arrives.
     */
    async *listen() {
        if (!this.ws) {
            throw new AIError(AIErrorCode.REQUEST_ERROR, 'WebSocket is not connected.');
        }
        const messageQueue = [];
        const errorQueue = [];
        // When non-null, resolving this wakes the consumer loop below.
        let resolvePromise = null;
        let isClosed = false;
        const messageListener = async (event) => {
            let data;
            // Node delivers Blobs (binaryType 'blob'); browsers may deliver strings.
            if (event.data instanceof Blob) {
                data = await event.data.text();
            }
            else if (typeof event.data === 'string') {
                data = event.data;
            }
            else {
                errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Failed to parse WebSocket response. Expected data to be a Blob or string, but was ${typeof event.data}.`));
                if (resolvePromise) {
                    resolvePromise();
                    resolvePromise = null;
                }
                return;
            }
            try {
                const obj = JSON.parse(data);
                messageQueue.push(obj);
            }
            catch (e) {
                const err = e;
                errorQueue.push(new AIError(AIErrorCode.PARSE_FAILED, `Error parsing WebSocket message to JSON: ${err.message}`));
            }
            if (resolvePromise) {
                resolvePromise();
                resolvePromise = null;
            }
        };
        const errorListener = () => {
            errorQueue.push(new AIError(AIErrorCode.FETCH_ERROR, 'WebSocket connection error.'));
            if (resolvePromise) {
                resolvePromise();
                resolvePromise = null;
            }
        };
        const closeListener = (event) => {
            if (event.reason) {
                logger.warn(`WebSocket connection closed by the server with reason: ${event.reason}`);
            }
            isClosed = true;
            if (resolvePromise) {
                resolvePromise();
                resolvePromise = null;
            }
            // Clean up listeners to prevent memory leaks
            this.ws?.removeEventListener('message', messageListener);
            this.ws?.removeEventListener('close', closeListener);
            this.ws?.removeEventListener('error', errorListener);
        };
        this.ws.addEventListener('message', messageListener);
        this.ws.addEventListener('close', closeListener);
        this.ws.addEventListener('error', errorListener);
        while (!isClosed) {
            if (errorQueue.length > 0) {
                const error = errorQueue.shift();
                throw error;
            }
            if (messageQueue.length > 0) {
                yield messageQueue.shift();
            }
            else {
                // Park until a listener resolves this promise.
                await new Promise(resolve => {
                    resolvePromise = resolve;
                });
            }
        }
        // If the loop terminated because isClosed is true, check for any final errors
        if (errorQueue.length > 0) {
            const error = errorQueue.shift();
            throw error;
        }
    }
    /**
     * Closes the socket. Resolves when the 'close' event fires, or
     * immediately when calling close() would be illegal for the current
     * readyState, or when no socket was ever created.
     */
    close(code, reason) {
        return new Promise(resolve => {
            if (!this.ws) {
                return resolve();
            }
            this.ws.addEventListener('close', () => resolve(), { once: true });
            // Calling 'close' during these states results in an error.
            if (this.ws.readyState === WebSocket.CLOSED ||
                this.ws.readyState === WebSocket.CONNECTING) {
                return resolve();
            }
            if (this.ws.readyState !== WebSocket.CLOSING) {
                this.ws.close(code, reason);
            }
        });
    }
}
3505
+
3506
+ /**
3507
+ * @license
3508
+ * Copyright 2025 Google LLC
3509
+ *
3510
+ * Licensed under the Apache License, Version 2.0 (the "License");
3511
+ * you may not use this file except in compliance with the License.
3512
+ * You may obtain a copy of the License at
3513
+ *
3514
+ * http://www.apache.org/licenses/LICENSE-2.0
3515
+ *
3516
+ * Unless required by applicable law or agreed to in writing, software
3517
+ * distributed under the License is distributed on an "AS IS" BASIS,
3518
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3519
+ * See the License for the specific language governing permissions and
3520
+ * limitations under the License.
3521
+ */
3522
+ /**
3523
+ * {@link GenerativeModel} APIs that execute on a server-side template.
3524
+ *
3525
+ * This class should only be instantiated with {@link getTemplateGenerativeModel}.
3526
+ *
3527
+ * @beta
3528
+ */
3529
class TemplateGenerativeModel {
    /**
     * @hideconstructor
     */
    constructor(ai, requestOptions) {
        this._apiSettings = initApiSettings(ai);
        this.requestOptions = requestOptions || {};
    }
    /**
     * Makes a single non-streaming call to the model and returns an object
     * containing a single {@link GenerateContentResponse}.
     *
     * @param templateId - The ID of the server-side template to execute.
     * @param templateVariables - A key-value map of variables to populate the
     * template with.
     * @param singleRequestOptions - Per-call options; these take precedence
     * over the model-level request options.
     *
     * @beta
     */
    async generateContent(templateId, templateVariables, singleRequestOptions) {
        // Per-call options win over the model-level defaults.
        const mergedOptions = Object.assign({}, this.requestOptions, singleRequestOptions);
        return templateGenerateContent(this._apiSettings, templateId, { inputs: templateVariables }, mergedOptions);
    }
    /**
     * Makes a single streaming call to the model and returns an object
     * containing an iterable stream that iterates over all chunks in the
     * streaming response as well as a promise that returns the final aggregated
     * response.
     *
     * @param templateId - The ID of the server-side template to execute.
     * @param templateVariables - A key-value map of variables to populate the
     * template with.
     * @param singleRequestOptions - Per-call options; these take precedence
     * over the model-level request options.
     *
     * @beta
     */
    async generateContentStream(templateId, templateVariables, singleRequestOptions) {
        // Per-call options win over the model-level defaults.
        const mergedOptions = Object.assign({}, this.requestOptions, singleRequestOptions);
        return templateGenerateContentStream(this._apiSettings, templateId, { inputs: templateVariables }, mergedOptions);
    }
}
3572
+
3573
+ /**
3574
+ * @license
3575
+ * Copyright 2025 Google LLC
3576
+ *
3577
+ * Licensed under the Apache License, Version 2.0 (the "License");
3578
+ * you may not use this file except in compliance with the License.
3579
+ * You may obtain a copy of the License at
3580
+ *
3581
+ * http://www.apache.org/licenses/LICENSE-2.0
3582
+ *
3583
+ * Unless required by applicable law or agreed to in writing, software
3584
+ * distributed under the License is distributed on an "AS IS" BASIS,
3585
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3586
+ * See the License for the specific language governing permissions and
3587
+ * limitations under the License.
3588
+ */
3589
+ /**
3590
+ * Class for Imagen model APIs that execute on a server-side template.
3591
+ *
3592
+ * This class should only be instantiated with {@link getTemplateImagenModel}.
3593
+ *
3594
+ * @beta
3595
+ */
3596
class TemplateImagenModel {
    /**
     * @hideconstructor
     */
    constructor(ai, requestOptions) {
        this._apiSettings = initApiSettings(ai);
        this.requestOptions = requestOptions || {};
    }
    /**
     * Makes a single call to the model and returns an object containing a single
     * {@link ImagenGenerationResponse}.
     *
     * @param templateId - The ID of the server-side template to execute.
     * @param templateVariables - A key-value map of variables to populate the
     * template with.
     * @param singleRequestOptions - Per-call options; these take precedence
     * over the model-level request options.
     *
     * @beta
     */
    async generateImages(templateId, templateVariables, singleRequestOptions) {
        const payload = JSON.stringify({ inputs: templateVariables });
        const response = await makeRequest({
            task: "templatePredict" /* ServerPromptTemplateTask.TEMPLATE_PREDICT */,
            templateId,
            apiSettings: this._apiSettings,
            stream: false,
            // Per-call options win over the model-level defaults.
            singleRequestOptions: Object.assign({}, this.requestOptions, singleRequestOptions)
        }, payload);
        return handlePredictResponse(response);
    }
}
3628
+
3629
+ /**
3630
+ * @license
3631
+ * Copyright 2024 Google LLC
3632
+ *
3633
+ * Licensed under the Apache License, Version 2.0 (the "License");
3634
+ * you may not use this file except in compliance with the License.
3635
+ * You may obtain a copy of the License at
3636
+ *
3637
+ * http://www.apache.org/licenses/LICENSE-2.0
3638
+ *
3639
+ * Unless required by applicable law or agreed to in writing, software
3640
+ * distributed under the License is distributed on an "AS IS" BASIS,
3641
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3642
+ * See the License for the specific language governing permissions and
3643
+ * limitations under the License.
3644
+ */
3645
+ /**
3646
+ * Parent class encompassing all Schema types, with static methods that
3647
+ * allow building specific Schema types. This class can be converted with
3648
+ * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.
3649
+ * (This string conversion is automatically done when calling SDK methods.)
3650
+ * @public
3651
+ */
3652
class Schema {
    constructor(schemaParams) {
        // TODO(dlarocque): Enforce this with union types
        if (!schemaParams.type && !schemaParams.anyOf) {
            throw new AIError(AIErrorCode.INVALID_SCHEMA, "A schema must have either a 'type' or an 'anyOf' array of sub-schemas.");
        }
        // Copy every supplied param onto the instance so toJSON() can round-trip
        // arbitrary schema fields.
        // eslint-disable-next-line guard-for-in
        for (const key in schemaParams) {
            this[key] = schemaParams[key];
        }
        // Ensure these are explicitly set to avoid TS errors.
        this.type = schemaParams.type;
        this.format = schemaParams.hasOwnProperty('format')
            ? schemaParams.format
            : undefined;
        this.nullable = schemaParams.hasOwnProperty('nullable')
            ? !!schemaParams.nullable
            : false;
    }
    /**
     * Defines how this Schema should be serialized as JSON.
     * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior
     * @internal
     */
    toJSON() {
        const obj = { type: this.type };
        for (const prop of Object.keys(this)) {
            const value = this[prop];
            if (value === undefined) {
                continue;
            }
            // 'required' is only meaningful on object schemas.
            if (prop === 'required' && this.type !== SchemaType.OBJECT) {
                continue;
            }
            obj[prop] = value;
        }
        return obj;
    }
    static array(params) {
        return new ArraySchema(params, params.items);
    }
    static object(params) {
        return new ObjectSchema(params, params.properties, params.optionalProperties);
    }
    // eslint-disable-next-line id-blacklist
    static string(params) {
        return new StringSchema(params);
    }
    static enumString(params) {
        return new StringSchema(params, params.enum);
    }
    static integer(params) {
        return new IntegerSchema(params);
    }
    // eslint-disable-next-line id-blacklist
    static number(params) {
        return new NumberSchema(params);
    }
    // eslint-disable-next-line id-blacklist
    static boolean(params) {
        return new BooleanSchema(params);
    }
    static anyOf(params) {
        return new AnyOfSchema(params);
    }
}
3717
+ /**
3718
+ * Schema class for "integer" types.
3719
+ * @public
3720
+ */
3721
class IntegerSchema extends Schema {
    // Fixes `type` to INTEGER; caller-supplied params may override it.
    constructor(schemaParams) {
        super(Object.assign({ type: SchemaType.INTEGER }, schemaParams));
    }
}
3729
+ /**
3730
+ * Schema class for "number" types.
3731
+ * @public
3732
+ */
3733
class NumberSchema extends Schema {
    // Fixes `type` to NUMBER; caller-supplied params may override it.
    constructor(schemaParams) {
        super(Object.assign({ type: SchemaType.NUMBER }, schemaParams));
    }
}
3741
+ /**
3742
+ * Schema class for "boolean" types.
3743
+ * @public
3744
+ */
3745
class BooleanSchema extends Schema {
    // Fixes `type` to BOOLEAN; caller-supplied params may override it.
    constructor(schemaParams) {
        super(Object.assign({ type: SchemaType.BOOLEAN }, schemaParams));
    }
}
3753
+ /**
3754
+ * Schema class for "string" types. Can be used with or without
3755
+ * enum values.
3756
+ * @public
3757
+ */
3758
class StringSchema extends Schema {
    // Fixes `type` to STRING and records an optional set of enum values.
    constructor(schemaParams, enumValues) {
        super(Object.assign({ type: SchemaType.STRING }, schemaParams));
        this.enum = enumValues;
    }
    /**
     * @internal
     */
    toJSON() {
        const json = super.toJSON();
        // Only emit 'enum' when enum values were provided.
        if (this.enum) {
            json['enum'] = this.enum;
        }
        return json;
    }
}
3777
+ /**
3778
+ * Schema class for "array" types.
3779
+ * The `items` param should refer to the type of item that can be a member
3780
+ * of the array.
3781
+ * @public
3782
+ */
3783
class ArraySchema extends Schema {
    // Fixes `type` to ARRAY; `items` is the Schema of each array element.
    constructor(schemaParams, items) {
        super(Object.assign({ type: SchemaType.ARRAY }, schemaParams));
        this.items = items;
    }
    /**
     * @internal
     */
    toJSON() {
        const json = super.toJSON();
        // Serialize the element schema recursively.
        json.items = this.items.toJSON();
        return json;
    }
}
3800
+ /**
3801
+ * Schema class for "object" types.
3802
+ * The `properties` param must be a map of `Schema` objects.
3803
+ * @public
3804
+ */
3805
class ObjectSchema extends Schema {
    /**
     * Fixes `type` to OBJECT.
     * @param properties - Map of property name to `Schema`.
     * @param optionalProperties - Names of properties that are NOT required.
     */
    constructor(schemaParams, properties, optionalProperties = []) {
        super({
            type: SchemaType.OBJECT,
            ...schemaParams
        });
        this.properties = properties;
        this.optionalProperties = optionalProperties;
    }
    /**
     * Serializes this schema, expanding each property's sub-schema and
     * computing the `required` list from `optionalProperties`.
     * @throws AIError(INVALID_SCHEMA) if an optional property name does not
     * exist in `properties`.
     * @internal
     */
    toJSON() {
        const obj = super.toJSON();
        obj.properties = { ...this.properties };
        const required = [];
        // Set gives O(1) membership checks below (the previous Array.includes
        // inside the loop was O(n^2)); it also tolerates a null/undefined
        // optionalProperties by treating it as empty.
        const optionalSet = new Set(this.optionalProperties);
        // Validate every optional name actually refers to a declared property.
        for (const propertyKey of optionalSet) {
            if (!this.properties.hasOwnProperty(propertyKey)) {
                throw new AIError(AIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.`);
            }
        }
        for (const propertyKey in this.properties) {
            if (this.properties.hasOwnProperty(propertyKey)) {
                obj.properties[propertyKey] = this.properties[propertyKey].toJSON();
                if (!optionalSet.has(propertyKey)) {
                    required.push(propertyKey);
                }
            }
        }
        if (required.length > 0) {
            obj.required = required;
        }
        // Internal bookkeeping field; not part of the wire schema.
        delete obj.optionalProperties;
        return obj;
    }
}
3843
+ /**
3844
+ * Schema class representing a value that can conform to any of the provided sub-schemas. This is
3845
+ * useful when a field can accept multiple distinct types or structures.
3846
+ * @public
3847
+ */
3848
class AnyOfSchema extends Schema {
    // An anyOf schema carries sub-schemas instead of an explicit `type`.
    constructor(schemaParams) {
        if (schemaParams.anyOf.length === 0) {
            throw new AIError(AIErrorCode.INVALID_SCHEMA, "The 'anyOf' array must not be empty.");
        }
        super(Object.assign({}, schemaParams, {
            type: undefined // anyOf schemas do not have an explicit type
        }));
        this.anyOf = schemaParams.anyOf;
    }
    /**
     * @internal
     */
    toJSON() {
        const json = super.toJSON();
        // Ensure the 'anyOf' property contains serialized SchemaRequest objects.
        if (Array.isArray(this.anyOf)) {
            json.anyOf = this.anyOf.map(subSchema => subSchema.toJSON());
        }
        return json;
    }
}
3871
+
3872
+ /**
3873
+ * @license
3874
+ * Copyright 2025 Google LLC
3875
+ *
3876
+ * Licensed under the Apache License, Version 2.0 (the "License");
3877
+ * you may not use this file except in compliance with the License.
3878
+ * You may obtain a copy of the License at
3879
+ *
3880
+ * http://www.apache.org/licenses/LICENSE-2.0
3881
+ *
3882
+ * Unless required by applicable law or agreed to in writing, software
3883
+ * distributed under the License is distributed on an "AS IS" BASIS,
3884
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3885
+ * See the License for the specific language governing permissions and
3886
+ * limitations under the License.
3887
+ */
3888
+ /**
3889
+ * Defines the image format for images generated by Imagen.
3890
+ *
3891
+ * Use this class to specify the desired format (JPEG or PNG) and compression quality
3892
+ * for images generated by Imagen. This is typically included as part of
3893
+ * {@link ImagenModelParams}.
3894
+ *
3895
+ * @example
3896
+ * ```javascript
3897
+ * const imagenModelParams = {
3898
+ * // ... other ImagenModelParams
3899
+ * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.
3900
+ * }
3901
+ * ```
3902
+ *
3903
+ * @public
3904
+ */
3905
class ImagenImageFormat {
    constructor() {
        // PNG is the default output format.
        this.mimeType = 'image/png';
    }
    /**
     * Creates an {@link ImagenImageFormat} for a JPEG image.
     *
     * @param compressionQuality - The level of compression (a number between 0 and 100).
     * @returns An {@link ImagenImageFormat} object for a JPEG image.
     *
     * @public
     */
    static jpeg(compressionQuality) {
        const outOfRange = compressionQuality < 0 || compressionQuality > 100;
        // Warn (but do not throw) on out-of-range values; a falsy quality
        // (undefined/0) skips the check entirely.
        if (compressionQuality && outOfRange) {
            logger.warn(`Invalid JPEG compression quality of ${compressionQuality} specified; the supported range is [0, 100].`);
        }
        return { mimeType: 'image/jpeg', compressionQuality };
    }
    /**
     * Creates an {@link ImagenImageFormat} for a PNG image.
     *
     * @returns An {@link ImagenImageFormat} object for a PNG image.
     *
     * @public
     */
    static png() {
        return { mimeType: 'image/png' };
    }
}
3935
+
3936
+ /**
3937
+ * @license
3938
+ * Copyright 2025 Google LLC
3939
+ *
3940
+ * Licensed under the Apache License, Version 2.0 (the "License");
3941
+ * you may not use this file except in compliance with the License.
3942
+ * You may obtain a copy of the License at
3943
+ *
3944
+ * http://www.apache.org/licenses/LICENSE-2.0
3945
+ *
3946
+ * Unless required by applicable law or agreed to in writing, software
3947
+ * distributed under the License is distributed on an "AS IS" BASIS,
3948
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3949
+ * See the License for the specific language governing permissions and
3950
+ * limitations under the License.
3951
+ */
3952
// PCM sample rates used by the Live audio transport: 16 kHz for microphone
// input sent to the server, 24 kHz for audio received from the server
// (assumption based on naming/usage — confirm against the Live API docs).
const SERVER_INPUT_SAMPLE_RATE = 16000;
const SERVER_OUTPUT_SAMPLE_RATE = 24000;
// Name under which the worklet processor below registers itself.
const AUDIO_PROCESSOR_NAME = 'audio-processor';
/**
 * The JS for an `AudioWorkletProcessor`.
 * This processor is responsible for taking raw audio from the microphone,
 * converting it to the required 16-bit 16kHz PCM, and posting it back to the main thread.
 *
 * See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor
 *
 * It is defined as a string here so that it can be converted into a `Blob`
 * and loaded at runtime.
 */
const audioProcessorWorkletString = `
  class AudioProcessor extends AudioWorkletProcessor {
    constructor(options) {
      super();
      this.targetSampleRate = options.processorOptions.targetSampleRate;
      // 'sampleRate' is a global variable available inside the AudioWorkletGlobalScope,
      // representing the native sample rate of the AudioContext.
      this.inputSampleRate = sampleRate;
    }

    /**
     * This method is called by the browser's audio engine for each block of audio data.
     * Input is a single input, with a single channel (input[0][0]).
     */
    process(inputs) {
      const input = inputs[0];
      if (input && input.length > 0 && input[0].length > 0) {
        const pcmData = input[0]; // Float32Array of raw audio samples.

        // Simple linear interpolation for resampling.
        const resampled = new Float32Array(Math.round(pcmData.length * this.targetSampleRate / this.inputSampleRate));
        const ratio = pcmData.length / resampled.length;
        for (let i = 0; i < resampled.length; i++) {
          resampled[i] = pcmData[Math.floor(i * ratio)];
        }

        // Convert Float32 (-1, 1) samples to Int16 (-32768, 32767)
        const resampledInt16 = new Int16Array(resampled.length);
        for (let i = 0; i < resampled.length; i++) {
          const sample = Math.max(-1, Math.min(1, resampled[i]));
          if (sample < 0) {
            resampledInt16[i] = sample * 32768;
          } else {
            resampledInt16[i] = sample * 32767;
          }
        }

        this.port.postMessage(resampledInt16);
      }
      // Return true to keep the processor alive and processing the next audio block.
      return true;
    }
  }

  // Register the processor with a name that can be used to instantiate it from the main thread.
  registerProcessor('${AUDIO_PROCESSOR_NAME}', AudioProcessor);
  `;
4012
+ /**
4013
+ * Encapsulates the core logic of an audio conversation.
4014
+ *
4015
+ * @internal
4016
+ */
4017
+ class AudioConversationRunner {
4018
+ constructor(liveSession, options, deps) {
4019
+ this.liveSession = liveSession;
4020
+ this.options = options;
4021
+ this.deps = deps;
4022
+ /** A flag to indicate if the conversation has been stopped. */
4023
+ this.isStopped = false;
4024
+ /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */
4025
+ this.stopDeferred = new util.Deferred();
4026
+ /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */
4027
+ this.playbackQueue = [];
4028
+ /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */
4029
+ this.scheduledSources = [];
4030
+ /** A high-precision timeline pointer for scheduling gapless audio playback. */
4031
+ this.nextStartTime = 0;
4032
+ /** A mutex to prevent the playback processing loop from running multiple times concurrently. */
4033
+ this.isPlaybackLoopRunning = false;
4034
+ this.liveSession.inConversation = true;
4035
+ // Start listening for messages from the server.
4036
+ this.receiveLoopPromise = this.runReceiveLoop().finally(() => this.cleanup());
4037
+ // Set up the handler for receiving processed audio data from the worklet.
4038
+ // Message data has been resampled to 16kHz 16-bit PCM.
4039
+ this.deps.workletNode.port.onmessage = event => {
4040
+ if (this.isStopped) {
4041
+ return;
4042
+ }
4043
+ const pcm16 = event.data;
4044
+ const base64 = btoa(String.fromCharCode.apply(null, Array.from(new Uint8Array(pcm16.buffer))));
4045
+ const chunk = {
4046
+ mimeType: 'audio/pcm',
4047
+ data: base64
4048
+ };
4049
+ void this.liveSession.sendAudioRealtime(chunk);
4050
+ };
4051
+ }
4052
+ /**
4053
+ * Stops the conversation and unblocks the main receive loop.
4054
+ */
4055
+ async stop() {
4056
+ if (this.isStopped) {
4057
+ return;
4058
+ }
4059
+ this.isStopped = true;
4060
+ this.stopDeferred.resolve(); // Unblock the receive loop
4061
+ await this.receiveLoopPromise; // Wait for the loop and cleanup to finish
4062
+ }
4063
+ /**
4064
+ * Cleans up all audio resources (nodes, stream tracks, context) and marks the
4065
+ * session as no longer in a conversation.
4066
+ */
4067
+ cleanup() {
4068
+ this.interruptPlayback(); // Ensure all audio is stopped on final cleanup.
4069
+ this.deps.workletNode.port.onmessage = null;
4070
+ this.deps.workletNode.disconnect();
4071
+ this.deps.sourceNode.disconnect();
4072
+ this.deps.mediaStream.getTracks().forEach(track => track.stop());
4073
+ if (this.deps.audioContext.state !== 'closed') {
4074
+ void this.deps.audioContext.close();
4075
+ }
4076
+ this.liveSession.inConversation = false;
4077
+ }
4078
+ /**
4079
+ * Adds audio data to the queue and ensures the playback loop is running.
4080
+ */
4081
+ enqueueAndPlay(audioData) {
4082
+ this.playbackQueue.push(audioData);
4083
+ // Will no-op if it's already running.
4084
+ void this.processPlaybackQueue();
4085
+ }
4086
+ /**
4087
+ * Stops all current and pending audio playback and clears the queue. This is
4088
+ * called when the server indicates the model's speech was interrupted with
4089
+ * `LiveServerContent.modelTurn.interrupted`.
4090
+ */
4091
+ interruptPlayback() {
4092
+ // Stop all sources that have been scheduled. The onended event will fire for each,
4093
+ // which will clean up the scheduledSources array.
4094
+ [...this.scheduledSources].forEach(source => source.stop(0));
4095
+ // Clear the internal buffer of unprocessed audio chunks.
4096
+ this.playbackQueue.length = 0;
4097
+ // Reset the playback clock to start fresh.
4098
+ this.nextStartTime = this.deps.audioContext.currentTime;
4099
+ }
4100
+ /**
4101
+ * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.
4102
+ */
4103
+ async processPlaybackQueue() {
4104
+ if (this.isPlaybackLoopRunning) {
4105
+ return;
4106
+ }
4107
+ this.isPlaybackLoopRunning = true;
4108
+ while (this.playbackQueue.length > 0 && !this.isStopped) {
4109
+ const pcmRawBuffer = this.playbackQueue.shift();
4110
+ try {
4111
+ const pcm16 = new Int16Array(pcmRawBuffer);
4112
+ const frameCount = pcm16.length;
4113
+ const audioBuffer = this.deps.audioContext.createBuffer(1, frameCount, SERVER_OUTPUT_SAMPLE_RATE);
4114
+ // Convert 16-bit PCM to 32-bit PCM, required by the Web Audio API.
4115
+ const channelData = audioBuffer.getChannelData(0);
4116
+ for (let i = 0; i < frameCount; i++) {
4117
+ channelData[i] = pcm16[i] / 32768; // Normalize to Float32 range [-1.0, 1.0]
4118
+ }
4119
+ const source = this.deps.audioContext.createBufferSource();
4120
+ source.buffer = audioBuffer;
4121
+ source.connect(this.deps.audioContext.destination);
4122
+ // Track the source and set up a handler to remove it from tracking when it finishes.
4123
+ this.scheduledSources.push(source);
4124
+ source.onended = () => {
4125
+ this.scheduledSources = this.scheduledSources.filter(s => s !== source);
4126
+ };
4127
+ // To prevent gaps, schedule the next chunk to start either now (if we're catching up)
4128
+ // or exactly when the previous chunk is scheduled to end.
4129
+ this.nextStartTime = Math.max(this.deps.audioContext.currentTime, this.nextStartTime);
4130
+ source.start(this.nextStartTime);
4131
+ // Update the schedule for the *next* chunk.
4132
+ this.nextStartTime += audioBuffer.duration;
4133
+ }
4134
+ catch (e) {
4135
+ logger.error('Error playing audio:', e);
4136
+ }
4137
+ }
4138
+ this.isPlaybackLoopRunning = false;
4139
+ }
4140
/**
 * The main loop that listens for and processes messages from the server.
 *
 * Runs until the session is stopped or the server's message generator is
 * exhausted; `stop()` awaits the promise returned by this method.
 */
async runReceiveLoop() {
    const messageGenerator = this.liveSession.receive();
    while (!this.isStopped) {
        // Race the next server message against the stop signal so stop() can
        // unblock this loop even while no message is in flight.
        const result = await Promise.race([
            messageGenerator.next(),
            this.stopDeferred.promise
        ]);
        // `result` is undefined when the stop deferred won the race.
        if (this.isStopped || !result || result.done) {
            break;
        }
        const message = result.value;
        if (message.type === 'serverContent') {
            const serverContent = message;
            // The server marks the turn as interrupted (e.g. the user spoke
            // over the model); discard any pending playback.
            if (serverContent.interrupted) {
                this.interruptPlayback();
            }
            // Queue the first audio part of the model's turn, if present.
            const audioPart = serverContent.modelTurn?.parts.find(part => part.inlineData?.mimeType.startsWith('audio/'));
            if (audioPart?.inlineData) {
                // Decode the base64 payload into a raw ArrayBuffer of PCM bytes.
                const audioData = Uint8Array.from(atob(audioPart.inlineData.data), c => c.charCodeAt(0)).buffer;
                this.enqueueAndPlay(audioData);
            }
        }
        else if (message.type === 'toolCall') {
            if (!this.options.functionCallingHandler) {
                logger.warn('Received tool call message, but StartAudioConversationOptions.functionCallingHandler is undefined. Ignoring tool call.');
            }
            else {
                try {
                    const functionResponse = await this.options.functionCallingHandler(message.functionCalls);
                    // Only reply if the conversation wasn't stopped while the
                    // (potentially long-running) handler executed.
                    if (!this.isStopped) {
                        void this.liveSession.sendFunctionResponses([functionResponse]);
                    }
                }
                catch (e) {
                    // Surface handler failures; the rejection propagates through
                    // receiveLoopPromise after cleanup() runs.
                    throw new AIError(AIErrorCode.ERROR, `Function calling handler failed: ${e.message}`);
                }
            }
        }
    }
}
4183
+ }
4184
+ /**
4185
+ * Starts a real-time, bidirectional audio conversation with the model. This helper function manages
4186
+ * the complexities of microphone access, audio recording, playback, and interruptions.
4187
+ *
4188
+ * @remarks Important: This function must be called in response to a user gesture
4189
+ * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
4190
+ *
4191
+ * @example
4192
+ * ```javascript
4193
+ * const liveSession = await model.connect();
4194
+ * let conversationController;
4195
+ *
4196
+ * // This function must be called from within a click handler.
4197
+ * async function startConversation() {
4198
+ * try {
4199
+ * conversationController = await startAudioConversation(liveSession);
4200
+ * } catch (e) {
4201
+ * // Handle AI-specific errors
4202
+ * if (e instanceof AIError) {
4203
+ * console.error("AI Error:", e.message);
4204
+ * }
4205
+ * // Handle microphone permission and hardware errors
4206
+ * else if (e instanceof DOMException) {
4207
+ * console.error("Microphone Error:", e.message);
4208
+ * }
4209
+ * // Handle other unexpected errors
4210
+ * else {
4211
+ * console.error("An unexpected error occurred:", e);
4212
+ * }
4213
+ * }
4214
+ * }
4215
+ *
4216
+ * // Later, to stop the conversation:
4217
+ * // if (conversationController) {
4218
+ * // await conversationController.stop();
4219
+ * // }
4220
+ * ```
4221
+ *
4222
+ * @param liveSession - An active {@link LiveSession} instance.
4223
+ * @param options - Configuration options for the audio conversation.
4224
+ * @returns A `Promise` that resolves with an {@link AudioConversationController}.
4225
+ * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
4226
+ * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
4227
+ *
4228
+ * @beta
4229
+ */
4230
async function startAudioConversation(liveSession, options = {}) {
    if (liveSession.isClosed) {
        throw new AIError(AIErrorCode.SESSION_CLOSED, 'Cannot start audio conversation on a closed LiveSession.');
    }
    if (liveSession.inConversation) {
        throw new AIError(AIErrorCode.REQUEST_ERROR, 'An audio conversation is already in progress for this session.');
    }
    // Check for necessary Web API support.
    if (typeof AudioWorkletNode === 'undefined' ||
        typeof AudioContext === 'undefined' ||
        typeof navigator === 'undefined' ||
        !navigator.mediaDevices) {
        throw new AIError(AIErrorCode.UNSUPPORTED, 'Audio conversation is not supported in this environment. It requires the Web Audio API and AudioWorklet support.');
    }
    let audioContext;
    try {
        // 1. Set up the audio context. This must be in response to a user gesture.
        // See: https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy
        audioContext = new AudioContext();
        if (audioContext.state === 'suspended') {
            await audioContext.resume();
        }
        // 2. Prompt for microphone access and get the media stream.
        // This can throw a variety of permission or hardware-related errors.
        const mediaStream = await navigator.mediaDevices.getUserMedia({
            audio: true
        });
        // 3. Load the AudioWorklet processor.
        // See: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet
        const workletBlob = new Blob([audioProcessorWorkletString], {
            type: 'application/javascript'
        });
        const workletURL = URL.createObjectURL(workletBlob);
        try {
            await audioContext.audioWorklet.addModule(workletURL);
        }
        finally {
            // The blob URL is only needed while the module loads. Revoke it so
            // the Blob can be garbage collected instead of leaking an object
            // URL for the lifetime of the document.
            URL.revokeObjectURL(workletURL);
        }
        // 4. Create the audio graph: Microphone -> Source Node -> Worklet Node
        const sourceNode = audioContext.createMediaStreamSource(mediaStream);
        const workletNode = new AudioWorkletNode(audioContext, AUDIO_PROCESSOR_NAME, {
            processorOptions: { targetSampleRate: SERVER_INPUT_SAMPLE_RATE }
        });
        sourceNode.connect(workletNode);
        // 5. Instantiate and return the runner which manages the conversation.
        const runner = new AudioConversationRunner(liveSession, options, {
            audioContext,
            mediaStream,
            sourceNode,
            workletNode
        });
        return { stop: () => runner.stop() };
    }
    catch (e) {
        // Ensure the audio context is closed on any setup error.
        if (audioContext && audioContext.state !== 'closed') {
            void audioContext.close();
        }
        // Re-throw specific, known error types directly. The user may want to handle `DOMException`
        // errors differently (for example, if permission to access audio device was denied).
        if (e instanceof AIError || e instanceof DOMException) {
            throw e;
        }
        // Wrap any other unexpected errors in a standard AIError.
        throw new AIError(AIErrorCode.ERROR, `Failed to initialize audio recording: ${e.message}`);
    }
}
4293
+
4294
+ /**
4295
+ * @license
4296
+ * Copyright 2024 Google LLC
4297
+ *
4298
+ * Licensed under the Apache License, Version 2.0 (the "License");
4299
+ * you may not use this file except in compliance with the License.
4300
+ * You may obtain a copy of the License at
4301
+ *
4302
+ * http://www.apache.org/licenses/LICENSE-2.0
4303
+ *
4304
+ * Unless required by applicable law or agreed to in writing, software
4305
+ * distributed under the License is distributed on an "AS IS" BASIS,
4306
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4307
+ * See the License for the specific language governing permissions and
4308
+ * limitations under the License.
4309
+ */
4310
+ /**
4311
+ * Returns the default {@link AI} instance that is associated with the provided
4312
+ * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
4313
+ * default settings.
4314
+ *
4315
+ * @example
4316
+ * ```javascript
4317
+ * const ai = getAI(app);
4318
+ * ```
4319
+ *
4320
+ * @example
4321
+ * ```javascript
4322
+ * // Get an AI instance configured to use the Gemini Developer API (via Google AI).
4323
+ * const ai = getAI(app, { backend: new GoogleAIBackend() });
4324
+ * ```
4325
+ *
4326
+ * @example
4327
+ * ```javascript
4328
+ * // Get an AI instance configured to use the Vertex AI Gemini API.
4329
+ * const ai = getAI(app, { backend: new VertexAIBackend() });
4330
+ * ```
4331
+ *
4332
+ * @param app - The {@link @firebase/app#FirebaseApp} to use.
4333
+ * @param options - {@link AIOptions} that configure the AI instance.
4334
+ * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
4335
+ *
4336
+ * @public
4337
+ */
4338
function getAI(app$1 = app.getApp(), options) {
    app$1 = util.getModularInstance(app$1);
    // Resolve the AI component provider registered for this app.
    const provider = app._getProvider(app$1, AI_TYPE);
    // Default to the Gemini Developer API backend when none is specified.
    const backend = options?.backend ?? new GoogleAIBackend();
    const identifier = encodeInstanceIdentifier(backend);
    const aiInstance = provider.getImmediate({
        identifier
    });
    aiInstance.options = {
        useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false
    };
    return aiInstance;
}
4353
+ /**
4354
+ * Returns a {@link GenerativeModel} class with methods for inference
4355
+ * and other functionality.
4356
+ *
4357
+ * @public
4358
+ */
4359
function getGenerativeModel(ai, modelParams, requestOptions) {
    // HybridParams is distinguished from plain ModelParams by the presence of
    // a `mode` property.
    const hybridParams = modelParams;
    // Hybrid mode: use the caller's in-cloud params, falling back to the
    // default hybrid in-cloud model. Otherwise the input IS the cloud params.
    const inCloudParams = hybridParams.mode
        ? hybridParams.inCloudParams || {
            model: DEFAULT_HYBRID_IN_CLOUD_MODEL
        }
        : modelParams;
    if (!inCloudParams.model) {
        throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`);
    }
    /**
     * An AIService registered by index.node.ts will not have a
     * chromeAdapterFactory() method.
     */
    const chromeAdapter = ai.chromeAdapterFactory?.(hybridParams.mode, typeof window === 'undefined' ? undefined : window, hybridParams.onDeviceParams);
    const generativeModel = new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);
    generativeModel._apiSettings.inferenceMode = hybridParams.mode;
    return generativeModel;
}
4383
+ /**
4384
+ * Returns an {@link ImagenModel} class with methods for using Imagen.
4385
+ *
4386
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
4387
+ *
4388
+ * @param ai - An {@link AI} instance.
4389
+ * @param modelParams - Parameters to use when making Imagen requests.
4390
+ * @param requestOptions - Additional options to use when making requests.
4391
+ *
4392
+ * @throws If the `apiKey` or `projectId` fields are missing in your
4393
+ * Firebase config.
4394
+ *
4395
+ * @public
4396
+ */
4397
function getImagenModel(ai, modelParams, requestOptions) {
    // A model name is mandatory; fail fast with a helpful example.
    const { model } = modelParams;
    if (!model) {
        throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })`);
    }
    return new ImagenModel(ai, modelParams, requestOptions);
}
4403
+ /**
4404
+ * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
4405
+ *
4406
+ * The Live API is only supported in modern browser windows and Node >= 22.
4407
+ *
4408
+ * @param ai - An {@link AI} instance.
4409
+ * @param modelParams - Parameters to use when setting up a {@link LiveSession}.
4410
+ * @throws If the `apiKey` or `projectId` fields are missing in your
4411
+ * Firebase config.
4412
+ *
4413
+ * @beta
4414
+ */
4415
function getLiveGenerativeModel(ai, modelParams) {
    // A model name is mandatory; fail fast with a helpful example.
    if (!modelParams.model) {
        throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name for getLiveGenerativeModel. Example: getLiveGenerativeModel(ai, { model: 'my-model-name' })`);
    }
    // Each live model gets its own WebSocket handler instance.
    return new LiveGenerativeModel(ai, modelParams, new WebSocketHandlerImpl());
}
4422
+ /**
4423
+ * Returns a {@link TemplateGenerativeModel} class for executing server-side
4424
+ * templates.
4425
+ *
4426
+ * @param ai - An {@link AI} instance.
4427
+ * @param requestOptions - Additional options to use when making requests.
4428
+ *
4429
+ * @beta
4430
+ */
4431
function getTemplateGenerativeModel(ai, requestOptions) {
    // Thin factory: no model name needed here — the template referenced at
    // call time determines the model configuration server-side.
    return new TemplateGenerativeModel(ai, requestOptions);
}
4434
+ /**
4435
+ * Returns a {@link TemplateImagenModel} class for executing server-side
4436
+ * Imagen templates.
4437
+ *
4438
+ * @param ai - An {@link AI} instance.
4439
+ * @param requestOptions - Additional options to use when making requests.
4440
+ *
4441
+ * @beta
4442
+ */
4443
function getTemplateImagenModel(ai, requestOptions) {
    // Thin factory: no model name needed here — the template referenced at
    // call time determines the Imagen configuration server-side.
    return new TemplateImagenModel(ai, requestOptions);
}
4446
+
4447
+ /**
4448
+ * The Firebase AI Web SDK.
4449
+ *
4450
+ * @packageDocumentation
4451
+ */
4452
/**
 * Registers the AI component and this SDK's version with the Firebase app
 * framework so getAI() can resolve instances per app/backend.
 */
function registerAI() {
    app._registerComponent(new component.Component(AI_TYPE, factory, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true));
    app.registerVersion(name, version, 'node');
    // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation
    app.registerVersion(name, version, 'cjs2020');
}
// Registration happens as a side effect of loading this module.
registerAI();
4459
+
4460
// Public API surface of the bundle (CommonJS named exports).
exports.AIError = AIError;
exports.AIErrorCode = AIErrorCode;
exports.AIModel = AIModel;
exports.AnyOfSchema = AnyOfSchema;
exports.ArraySchema = ArraySchema;
exports.Backend = Backend;
exports.BackendType = BackendType;
exports.BlockReason = BlockReason;
exports.BooleanSchema = BooleanSchema;
exports.ChatSession = ChatSession;
exports.FinishReason = FinishReason;
exports.FunctionCallingMode = FunctionCallingMode;
exports.GenerativeModel = GenerativeModel;
exports.GoogleAIBackend = GoogleAIBackend;
exports.HarmBlockMethod = HarmBlockMethod;
exports.HarmBlockThreshold = HarmBlockThreshold;
exports.HarmCategory = HarmCategory;
exports.HarmProbability = HarmProbability;
exports.HarmSeverity = HarmSeverity;
exports.ImagenAspectRatio = ImagenAspectRatio;
exports.ImagenImageFormat = ImagenImageFormat;
exports.ImagenModel = ImagenModel;
exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel;
exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel;
exports.InferenceMode = InferenceMode;
exports.InferenceSource = InferenceSource;
exports.IntegerSchema = IntegerSchema;
exports.Language = Language;
exports.LiveGenerativeModel = LiveGenerativeModel;
exports.LiveResponseType = LiveResponseType;
exports.LiveSession = LiveSession;
exports.Modality = Modality;
exports.NumberSchema = NumberSchema;
exports.ObjectSchema = ObjectSchema;
exports.Outcome = Outcome;
exports.POSSIBLE_ROLES = POSSIBLE_ROLES;
exports.ResponseModality = ResponseModality;
exports.Schema = Schema;
exports.SchemaType = SchemaType;
exports.StringSchema = StringSchema;
exports.TemplateGenerativeModel = TemplateGenerativeModel;
exports.TemplateImagenModel = TemplateImagenModel;
exports.ThinkingLevel = ThinkingLevel;
exports.URLRetrievalStatus = URLRetrievalStatus;
exports.VertexAIBackend = VertexAIBackend;
exports.getAI = getAI;
exports.getGenerativeModel = getGenerativeModel;
exports.getImagenModel = getImagenModel;
exports.getLiveGenerativeModel = getLiveGenerativeModel;
exports.getTemplateGenerativeModel = getTemplateGenerativeModel;
exports.getTemplateImagenModel = getTemplateImagenModel;
exports.startAudioConversation = startAudioConversation;
4512
+ //# sourceMappingURL=index.node.cjs.js.map