@depup/firebase__ai 2.9.0-depup.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. package/README.md +31 -0
  2. package/changes.json +10 -0
  3. package/dist/ai-public.d.ts +3472 -0
  4. package/dist/ai.d.ts +3712 -0
  5. package/dist/esm/index.esm.js +4765 -0
  6. package/dist/esm/index.esm.js.map +1 -0
  7. package/dist/esm/package.json +1 -0
  8. package/dist/esm/src/api.d.ts +121 -0
  9. package/dist/esm/src/backend.d.ts +98 -0
  10. package/dist/esm/src/constants.d.ts +29 -0
  11. package/dist/esm/src/errors.d.ts +35 -0
  12. package/dist/esm/src/factory-browser.d.ts +19 -0
  13. package/dist/esm/src/factory-node.d.ts +19 -0
  14. package/dist/esm/src/googleai-mappers.d.ts +73 -0
  15. package/dist/esm/src/helpers.d.ts +30 -0
  16. package/dist/esm/src/index.d.ts +13 -0
  17. package/dist/esm/src/index.node.d.ts +7 -0
  18. package/dist/esm/src/logger.d.ts +18 -0
  19. package/dist/esm/src/methods/chat-session-helpers.d.ts +18 -0
  20. package/dist/esm/src/methods/chat-session.d.ts +77 -0
  21. package/dist/esm/src/methods/chrome-adapter.d.ts +124 -0
  22. package/dist/esm/src/methods/count-tokens.d.ts +21 -0
  23. package/dist/esm/src/methods/generate-content.d.ts +25 -0
  24. package/dist/esm/src/methods/live-session-helpers.d.ts +154 -0
  25. package/dist/esm/src/methods/live-session.d.ts +154 -0
  26. package/dist/esm/src/models/ai-model.d.ts +72 -0
  27. package/dist/esm/src/models/generative-model.d.ts +56 -0
  28. package/dist/esm/src/models/imagen-model.d.ts +102 -0
  29. package/dist/esm/src/models/index.d.ts +20 -0
  30. package/dist/esm/src/models/live-generative-model.d.ts +55 -0
  31. package/dist/esm/src/models/template-generative-model.d.ts +64 -0
  32. package/dist/esm/src/models/template-imagen-model.d.ts +51 -0
  33. package/dist/esm/src/models/utils.d.ts +26 -0
  34. package/dist/esm/src/public-types.d.ts +97 -0
  35. package/dist/esm/src/requests/hybrid-helpers.d.ts +33 -0
  36. package/dist/esm/src/requests/imagen-image-format.d.ts +61 -0
  37. package/dist/esm/src/requests/request-helpers.d.ts +28 -0
  38. package/dist/esm/src/requests/request.d.ts +69 -0
  39. package/dist/esm/src/requests/response-helpers.d.ts +57 -0
  40. package/dist/esm/src/requests/schema-builder.d.ts +170 -0
  41. package/dist/esm/src/requests/stream-reader.d.ts +39 -0
  42. package/dist/esm/src/service.d.ts +35 -0
  43. package/dist/esm/src/types/chrome-adapter.d.ts +61 -0
  44. package/dist/esm/src/types/content.d.ts +266 -0
  45. package/dist/esm/src/types/enums.d.ts +419 -0
  46. package/dist/esm/src/types/error.d.ts +89 -0
  47. package/dist/esm/src/types/googleai.d.ts +57 -0
  48. package/dist/esm/src/types/imagen/index.d.ts +18 -0
  49. package/dist/esm/src/types/imagen/internal.d.ts +134 -0
  50. package/dist/esm/src/types/imagen/requests.d.ts +245 -0
  51. package/dist/esm/src/types/imagen/responses.d.ts +79 -0
  52. package/dist/esm/src/types/index.d.ts +26 -0
  53. package/dist/esm/src/types/internal.d.ts +35 -0
  54. package/dist/esm/src/types/language-model.d.ts +107 -0
  55. package/dist/esm/src/types/live-responses.d.ts +79 -0
  56. package/dist/esm/src/types/requests.d.ts +543 -0
  57. package/dist/esm/src/types/responses.d.ts +607 -0
  58. package/dist/esm/src/types/schema.d.ts +139 -0
  59. package/dist/esm/src/websocket.d.ts +67 -0
  60. package/dist/index.cjs.js +4820 -0
  61. package/dist/index.cjs.js.map +1 -0
  62. package/dist/index.node.cjs.js +4512 -0
  63. package/dist/index.node.cjs.js.map +1 -0
  64. package/dist/index.node.mjs +4457 -0
  65. package/dist/index.node.mjs.map +1 -0
  66. package/dist/src/api.d.ts +121 -0
  67. package/dist/src/backend.d.ts +98 -0
  68. package/dist/src/constants.d.ts +29 -0
  69. package/dist/src/errors.d.ts +35 -0
  70. package/dist/src/factory-browser.d.ts +19 -0
  71. package/dist/src/factory-node.d.ts +19 -0
  72. package/dist/src/googleai-mappers.d.ts +73 -0
  73. package/dist/src/helpers.d.ts +30 -0
  74. package/dist/src/index.d.ts +13 -0
  75. package/dist/src/index.node.d.ts +7 -0
  76. package/dist/src/logger.d.ts +18 -0
  77. package/dist/src/methods/chat-session-helpers.d.ts +18 -0
  78. package/dist/src/methods/chat-session.d.ts +77 -0
  79. package/dist/src/methods/chrome-adapter.d.ts +124 -0
  80. package/dist/src/methods/count-tokens.d.ts +21 -0
  81. package/dist/src/methods/generate-content.d.ts +25 -0
  82. package/dist/src/methods/live-session-helpers.d.ts +154 -0
  83. package/dist/src/methods/live-session.d.ts +154 -0
  84. package/dist/src/models/ai-model.d.ts +72 -0
  85. package/dist/src/models/generative-model.d.ts +56 -0
  86. package/dist/src/models/imagen-model.d.ts +102 -0
  87. package/dist/src/models/index.d.ts +20 -0
  88. package/dist/src/models/live-generative-model.d.ts +55 -0
  89. package/dist/src/models/template-generative-model.d.ts +64 -0
  90. package/dist/src/models/template-imagen-model.d.ts +51 -0
  91. package/dist/src/models/utils.d.ts +26 -0
  92. package/dist/src/public-types.d.ts +97 -0
  93. package/dist/src/requests/hybrid-helpers.d.ts +33 -0
  94. package/dist/src/requests/imagen-image-format.d.ts +61 -0
  95. package/dist/src/requests/request-helpers.d.ts +28 -0
  96. package/dist/src/requests/request.d.ts +69 -0
  97. package/dist/src/requests/response-helpers.d.ts +57 -0
  98. package/dist/src/requests/schema-builder.d.ts +170 -0
  99. package/dist/src/requests/stream-reader.d.ts +39 -0
  100. package/dist/src/service.d.ts +35 -0
  101. package/dist/src/tsdoc-metadata.json +11 -0
  102. package/dist/src/types/chrome-adapter.d.ts +61 -0
  103. package/dist/src/types/content.d.ts +266 -0
  104. package/dist/src/types/enums.d.ts +419 -0
  105. package/dist/src/types/error.d.ts +89 -0
  106. package/dist/src/types/googleai.d.ts +57 -0
  107. package/dist/src/types/imagen/index.d.ts +18 -0
  108. package/dist/src/types/imagen/internal.d.ts +134 -0
  109. package/dist/src/types/imagen/requests.d.ts +245 -0
  110. package/dist/src/types/imagen/responses.d.ts +79 -0
  111. package/dist/src/types/index.d.ts +26 -0
  112. package/dist/src/types/internal.d.ts +35 -0
  113. package/dist/src/types/language-model.d.ts +107 -0
  114. package/dist/src/types/live-responses.d.ts +79 -0
  115. package/dist/src/types/requests.d.ts +543 -0
  116. package/dist/src/types/responses.d.ts +607 -0
  117. package/dist/src/types/schema.d.ts +139 -0
  118. package/dist/src/websocket.d.ts +67 -0
  119. package/package.json +106 -0
package/dist/ai.d.ts ADDED
@@ -0,0 +1,3712 @@
1
+ /**
2
+ * The Firebase AI Web SDK.
3
+ *
4
+ * @packageDocumentation
5
+ */
6
+
7
+ import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
8
+ import { FirebaseApp } from '@firebase/app';
9
+ import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
10
+ import { FirebaseError } from '@firebase/util';
11
+
12
+ /**
13
+ * An instance of the Firebase AI SDK.
14
+ *
15
+ * Do not create this instance directly. Instead, use {@link getAI | getAI()}.
16
+ *
17
+ * @public
18
+ */
19
+ export declare interface AI {
20
+ /**
21
+ * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.
22
+ */
23
+ app: FirebaseApp;
24
+ /**
25
+ * A {@link Backend} instance that specifies the configuration for the target backend,
26
+ * either the Gemini Developer API (using {@link GoogleAIBackend}) or the
27
+ * Vertex AI Gemini API (using {@link VertexAIBackend}).
28
+ */
29
+ backend: Backend;
30
+ /**
31
+ * Options applied to this {@link AI} instance.
32
+ */
33
+ options?: AIOptions;
34
+ /**
35
+ * @deprecated use `AI.backend.location` instead.
36
+ *
37
+ * The location configured for this AI service instance, relevant for Vertex AI backends.
38
+ */
39
+ location: string;
40
+ }
41
+
42
+ /**
43
+ * Error class for the Firebase AI SDK.
44
+ *
45
+ * @public
46
+ */
47
+ export declare class AIError extends FirebaseError {
48
+ readonly code: AIErrorCode;
49
+ readonly customErrorData?: CustomErrorData | undefined;
50
+ /**
51
+ * Constructs a new instance of the `AIError` class.
52
+ *
53
+ * @param code - The error code from {@link (AIErrorCode:type)}.
54
+ * @param message - A human-readable message describing the error.
55
+ * @param customErrorData - Optional error data.
56
+ */
57
+ constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined);
58
+ }
59
+
60
+ /**
61
+ * Standardized error codes that {@link AIError} can have.
62
+ *
63
+ * @public
64
+ */
65
+ export declare const AIErrorCode: {
66
+ /** A generic error occurred. */
67
+ readonly ERROR: "error";
68
+ /** An error occurred in a request. */
69
+ readonly REQUEST_ERROR: "request-error";
70
+ /** An error occurred in a response. */
71
+ readonly RESPONSE_ERROR: "response-error";
72
+ /** An error occurred while performing a fetch. */
73
+ readonly FETCH_ERROR: "fetch-error";
74
+ /** An error occurred because an operation was attempted on a closed session. */
75
+ readonly SESSION_CLOSED: "session-closed";
76
+ /** An error associated with a Content object. */
77
+ readonly INVALID_CONTENT: "invalid-content";
78
+ /** An error due to the Firebase API not being enabled in the Console. */
79
+ readonly API_NOT_ENABLED: "api-not-enabled";
80
+ /** An error due to invalid Schema input. */
81
+ readonly INVALID_SCHEMA: "invalid-schema";
82
+ /** An error occurred due to a missing Firebase API key. */
83
+ readonly NO_API_KEY: "no-api-key";
84
+ /** An error occurred due to a missing Firebase app ID. */
85
+ readonly NO_APP_ID: "no-app-id";
86
+ /** An error occurred due to a model name not being specified during initialization. */
87
+ readonly NO_MODEL: "no-model";
88
+ /** An error occurred due to a missing project ID. */
89
+ readonly NO_PROJECT_ID: "no-project-id";
90
+ /** An error occurred while parsing. */
91
+ readonly PARSE_FAILED: "parse-failed";
92
+ /** An error occurred due an attempt to use an unsupported feature. */
93
+ readonly UNSUPPORTED: "unsupported";
94
+ };
95
+
96
+ /**
97
+ * Standardized error codes that {@link AIError} can have.
98
+ *
99
+ * @public
100
+ */
101
+ export declare type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];
102
+
103
+ /**
104
+ * Base class for Firebase AI model APIs.
105
+ *
106
+ * Instances of this class are associated with a specific Firebase AI {@link Backend}
107
+ * and provide methods for interacting with the configured generative model.
108
+ *
109
+ * @public
110
+ */
111
+ export declare abstract class AIModel {
112
+ /**
113
+ * The fully qualified model resource name to use for generating images
114
+ * (for example, `publishers/google/models/imagen-3.0-generate-002`).
115
+ */
116
+ readonly model: string;
117
+ /**
118
+ * @internal
119
+ */
120
+ _apiSettings: ApiSettings;
121
+ /**
122
+ * Constructs a new instance of the {@link AIModel} class.
123
+ *
124
+ * This constructor should only be called from subclasses that provide
125
+ * a model API.
126
+ *
127
+ * @param ai - an {@link AI} instance.
128
+ * @param modelName - The name of the model being used. It can be in one of the following formats:
129
+ * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
130
+ * - `models/my-model` (will resolve to `publishers/google/models/my-model`)
131
+ * - `publishers/my-publisher/models/my-model` (fully qualified model name)
132
+ *
133
+ * @throws If the `apiKey` or `projectId` fields are missing in your
134
+ * Firebase config.
135
+ *
136
+ * @internal
137
+ */
138
+ protected constructor(ai: AI, modelName: string);
139
+ /**
140
+ * Normalizes the given model name to a fully qualified model resource name.
141
+ *
142
+ * @param modelName - The model name to normalize.
143
+ * @returns The fully qualified model resource name.
144
+ *
145
+ * @internal
146
+ */
147
+ static normalizeModelName(modelName: string, backendType: BackendType): string;
148
+ /**
149
+ * @internal
150
+ */
151
+ private static normalizeGoogleAIModelName;
152
+ /**
153
+ * @internal
154
+ */
155
+ private static normalizeVertexAIModelName;
156
+ }
157
+
158
+ /**
159
+ * Options for initializing the AI service using {@link getAI | getAI()}.
160
+ * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)
161
+ * and configuring its specific options (like location for Vertex AI).
162
+ *
163
+ * @public
164
+ */
165
+ export declare interface AIOptions {
166
+ /**
167
+ * The backend configuration to use for the AI service instance.
168
+ * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
169
+ */
170
+ backend?: Backend;
171
+ /**
172
+ * Whether to use App Check limited use tokens. Defaults to false.
173
+ */
174
+ useLimitedUseAppCheckTokens?: boolean;
175
+ }
176
+
177
+ /**
178
+ * Schema class representing a value that can conform to any of the provided sub-schemas. This is
179
+ * useful when a field can accept multiple distinct types or structures.
180
+ * @public
181
+ */
182
+ export declare class AnyOfSchema extends Schema {
183
+ anyOf: TypedSchema[];
184
+ constructor(schemaParams: SchemaParams & {
185
+ anyOf: TypedSchema[];
186
+ });
187
+ /**
188
+ * @internal
189
+ */
190
+ toJSON(): SchemaRequest;
191
+ }
192
+
193
+ declare interface ApiSettings {
194
+ apiKey: string;
195
+ project: string;
196
+ appId: string;
197
+ automaticDataCollectionEnabled?: boolean;
198
+ /**
199
+ * @deprecated Use `backend.location` instead.
200
+ */
201
+ location: string;
202
+ backend: Backend;
203
+ getAuthToken?: () => Promise<FirebaseAuthTokenData | null>;
204
+ getAppCheckToken?: () => Promise<AppCheckTokenResult>;
205
+ inferenceMode?: InferenceMode;
206
+ }
207
+
208
+ /**
209
+ * Schema class for "array" types.
210
+ * The `items` param should refer to the type of item that can be a member
211
+ * of the array.
212
+ * @public
213
+ */
214
+ export declare class ArraySchema extends Schema {
215
+ items: TypedSchema;
216
+ constructor(schemaParams: SchemaParams, items: TypedSchema);
217
+ /**
218
+ * @internal
219
+ */
220
+ toJSON(): SchemaRequest;
221
+ }
222
+
223
+ /**
224
+ * A controller for managing an active audio conversation.
225
+ *
226
+ * @beta
227
+ */
228
+ export declare interface AudioConversationController {
229
+ /**
230
+ * Stops the audio conversation, closes the microphone connection, and
231
+ * cleans up resources. Returns a promise that resolves when cleanup is complete.
232
+ */
233
+ stop: () => Promise<void>;
234
+ }
235
+
236
+ /**
237
+ * The audio transcription configuration.
238
+ */
239
+ export declare interface AudioTranscriptionConfig {
240
+ }
241
+
242
+ /**
243
+ * Abstract base class representing the configuration for an AI service backend.
244
+ * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
245
+ * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
246
+ * {@link VertexAIBackend} for the Vertex AI Gemini API.
247
+ *
248
+ * @public
249
+ */
250
+ export declare abstract class Backend {
251
+ /**
252
+ * Specifies the backend type.
253
+ */
254
+ readonly backendType: BackendType;
255
+ /**
256
+ * Protected constructor for use by subclasses.
257
+ * @param type - The backend type.
258
+ */
259
+ protected constructor(type: BackendType);
260
+ /**
261
+ * @internal
262
+ */
263
+ abstract _getModelPath(project: string, model: string): string;
264
+ /**
265
+ * @internal
266
+ */
267
+ abstract _getTemplatePath(project: string, templateId: string): string;
268
+ }
269
+
270
+ /**
271
+ * An enum-like object containing constants that represent the supported backends
272
+ * for the Firebase AI SDK.
273
+ * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)
274
+ * the SDK will communicate with.
275
+ *
276
+ * These values are assigned to the `backendType` property within the specific backend
277
+ * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify
278
+ * which service to target.
279
+ *
280
+ * @public
281
+ */
282
+ export declare const BackendType: {
283
+ /**
284
+ * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.
285
+ * Use this constant when creating a {@link VertexAIBackend} configuration.
286
+ */
287
+ readonly VERTEX_AI: "VERTEX_AI";
288
+ /**
289
+ * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).
290
+ * Use this constant when creating a {@link GoogleAIBackend} configuration.
291
+ */
292
+ readonly GOOGLE_AI: "GOOGLE_AI";
293
+ };
294
+
295
+ /**
296
+ * Type alias representing valid backend types.
297
+ * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.
298
+ *
299
+ * @public
300
+ */
301
+ export declare type BackendType = (typeof BackendType)[keyof typeof BackendType];
302
+
303
+ /**
304
+ * Base parameters for a number of methods.
305
+ * @public
306
+ */
307
+ export declare interface BaseParams {
308
+ safetySettings?: SafetySetting[];
309
+ generationConfig?: GenerationConfig;
310
+ }
311
+
312
+ /**
313
+ * Reason that a prompt was blocked.
314
+ * @public
315
+ */
316
+ export declare const BlockReason: {
317
+ /**
318
+ * Content was blocked by safety settings.
319
+ */
320
+ readonly SAFETY: "SAFETY";
321
+ /**
322
+ * Content was blocked, but the reason is uncategorized.
323
+ */
324
+ readonly OTHER: "OTHER";
325
+ /**
326
+ * Content was blocked because it contained terms from the terminology blocklist.
327
+ */
328
+ readonly BLOCKLIST: "BLOCKLIST";
329
+ /**
330
+ * Content was blocked due to prohibited content.
331
+ */
332
+ readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
333
+ };
334
+
335
+ /**
336
+ * Reason that a prompt was blocked.
337
+ * @public
338
+ */
339
+ export declare type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];
340
+
341
+ /**
342
+ * Schema class for "boolean" types.
343
+ * @public
344
+ */
345
+ export declare class BooleanSchema extends Schema {
346
+ constructor(schemaParams?: SchemaParams);
347
+ }
348
+
349
+ /**
350
+ * ChatSession class that enables sending chat messages and stores
351
+ * history of sent and received messages so far.
352
+ *
353
+ * @public
354
+ */
355
+ export declare class ChatSession {
356
+ model: string;
357
+ private chromeAdapter?;
358
+ params?: StartChatParams | undefined;
359
+ requestOptions?: RequestOptions | undefined;
360
+ private _apiSettings;
361
+ private _history;
362
+ /**
363
+ * Ensures sequential execution of chat messages to maintain history order.
364
+ * Each call waits for the previous one to settle before proceeding.
365
+ */
366
+ private _sendPromise;
367
+ constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
368
+ /**
369
+ * Gets the chat history so far. Blocked prompts are not added to history.
370
+ * Neither blocked candidates nor the prompts that generated them are added
371
+ * to history.
372
+ */
373
+ getHistory(): Promise<Content[]>;
374
+ /**
375
+ * Format Content into a request for generateContent or
376
+ * generateContentStream.
377
+ * @internal
378
+ */
379
+ _formatRequest(incomingContent: Content, tempHistory: Content[]): GenerateContentRequest;
380
+ /**
381
+ * Sends a chat message and receives a non-streaming
382
+ * {@link GenerateContentResult}
383
+ */
384
+ sendMessage(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
385
+ /**
386
+ * Sends a chat message and receives the response as a
387
+ * {@link GenerateContentStreamResult} containing an iterable stream
388
+ * and a response promise.
389
+ */
390
+ sendMessageStream(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
391
+ /**
392
+ * Get function calls that the SDK has references to actually call.
393
+ * This is all-or-nothing. If the model is requesting multiple
394
+ * function calls, all of them must have references in order for
395
+ * automatic function calling to work.
396
+ *
397
+ * @internal
398
+ */
399
+ _getCallableFunctionCalls(response?: GenerateContentResponse): FunctionCall[] | undefined;
400
+ /**
401
+ * Call user-defined functions if requested by the model, and return
402
+ * the response that should be sent to the model.
403
+ * @internal
404
+ */
405
+ _callFunctionsAsNeeded(functionCalls: FunctionCall[]): Promise<FunctionResponsePart[]>;
406
+ }
407
+
408
+ /**
409
+ * Defines an inference "backend" that uses Chrome's on-device model,
410
+ * and encapsulates logic for detecting when on-device inference is
411
+ * possible.
412
+ *
413
+ * These methods should not be called directly by the user.
414
+ *
415
+ * @beta
416
+ */
417
+ export declare interface ChromeAdapter {
418
+ /**
419
+ * @internal
420
+ */
421
+ mode: InferenceMode;
422
+ /**
423
+ * Checks if the on-device model is capable of handling a given
424
+ * request.
425
+ * @param request - A potential request to be passed to the model.
426
+ */
427
+ isAvailable(request: GenerateContentRequest): Promise<boolean>;
428
+ /**
429
+ * Generates content using on-device inference.
430
+ *
431
+ * @remarks
432
+ * This is comparable to {@link GenerativeModel.generateContent} for generating
433
+ * content using in-cloud inference.
434
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
435
+ */
436
+ generateContent(request: GenerateContentRequest): Promise<Response>;
437
+ /**
438
+ * Generates a content stream using on-device inference.
439
+ *
440
+ * @remarks
441
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating
442
+ * a content stream using in-cloud inference.
443
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
444
+ */
445
+ generateContentStream(request: GenerateContentRequest): Promise<Response>;
446
+ /**
447
+ * @internal
448
+ */
449
+ countTokens(request: CountTokensRequest): Promise<Response>;
450
+ }
451
+
452
+ /**
453
+ * A single citation.
454
+ * @public
455
+ */
456
+ export declare interface Citation {
457
+ startIndex?: number;
458
+ endIndex?: number;
459
+ uri?: string;
460
+ license?: string;
461
+ /**
462
+ * The title of the cited source, if available.
463
+ *
464
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
465
+ */
466
+ title?: string;
467
+ /**
468
+ * The publication date of the cited source, if available.
469
+ *
470
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
471
+ */
472
+ publicationDate?: Date_2;
473
+ }
474
+
475
+ /**
476
+ * Citation metadata that may be found on a {@link GenerateContentCandidate}.
477
+ * @public
478
+ */
479
+ export declare interface CitationMetadata {
480
+ citations: Citation[];
481
+ }
482
+
483
+ /**
484
+ * The results of code execution run by the model.
485
+ *
486
+ * @public
487
+ */
488
+ export declare interface CodeExecutionResult {
489
+ /**
490
+ * The result of the code execution.
491
+ */
492
+ outcome?: Outcome;
493
+ /**
494
+ * The output from the code execution, or an error message
495
+ * if it failed.
496
+ */
497
+ output?: string;
498
+ }
499
+
500
+ /**
501
+ * Represents the code execution result from the model.
502
+ *
503
+ * @public
504
+ */
505
+ export declare interface CodeExecutionResultPart {
506
+ text?: never;
507
+ inlineData?: never;
508
+ functionCall?: never;
509
+ functionResponse?: never;
510
+ fileData: never;
511
+ thought?: never;
512
+ /**
513
+ * @internal
514
+ */
515
+ thoughtSignature?: never;
516
+ executableCode?: never;
517
+ codeExecutionResult?: CodeExecutionResult;
518
+ }
519
+
520
+ /**
521
+ * A tool that enables the model to use code execution.
522
+ *
523
+ * @beta
524
+ */
525
+ export declare interface CodeExecutionTool {
526
+ /**
527
+ * Specifies the Google Search configuration.
528
+ * Currently, this is an empty object, but it's reserved for future configuration options.
529
+ */
530
+ codeExecution: {};
531
+ }
532
+
533
+ /**
534
+ * Content type for both prompts and response candidates.
535
+ * @public
536
+ */
537
+ export declare interface Content {
538
+ role: Role;
539
+ parts: Part[];
540
+ }
541
+
542
+ /**
543
+ * Params for calling {@link GenerativeModel.countTokens}
544
+ * @public
545
+ */
546
+ export declare interface CountTokensRequest {
547
+ contents: Content[];
548
+ /**
549
+ * Instructions that direct the model to behave a certain way.
550
+ */
551
+ systemInstruction?: string | Part | Content;
552
+ /**
553
+ * {@link Tool} configuration.
554
+ */
555
+ tools?: Tool[];
556
+ /**
557
+ * Configuration options that control how the model generates a response.
558
+ */
559
+ generationConfig?: GenerationConfig;
560
+ }
561
+
562
+ /**
563
+ * Response from calling {@link GenerativeModel.countTokens}.
564
+ * @public
565
+ */
566
+ export declare interface CountTokensResponse {
567
+ /**
568
+ * The total number of tokens counted across all instances from the request.
569
+ */
570
+ totalTokens: number;
571
+ /**
572
+ * @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.
573
+ *
574
+ * The total number of billable characters counted across all instances
575
+ * from the request.
576
+ */
577
+ totalBillableCharacters?: number;
578
+ /**
579
+ * The breakdown, by modality, of how many tokens are consumed by the prompt.
580
+ */
581
+ promptTokensDetails?: ModalityTokenCount[];
582
+ }
583
+
584
+ /**
585
+ * Details object that contains data originating from a bad HTTP response.
586
+ *
587
+ * @public
588
+ */
589
+ export declare interface CustomErrorData {
590
+ /** HTTP status code of the error response. */
591
+ status?: number;
592
+ /** HTTP status text of the error response. */
593
+ statusText?: string;
594
+ /** Response from a {@link GenerateContentRequest} */
595
+ response?: GenerateContentResponse;
596
+ /** Optional additional details about the error. */
597
+ errorDetails?: ErrorDetails[];
598
+ }
599
+
600
+ /**
601
+ * Protobuf google.type.Date
602
+ * @public
603
+ */
604
+ declare interface Date_2 {
605
+ year: number;
606
+ month: number;
607
+ day: number;
608
+ }
609
+ export { Date_2 as Date }
610
+
611
+ /**
612
+ * Response object wrapped with helper methods.
613
+ *
614
+ * @public
615
+ */
616
+ export declare interface EnhancedGenerateContentResponse extends GenerateContentResponse {
617
+ /**
618
+ * Returns the text string from the response, if available.
619
+ * Throws if the prompt or candidate was blocked.
620
+ */
621
+ text: () => string;
622
+ /**
623
+ * Aggregates and returns every {@link InlineDataPart} from the first candidate of
624
+ * {@link GenerateContentResponse}.
625
+ *
626
+ * @throws If the prompt or candidate was blocked.
627
+ */
628
+ inlineDataParts: () => InlineDataPart[] | undefined;
629
+ /**
630
+ * Aggregates and returns every {@link FunctionCall} from the first candidate of
631
+ * {@link GenerateContentResponse}.
632
+ *
633
+ * @throws If the prompt or candidate was blocked.
634
+ */
635
+ functionCalls: () => FunctionCall[] | undefined;
636
+ /**
637
+ * Aggregates and returns every {@link TextPart} with their `thought` property set
638
+ * to `true` from the first candidate of {@link GenerateContentResponse}.
639
+ *
640
+ * @throws If the prompt or candidate was blocked.
641
+ *
642
+ * @remarks
643
+ * Thought summaries provide a brief overview of the model's internal thinking process,
644
+ * offering insight into how it arrived at the final answer. This can be useful for
645
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
646
+ *
647
+ * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is
648
+ * set to `true`.
649
+ */
650
+ thoughtSummary: () => string | undefined;
651
+ /**
652
+ * Indicates whether inference happened on-device or in-cloud.
653
+ *
654
+ * @beta
655
+ */
656
+ inferenceSource?: InferenceSource;
657
+ }
658
+
659
+ /**
660
+ * Details object that may be included in an error response.
661
+ *
662
+ * @public
663
+ */
664
+ export declare interface ErrorDetails {
665
+ '@type'?: string;
666
+ /** The reason for the error. */
667
+ reason?: string;
668
+ /** The domain where the error occurred. */
669
+ domain?: string;
670
+ /** Additional metadata about the error. */
671
+ metadata?: Record<string, unknown>;
672
+ /** Any other relevant information about the error. */
673
+ [key: string]: unknown;
674
+ }
675
+
676
+ /**
677
+ * An interface for executable code returned by the model.
678
+ *
679
+ * @public
680
+ */
681
+ export declare interface ExecutableCode {
682
+ /**
683
+ * The programming language of the code.
684
+ */
685
+ language?: Language;
686
+ /**
687
+ * The source code to be executed.
688
+ */
689
+ code?: string;
690
+ }
691
+
692
+ /**
693
+ * Represents the code that is executed by the model.
694
+ *
695
+ * @public
696
+ */
697
+ export declare interface ExecutableCodePart {
698
+ text?: never;
699
+ inlineData?: never;
700
+ functionCall?: never;
701
+ functionResponse?: never;
702
+ fileData: never;
703
+ thought?: never;
704
+ /**
705
+ * @internal
706
+ */
707
+ thoughtSignature?: never;
708
+ executableCode?: ExecutableCode;
709
+ codeExecutionResult?: never;
710
+ }
711
+
712
+ /**
713
+ * Data pointing to a file uploaded on Google Cloud Storage.
714
+ * @public
715
+ */
716
+ export declare interface FileData {
717
+ mimeType: string;
718
+ fileUri: string;
719
+ }
720
+
721
+ /**
722
+ * Content part interface if the part represents {@link FileData}
723
+ * @public
724
+ */
725
+ export declare interface FileDataPart {
726
+ text?: never;
727
+ inlineData?: never;
728
+ functionCall?: never;
729
+ functionResponse?: never;
730
+ fileData: FileData;
731
+ thought?: boolean;
732
+ /**
733
+ * @internal
734
+ */
735
+ thoughtSignature?: never;
736
+ executableCode?: never;
737
+ codeExecutionResult?: never;
738
+ }
739
+
740
+ /**
741
+ * Reason that a candidate finished.
742
+ * @public
743
+ */
744
+ export declare const FinishReason: {
745
+ /**
746
+ * Natural stop point of the model or provided stop sequence.
747
+ */
748
+ readonly STOP: "STOP";
749
+ /**
750
+ * The maximum number of tokens as specified in the request was reached.
751
+ */
752
+ readonly MAX_TOKENS: "MAX_TOKENS";
753
+ /**
754
+ * The candidate content was flagged for safety reasons.
755
+ */
756
+ readonly SAFETY: "SAFETY";
757
+ /**
758
+ * The candidate content was flagged for recitation reasons.
759
+ */
760
+ readonly RECITATION: "RECITATION";
761
+ /**
762
+ * Unknown reason.
763
+ */
764
+ readonly OTHER: "OTHER";
765
+ /**
766
+ * The candidate content contained forbidden terms.
767
+ */
768
+ readonly BLOCKLIST: "BLOCKLIST";
769
+ /**
770
+ * The candidate content potentially contained prohibited content.
771
+ */
772
+ readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
773
+ /**
774
+ * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
775
+ */
776
+ readonly SPII: "SPII";
777
+ /**
778
+ * The function call generated by the model was invalid.
779
+ */
780
+ readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL";
781
+ };
782
+
783
+ /**
784
+ * Reason that a candidate finished.
785
+ * @public
786
+ */
787
+ export declare type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
788
+
789
+ /**
790
+ * A predicted {@link FunctionCall} returned from the model
791
+ * that contains a string representing the {@link FunctionDeclaration.name}
792
+ * and a structured JSON object containing the parameters and their values.
793
+ * @public
794
+ */
795
+ export declare interface FunctionCall {
796
+ /**
797
+ * The id of the function call. This must be sent back in the associated {@link FunctionResponse}.
798
+ *
799
+ *
800
+ * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
801
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
802
+ * `undefined`.
803
+ */
804
+ id?: string;
805
+ name: string;
806
+ args: object;
807
+ }
808
+
809
+ /**
810
+ * @public
811
+ */
812
+ export declare interface FunctionCallingConfig {
813
+ mode?: FunctionCallingMode;
814
+ allowedFunctionNames?: string[];
815
+ }
816
+
817
+ /**
818
+ * @public
819
+ */
820
+ export declare const FunctionCallingMode: {
821
+ /**
822
+ * Default model behavior; model decides to predict either a function call
823
+ * or a natural language response.
824
+ */
825
+ readonly AUTO: "AUTO";
826
+ /**
827
+ * Model is constrained to always predicting a function call only.
828
+ * If `allowed_function_names` is set, the predicted function call will be
829
+ * limited to any one of `allowed_function_names`, else the predicted
830
+ * function call will be any one of the provided `function_declarations`.
831
+ */
832
+ readonly ANY: "ANY";
833
+ /**
834
+ * Model will not predict any function call. Model behavior is same as when
835
+ * not passing any function declarations.
836
+ */
837
+ readonly NONE: "NONE";
838
+ };
839
+
840
+ /**
841
+ * @public
842
+ */
843
+ export declare type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];
844
+
845
+ /**
846
+ * Content part interface if the part represents a {@link FunctionCall}.
847
+ * @public
848
+ */
849
+ export declare interface FunctionCallPart {
850
+ text?: never;
851
+ inlineData?: never;
852
+ functionCall: FunctionCall;
853
+ functionResponse?: never;
854
+ thought?: boolean;
855
+ /**
856
+ * @internal
857
+ */
858
+ thoughtSignature?: never;
859
+ executableCode?: never;
860
+ codeExecutionResult?: never;
861
+ }
862
+
863
+ /**
864
+ * Structured representation of a function declaration as defined by the
865
+ * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
866
+ * Included
867
+ * in this declaration are the function name and parameters. This
868
+ * `FunctionDeclaration` is a representation of a block of code that can be used
869
+ * as a Tool by the model and executed by the client.
870
+ * @public
871
+ */
872
+ export declare interface FunctionDeclaration {
873
+ /**
874
+ * The name of the function to call. Must start with a letter or an
875
+ * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
876
+ * a max length of 64.
877
+ */
878
+ name: string;
879
+ /**
880
+ * Description and purpose of the function. Model uses it to decide
881
+ * how and whether to call the function.
882
+ */
883
+ description: string;
884
+ /**
885
+ * Optional. Describes the parameters to this function in JSON Schema Object
886
+ * format. Reflects the Open API 3.03 Parameter Object. Parameter names are
887
+ * case-sensitive. For a function with no parameters, this can be left unset.
888
+ */
889
+ parameters?: ObjectSchema | ObjectSchemaRequest;
890
+ /**
891
+ * Reference to an actual function to call. Specifying this will cause the
892
+ * function to be called automatically when requested by the model.
893
+ */
894
+ functionReference?: Function;
895
+ }
896
+
897
+ /**
898
+ * A `FunctionDeclarationsTool` is a piece of code that enables the system to
899
+ * interact with external systems to perform an action, or set of actions,
900
+ * outside of knowledge and scope of the model.
901
+ * @public
902
+ */
903
+ export declare interface FunctionDeclarationsTool {
904
+ /**
905
+ * Optional. One or more function declarations
906
+ * to be passed to the model along with the current user query. Model may
907
+ * decide to call a subset of these functions by populating
908
+ * {@link FunctionCall} in the response. User should
909
+ * provide a {@link FunctionResponse} for each
910
+ * function call in the next turn. Based on the function responses, the model will
911
+ * generate the final response back to the user. Maximum 64 function
912
+ * declarations can be provided.
913
+ */
914
+ functionDeclarations?: FunctionDeclaration[];
915
+ }
916
+
917
+ /**
918
+ * The result output from a {@link FunctionCall} that contains a string
919
+ * representing the {@link FunctionDeclaration.name}
920
+ * and a structured JSON object containing any output
921
+ * from the function is used as context to the model.
922
+ * This should contain the result of a {@link FunctionCall}
923
+ * made based on model prediction.
924
+ * @public
925
+ */
926
+ export declare interface FunctionResponse {
927
+ /**
928
+ * The id of the {@link FunctionCall}.
929
+ *
930
+ * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
931
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
932
+ * `undefined`.
933
+ */
934
+ id?: string;
935
+ name: string;
936
+ response: object;
937
+ parts?: Part[];
938
+ }
939
+
940
+ /**
941
+ * Content part interface if the part represents {@link FunctionResponse}.
942
+ * @public
943
+ */
944
+ export declare interface FunctionResponsePart {
945
+ text?: never;
946
+ inlineData?: never;
947
+ functionCall?: never;
948
+ functionResponse: FunctionResponse;
949
+ thought?: boolean;
950
+ /**
951
+ * @internal
952
+ */
953
+ thoughtSignature?: never;
954
+ executableCode?: never;
955
+ codeExecutionResult?: never;
956
+ }
957
+
958
+ /**
959
+ * A candidate returned as part of a {@link GenerateContentResponse}.
960
+ * @public
961
+ */
962
+ export declare interface GenerateContentCandidate {
963
+ index: number;
964
+ content: Content;
965
+ finishReason?: FinishReason;
966
+ finishMessage?: string;
967
+ safetyRatings?: SafetyRating[];
968
+ citationMetadata?: CitationMetadata;
969
+ groundingMetadata?: GroundingMetadata;
970
+ urlContextMetadata?: URLContextMetadata;
971
+ }
972
+
973
+ /**
974
+ * Request sent through {@link GenerativeModel.generateContent}
975
+ * @public
976
+ */
977
+ export declare interface GenerateContentRequest extends BaseParams {
978
+ contents: Content[];
979
+ tools?: Tool[];
980
+ toolConfig?: ToolConfig;
981
+ systemInstruction?: string | Part | Content;
982
+ }
983
+
984
+ /**
985
+ * Individual response from {@link GenerativeModel.generateContent} and
986
+ * {@link GenerativeModel.generateContentStream}.
987
+ * `generateContentStream()` will return one in each chunk until
988
+ * the stream is done.
989
+ * @public
990
+ */
991
+ export declare interface GenerateContentResponse {
992
+ candidates?: GenerateContentCandidate[];
993
+ promptFeedback?: PromptFeedback;
994
+ usageMetadata?: UsageMetadata;
995
+ }
996
+
997
+ /**
998
+ * Result object returned from {@link GenerativeModel.generateContent} call.
999
+ *
1000
+ * @public
1001
+ */
1002
+ export declare interface GenerateContentResult {
1003
+ response: EnhancedGenerateContentResponse;
1004
+ }
1005
+
1006
+ /**
1007
+ * Result object returned from {@link GenerativeModel.generateContentStream} call.
1008
+ * Iterate over `stream` to get chunks as they come in and/or
1009
+ * use the `response` promise to get the aggregated response when
1010
+ * the stream is done.
1011
+ *
1012
+ * @public
1013
+ */
1014
+ export declare interface GenerateContentStreamResult {
1015
+ stream: AsyncGenerator<EnhancedGenerateContentResponse>;
1016
+ response: Promise<EnhancedGenerateContentResponse>;
1017
+ }
1018
+
1019
+ /**
1020
+ * Config options for content-related requests
1021
+ * @public
1022
+ */
1023
+ export declare interface GenerationConfig {
1024
+ candidateCount?: number;
1025
+ stopSequences?: string[];
1026
+ maxOutputTokens?: number;
1027
+ temperature?: number;
1028
+ topP?: number;
1029
+ topK?: number;
1030
+ presencePenalty?: number;
1031
+ frequencyPenalty?: number;
1032
+ /**
1033
+ * Output response MIME type of the generated candidate text.
1034
+ * Supported MIME types are `text/plain` (default, text output),
1035
+ * `application/json` (JSON response in the candidates), and
1036
+ * `text/x.enum`.
1037
+ */
1038
+ responseMimeType?: string;
1039
+ /**
1040
+ * Output response schema of the generated candidate text. This
1041
+ * value can be a class generated with a {@link Schema} static method
1042
+ * like `Schema.string()` or `Schema.object()` or it can be a plain
1043
+ * JS object matching the {@link SchemaRequest} interface.
1044
+ * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently
1045
+ * this is limited to `application/json` and `text/x.enum`.
1046
+ */
1047
+ responseSchema?: TypedSchema | SchemaRequest;
1048
+ /**
1049
+ * Generation modalities to be returned in generation responses.
1050
+ *
1051
+ * @remarks
1052
+ * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}.
1053
+ * - Only image generation (`ResponseModality.IMAGE`) is supported.
1054
+ *
1055
+ * @beta
1056
+ */
1057
+ responseModalities?: ResponseModality[];
1058
+ /**
1059
+ * Configuration for "thinking" behavior of compatible Gemini models.
1060
+ */
1061
+ thinkingConfig?: ThinkingConfig;
1062
+ }
1063
+
1064
+ /**
1065
+ * Interface for sending an image.
1066
+ * @public
1067
+ */
1068
+ export declare interface GenerativeContentBlob {
1069
+ mimeType: string;
1070
+ /**
1071
+ * Image as a base64 string.
1072
+ */
1073
+ data: string;
1074
+ }
1075
+
1076
+ /**
1077
+ * Class for generative model APIs.
1078
+ * @public
1079
+ */
1080
+ export declare class GenerativeModel extends AIModel {
1081
+ private chromeAdapter?;
1082
+ generationConfig: GenerationConfig;
1083
+ safetySettings: SafetySetting[];
1084
+ requestOptions?: RequestOptions;
1085
+ tools?: Tool[];
1086
+ toolConfig?: ToolConfig;
1087
+ systemInstruction?: Content;
1088
+ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined);
1089
+ /**
1090
+ * Makes a single non-streaming call to the model
1091
+ * and returns an object containing a single {@link GenerateContentResponse}.
1092
+ */
1093
+ generateContent(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
1094
+ /**
1095
+ * Makes a single streaming call to the model
1096
+ * and returns an object containing an iterable stream that iterates
1097
+ * over all chunks in the streaming response as well as
1098
+ * a promise that returns the final aggregated response.
1099
+ */
1100
+ generateContentStream(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
1101
+ /**
1102
+ * Gets a new {@link ChatSession} instance which can be used for
1103
+ * multi-turn chats.
1104
+ */
1105
+ startChat(startChatParams?: StartChatParams): ChatSession;
1106
+ /**
1107
+ * Counts the tokens in the provided request.
1108
+ */
1109
+ countTokens(request: CountTokensRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
1110
+ }
1111
+
1112
+ /**
1113
+ * Returns the default {@link AI} instance that is associated with the provided
1114
+ * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
1115
+ * default settings.
1116
+ *
1117
+ * @example
1118
+ * ```javascript
1119
+ * const ai = getAI(app);
1120
+ * ```
1121
+ *
1122
+ * @example
1123
+ * ```javascript
1124
+ * // Get an AI instance configured to use the Gemini Developer API (via Google AI).
1125
+ * const ai = getAI(app, { backend: new GoogleAIBackend() });
1126
+ * ```
1127
+ *
1128
+ * @example
1129
+ * ```javascript
1130
+ * // Get an AI instance configured to use the Vertex AI Gemini API.
1131
+ * const ai = getAI(app, { backend: new VertexAIBackend() });
1132
+ * ```
1133
+ *
1134
+ * @param app - The {@link @firebase/app#FirebaseApp} to use.
1135
+ * @param options - {@link AIOptions} that configure the AI instance.
1136
+ * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
1137
+ *
1138
+ * @public
1139
+ */
1140
+ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
1141
+
1142
+ /**
1143
+ * Returns a {@link GenerativeModel} class with methods for inference
1144
+ * and other functionality.
1145
+ *
1146
+ * @public
1147
+ */
1148
+ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
1149
+
1150
+ /**
1151
+ * Returns an {@link ImagenModel} class with methods for using Imagen.
1152
+ *
1153
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
1154
+ *
1155
+ * @param ai - An {@link AI} instance.
1156
+ * @param modelParams - Parameters to use when making Imagen requests.
1157
+ * @param requestOptions - Additional options to use when making requests.
1158
+ *
1159
+ * @throws If the `apiKey` or `projectId` fields are missing in your
1160
+ * Firebase config.
1161
+ *
1162
+ * @public
1163
+ */
1164
+ export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
1165
+
1166
+ /**
1167
+ * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
1168
+ *
1169
+ * The Live API is only supported in modern browser windows and Node >= 22.
1170
+ *
1171
+ * @param ai - An {@link AI} instance.
1172
+ * @param modelParams - Parameters to use when setting up a {@link LiveSession}.
1173
+ * @throws If the `apiKey` or `projectId` fields are missing in your
1174
+ * Firebase config.
1175
+ *
1176
+ * @beta
1177
+ */
1178
+ export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel;
1179
+
1180
+ /**
1181
+ * Returns a {@link TemplateGenerativeModel} class for executing server-side
1182
+ * templates.
1183
+ *
1184
+ * @param ai - An {@link AI} instance.
1185
+ * @param requestOptions - Additional options to use when making requests.
1186
+ *
1187
+ * @beta
1188
+ */
1189
+ export declare function getTemplateGenerativeModel(ai: AI, requestOptions?: RequestOptions): TemplateGenerativeModel;
1190
+
1191
+ /**
1192
+ * Returns a {@link TemplateImagenModel} class for executing server-side
1193
+ * Imagen templates.
1194
+ *
1195
+ * @param ai - An {@link AI} instance.
1196
+ * @param requestOptions - Additional options to use when making requests.
1197
+ *
1198
+ * @beta
1199
+ */
1200
+ export declare function getTemplateImagenModel(ai: AI, requestOptions?: RequestOptions): TemplateImagenModel;
1201
+
1202
+ /**
1203
+ * Configuration class for the Gemini Developer API.
1204
+ *
1205
+ * Use this with {@link AIOptions} when initializing the AI service via
1206
+ * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
1207
+ *
1208
+ * @public
1209
+ */
1210
+ export declare class GoogleAIBackend extends Backend {
1211
+ /**
1212
+ * Creates a configuration object for the Gemini Developer API backend.
1213
+ */
1214
+ constructor();
1215
+ /**
1216
+ * @internal
1217
+ */
1218
+ _getModelPath(project: string, model: string): string;
1219
+ /**
1220
+ * @internal
1221
+ */
1222
+ _getTemplatePath(project: string, templateId: string): string;
1223
+ }
1224
+
1225
+ /**
1226
+ * @internal
1227
+ */
1228
+ export declare interface GoogleAICitationMetadata {
1229
+ citationSources: Citation[];
1230
+ }
1231
+
1232
+ /**
1233
+ * @internal
1234
+ */
1235
+ export declare interface GoogleAICountTokensRequest {
1236
+ generateContentRequest: {
1237
+ model: string;
1238
+ contents: Content[];
1239
+ systemInstruction?: string | Part | Content;
1240
+ tools?: Tool[];
1241
+ generationConfig?: GenerationConfig;
1242
+ };
1243
+ }
1244
+
1245
+ /**
1246
+ * @internal
1247
+ */
1248
+ export declare interface GoogleAIGenerateContentCandidate {
1249
+ index: number;
1250
+ content: Content;
1251
+ finishReason?: FinishReason;
1252
+ finishMessage?: string;
1253
+ safetyRatings?: SafetyRating[];
1254
+ citationMetadata?: GoogleAICitationMetadata;
1255
+ groundingMetadata?: GroundingMetadata;
1256
+ urlContextMetadata?: URLContextMetadata;
1257
+ }
1258
+
1259
+ /**
1260
+ * @internal
1261
+ */
1262
+ export declare interface GoogleAIGenerateContentResponse {
1263
+ candidates?: GoogleAIGenerateContentCandidate[];
1264
+ promptFeedback?: PromptFeedback;
1265
+ usageMetadata?: UsageMetadata;
1266
+ }
1267
+
1268
+ /**
1269
+ * Specifies the Google Search configuration.
1270
+ *
1271
+ * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
1272
+ *
1273
+ * @public
1274
+ */
1275
+ export declare interface GoogleSearch {
1276
+ }
1277
+
1278
+ /**
1279
+ * A tool that allows a Gemini model to connect to Google Search to access and incorporate
1280
+ * up-to-date information from the web into its responses.
1281
+ *
1282
+ * Important: If using Grounding with Google Search, you are required to comply with the
1283
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
1284
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
1285
+ * section within the Service Specific Terms).
1286
+ *
1287
+ * @public
1288
+ */
1289
+ export declare interface GoogleSearchTool {
1290
+ /**
1291
+ * Specifies the Google Search configuration.
1292
+ * Currently, this is an empty object, but it's reserved for future configuration options.
1293
+ *
1294
+ * When using this feature, you are required to comply with the "Grounding with Google Search"
1295
+ * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
1296
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
1297
+ * section within the Service Specific Terms).
1298
+ */
1299
+ googleSearch: GoogleSearch;
1300
+ }
1301
+
1302
+ /**
1303
+ * Represents a chunk of retrieved data that supports a claim in the model's response. This is part
1304
+ * of the grounding information provided when grounding is enabled.
1305
+ *
1306
+ * @public
1307
+ */
1308
+ export declare interface GroundingChunk {
1309
+ /**
1310
+ * Contains details if the grounding chunk is from a web source.
1311
+ */
1312
+ web?: WebGroundingChunk;
1313
+ }
1314
+
1315
+ /**
1316
+ * Metadata returned when grounding is enabled.
1317
+ *
1318
+ * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).
1319
+ *
1320
+ * Important: If using Grounding with Google Search, you are required to comply with the
1321
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
1322
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
1323
+ * section within the Service Specific Terms).
1324
+ *
1325
+ * @public
1326
+ */
1327
+ export declare interface GroundingMetadata {
1328
+ /**
1329
+ * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
1330
+ * embedded in an app to display a Google Search entry point for follow-up web searches related to
1331
+ * a model's "Grounded Response".
1332
+ */
1333
+ searchEntryPoint?: SearchEntrypoint;
1334
+ /**
1335
+ * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content
1336
+ * (for example, from a web page). that the model used to ground its response.
1337
+ */
1338
+ groundingChunks?: GroundingChunk[];
1339
+ /**
1340
+ * A list of {@link GroundingSupport} objects. Each object details how specific segments of the
1341
+ * model's response are supported by the `groundingChunks`.
1342
+ */
1343
+ groundingSupports?: GroundingSupport[];
1344
+ /**
1345
+ * A list of web search queries that the model performed to gather the grounding information.
1346
+ * These can be used to allow users to explore the search results themselves.
1347
+ */
1348
+ webSearchQueries?: string[];
1349
+ /**
1350
+ * @deprecated Use {@link GroundingSupport} instead.
1351
+ */
1352
+ retrievalQueries?: string[];
1353
+ }
1354
+
1355
+ /**
1356
+ * Provides information about how a specific segment of the model's response is supported by the
1357
+ * retrieved grounding chunks.
1358
+ *
1359
+ * @public
1360
+ */
1361
+ export declare interface GroundingSupport {
1362
+ /**
1363
+ * Specifies the segment of the model's response content that this grounding support pertains to.
1364
+ */
1365
+ segment?: Segment;
1366
+ /**
1367
+ * A list of indices that refer to specific {@link GroundingChunk} objects within the
1368
+ * {@link GroundingMetadata.groundingChunks} array. These referenced chunks
1369
+ * are the sources that support the claim made in the associated `segment` of the response.
1370
+ * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,
1371
+ * and `groundingChunks[4]` are the retrieved content supporting this part of the response.
1372
+ */
1373
+ groundingChunkIndices?: number[];
1374
+ }
1375
+
1376
+ /**
1377
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
1378
+ *
1379
+ * @public
1380
+ */
1381
+ export declare const HarmBlockMethod: {
1382
+ /**
1383
+ * The harm block method uses both probability and severity scores.
1384
+ */
1385
+ readonly SEVERITY: "SEVERITY";
1386
+ /**
1387
+ * The harm block method uses the probability score.
1388
+ */
1389
+ readonly PROBABILITY: "PROBABILITY";
1390
+ };
1391
+
1392
+ /**
1393
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
1394
+ *
1395
+ * @public
1396
+ */
1397
+ export declare type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];
1398
+
1399
+ /**
1400
+ * Threshold above which a prompt or candidate will be blocked.
1401
+ * @public
1402
+ */
1403
+ export declare const HarmBlockThreshold: {
1404
+ /**
1405
+ * Content with `NEGLIGIBLE` will be allowed.
1406
+ */
1407
+ readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
1408
+ /**
1409
+ * Content with `NEGLIGIBLE` and `LOW` will be allowed.
1410
+ */
1411
+ readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
1412
+ /**
1413
+ * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
1414
+ */
1415
+ readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
1416
+ /**
1417
+ * All content will be allowed.
1418
+ */
1419
+ readonly BLOCK_NONE: "BLOCK_NONE";
1420
+ /**
1421
+ * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
1422
+ * to the {@link (HarmCategory:type)} will not be present in the response.
1423
+ */
1424
+ readonly OFF: "OFF";
1425
+ };
1426
+
1427
+ /**
1428
+ * Threshold above which a prompt or candidate will be blocked.
1429
+ * @public
1430
+ */
1431
+ export declare type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];
1432
+
1433
+ /**
1434
+ * Harm categories that would cause prompts or candidates to be blocked.
1435
+ * @public
1436
+ */
1437
+ export declare const HarmCategory: {
1438
+ readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
1439
+ readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
1440
+ readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
1441
+ readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT";
1442
+ };
1443
+
1444
+ /**
1445
+ * Harm categories that would cause prompts or candidates to be blocked.
1446
+ * @public
1447
+ */
1448
+ export declare type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];
1449
+
1450
+ /**
1451
+ * Probability that a prompt or candidate matches a harm category.
1452
+ * @public
1453
+ */
1454
+ export declare const HarmProbability: {
1455
+ /**
1456
+ * Content has a negligible chance of being unsafe.
1457
+ */
1458
+ readonly NEGLIGIBLE: "NEGLIGIBLE";
1459
+ /**
1460
+ * Content has a low chance of being unsafe.
1461
+ */
1462
+ readonly LOW: "LOW";
1463
+ /**
1464
+ * Content has a medium chance of being unsafe.
1465
+ */
1466
+ readonly MEDIUM: "MEDIUM";
1467
+ /**
1468
+ * Content has a high chance of being unsafe.
1469
+ */
1470
+ readonly HIGH: "HIGH";
1471
+ };
1472
+
1473
+ /**
1474
+ * Probability that a prompt or candidate matches a harm category.
1475
+ * @public
1476
+ */
1477
+ export declare type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability];
1478
+
1479
+ /**
1480
+ * Harm severity levels.
1481
+ * @public
1482
+ */
1483
+ export declare const HarmSeverity: {
1484
+ /**
1485
+ * Negligible level of harm severity.
1486
+ */
1487
+ readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE";
1488
+ /**
1489
+ * Low level of harm severity.
1490
+ */
1491
+ readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW";
1492
+ /**
1493
+ * Medium level of harm severity.
1494
+ */
1495
+ readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM";
1496
+ /**
1497
+ * High level of harm severity.
1498
+ */
1499
+ readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH";
1500
+ /**
1501
+ * Harm severity is not supported.
1502
+ *
1503
+ * @remarks
1504
+ * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.
1505
+ */
1506
+ readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED";
1507
+ };
1508
+
1509
+ /**
1510
+ * Harm severity levels.
1511
+ * @public
1512
+ */
1513
+ export declare type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];
1514
+
1515
+ /**
1516
+ * Configures hybrid inference.
1517
+ * @beta
1518
+ */
1519
+ export declare interface HybridParams {
1520
+ /**
1521
+ * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
1522
+ */
1523
+ mode: InferenceMode;
1524
+ /**
1525
+ * Optional. Specifies advanced params for on-device inference.
1526
+ */
1527
+ onDeviceParams?: OnDeviceParams;
1528
+ /**
1529
+ * Optional. Specifies advanced params for in-cloud inference.
1530
+ */
1531
+ inCloudParams?: ModelParams;
1532
+ }
1533
+
1534
+ /**
1535
+ * Aspect ratios for Imagen images.
1536
+ *
1537
+ * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
1538
+ * {@link ImagenGenerationConfig}.
1539
+ *
1540
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1541
+ * for more details and examples of the supported aspect ratios.
1542
+ *
1543
+ * @public
1544
+ */
1545
+ export declare const ImagenAspectRatio: {
1546
+ /**
1547
+ * Square (1:1) aspect ratio.
1548
+ */
1549
+ readonly SQUARE: "1:1";
1550
+ /**
1551
+ * Landscape (3:4) aspect ratio.
1552
+ */
1553
+ readonly LANDSCAPE_3x4: "3:4";
1554
+ /**
1555
+ * Portrait (4:3) aspect ratio.
1556
+ */
1557
+ readonly PORTRAIT_4x3: "4:3";
1558
+ /**
1559
+ * Landscape (16:9) aspect ratio.
1560
+ */
1561
+ readonly LANDSCAPE_16x9: "16:9";
1562
+ /**
1563
+ * Portrait (9:16) aspect ratio.
1564
+ */
1565
+ readonly PORTRAIT_9x16: "9:16";
1566
+ };
1567
+
1568
+ /**
1569
+ * Aspect ratios for Imagen images.
1570
+ *
1571
+ * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
1572
+ * {@link ImagenGenerationConfig}.
1573
+ *
1574
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1575
+ * for more details and examples of the supported aspect ratios.
1576
+ *
1577
+ * @public
1578
+ */
1579
+ export declare type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];
1580
+
1581
+ /**
1582
+ * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
1583
+ *
1584
+ * This feature is not available yet.
1585
+ * @public
1586
+ */
1587
+ export declare interface ImagenGCSImage {
1588
+ /**
1589
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
1590
+ *
1591
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
1592
+ */
1593
+ mimeType: string;
1594
+ /**
1595
+ * The URI of the file stored in a Cloud Storage for Firebase bucket.
1596
+ *
1597
+ * @example `"gs://bucket-name/path/sample_0.jpg"`.
1598
+ */
1599
+ gcsURI: string;
1600
+ }
1601
+
1602
+ /**
1603
+ * Configuration options for generating images with Imagen.
1604
+ *
1605
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for
1606
+ * more details.
1607
+ *
1608
+ * @public
1609
+ */
1610
+ export declare interface ImagenGenerationConfig {
1611
+ /**
1612
+ * A description of what should be omitted from the generated images.
1613
+ *
1614
+ * Support for negative prompts depends on the Imagen model.
1615
+ *
1616
+ * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.
1617
+ *
1618
+ * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions
1619
+ * greater than `imagen-3.0-generate-002`.
1620
+ */
1621
+ negativePrompt?: string;
1622
+ /**
1623
+ * The number of images to generate. The default value is 1.
1624
+ *
1625
+ * The number of sample images that may be generated in each request depends on the model
1626
+ * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a>
1627
+ * documentation for more details.
1628
+ */
1629
+ numberOfImages?: number;
1630
+ /**
1631
+ * The aspect ratio of the generated images. The default value is square 1:1.
1632
+ * Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}
1633
+ * for more details.
1634
+ */
1635
+ aspectRatio?: ImagenAspectRatio;
1636
+ /**
1637
+ * The image format of the generated images. The default is PNG.
1638
+ *
1639
+ * See {@link ImagenImageFormat} for more details.
1640
+ */
1641
+ imageFormat?: ImagenImageFormat;
1642
+ /**
1643
+ * Whether to add an invisible watermark to generated images.
1644
+ *
1645
+ * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate
1646
+ * that they are AI generated. If set to `false`, watermarking will be disabled.
1647
+ *
1648
+ * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a>
1649
+ * documentation for more details.
1650
+ *
1651
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,
1652
+ * and cannot be turned off.
1653
+ */
1654
+ addWatermark?: boolean;
1655
+ }
1656
+
1657
+ /**
1658
+ * The response from a request to generate images with Imagen.
1659
+ *
1660
+ * @public
1661
+ */
1662
+ export declare interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> {
1663
+ /**
1664
+ * The images generated by Imagen.
1665
+ *
1666
+ * The number of images generated may be fewer than the number requested if one or more were
1667
+ * filtered out; see `filteredReason`.
1668
+ */
1669
+ images: T[];
1670
+ /**
1671
+ * The reason that images were filtered out. This property will only be defined if one
1672
+ * or more images were filtered.
1673
+ *
1674
+ * Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)},
1675
+ * {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model.
1676
+ * The filter levels may be adjusted in your {@link ImagenSafetySettings}.
1677
+ *
1678
+ * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen}
1679
+ * for more details.
1680
+ */
1681
+ filteredReason?: string;
1682
+ }
1683
+
1684
+ /**
1685
+ * @license
1686
+ * Copyright 2025 Google LLC
1687
+ *
1688
+ * Licensed under the Apache License, Version 2.0 (the "License");
1689
+ * you may not use this file except in compliance with the License.
1690
+ * You may obtain a copy of the License at
1691
+ *
1692
+ * http://www.apache.org/licenses/LICENSE-2.0
1693
+ *
1694
+ * Unless required by applicable law or agreed to in writing, software
1695
+ * distributed under the License is distributed on an "AS IS" BASIS,
1696
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1697
+ * See the License for the specific language governing permissions and
1698
+ * limitations under the License.
1699
+ */
1700
+ /**
1701
+ * Defines the image format for images generated by Imagen.
1702
+ *
1703
+ * Use this class to specify the desired format (JPEG or PNG) and compression quality
1704
+ * for images generated by Imagen. This is typically included as part of
1705
+ * {@link ImagenModelParams}.
1706
+ *
1707
+ * @example
1708
+ * ```javascript
1709
+ * const imagenModelParams = {
1710
+ * // ... other ImagenModelParams
1711
+ * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.
1712
+ * }
1713
+ * ```
1714
+ *
1715
+ * @public
1716
+ */
1717
+ export declare class ImagenImageFormat {
1718
+ /**
1719
+ * The MIME type.
1720
+ */
1721
+ mimeType: string;
1722
+ /**
1723
+ * The level of compression (a number between 0 and 100).
1724
+ */
1725
+ compressionQuality?: number;
1726
+ private constructor();
1727
+ /**
1728
+ * Creates an {@link ImagenImageFormat} for a JPEG image.
1729
+ *
1730
+ * @param compressionQuality - The level of compression (a number between 0 and 100).
1731
+ * @returns An {@link ImagenImageFormat} object for a JPEG image.
1732
+ *
1733
+ * @public
1734
+ */
1735
+ static jpeg(compressionQuality?: number): ImagenImageFormat;
1736
+ /**
1737
+ * Creates an {@link ImagenImageFormat} for a PNG image.
1738
+ *
1739
+ * @returns An {@link ImagenImageFormat} object for a PNG image.
1740
+ *
1741
+ * @public
1742
+ */
1743
+ static png(): ImagenImageFormat;
1744
+ }
1745
+
1746
+ /**
1747
+ * @license
1748
+ * Copyright 2025 Google LLC
1749
+ *
1750
+ * Licensed under the Apache License, Version 2.0 (the "License");
1751
+ * you may not use this file except in compliance with the License.
1752
+ * You may obtain a copy of the License at
1753
+ *
1754
+ * http://www.apache.org/licenses/LICENSE-2.0
1755
+ *
1756
+ * Unless required by applicable law or agreed to in writing, software
1757
+ * distributed under the License is distributed on an "AS IS" BASIS,
1758
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1759
+ * See the License for the specific language governing permissions and
1760
+ * limitations under the License.
1761
+ */
1762
+ /**
1763
+ * An image generated by Imagen, represented as inline data.
1764
+ *
1765
+ * @public
1766
+ */
1767
+ export declare interface ImagenInlineImage {
1768
+ /**
1769
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
1770
+ *
1771
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
1772
+ */
1773
+ mimeType: string;
1774
+ /**
1775
+ * The base64-encoded image data.
1776
+ */
1777
+ bytesBase64Encoded: string;
1778
+ }
1779
+
1780
+ /**
1781
+ * Class for Imagen model APIs.
1782
+ *
1783
+ * This class provides methods for generating images using the Imagen model.
1784
+ *
1785
+ * @example
1786
+ * ```javascript
1787
+ * const imagen = new ImagenModel(
1788
+ * ai,
1789
+ * {
1790
+ * model: 'imagen-3.0-generate-002'
1791
+ * }
1792
+ * );
1793
+ *
1794
+ * const response = await imagen.generateImages('A photo of a cat');
1795
+ * if (response.images.length > 0) {
1796
+ * console.log(response.images[0].bytesBase64Encoded);
1797
+ * }
1798
+ * ```
1799
+ *
1800
+ * @public
1801
+ */
1802
+ export declare class ImagenModel extends AIModel {
1803
+ requestOptions?: RequestOptions | undefined;
1804
+ /**
1805
+ * The Imagen generation configuration.
1806
+ */
1807
+ generationConfig?: ImagenGenerationConfig;
1808
+ /**
1809
+ * Safety settings for filtering inappropriate content.
1810
+ */
1811
+ safetySettings?: ImagenSafetySettings;
1812
+ /**
1813
+ * Constructs a new instance of the {@link ImagenModel} class.
1814
+ *
1815
+ * @param ai - an {@link AI} instance.
1816
+ * @param modelParams - Parameters to use when making requests to Imagen.
1817
+ * @param requestOptions - Additional options to use when making requests.
1818
+ *
1819
+ * @throws If the `apiKey` or `projectId` fields are missing in your
1820
+ * Firebase config.
1821
+ */
1822
+ constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined);
1823
+ /**
1824
+ * Generates images using the Imagen model and returns them as
1825
+ * base64-encoded strings.
1826
+ *
1827
+ * @param prompt - A text prompt describing the image(s) to generate.
1828
+ * @returns A promise that resolves to an {@link ImagenGenerationResponse}
1829
+ * object containing the generated images.
1830
+ *
1831
+ * @throws If the request to generate images fails. This happens if the
1832
+ * prompt is blocked.
1833
+ *
1834
+ * @remarks
1835
+ * If the prompt was not blocked, but one or more of the generated images were filtered, the
1836
+ * returned object will have a `filteredReason` property.
1837
+ * If all images are filtered, the `images` array will be empty.
1838
+ *
1839
+ * @public
1840
+ */
1841
+ generateImages(prompt: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
1842
+ /**
1843
+ * Generates images to Cloud Storage for Firebase using the Imagen model.
1844
+ *
1845
+ * @internal This method is temporarily internal.
1846
+ *
1847
+ * @param prompt - A text prompt describing the image(s) to generate.
1848
+ * @param gcsURI - The URI of a file stored in a Cloud Storage for Firebase bucket.
1849
+ * This should be a directory. For example, `gs://my-bucket/my-directory/`.
1850
+ * @returns A promise that resolves to an {@link ImagenGenerationResponse}
1851
+ * object containing the URLs of the generated images.
1852
+ *
1853
+ * @throws If the request to generate images fails. This happens if
1854
+ * the prompt is blocked.
1855
+ *
1856
+ * @remarks
1857
+ * If the prompt was not blocked, but one or more of the generated images were filtered, the
1858
+ * returned object will have a `filteredReason` property.
1859
+ * If all images are filtered, the `images` array will be empty.
1860
+ */
1861
+ generateImagesGCS(prompt: string, gcsURI: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenGCSImage>>;
1862
+ }
1863
+
1864
+ /**
1865
+ * Parameters for configuring an {@link ImagenModel}.
1866
+ *
1867
+ * @public
1868
+ */
1869
+ export declare interface ImagenModelParams {
1870
+ /**
1871
+ * The Imagen model to use for generating images.
1872
+ * For example: `imagen-3.0-generate-002`.
1873
+ *
1874
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
1875
+ *
1876
+ * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}
1877
+ * for a full list of supported Imagen 3 models.
1878
+ */
1879
+ model: string;
1880
+ /**
1881
+ * Configuration options for generating images with Imagen.
1882
+ */
1883
+ generationConfig?: ImagenGenerationConfig;
1884
+ /**
1885
+ * Safety settings for filtering potentially inappropriate content.
1886
+ */
1887
+ safetySettings?: ImagenSafetySettings;
1888
+ }
1889
+
1890
+ /**
1891
+ * A filter level controlling whether generation of images containing people or faces is allowed.
1892
+ *
1893
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
1894
+ * documentation for more details.
1895
+ *
1896
+ * @public
1897
+ */
1898
+ export declare const ImagenPersonFilterLevel: {
1899
+ /**
1900
+ * Disallow generation of images containing people or faces; images of people are filtered out.
1901
+ */
1902
+ readonly BLOCK_ALL: "dont_allow";
1903
+ /**
1904
+ * Allow generation of images containing adults only; images of children are filtered out.
1905
+ *
1906
+ * Generation of images containing people or faces may require your use case to be
1907
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
1908
+ * for more details.
1909
+ */
1910
+ readonly ALLOW_ADULT: "allow_adult";
1911
+ /**
1912
+ * Allow generation of images containing adults as well as children.
1913
+ *
1914
+ * Generation of images containing people or faces may require your use case to be
1915
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
1916
+ * for more details.
1917
+ */
1918
+ readonly ALLOW_ALL: "allow_all";
1919
+ };
1920
+
1921
+ /**
1922
+ * A filter level controlling whether generation of images containing people or faces is allowed.
1923
+ *
1924
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
1925
+ * documentation for more details.
1926
+ *
1927
+ * @public
1928
+ */
1929
+ export declare type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];
1930
+
1931
+ /**
1932
+ * A filter level controlling how aggressively to filter sensitive content.
1933
+ *
1934
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
1935
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
1936
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
1937
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1938
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
1939
+ * for more details.
1940
+ *
1941
+ * @public
1942
+ */
1943
+ export declare const ImagenSafetyFilterLevel: {
1944
+ /**
1945
+ * The most aggressive filtering level; most strict blocking.
1946
+ */
1947
+ readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above";
1948
+ /**
1949
+ * Blocks some sensitive prompts and responses.
1950
+ */
1951
+ readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above";
1952
+ /**
1953
+ * Blocks few sensitive prompts and responses.
1954
+ */
1955
+ readonly BLOCK_ONLY_HIGH: "block_only_high";
1956
+ /**
1957
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
1958
+ *
1959
+ * Access to this feature is restricted and may require your case to be reviewed and approved by
1960
+ * Cloud support.
1961
+ */
1962
+ readonly BLOCK_NONE: "block_none";
1963
+ };
1964
+
1965
+ /**
1966
+ * A filter level controlling how aggressively to filter sensitive content.
1967
+ *
1968
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
1969
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
1970
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
1971
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1972
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
1973
+ * for more details.
1974
+ *
1975
+ * @public
1976
+ */
1977
+ export declare type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];
1978
+
1979
+ /**
1980
+ * Settings for controlling the aggressiveness of filtering out sensitive content.
1981
+ *
1982
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1983
+ * for more details.
1984
+ *
1985
+ * @public
1986
+ */
1987
+ export declare interface ImagenSafetySettings {
1988
+ /**
1989
+ * A filter level controlling how aggressively to filter out sensitive content from generated
1990
+ * images.
1991
+ */
1992
+ safetyFilterLevel?: ImagenSafetyFilterLevel;
1993
+ /**
1994
+ * A filter level controlling whether generation of images containing people or faces is allowed.
1995
+ */
1996
+ personFilterLevel?: ImagenPersonFilterLevel;
1997
+ }
1998
+
1999
+ /**
2000
+ * Determines whether inference happens on-device or in-cloud.
2001
+ *
2002
+ * @remarks
2003
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
2004
+ * on-device model. If on-device inference is not available, the SDK
2005
+ * will fall back to using a cloud-hosted model.
2006
+ * <br/>
2007
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
2008
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
2009
+ * If on-device inference is not available, inference methods will throw.
2010
+ * <br/>
2011
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
2012
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
2013
+ * <br/>
2014
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
2015
+ * cloud-hosted model. If not available, the SDK will fall back to an
2016
+ * on-device model.
2017
+ *
2018
+ * @beta
2019
+ */
2020
+ export declare const InferenceMode: {
2021
+ readonly PREFER_ON_DEVICE: "prefer_on_device";
2022
+ readonly ONLY_ON_DEVICE: "only_on_device";
2023
+ readonly ONLY_IN_CLOUD: "only_in_cloud";
2024
+ readonly PREFER_IN_CLOUD: "prefer_in_cloud";
2025
+ };
2026
+
2027
+ /**
2028
+ * Determines whether inference happens on-device or in-cloud.
2029
+ *
2030
+ * @beta
2031
+ */
2032
+ export declare type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
2033
+
2034
+ /**
2035
+ * Indicates whether inference happened on-device or in-cloud.
2036
+ *
2037
+ * @beta
2038
+ */
2039
+ export declare const InferenceSource: {
2040
+ readonly ON_DEVICE: "on_device";
2041
+ readonly IN_CLOUD: "in_cloud";
2042
+ };
2043
+
2044
+ /**
2045
+ * Indicates whether inference happened on-device or in-cloud.
2046
+ *
2047
+ * @beta
2048
+ */
2049
+ export declare type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource];
2050
+
2051
+ /**
2052
+ * Content part interface if the part represents an image.
2053
+ * @public
2054
+ */
2055
+ export declare interface InlineDataPart {
2056
+ text?: never;
2057
+ inlineData: GenerativeContentBlob;
2058
+ functionCall?: never;
2059
+ functionResponse?: never;
2060
+ /**
2061
+ * Applicable if `inlineData` is a video.
2062
+ */
2063
+ videoMetadata?: VideoMetadata;
2064
+ thought?: boolean;
2065
+ /**
2066
+ * @internal
2067
+ */
2068
+ thoughtSignature?: never;
2069
+ executableCode?: never;
2070
+ codeExecutionResult?: never;
2071
+ }
2072
+
2073
+ /**
2074
+ * Schema class for "integer" types.
2075
+ * @public
2076
+ */
2077
+ export declare class IntegerSchema extends Schema {
2078
+ constructor(schemaParams?: SchemaParams);
2079
+ }
2080
+
2081
+ /**
2082
+ * The programming language of the code.
2083
+ *
2084
+ * @public
2085
+ */
2086
+ export declare const Language: {
2087
+ UNSPECIFIED: string;
2088
+ PYTHON: string;
2089
+ };
2090
+
2091
+ /**
2092
+ * The programming language of the code.
2093
+ *
2094
+ * @public
2095
+ */
2096
+ export declare type Language = (typeof Language)[keyof typeof Language];
2097
+
2098
+ /**
2099
+ * Configures the creation of an on-device language model session.
2100
+ * @beta
2101
+ */
2102
+ export declare interface LanguageModelCreateCoreOptions {
2103
+ topK?: number;
2104
+ temperature?: number;
2105
+ expectedInputs?: LanguageModelExpected[];
2106
+ }
2107
+
2108
+ /**
2109
+ * Configures the creation of an on-device language model session.
2110
+ * @beta
2111
+ */
2112
+ export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
2113
+ signal?: AbortSignal;
2114
+ initialPrompts?: LanguageModelMessage[];
2115
+ }
2116
+
2117
+ /**
2118
+ * Options for the expected inputs for an on-device language model.
2119
+ * @beta
2120
+ */ export declare interface LanguageModelExpected {
2121
+ type: LanguageModelMessageType;
2122
+ languages?: string[];
2123
+ }
2124
+
2125
+ /**
2126
+ * An on-device language model message.
2127
+ * @beta
2128
+ */
2129
+ export declare interface LanguageModelMessage {
2130
+ role: LanguageModelMessageRole;
2131
+ content: LanguageModelMessageContent[];
2132
+ }
2133
+
2134
+ /**
2135
+ * An on-device language model content object.
2136
+ * @beta
2137
+ */
2138
+ export declare interface LanguageModelMessageContent {
2139
+ type: LanguageModelMessageType;
2140
+ value: LanguageModelMessageContentValue;
2141
+ }
2142
+
2143
+ /**
2144
+ * Content formats that can be provided as on-device message content.
2145
+ * @beta
2146
+ */
2147
+ export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
2148
+
2149
+ /**
2150
+ * Allowable roles for on-device language model usage.
2151
+ * @beta
2152
+ */
2153
+ export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
2154
+
2155
+ /**
2156
+ * Allowable types for on-device language model messages.
2157
+ * @beta
2158
+ */
2159
+ export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
2160
+
2161
+ /**
2162
+ * Options for an on-device language model prompt.
2163
+ * @beta
2164
+ */
2165
+ export declare interface LanguageModelPromptOptions {
2166
+ responseConstraint?: object;
2167
+ }
2168
+
2169
+ /**
2170
+ * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
2171
+ *
2172
+ * @beta
2173
+ */
2174
+ export declare interface LiveGenerationConfig {
2175
+ /**
2176
+ * Configuration for speech synthesis.
2177
+ */
2178
+ speechConfig?: SpeechConfig;
2179
+ /**
2180
+ * Specifies the maximum number of tokens that can be generated in the response. The number of
2181
+ * tokens per word varies depending on the language outputted. Is unbounded by default.
2182
+ */
2183
+ maxOutputTokens?: number;
2184
+ /**
2185
+ * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
2186
+ * probability tokens are always selected. In this case, responses for a given prompt are mostly
2187
+ * deterministic, but a small amount of variation is still possible.
2188
+ */
2189
+ temperature?: number;
2190
+ /**
2191
+ * Changes how the model selects tokens for output. Tokens are
2192
+ * selected from the most to least probable until the sum of their probabilities equals the `topP`
2193
+ * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
2194
+ * and the `topP` value is 0.5, then the model will select either A or B as the next token by using
2195
+ * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset.
2196
+ */
2197
+ topP?: number;
2198
+ /**
2199
+ * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is
2200
+ * the most probable among all tokens in the model's vocabulary, while a `topK` value 3 means that
2201
+ * the next token is selected from among the 3 most probable tokens, using sampling. Tokens
2202
+ * are then further filtered with the highest selected `temperature` sampling. Defaults to 40
2203
+ * if unspecified.
2204
+ */
2205
+ topK?: number;
2206
+ /**
2207
+ * Positive penalties.
2208
+ */
2209
+ presencePenalty?: number;
2210
+ /**
2211
+ * Frequency penalties.
2212
+ */
2213
+ frequencyPenalty?: number;
2214
+ /**
2215
+ * The modalities of the response.
2216
+ */
2217
+ responseModalities?: ResponseModality[];
2218
+ /**
2219
+ * Enables transcription of audio input.
2220
+ *
2221
+ * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscriptions` property
2222
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
2223
+ * messages, so you may only receive small amounts of text per message. For example, if you ask the model
2224
+ * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
2225
+ */
2226
+ inputAudioTranscription?: AudioTranscriptionConfig;
2227
+ /**
2228
+ * Enables transcription of audio output.
2229
+ *
2230
+ * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
2231
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
2232
+ * messages, so you may only receive small amounts of text per message. For example, if the model says
2233
+ * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
2234
+ */
2235
+ outputAudioTranscription?: AudioTranscriptionConfig;
2236
+ }
2237
+
2238
+ /**
2239
+ * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
2240
+ * interactions with Gemini.
2241
+ *
2242
+ * This class should only be instantiated with {@link getLiveGenerativeModel}.
2243
+ *
2244
+ * @beta
2245
+ */
2246
+ export declare class LiveGenerativeModel extends AIModel {
2247
+ /**
2248
+ * @internal
2249
+ */
2250
+ private _webSocketHandler;
2251
+ generationConfig: LiveGenerationConfig;
2252
+ tools?: Tool[];
2253
+ toolConfig?: ToolConfig;
2254
+ systemInstruction?: Content;
2255
+ /**
2256
+ * @internal
2257
+ */
2258
+ constructor(ai: AI, modelParams: LiveModelParams,
2259
+ /**
2260
+ * @internal
2261
+ */
2262
+ _webSocketHandler: WebSocketHandler);
2263
+ /**
2264
+ * Starts a {@link LiveSession}.
2265
+ *
2266
+ * @returns A {@link LiveSession}.
2267
+ * @throws If the connection failed to be established with the server.
2268
+ *
2269
+ * @beta
2270
+ */
2271
+ connect(): Promise<LiveSession>;
2272
+ }
2273
+
2274
+ /**
2275
+ * Params passed to {@link getLiveGenerativeModel}.
2276
+ * @beta
2277
+ */
2278
+ export declare interface LiveModelParams {
2279
+ model: string;
2280
+ generationConfig?: LiveGenerationConfig;
2281
+ tools?: Tool[];
2282
+ toolConfig?: ToolConfig;
2283
+ systemInstruction?: string | Part | Content;
2284
+ }
2285
+
2286
+ /**
2287
+ * The types of responses that can be returned by {@link LiveSession.receive}.
2288
+ *
2289
+ * @beta
2290
+ */
2291
+ export declare const LiveResponseType: {
2292
+ SERVER_CONTENT: string;
2293
+ TOOL_CALL: string;
2294
+ TOOL_CALL_CANCELLATION: string;
2295
+ GOING_AWAY_NOTICE: string;
2296
+ };
2297
+
2298
+ /**
2299
+ * The types of responses that can be returned by {@link LiveSession.receive}.
2300
+ * This is a property on all messages that can be used for type narrowing. This property is not
2301
+ * returned by the server, it is assigned to a server message object once it's parsed.
2302
+ *
2303
+ * @beta
2304
+ */
2305
+ export declare type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];
2306
+
2307
+ /**
2308
+ * An incremental content update from the model.
2309
+ *
2310
+ * @beta
2311
+ */
2312
+ export declare interface LiveServerContent {
2313
+ type: 'serverContent';
2314
+ /**
2315
+ * The content that the model has generated as part of the current conversation with the user.
2316
+ */
2317
+ modelTurn?: Content;
2318
+ /**
2319
+ * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
2320
+ */
2321
+ turnComplete?: boolean;
2322
+ /**
2323
+ * Indicates whether the model was interrupted by the client. An interruption occurs when
2324
+ * the client sends a message before the model finishes its turn. This is `undefined` if the
2325
+ * model was not interrupted.
2326
+ */
2327
+ interrupted?: boolean;
2328
+ /**
2329
+ * Transcription of the audio that was input to the model.
2330
+ */
2331
+ inputTranscription?: Transcription;
2332
+ /**
2333
+ * Transcription of the audio output from the model.
2334
+ */
2335
+ outputTranscription?: Transcription;
2336
+ }
2337
+
2338
+ /**
2339
+ * Notification that the server will not be able to service the client soon.
2340
+ *
2341
+ * @beta
2342
+ */
2343
+ export declare interface LiveServerGoingAwayNotice {
2344
+ type: 'goingAwayNotice';
2345
+ /**
2346
+ * The remaining time (in seconds) before the connection will be terminated.
2347
+ */
2348
+ timeLeft: number;
2349
+ }
2350
+
2351
+ /**
2352
+ * A request from the model for the client to execute one or more functions.
2353
+ *
2354
+ * @beta
2355
+ */
2356
+ export declare interface LiveServerToolCall {
2357
+ type: 'toolCall';
2358
+ /**
2359
+ * An array of function calls to run.
2360
+ */
2361
+ functionCalls: FunctionCall[];
2362
+ }
2363
+
2364
+ /**
2365
+ * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.
2366
+ *
2367
+ * @beta
2368
+ */
2369
+ export declare interface LiveServerToolCallCancellation {
2370
+ type: 'toolCallCancellation';
2371
+ /**
2372
+ * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.
2373
+ */
2374
+ functionIds: string[];
2375
+ }
2376
+
2377
+ /**
2378
+ * Represents an active, real-time, bidirectional conversation with the model.
2379
+ *
2380
+ * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
2381
+ *
2382
+ * @beta
2383
+ */
2384
+ export declare class LiveSession {
2385
+ private webSocketHandler;
2386
+ private serverMessages;
2387
+ /**
2388
+ * Indicates whether this Live session is closed.
2389
+ *
2390
+ * @beta
2391
+ */
2392
+ isClosed: boolean;
2393
+ /**
2394
+ * Indicates whether this Live session is being controlled by an `AudioConversationController`.
2395
+ *
2396
+ * @beta
2397
+ */
2398
+ inConversation: boolean;
2399
+ /**
2400
+ * @internal
2401
+ */
2402
+ constructor(webSocketHandler: WebSocketHandler, serverMessages: AsyncGenerator<unknown>);
2403
+ /**
2404
+ * Sends content to the server.
2405
+ *
2406
+ * @param request - The message to send to the model.
2407
+ * @param turnComplete - Indicates if the turn is complete. Defaults to false.
2408
+ * @throws If this session has been closed.
2409
+ *
2410
+ * @beta
2411
+ */
2412
+ send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>;
2413
+ /**
2414
+ * Sends text to the server in realtime.
2415
+ *
2416
+ * @example
2417
+ * ```javascript
2418
+ * liveSession.sendTextRealtime("Hello, how are you?");
2419
+ * ```
2420
+ *
2421
+ * @param text - The text data to send.
2422
+ * @throws If this session has been closed.
2423
+ *
2424
+ * @beta
2425
+ */
2426
+ sendTextRealtime(text: string): Promise<void>;
2427
+ /**
2428
+ * Sends audio data to the server in realtime.
2429
+ *
2430
+ * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
2431
+ * little-endian.
2432
+ *
2433
+ * @example
2434
+ * ```javascript
2435
+ * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
2436
+ * const blob = { mimeType: "audio/pcm", data: pcmData };
2437
+ * liveSession.sendAudioRealtime(blob);
2438
+ * ```
2439
+ *
2440
+ * @param blob - The base64-encoded PCM data to send to the server in realtime.
2441
+ * @throws If this session has been closed.
2442
+ *
2443
+ * @beta
2444
+ */
2445
+ sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>;
2446
+ /**
2447
+ * Sends video data to the server in realtime.
2448
+ *
2449
+ * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It
2450
+ * is recommended to set `mimeType` to `image/jpeg`.
2451
+ *
2452
+ * @example
2453
+ * ```javascript
2454
+ * // const videoFrame = ... base64-encoded JPEG data
2455
+ * const blob = { mimeType: "image/jpeg", data: videoFrame };
2456
+ * liveSession.sendVideoRealtime(blob);
2457
+ * ```
2458
+ * @param blob - The base64-encoded video data to send to the server in realtime.
2459
+ * @throws If this session has been closed.
2460
+ *
2461
+ * @beta
2462
+ */
2463
+ sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>;
2464
+ /**
2465
+ * Sends function responses to the server.
2466
+ *
2467
+ * @param functionResponses - The function responses to send.
2468
+ * @throws If this session has been closed.
2469
+ *
2470
+ * @beta
2471
+ */
2472
+ sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>;
2473
+ /**
2474
+ * Yields messages received from the server.
2475
+ * This can only be used by one consumer at a time.
2476
+ *
2477
+ * @returns An `AsyncGenerator` that yields server messages as they arrive.
2478
+ * @throws If the session is already closed, or if we receive a response that we don't support.
2479
+ *
2480
+ * @beta
2481
+ */
2482
+ receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation | LiveServerGoingAwayNotice>;
2483
+ /**
2484
+ * Closes this session.
2485
+ * All methods on this session will throw an error once this resolves.
2486
+ *
2487
+ * @beta
2488
+ */
2489
+ close(): Promise<void>;
2490
+ /**
2491
+ * Sends realtime input to the server.
2492
+ *
2493
+ * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
2494
+ *
2495
+ * @param mediaChunks - The media chunks to send.
2496
+ * @throws If this session has been closed.
2497
+ *
2498
+ * @beta
2499
+ */
2500
+ sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>;
2501
+ /**
2502
+ * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
2503
+ *
2504
+ * Sends a stream of {@link GenerativeContentBlob}.
2505
+ *
2506
+ * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
2507
+ * @throws If this session has been closed.
2508
+ *
2509
+ * @beta
2510
+ */
2511
+ sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>;
2512
+ }
2513
+
2514
+ /**
2515
+ * Content part modality.
2516
+ * @public
2517
+ */
2518
+ export declare const Modality: {
2519
+ /**
2520
+ * Unspecified modality.
2521
+ */
2522
+ readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED";
2523
+ /**
2524
+ * Plain text.
2525
+ */
2526
+ readonly TEXT: "TEXT";
2527
+ /**
2528
+ * Image.
2529
+ */
2530
+ readonly IMAGE: "IMAGE";
2531
+ /**
2532
+ * Video.
2533
+ */
2534
+ readonly VIDEO: "VIDEO";
2535
+ /**
2536
+ * Audio.
2537
+ */
2538
+ readonly AUDIO: "AUDIO";
2539
+ /**
2540
+ * Document (for example, PDF).
2541
+ */
2542
+ readonly DOCUMENT: "DOCUMENT";
2543
+ };
2544
+
2545
+ /**
2546
+ * Content part modality.
2547
+ * @public
2548
+ */
2549
+ export declare type Modality = (typeof Modality)[keyof typeof Modality];
2550
+
2551
+ /**
2552
+ * Represents token counting info for a single modality.
2553
+ *
2554
+ * @public
2555
+ */
2556
+ export declare interface ModalityTokenCount {
2557
+ /** The modality associated with this token count. */
2558
+ modality: Modality;
2559
+ /** The number of tokens counted. */
2560
+ tokenCount: number;
2561
+ }
2562
+
2563
+ /**
2564
+ * Params passed to {@link getGenerativeModel}.
2565
+ * @public
2566
+ */
2567
+ export declare interface ModelParams extends BaseParams {
2568
+ model: string;
2569
+ tools?: Tool[];
2570
+ toolConfig?: ToolConfig;
2571
+ systemInstruction?: string | Part | Content;
2572
+ }
2573
+
2574
+ /**
2575
+ * Schema class for "number" types.
2576
+ * @public
2577
+ */
2578
+ export declare class NumberSchema extends Schema {
2579
+ constructor(schemaParams?: SchemaParams);
2580
+ }
2581
+
2582
+ /**
2583
+ * Schema class for "object" types.
2584
+ * The `properties` param must be a map of `Schema` objects.
2585
+ * @public
2586
+ */
2587
+ export declare class ObjectSchema extends Schema {
2588
+ properties: {
2589
+ [k: string]: TypedSchema;
2590
+ };
2591
+ optionalProperties: string[];
2592
+ constructor(schemaParams: SchemaParams, properties: {
2593
+ [k: string]: TypedSchema;
2594
+ }, optionalProperties?: string[]);
2595
+ /**
2596
+ * @internal
2597
+ */
2598
+ toJSON(): SchemaRequest;
2599
+ }
2600
+
2601
+ /**
2602
+ * Interface for JSON parameters in a schema of {@link (SchemaType:type)}
2603
+ * "object" when not using the `Schema.object()` helper.
2604
+ * @public
2605
+ */
2606
+ export declare interface ObjectSchemaRequest extends SchemaRequest {
2607
+ type: 'object';
2608
+ /**
2609
+ * This is not a property accepted in the final request to the backend, but is
2610
+ * a client-side convenience property that is only usable by constructing
2611
+ * a schema through the `Schema.object()` helper method. Populating this
2612
+ * property will cause response errors if the object is not wrapped with
2613
+ * `Schema.object()`.
2614
+ */
2615
+ optionalProperties?: never;
2616
+ }
2617
+
2618
+ /**
2619
+ * Encapsulates configuration for on-device inference.
2620
+ *
2621
+ * @beta
2622
+ */
2623
+ export declare interface OnDeviceParams {
2624
+ createOptions?: LanguageModelCreateOptions;
2625
+ promptOptions?: LanguageModelPromptOptions;
2626
+ }
2627
+
2628
+ /**
2629
+ * Represents the result of the code execution.
2630
+ *
2631
+ * @public
2632
+ */
2633
+ export declare const Outcome: {
2634
+ UNSPECIFIED: string;
2635
+ OK: string;
2636
+ FAILED: string;
2637
+ DEADLINE_EXCEEDED: string;
2638
+ };
2639
+
2640
+ /**
2641
+ * Represents the result of the code execution.
2642
+ *
2643
+ * @public
2644
+ */
2645
+ export declare type Outcome = (typeof Outcome)[keyof typeof Outcome];
2646
+
2647
+ /**
2648
+ * Content part - includes text, image/video, or function call/response
2649
+ * part types.
2650
+ * @public
2651
+ */
2652
+ export declare type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart;
2653
+
2654
+ /**
2655
+ * Possible roles.
2656
+ * @public
2657
+ */
2658
+ export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"];
2659
+
2660
+ /**
2661
+ * Configuration for a pre-built voice.
2662
+ *
2663
+ * @beta
2664
+ */
2665
+ export declare interface PrebuiltVoiceConfig {
2666
+ /**
2667
+ * The voice name to use for speech synthesis.
2668
+ *
2669
+ * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}.
2670
+ */
2671
+ voiceName?: string;
2672
+ }
2673
+
2674
+ /**
2675
+ * If the prompt was blocked, this will be populated with `blockReason` and
2676
+ * the relevant `safetyRatings`.
2677
+ * @public
2678
+ */
2679
+ export declare interface PromptFeedback {
2680
+ blockReason?: BlockReason;
2681
+ safetyRatings: SafetyRating[];
2682
+ /**
2683
+ * A human-readable description of the `blockReason`.
2684
+ *
2685
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
2686
+ */
2687
+ blockReasonMessage?: string;
2688
+ }
2689
+
2690
+ /**
2691
+ * Params passed to {@link getGenerativeModel}.
2692
+ * @public
2693
+ */
2694
+ export declare interface RequestOptions {
2695
+ /**
2696
+ * Request timeout in milliseconds. Defaults to 180 seconds (180000ms).
2697
+ */
2698
+ timeout?: number;
2699
+ /**
2700
+ * Base url for endpoint. Defaults to
2701
+ * https://firebasevertexai.googleapis.com, which is the
2702
+ * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API}
2703
+ * (used regardless of your chosen Gemini API provider).
2704
+ */
2705
+ baseUrl?: string;
2706
+ /**
2707
+ * Limits amount of sequential function calls the SDK can make during automatic
2708
+ * function calling, in order to prevent infinite loops. If not specified,
2709
+ * this value defaults to 10.
2710
+ *
2711
+ * When it reaches this limit, it will return the last response received
2712
+ * from the model, whether it is a text response or further function calls.
2713
+ */
2714
+ maxSequentalFunctionCalls?: number;
2715
+ }
2716
+
2717
+ /**
2718
+ * Generation modalities to be returned in generation responses.
2719
+ *
2720
+ * @beta
2721
+ */
2722
+ export declare const ResponseModality: {
2723
+ /**
2724
+ * Text.
2725
+ * @beta
2726
+ */
2727
+ readonly TEXT: "TEXT";
2728
+ /**
2729
+ * Image.
2730
+ * @beta
2731
+ */
2732
+ readonly IMAGE: "IMAGE";
2733
+ /**
2734
+ * Audio.
2735
+ * @beta
2736
+ */
2737
+ readonly AUDIO: "AUDIO";
2738
+ };
2739
+
2740
+ /**
2741
+ * Generation modalities to be returned in generation responses.
2742
+ *
2743
+ * @beta
2744
+ */
2745
+ export declare type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];
2746
+
2747
+ /**
2748
+ * @public
2749
+ */
2750
+ export declare interface RetrievedContextAttribution {
2751
+ uri: string;
2752
+ title: string;
2753
+ }
2754
+
2755
+ /**
2756
+ * @license
2757
+ * Copyright 2024 Google LLC
2758
+ *
2759
+ * Licensed under the Apache License, Version 2.0 (the "License");
2760
+ * you may not use this file except in compliance with the License.
2761
+ * You may obtain a copy of the License at
2762
+ *
2763
+ * http://www.apache.org/licenses/LICENSE-2.0
2764
+ *
2765
+ * Unless required by applicable law or agreed to in writing, software
2766
+ * distributed under the License is distributed on an "AS IS" BASIS,
2767
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2768
+ * See the License for the specific language governing permissions and
2769
+ * limitations under the License.
2770
+ */
2771
+ /**
2772
+ * Role is the producer of the content.
2773
+ * @public
2774
+ */
2775
+ export declare type Role = (typeof POSSIBLE_ROLES)[number];
2776
+
2777
+ /**
2778
+ * A safety rating associated with a {@link GenerateContentCandidate}
2779
+ * @public
2780
+ */
2781
+ export declare interface SafetyRating {
2782
+ category: HarmCategory;
2783
+ probability: HarmProbability;
2784
+ /**
2785
+ * The harm severity level.
2786
+ *
2787
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
2788
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.
2789
+ */
2790
+ severity: HarmSeverity;
2791
+ /**
2792
+ * The probability score of the harm category.
2793
+ *
2794
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
2795
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
2796
+ */
2797
+ probabilityScore: number;
2798
+ /**
2799
+ * The severity score of the harm category.
2800
+ *
2801
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
2802
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
2803
+ */
2804
+ severityScore: number;
2805
+ blocked: boolean;
2806
+ }
2807
+
2808
+ /**
2809
+ * Safety setting that can be sent as part of request parameters.
2810
+ * @public
2811
+ */
2812
+ export declare interface SafetySetting {
2813
+ category: HarmCategory;
2814
+ threshold: HarmBlockThreshold;
2815
+ /**
2816
+ * The harm block method.
2817
+ *
2818
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
2819
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be
2820
+ * thrown if this property is defined.
2821
+ */
2822
+ method?: HarmBlockMethod;
2823
+ }
2824
+
2825
+ /**
2826
+ * Parent class encompassing all Schema types, with static methods that
2827
+ * allow building specific Schema types. This class can be converted with
2828
+ * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.
2829
+ * (This string conversion is automatically done when calling SDK methods.)
2830
+ * @public
2831
+ */
2832
+ export declare abstract class Schema implements SchemaInterface {
2833
+ /**
2834
+ * Optional. The type of the property.
2835
+ * This can only be undefined when using `anyOf` schemas, which do not have an
2836
+ * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.
2837
+ */
2838
+ type?: SchemaType;
2839
+ /** Optional. The format of the property.
2840
+ * Supported formats:<br/>
2841
+ * <ul>
2842
+ * <li>for NUMBER type: "float", "double"</li>
2843
+ * <li>for INTEGER type: "int32", "int64"</li>
2844
+ * <li>for STRING type: "email", "byte", etc</li>
2845
+ * </ul>
2846
+ */
2847
+ format?: string;
2848
+ /** Optional. The description of the property. */
2849
+ description?: string;
2850
+ /** Optional. The items of the property. */
2851
+ items?: SchemaInterface;
2852
+ /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
2853
+ minItems?: number;
2854
+ /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
2855
+ maxItems?: number;
2856
+ /** Optional. Whether the property is nullable. Defaults to false. */
2857
+ nullable: boolean;
2858
+ /** Optional. The example of the property. */
2859
+ example?: unknown;
2860
+ /**
2861
+ * Allows user to add other schema properties that have not yet
2862
+ * been officially added to the SDK.
2863
+ */
2864
+ [key: string]: unknown;
2865
+ constructor(schemaParams: SchemaInterface);
2866
+ /**
2867
+ * Defines how this Schema should be serialized as JSON.
2868
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior
2869
+ * @internal
2870
+ */
2871
+ toJSON(): SchemaRequest;
2872
+ static array(arrayParams: SchemaParams & {
2873
+ items: Schema;
2874
+ }): ArraySchema;
2875
+ static object(objectParams: SchemaParams & {
2876
+ properties: {
2877
+ [k: string]: Schema;
2878
+ };
2879
+ optionalProperties?: string[];
2880
+ }): ObjectSchema;
2881
+ static string(stringParams?: SchemaParams): StringSchema;
2882
+ static enumString(stringParams: SchemaParams & {
2883
+ enum: string[];
2884
+ }): StringSchema;
2885
+ static integer(integerParams?: SchemaParams): IntegerSchema;
2886
+ static number(numberParams?: SchemaParams): NumberSchema;
2887
+ static boolean(booleanParams?: SchemaParams): BooleanSchema;
2888
+ static anyOf(anyOfParams: SchemaParams & {
2889
+ anyOf: TypedSchema[];
2890
+ }): AnyOfSchema;
2891
+ }
2892
+
2893
+ /**
2894
+ * Interface for {@link Schema} class.
2895
+ * @public
2896
+ */
2897
+ export declare interface SchemaInterface extends SchemaShared<SchemaInterface> {
2898
+ /**
2899
+ * The type of the property. This can only be undefined when using `anyOf` schemas,
2900
+ * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.
2901
+ */
2902
+ type?: SchemaType;
2903
+ }
2904
+
2905
+ /**
2906
+ * Params passed to {@link Schema} static methods to create specific
2907
+ * {@link Schema} classes.
2908
+ * @public
2909
+ */
2910
+ export declare interface SchemaParams extends SchemaShared<SchemaInterface> {
2911
+ }
2912
+
2913
+ /**
2914
+ * Final format for {@link Schema} params passed to backend requests.
2915
+ * @public
2916
+ */
2917
+ export declare interface SchemaRequest extends SchemaShared<SchemaRequest> {
2918
+ /**
2919
+ * The type of the property. This can only be undefined when using `anyOf` schemas,
 2920
+ * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.
2921
+ */
2922
+ type?: SchemaType;
2923
+ /** Optional. Array of required properties. */
2924
+ required?: string[];
2925
+ }
2926
+
2927
+ /**
2928
+ * Basic {@link Schema} properties shared across several Schema-related
2929
+ * types.
2930
+ * @public
2931
+ */
2932
+ export declare interface SchemaShared<T> {
2933
+ /**
2934
+ * An array of {@link Schema}. The generated data must be valid against any of the schemas
2935
+ * listed in this array. This allows specifying multiple possible structures or types for a
2936
+ * single field.
2937
+ */
2938
+ anyOf?: T[];
2939
+ /** Optional. The format of the property.
2940
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or
2941
+ * `'date-time'`, otherwise requests will fail.
2942
+ */
2943
+ format?: string;
2944
+ /** Optional. The description of the property. */
2945
+ description?: string;
2946
+ /**
2947
+ * The title of the property. This helps document the schema's purpose but does not typically
2948
+ * constrain the generated value. It can subtly guide the model by clarifying the intent of a
2949
+ * field.
2950
+ */
2951
+ title?: string;
2952
+ /** Optional. The items of the property. */
2953
+ items?: T;
2954
+ /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
2955
+ minItems?: number;
2956
+ /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
2957
+ maxItems?: number;
2958
+ /** Optional. Map of `Schema` objects. */
2959
+ properties?: {
2960
+ [k: string]: T;
2961
+ };
2962
+ /** A hint suggesting the order in which the keys should appear in the generated JSON string. */
2963
+ propertyOrdering?: string[];
2964
+ /** Optional. The enum of the property. */
2965
+ enum?: string[];
2966
+ /** Optional. The example of the property. */
2967
+ example?: unknown;
2968
+ /** Optional. Whether the property is nullable. */
2969
+ nullable?: boolean;
2970
+ /** The minimum value of a numeric type. */
2971
+ minimum?: number;
2972
+ /** The maximum value of a numeric type. */
2973
+ maximum?: number;
2974
+ [key: string]: unknown;
2975
+ }
2976
+
2977
+ /**
2978
+ * Contains the list of OpenAPI data types
2979
+ * as defined by the
2980
+ * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
2981
+ * @public
2982
+ */
2983
+ export declare const SchemaType: {
2984
+ /** String type. */
2985
+ readonly STRING: "string";
2986
+ /** Number type. */
2987
+ readonly NUMBER: "number";
2988
+ /** Integer type. */
2989
+ readonly INTEGER: "integer";
2990
+ /** Boolean type. */
2991
+ readonly BOOLEAN: "boolean";
2992
+ /** Array type. */
2993
+ readonly ARRAY: "array";
2994
+ /** Object type. */
2995
+ readonly OBJECT: "object";
2996
+ };
2997
+
2998
+ /**
2999
+ * Contains the list of OpenAPI data types
3000
+ * as defined by the
3001
+ * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
3002
+ * @public
3003
+ */
3004
+ export declare type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];
3005
+
3006
+ /**
3007
+ * Google search entry point.
3008
+ *
3009
+ * @public
3010
+ */
3011
+ export declare interface SearchEntrypoint {
3012
+ /**
3013
+ * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid
3014
+ * undesired interaction with the rest of the page's CSS.
3015
+ *
3016
+ * To ensure proper rendering and prevent CSS conflicts, it is recommended
3017
+ * to encapsulate this `renderedContent` within a shadow DOM when embedding it
3018
+ * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.
3019
+ *
3020
+ * @example
3021
+ * ```javascript
3022
+ * const container = document.createElement('div');
3023
+ * document.body.appendChild(container);
3024
+ * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;
3025
+ * ```
3026
+ */
3027
+ renderedContent?: string;
3028
+ }
3029
+
3030
+ /**
3031
+ * Represents a specific segment within a {@link Content} object, often used to
3032
+ * pinpoint the exact location of text or data that grounding information refers to.
3033
+ *
3034
+ * @public
3035
+ */
3036
+ export declare interface Segment {
3037
+ /**
3038
+ * The zero-based index of the {@link Part} object within the `parts` array
3039
+ * of its parent {@link Content} object. This identifies which part of the
3040
+ * content the segment belongs to.
3041
+ */
3042
+ partIndex: number;
3043
+ /**
3044
+ * The zero-based start index of the segment within the specified `Part`,
3045
+ * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the
3046
+ * beginning of the part's content (e.g., `Part.text`).
3047
+ */
3048
+ startIndex: number;
3049
+ /**
3050
+ * The zero-based end index of the segment within the specified `Part`,
3051
+ * measured in UTF-8 bytes. This offset is exclusive, meaning the character
3052
+ * at this index is not included in the segment.
3053
+ */
3054
+ endIndex: number;
3055
+ /**
3056
+ * The text corresponding to the segment from the response.
3057
+ */
3058
+ text: string;
3059
+ }
3060
+
3061
+ /**
3062
+ * Options that can be provided per-request.
3063
+ * Extends the base {@link RequestOptions} (like `timeout` and `baseUrl`)
3064
+ * with request-specific controls like cancellation via `AbortSignal`.
3065
+ *
3066
+ * Options specified here will override any default {@link RequestOptions}
3067
+ * configured on a model (for example, {@link GenerativeModel}).
3068
+ *
3069
+ * @public
3070
+ */
3071
+ export declare interface SingleRequestOptions extends RequestOptions {
3072
+ /**
3073
+ * An `AbortSignal` instance that allows cancelling ongoing requests (like `generateContent` or
3074
+ * `generateImages`).
3075
+ *
3076
+ * If provided, calling `abort()` on the corresponding `AbortController`
3077
+ * will attempt to cancel the underlying HTTP request. An `AbortError` will be thrown
3078
+ * if cancellation is successful.
3079
+ *
3080
+ * Note that this will not cancel the request in the backend, so any applicable billing charges
3081
+ * will still be applied despite cancellation.
3082
+ *
3083
+ * @example
3084
+ * ```javascript
3085
+ * const controller = new AbortController();
3086
+ * const model = getGenerativeModel({
3087
+ * // ...
3088
+ * });
3089
+ * model.generateContent(
3090
+ * "Write a story about a magic backpack.",
3091
+ * { signal: controller.signal }
3092
+ * );
3093
+ *
3094
+ * // To cancel request:
3095
+ * controller.abort();
3096
+ * ```
3097
+ * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
3098
+ */
3099
+ signal?: AbortSignal;
3100
+ }
3101
+
3102
+ /**
3103
+ * Configures speech synthesis.
3104
+ *
3105
+ * @beta
3106
+ */
3107
+ export declare interface SpeechConfig {
3108
+ /**
3109
+ * Configures the voice to be used in speech synthesis.
3110
+ */
3111
+ voiceConfig?: VoiceConfig;
3112
+ }
3113
+
3114
+ /**
3115
+ * Starts a real-time, bidirectional audio conversation with the model. This helper function manages
3116
+ * the complexities of microphone access, audio recording, playback, and interruptions.
3117
+ *
3118
+ * @remarks Important: This function must be called in response to a user gesture
3119
+ * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
3120
+ *
3121
+ * @example
3122
+ * ```javascript
3123
+ * const liveSession = await model.connect();
3124
+ * let conversationController;
3125
+ *
3126
+ * // This function must be called from within a click handler.
3127
+ * async function startConversation() {
3128
+ * try {
3129
+ * conversationController = await startAudioConversation(liveSession);
3130
+ * } catch (e) {
3131
+ * // Handle AI-specific errors
3132
+ * if (e instanceof AIError) {
3133
+ * console.error("AI Error:", e.message);
3134
+ * }
3135
+ * // Handle microphone permission and hardware errors
3136
+ * else if (e instanceof DOMException) {
3137
+ * console.error("Microphone Error:", e.message);
3138
+ * }
3139
+ * // Handle other unexpected errors
3140
+ * else {
3141
+ * console.error("An unexpected error occurred:", e);
3142
+ * }
3143
+ * }
3144
+ * }
3145
+ *
3146
+ * // Later, to stop the conversation:
3147
+ * // if (conversationController) {
3148
+ * // await conversationController.stop();
3149
+ * // }
3150
+ * ```
3151
+ *
3152
+ * @param liveSession - An active {@link LiveSession} instance.
3153
+ * @param options - Configuration options for the audio conversation.
3154
+ * @returns A `Promise` that resolves with an {@link AudioConversationController}.
3155
+ * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
3156
+ * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
3157
+ *
3158
+ * @beta
3159
+ */
3160
+ export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>;
3161
+
3162
+ /**
3163
+ * Options for {@link startAudioConversation}.
3164
+ *
3165
+ * @beta
3166
+ */
3167
+ export declare interface StartAudioConversationOptions {
3168
+ /**
3169
+ * An async handler that is called when the model requests a function to be executed.
3170
+ * The handler should perform the function call and return the result as a `FunctionResponse`,
3171
+ * which will then be sent back to the model.
3172
+ */
3173
+ functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>;
3174
+ }
3175
+
3176
+ /**
3177
+ * Params for {@link GenerativeModel.startChat}.
3178
+ * @public
3179
+ */
3180
+ export declare interface StartChatParams extends BaseParams {
3181
+ history?: Content[];
3182
+ tools?: Tool[];
3183
+ toolConfig?: ToolConfig;
3184
+ systemInstruction?: string | Part | Content;
3185
+ }
3186
+
3187
+ /**
3188
+ * Schema class for "string" types. Can be used with or without
3189
+ * enum values.
3190
+ * @public
3191
+ */
3192
+ export declare class StringSchema extends Schema {
3193
+ enum?: string[];
3194
+ constructor(schemaParams?: SchemaParams, enumValues?: string[]);
3195
+ /**
3196
+ * @internal
3197
+ */
3198
+ toJSON(): SchemaRequest;
3199
+ }
3200
+
3201
+ /**
3202
+ * {@link GenerativeModel} APIs that execute on a server-side template.
3203
+ *
3204
+ * This class should only be instantiated with {@link getTemplateGenerativeModel}.
3205
+ *
3206
+ * @beta
3207
+ */
3208
+ export declare class TemplateGenerativeModel {
3209
+ /**
3210
+ * @internal
3211
+ */
3212
+ _apiSettings: ApiSettings;
3213
+ /**
3214
+ * Additional options to use when making requests.
3215
+ */
3216
+ requestOptions?: RequestOptions;
3217
+ /**
3218
+ * @hideconstructor
3219
+ */
3220
+ constructor(ai: AI, requestOptions?: RequestOptions);
3221
+ /**
3222
+ * Makes a single non-streaming call to the model and returns an object
3223
+ * containing a single {@link GenerateContentResponse}.
3224
+ *
3225
+ * @param templateId - The ID of the server-side template to execute.
3226
+ * @param templateVariables - A key-value map of variables to populate the
3227
+ * template with.
3228
+ *
3229
+ * @beta
3230
+ */
3231
+ generateContent(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
3232
+ /**
3233
+ * Makes a single streaming call to the model and returns an object
3234
+ * containing an iterable stream that iterates over all chunks in the
3235
+ * streaming response as well as a promise that returns the final aggregated
3236
+ * response.
3237
+ *
3238
+ * @param templateId - The ID of the server-side template to execute.
3239
+ * @param templateVariables - A key-value map of variables to populate the
3240
+ * template with.
3241
+ *
3242
+ * @beta
3243
+ */
3244
+ generateContentStream(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
3245
+ }
3246
+
3247
+ /**
3248
+ * Class for Imagen model APIs that execute on a server-side template.
3249
+ *
3250
+ * This class should only be instantiated with {@link getTemplateImagenModel}.
3251
+ *
3252
+ * @beta
3253
+ */
3254
+ export declare class TemplateImagenModel {
3255
+ /**
3256
+ * @internal
3257
+ */
3258
+ _apiSettings: ApiSettings;
3259
+ /**
3260
+ * Additional options to use when making requests.
3261
+ */
3262
+ requestOptions?: RequestOptions;
3263
+ /**
3264
+ * @hideconstructor
3265
+ */
3266
+ constructor(ai: AI, requestOptions?: RequestOptions);
3267
+ /**
3268
+ * Makes a single call to the model and returns an object containing a single
3269
+ * {@link ImagenGenerationResponse}.
3270
+ *
3271
+ * @param templateId - The ID of the server-side template to execute.
3272
+ * @param templateVariables - A key-value map of variables to populate the
3273
+ * template with.
3274
+ *
3275
+ * @beta
3276
+ */
3277
+ generateImages(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
3278
+ }
3279
+
3280
+ /**
3281
+ * Content part interface if the part represents a text string.
3282
+ * @public
3283
+ */
3284
+ export declare interface TextPart {
3285
+ text: string;
3286
+ inlineData?: never;
3287
+ functionCall?: never;
3288
+ functionResponse?: never;
3289
+ thought?: boolean;
3290
+ /**
3291
+ * @internal
3292
+ */
3293
+ thoughtSignature?: string;
3294
+ executableCode?: never;
3295
+ codeExecutionResult?: never;
3296
+ }
3297
+
3298
+ /**
3299
+ * Configuration for "thinking" behavior of compatible Gemini models.
3300
+ *
3301
+ * Certain models utilize a thinking process before generating a response. This allows them to
3302
+ * reason through complex problems and plan a more coherent and accurate answer.
3303
+ *
3304
+ * @public
3305
+ */
3306
+ export declare interface ThinkingConfig {
3307
+ /**
3308
+ * The thinking budget, in tokens.
3309
+ *
3310
+ * @remarks
3311
+ * This parameter sets an upper limit on the number of tokens the model can use for its internal
3312
+ * "thinking" process. A higher budget may result in higher quality responses for complex tasks
3313
+ * but can also increase latency and cost.
3314
+ *
3315
+ * The range of supported thinking budget values depends on the model.
3316
+ *
3317
+ * <ul>
3318
+ * <li>To use the default thinking budget for a model, leave
3319
+ * this value undefined.</li>
3320
+ *
3321
+ * <li>To disable thinking, when supported by the model, set this value
3322
+ * to `0`.</li>
3323
+ *
3324
+ * <li>To use dynamic thinking, which allows the model to decide on the thinking
3325
+ * budget based on the task, set this value to `-1`.</li>
3326
+ * </ul>
3327
+ *
3328
+ * An error will be thrown if you set a thinking budget for a model that does not support this
3329
+ * feature or if the specified budget is not within the model's supported range.
3330
+ *
3331
+ * The model will also error if `thinkingLevel` and `thinkingBudget` are
3332
+ * both set.
3333
+ */
3334
+ thinkingBudget?: number;
3335
+ /**
3336
+ * If not specified, Gemini will use the model's default dynamic thinking level.
3337
+ *
3338
+ * @remarks
3339
+ * Note: The model will error if `thinkingLevel` and `thinkingBudget` are
3340
+ * both set.
3341
+ *
3342
+ * Important: Gemini 2.5 series models do not support thinking levels; use
3343
+ * `thinkingBudget` to set a thinking budget instead.
3344
+ */
3345
+ thinkingLevel?: ThinkingLevel;
3346
+ /**
3347
+ * Whether to include "thought summaries" in the model's response.
3348
+ *
3349
+ * @remarks
3350
+ * Thought summaries provide a brief overview of the model's internal thinking process,
3351
+ * offering insight into how it arrived at the final answer. This can be useful for
3352
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
3353
+ */
3354
+ includeThoughts?: boolean;
3355
+ }
3356
+
3357
+ /**
3358
+ * A preset that controls the model's "thinking" process. Use
3359
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
3360
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
3361
+ *
3362
+ * @public
3363
+ */
3364
+ export declare const ThinkingLevel: {
3365
+ MINIMAL: string;
3366
+ LOW: string;
3367
+ MEDIUM: string;
3368
+ HIGH: string;
3369
+ };
3370
+
3371
+ /**
3372
+ * A preset that controls the model's "thinking" process. Use
3373
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
3374
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
3375
+ *
3376
+ * @public
3377
+ */
3378
+ export declare type ThinkingLevel = (typeof ThinkingLevel)[keyof typeof ThinkingLevel];
3379
+
3380
+ /**
3381
+ * Defines a tool that model can call to access external knowledge.
3382
+ * @public
3383
+ */
3384
+ export declare type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool;
3385
+
3386
+ /**
3387
+ * Tool config. This config is shared for all tools provided in the request.
3388
+ * @public
3389
+ */
3390
+ export declare interface ToolConfig {
3391
+ functionCallingConfig?: FunctionCallingConfig;
3392
+ }
3393
+
3394
+ /**
3395
+ * Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription
3396
+ * is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on
3397
+ * the {@link LiveGenerationConfig}.
3398
+ *
3399
+ * @beta
3400
+ */
3401
+ export declare interface Transcription {
3402
+ /**
3403
+ * The text transcription of the audio.
3404
+ */
3405
+ text?: string;
3406
+ }
3407
+
3408
+ /**
3409
+ * A type that includes all specific Schema types.
3410
+ * @public
3411
+ */
3412
+ export declare type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema;
3413
+
3414
+ /**
3415
+ * Specifies the URL Context configuration.
3416
+ *
3417
+ * @beta
3418
+ */
3419
+ export declare interface URLContext {
3420
+ }
3421
+
3422
+ /**
3423
+ * Metadata related to {@link URLContextTool}.
3424
+ *
3425
+ * @public
3426
+ */
3427
+ export declare interface URLContextMetadata {
3428
+ /**
3429
+ * List of URL metadata used to provide context to the Gemini model.
3430
+ */
3431
+ urlMetadata: URLMetadata[];
3432
+ }
3433
+
3434
+ /**
3435
+ * A tool that allows you to provide additional context to the models in the form of public web
3436
+ * URLs. By including URLs in your request, the Gemini model will access the content from those
3437
+ * pages to inform and enhance its response.
3438
+ *
3439
+ * @beta
3440
+ */
3441
+ export declare interface URLContextTool {
3442
+ /**
3443
+ * Specifies the URL Context configuration.
3444
+ */
3445
+ urlContext: URLContext;
3446
+ }
3447
+
3448
/**
 * Metadata for a single URL retrieved by the {@link URLContextTool}.
 *
 * @public
 */
export declare interface URLMetadata {
    /**
     * The retrieved URL.
     */
    retrievedUrl?: string;
    /**
     * The status of the URL retrieval.
     */
    urlRetrievalStatus?: URLRetrievalStatus;
}
3463
+
3464
+ /**
3465
+ * The status of a URL retrieval.
3466
+ *
3467
+ * @remarks
3468
+ * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
3469
+ * <br/>
3470
+ * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
3471
+ * <br/>
3472
+ * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
3473
+ * <br/>
3474
+ * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
3475
+ * <br/>
3476
+ * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
3477
+ * <br/>
3478
+ *
3479
+ * @public
3480
+ */
3481
+ export declare const URLRetrievalStatus: {
3482
+ /**
3483
+ * Unspecified retrieval status.
3484
+ */
3485
+ URL_RETRIEVAL_STATUS_UNSPECIFIED: string;
3486
+ /**
3487
+ * The URL retrieval was successful.
3488
+ */
3489
+ URL_RETRIEVAL_STATUS_SUCCESS: string;
3490
+ /**
3491
+ * The URL retrieval failed.
3492
+ */
3493
+ URL_RETRIEVAL_STATUS_ERROR: string;
3494
+ /**
3495
+ * The URL retrieval failed because the content is behind a paywall.
3496
+ */
3497
+ URL_RETRIEVAL_STATUS_PAYWALL: string;
3498
+ /**
3499
+ * The URL retrieval failed because the content is unsafe.
3500
+ */
3501
+ URL_RETRIEVAL_STATUS_UNSAFE: string;
3502
+ };
3503
+
3504
/**
 * The status of a URL retrieval. Derived from the keys/values of the
 * `URLRetrievalStatus` constant object.
 *
 * @remarks
 * <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
 * <br/>
 * <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
 * <br/>
 *
 * @public
 */
export declare type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];
3522
+
3523
/**
 * Usage metadata about a {@link GenerateContentResponse}.
 *
 * @public
 */
export declare interface UsageMetadata {
    /**
     * The number of tokens in the request prompt.
     */
    promptTokenCount: number;
    /**
     * The number of tokens across the generated response candidates.
     */
    candidatesTokenCount: number;
    /**
     * The number of tokens used by the model's internal "thinking" process.
     */
    thoughtsTokenCount?: number;
    /**
     * The total number of tokens counted for this request.
     */
    totalTokenCount: number;
    /**
     * The number of tokens used by tools.
     */
    toolUsePromptTokenCount?: number;
    /**
     * A breakdown of the prompt tokens by modality.
     */
    promptTokensDetails?: ModalityTokenCount[];
    /**
     * A breakdown of the candidates' tokens by modality.
     */
    candidatesTokensDetails?: ModalityTokenCount[];
    /**
     * A list of tokens used by tools, broken down by modality.
     */
    toolUsePromptTokensDetails?: ModalityTokenCount[];
    /**
     * The number of tokens in the prompt that were served from the cache.
     * If implicit caching is not active or no content was cached,
     * this will be 0.
     */
    cachedContentTokenCount?: number;
    /**
     * Detailed breakdown of the cached tokens by modality (for example, text or
     * image). This list provides granular insight into which parts of
     * the content were cached.
     */
    cacheTokensDetails?: ModalityTokenCount[];
}
3559
+
3560
/**
 * Configuration class for the Vertex AI Gemini API.
 *
 * Use this with {@link AIOptions} when initializing the AI service via
 * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
 *
 * @public
 */
export declare class VertexAIBackend extends Backend {
    /**
     * The region identifier.
     * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    readonly location: string;
    /**
     * Creates a configuration object for the Vertex AI backend.
     *
     * @param location - The region identifier, defaulting to `us-central1`;
     * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    constructor(location?: string);
    /**
     * Builds the backend-specific resource path for `model` in `project`
     * (presumably incorporating this backend's `location` — see implementation).
     * @internal
     */
    _getModelPath(project: string, model: string): string;
    /**
     * Builds the backend-specific resource path for the template `templateId`
     * in `project`.
     * @internal
     */
    _getTemplatePath(project: string, templateId: string): string;
}
3592
+
3593
/**
 * Describes the input video content.
 * @public
 */
export declare interface VideoMetadata {
    /**
     * The start offset of the video in
     * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
     */
    startOffset: string;
    /**
     * The end offset of the video in
     * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
     */
    endOffset: string;
}
3609
+
3610
/**
 * Configuration for the voice to be used in speech synthesis.
 *
 * @beta
 */
export declare interface VoiceConfig {
    /**
     * Configures the voice using a pre-built voice configuration.
     */
    prebuiltVoiceConfig?: PrebuiltVoiceConfig;
}
3621
+
3622
/**
 * Attribution for a web source.
 *
 * @public
 */
export declare interface WebAttribution {
    /**
     * The URI of the attributed web source.
     */
    uri: string;
    /**
     * The title of the attributed web source.
     */
    title: string;
}
3629
+
3630
/**
 * A grounding chunk from the web.
 *
 * Important: If using Grounding with Google Search, you are required to comply with the
 * {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search".
 *
 * @public
 */
export declare interface WebGroundingChunk {
    /**
     * The URI of the retrieved web page.
     */
    uri?: string;
    /**
     * The title of the retrieved web page.
     */
    title?: string;
    /**
     * The domain of the original URI from which the content was retrieved.
     *
     * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
     * `undefined`.
     */
    domain?: string;
}
3656
+
3657
+ /**
3658
+ * @license
3659
+ * Copyright 2025 Google LLC
3660
+ *
3661
+ * Licensed under the Apache License, Version 2.0 (the "License");
3662
+ * you may not use this file except in compliance with the License.
3663
+ * You may obtain a copy of the License at
3664
+ *
3665
+ * http://www.apache.org/licenses/LICENSE-2.0
3666
+ *
3667
+ * Unless required by applicable law or agreed to in writing, software
3668
+ * distributed under the License is distributed on an "AS IS" BASIS,
3669
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3670
+ * See the License for the specific language governing permissions and
3671
+ * limitations under the License.
3672
+ */
3673
/**
 * A standardized interface for interacting with a WebSocket connection.
 * This abstraction allows the SDK to use the appropriate WebSocket implementation
 * for the current JS environment (Browser vs. Node) without
 * changing the core logic of the `LiveSession`.
 * @internal
 */
declare interface WebSocketHandler {
    /**
     * Establishes a connection to the given URL.
     *
     * @param url - The WebSocket URL (e.g., wss://...).
     * @returns A promise that resolves on successful connection or rejects on failure.
     */
    connect(url: string): Promise<void>;
    /**
     * Sends data over the WebSocket.
     *
     * @param data - The string or binary data to send.
     */
    send(data: string | ArrayBuffer): void;
    /**
     * Returns an async generator that yields parsed JSON objects from the server.
     * The yielded type is `unknown` because the handler cannot guarantee the shape of the data.
     * The consumer is responsible for type validation.
     * The generator terminates when the connection is closed.
     *
     * @returns A generator that allows consumers to pull messages using a `for await...of` loop.
     */
    listen(): AsyncGenerator<unknown>;
    /**
     * Closes the WebSocket connection.
     *
     * @param code - A numeric status code explaining why the connection is closing.
     * @param reason - A human-readable string explaining why the connection is closing.
     */
    close(code?: number, reason?: string): Promise<void>;
}
3711
+
3712
+ export { }