@firebase/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. package/README.md +5 -0
  2. package/dist/ai-public.d.ts +1972 -0
  3. package/dist/ai.d.ts +2073 -0
  4. package/dist/esm/index.esm2017.js +2624 -0
  5. package/dist/esm/index.esm2017.js.map +1 -0
  6. package/dist/esm/package.json +1 -0
  7. package/dist/esm/src/api.d.ts +120 -0
  8. package/dist/esm/src/backend.d.ts +74 -0
  9. package/dist/esm/src/constants.d.ts +23 -0
  10. package/dist/esm/src/errors.d.ts +35 -0
  11. package/dist/esm/src/googleai-mappers.d.ts +73 -0
  12. package/dist/esm/src/helpers.d.ts +30 -0
  13. package/dist/esm/src/index.d.ts +12 -0
  14. package/dist/esm/src/index.node.d.ts +7 -0
  15. package/dist/esm/src/logger.d.ts +18 -0
  16. package/dist/esm/src/methods/chat-session-helpers.d.ts +18 -0
  17. package/dist/esm/src/methods/chat-session.d.ts +50 -0
  18. package/dist/esm/src/methods/count-tokens.d.ts +19 -0
  19. package/dist/esm/src/methods/generate-content.d.ts +20 -0
  20. package/dist/esm/src/models/ai-model.d.ts +72 -0
  21. package/dist/esm/src/models/generative-model.d.ts +54 -0
  22. package/dist/esm/src/models/imagen-model.d.ts +102 -0
  23. package/dist/esm/src/models/index.d.ts +19 -0
  24. package/dist/esm/src/public-types.d.ts +106 -0
  25. package/dist/esm/src/requests/imagen-image-format.d.ts +61 -0
  26. package/dist/esm/src/requests/request-helpers.d.ts +28 -0
  27. package/dist/esm/src/requests/request.d.ts +43 -0
  28. package/dist/esm/src/requests/response-helpers.d.ts +53 -0
  29. package/dist/esm/src/requests/schema-builder.d.ts +145 -0
  30. package/dist/esm/src/requests/stream-reader.d.ts +38 -0
  31. package/dist/esm/src/service.d.ts +31 -0
  32. package/dist/esm/src/types/content.d.ts +144 -0
  33. package/dist/esm/src/types/enums.d.ts +263 -0
  34. package/dist/esm/src/types/error.d.ts +81 -0
  35. package/dist/esm/src/types/googleai.d.ts +56 -0
  36. package/dist/esm/src/types/imagen/index.d.ts +18 -0
  37. package/dist/esm/src/types/imagen/internal.d.ts +124 -0
  38. package/dist/esm/src/types/imagen/requests.d.ts +211 -0
  39. package/dist/esm/src/types/imagen/responses.d.ts +78 -0
  40. package/dist/esm/src/types/index.d.ts +24 -0
  41. package/dist/esm/src/types/internal.d.ts +33 -0
  42. package/dist/esm/src/types/requests.d.ts +213 -0
  43. package/dist/esm/src/types/responses.d.ts +257 -0
  44. package/dist/esm/src/types/schema.d.ts +102 -0
  45. package/dist/index.cjs.js +2652 -0
  46. package/dist/index.cjs.js.map +1 -0
  47. package/dist/index.node.cjs.js +2652 -0
  48. package/dist/index.node.cjs.js.map +1 -0
  49. package/dist/index.node.mjs +2624 -0
  50. package/dist/index.node.mjs.map +1 -0
  51. package/dist/src/api.d.ts +120 -0
  52. package/dist/src/backend.d.ts +74 -0
  53. package/dist/src/constants.d.ts +23 -0
  54. package/dist/src/errors.d.ts +35 -0
  55. package/dist/src/googleai-mappers.d.ts +73 -0
  56. package/dist/src/helpers.d.ts +30 -0
  57. package/dist/src/index.d.ts +12 -0
  58. package/dist/src/index.node.d.ts +7 -0
  59. package/dist/src/logger.d.ts +18 -0
  60. package/dist/src/methods/chat-session-helpers.d.ts +18 -0
  61. package/dist/src/methods/chat-session.d.ts +50 -0
  62. package/dist/src/methods/count-tokens.d.ts +19 -0
  63. package/dist/src/methods/generate-content.d.ts +20 -0
  64. package/dist/src/models/ai-model.d.ts +72 -0
  65. package/dist/src/models/generative-model.d.ts +54 -0
  66. package/dist/src/models/imagen-model.d.ts +102 -0
  67. package/dist/src/models/index.d.ts +19 -0
  68. package/dist/src/public-types.d.ts +106 -0
  69. package/dist/src/requests/imagen-image-format.d.ts +61 -0
  70. package/dist/src/requests/request-helpers.d.ts +28 -0
  71. package/dist/src/requests/request.d.ts +43 -0
  72. package/dist/src/requests/response-helpers.d.ts +53 -0
  73. package/dist/src/requests/schema-builder.d.ts +145 -0
  74. package/dist/src/requests/stream-reader.d.ts +38 -0
  75. package/dist/src/service.d.ts +31 -0
  76. package/dist/src/tsdoc-metadata.json +11 -0
  77. package/dist/src/types/content.d.ts +144 -0
  78. package/dist/src/types/enums.d.ts +263 -0
  79. package/dist/src/types/error.d.ts +81 -0
  80. package/dist/src/types/googleai.d.ts +56 -0
  81. package/dist/src/types/imagen/index.d.ts +18 -0
  82. package/dist/src/types/imagen/internal.d.ts +124 -0
  83. package/dist/src/types/imagen/requests.d.ts +211 -0
  84. package/dist/src/types/imagen/responses.d.ts +78 -0
  85. package/dist/src/types/index.d.ts +24 -0
  86. package/dist/src/types/internal.d.ts +33 -0
  87. package/dist/src/types/requests.d.ts +213 -0
  88. package/dist/src/types/responses.d.ts +257 -0
  89. package/dist/src/types/schema.d.ts +102 -0
  90. package/package.json +81 -0
package/dist/ai.d.ts ADDED
@@ -0,0 +1,2073 @@
1
+ /**
2
+ * The Firebase AI Web SDK.
3
+ *
4
+ * @packageDocumentation
5
+ */
6
+
7
+ import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
8
+ import { FirebaseApp } from '@firebase/app';
9
+ import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
10
+ import { FirebaseError } from '@firebase/util';
11
+
12
+ /**
13
+ * An instance of the Firebase AI SDK.
14
+ *
15
+ * Do not create this instance directly. Instead, use {@link getAI | getAI()}.
16
+ *
17
+ * @public
18
+ */
19
+ export declare interface AI {
20
+ /**
21
+ * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.
22
+ */
23
+ app: FirebaseApp;
24
+ /**
25
+ * A {@link Backend} instance that specifies the configuration for the target backend,
26
+ * either the Gemini Developer API (using {@link GoogleAIBackend}) or the
27
+ * Vertex AI Gemini API (using {@link VertexAIBackend}).
28
+ */
29
+ backend: Backend;
30
+ /**
31
+ * @deprecated use `AI.backend.location` instead.
32
+ *
33
+ * The location configured for this AI service instance, relevant for Vertex AI backends.
34
+ */
35
+ location: string;
36
+ }
37
+
38
+ /**
39
+ * Error class for the Firebase AI SDK.
40
+ *
41
+ * @public
42
+ */
43
+ export declare class AIError extends FirebaseError {
44
+ readonly code: AIErrorCode;
45
+ readonly customErrorData?: CustomErrorData | undefined;
46
+ /**
47
+ * Constructs a new instance of the `AIError` class.
48
+ *
49
+ * @param code - The error code from {@link AIErrorCode}.
50
+ * @param message - A human-readable message describing the error.
51
+ * @param customErrorData - Optional error data.
52
+ */
53
+ constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined);
54
+ }
55
+
56
+ /**
57
+ * Standardized error codes that {@link AIError} can have.
58
+ *
59
+ * @public
60
+ */
61
+ declare const enum AIErrorCode {
62
+ /** A generic error occurred. */
63
+ ERROR = "error",
64
+ /** An error occurred in a request. */
65
+ REQUEST_ERROR = "request-error",
66
+ /** An error occurred in a response. */
67
+ RESPONSE_ERROR = "response-error",
68
+ /** An error occurred while performing a fetch. */
69
+ FETCH_ERROR = "fetch-error",
70
+ /** An error associated with a Content object. */
71
+ INVALID_CONTENT = "invalid-content",
72
+ /** An error due to the Firebase API not being enabled in the Console. */
73
+ API_NOT_ENABLED = "api-not-enabled",
74
+ /** An error due to invalid Schema input. */
75
+ INVALID_SCHEMA = "invalid-schema",
76
+ /** An error occurred due to a missing Firebase API key. */
77
+ NO_API_KEY = "no-api-key",
78
+ /** An error occurred due to a missing Firebase app ID. */
79
+ NO_APP_ID = "no-app-id",
80
+ /** An error occurred due to a model name not being specified during initialization. */
81
+ NO_MODEL = "no-model",
82
+ /** An error occurred due to a missing project ID. */
83
+ NO_PROJECT_ID = "no-project-id",
84
+ /** An error occurred while parsing. */
85
+ PARSE_FAILED = "parse-failed",
86
+ /** An error occurred due to an attempt to use an unsupported feature. */
87
+ UNSUPPORTED = "unsupported"
88
+ }
89
+ export { AIErrorCode }
90
+ export { AIErrorCode as VertexAIErrorCode }
91
+
92
+ /**
93
+ * Base class for Firebase AI model APIs.
94
+ *
95
+ * Instances of this class are associated with a specific Firebase AI {@link Backend}
96
+ * and provide methods for interacting with the configured generative model.
97
+ *
98
+ * @public
99
+ */
100
+ export declare abstract class AIModel {
101
+ /**
102
+ * The fully qualified model resource name to use for generating images
103
+ * (for example, `publishers/google/models/imagen-3.0-generate-002`).
104
+ */
105
+ readonly model: string;
106
+ /**
107
+ * @internal
108
+ */
109
+ protected _apiSettings: ApiSettings;
110
+ /**
111
+ * Constructs a new instance of the {@link AIModel} class.
112
+ *
113
+ * This constructor should only be called from subclasses that provide
114
+ * a model API.
115
+ *
116
+ * @param ai - an {@link AI} instance.
117
+ * @param modelName - The name of the model being used. It can be in one of the following formats:
118
+ * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
119
+ * - `models/my-model` (will resolve to `publishers/google/models/my-model`)
120
+ * - `publishers/my-publisher/models/my-model` (fully qualified model name)
121
+ *
122
+ * @throws If the `apiKey` or `projectId` fields are missing in your
123
+ * Firebase config.
124
+ *
125
+ * @internal
126
+ */
127
+ protected constructor(ai: AI, modelName: string);
128
+ /**
129
+ * Normalizes the given model name to a fully qualified model resource name.
130
+ *
131
+ * @param modelName - The model name to normalize.
132
+ * @returns The fully qualified model resource name.
133
+ *
134
+ * @internal
135
+ */
136
+ static normalizeModelName(modelName: string, backendType: BackendType): string;
137
+ /**
138
+ * @internal
139
+ */
140
+ private static normalizeGoogleAIModelName;
141
+ /**
142
+ * @internal
143
+ */
144
+ private static normalizeVertexAIModelName;
145
+ }
146
+
147
+ /**
148
+ * Options for initializing the AI service using {@link getAI | getAI()}.
149
+ * This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)
150
+ * and configuring its specific options (like location for Vertex AI).
151
+ *
152
+ * @public
153
+ */
154
+ export declare interface AIOptions {
155
+ /**
156
+ * The backend configuration to use for the AI service instance.
157
+ */
158
+ backend: Backend;
159
+ }
160
+
161
+ declare interface ApiSettings {
162
+ apiKey: string;
163
+ project: string;
164
+ appId: string;
165
+ automaticDataCollectionEnabled?: boolean;
166
+ /**
167
+ * @deprecated Use `backend.location` instead.
168
+ */
169
+ location: string;
170
+ backend: Backend;
171
+ getAuthToken?: () => Promise<FirebaseAuthTokenData | null>;
172
+ getAppCheckToken?: () => Promise<AppCheckTokenResult>;
173
+ }
174
+
175
+ /**
176
+ * Schema class for "array" types.
177
+ * The `items` param should refer to the type of item that can be a member
178
+ * of the array.
179
+ * @public
180
+ */
181
+ export declare class ArraySchema extends Schema {
182
+ items: TypedSchema;
183
+ constructor(schemaParams: SchemaParams, items: TypedSchema);
184
+ /**
185
+ * @internal
186
+ */
187
+ toJSON(): SchemaRequest;
188
+ }
189
+
190
+ /**
191
+ * Abstract base class representing the configuration for an AI service backend.
192
+ * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
193
+ * the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
194
+ * {@link VertexAIBackend} for the Vertex AI Gemini API.
195
+ *
196
+ * @public
197
+ */
198
+ export declare abstract class Backend {
199
+ /**
200
+ * Specifies the backend type.
201
+ */
202
+ readonly backendType: BackendType;
203
+ /**
204
+ * Protected constructor for use by subclasses.
205
+ * @param type - The backend type.
206
+ */
207
+ protected constructor(type: BackendType);
208
+ }
209
+
210
+ /**
211
+ * An enum-like object containing constants that represent the supported backends
212
+ * for the Firebase AI SDK.
213
+ * This determines which backend service (Vertex AI Gemini API or Gemini Developer API)
214
+ * the SDK will communicate with.
215
+ *
216
+ * These values are assigned to the `backendType` property within the specific backend
217
+ * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify
218
+ * which service to target.
219
+ *
220
+ * @public
221
+ */
222
+ export declare const BackendType: {
223
+ /**
224
+ * Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.
225
+ * Use this constant when creating a {@link VertexAIBackend} configuration.
226
+ */
227
+ readonly VERTEX_AI: "VERTEX_AI";
228
+ /**
229
+ * Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).
230
+ * Use this constant when creating a {@link GoogleAIBackend} configuration.
231
+ */
232
+ readonly GOOGLE_AI: "GOOGLE_AI";
233
+ };
234
+
235
+ /**
236
+ * Type alias representing valid backend types.
237
+ * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.
238
+ *
239
+ * @public
240
+ */
241
+ export declare type BackendType = (typeof BackendType)[keyof typeof BackendType];
242
+
243
+ /**
244
+ * Base parameters for a number of methods.
245
+ * @public
246
+ */
247
+ export declare interface BaseParams {
248
+ safetySettings?: SafetySetting[];
249
+ generationConfig?: GenerationConfig;
250
+ }
251
+
252
+ /**
253
+ * Reason that a prompt was blocked.
254
+ * @public
255
+ */
256
+ export declare enum BlockReason {
257
+ /**
258
+ * Content was blocked by safety settings.
259
+ */
260
+ SAFETY = "SAFETY",
261
+ /**
262
+ * Content was blocked, but the reason is uncategorized.
263
+ */
264
+ OTHER = "OTHER",
265
+ /**
266
+ * Content was blocked because it contained terms from the terminology blocklist.
267
+ */
268
+ BLOCKLIST = "BLOCKLIST",
269
+ /**
270
+ * Content was blocked due to prohibited content.
271
+ */
272
+ PROHIBITED_CONTENT = "PROHIBITED_CONTENT"
273
+ }
274
+
275
+ /**
276
+ * Schema class for "boolean" types.
277
+ * @public
278
+ */
279
+ export declare class BooleanSchema extends Schema {
280
+ constructor(schemaParams?: SchemaParams);
281
+ }
282
+
283
+ /**
284
+ * ChatSession class that enables sending chat messages and stores
285
+ * history of sent and received messages so far.
286
+ *
287
+ * @public
288
+ */
289
+ export declare class ChatSession {
290
+ model: string;
291
+ params?: StartChatParams | undefined;
292
+ requestOptions?: RequestOptions | undefined;
293
+ private _apiSettings;
294
+ private _history;
295
+ private _sendPromise;
296
+ constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
297
+ /**
298
+ * Gets the chat history so far. Blocked prompts are not added to history.
299
+ * Neither blocked candidates nor the prompts that generated them are added
300
+ * to history.
301
+ */
302
+ getHistory(): Promise<Content[]>;
303
+ /**
304
+ * Sends a chat message and receives a non-streaming
305
+ * {@link GenerateContentResult}
306
+ */
307
+ sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>;
308
+ /**
309
+ * Sends a chat message and receives the response as a
310
+ * {@link GenerateContentStreamResult} containing an iterable stream
311
+ * and a response promise.
312
+ */
313
+ sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
314
+ }
315
+
316
+ /**
317
+ * A single citation.
318
+ * @public
319
+ */
320
+ export declare interface Citation {
321
+ startIndex?: number;
322
+ endIndex?: number;
323
+ uri?: string;
324
+ license?: string;
325
+ /**
326
+ * The title of the cited source, if available.
327
+ *
328
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
329
+ */
330
+ title?: string;
331
+ /**
332
+ * The publication date of the cited source, if available.
333
+ *
334
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
335
+ */
336
+ publicationDate?: Date_2;
337
+ }
338
+
339
+ /**
340
+ * Citation metadata that may be found on a {@link GenerateContentCandidate}.
341
+ * @public
342
+ */
343
+ export declare interface CitationMetadata {
344
+ citations: Citation[];
345
+ }
346
+
347
+ /**
348
+ * Content type for both prompts and response candidates.
349
+ * @public
350
+ */
351
+ export declare interface Content {
352
+ role: Role;
353
+ parts: Part[];
354
+ }
355
+
356
+ /**
357
+ * Params for calling {@link GenerativeModel.countTokens}
358
+ * @public
359
+ */
360
+ export declare interface CountTokensRequest {
361
+ contents: Content[];
362
+ /**
363
+ * Instructions that direct the model to behave a certain way.
364
+ */
365
+ systemInstruction?: string | Part | Content;
366
+ /**
367
+ * {@link Tool} configuration.
368
+ */
369
+ tools?: Tool[];
370
+ /**
371
+ * Configuration options that control how the model generates a response.
372
+ */
373
+ generationConfig?: GenerationConfig;
374
+ }
375
+
376
+ /**
377
+ * Response from calling {@link GenerativeModel.countTokens}.
378
+ * @public
379
+ */
380
+ export declare interface CountTokensResponse {
381
+ /**
382
+ * The total number of tokens counted across all instances from the request.
383
+ */
384
+ totalTokens: number;
385
+ /**
386
+ * The total number of billable characters counted across all instances
387
+ * from the request.
388
+ *
389
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
390
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
391
+ */
392
+ totalBillableCharacters?: number;
393
+ /**
394
+ * The breakdown, by modality, of how many tokens are consumed by the prompt.
395
+ */
396
+ promptTokensDetails?: ModalityTokenCount[];
397
+ }
398
+
399
+ /**
400
+ * Details object that contains data originating from a bad HTTP response.
401
+ *
402
+ * @public
403
+ */
404
+ export declare interface CustomErrorData {
405
+ /** HTTP status code of the error response. */
406
+ status?: number;
407
+ /** HTTP status text of the error response. */
408
+ statusText?: string;
409
+ /** Response from a {@link GenerateContentRequest} */
410
+ response?: GenerateContentResponse;
411
+ /** Optional additional details about the error. */
412
+ errorDetails?: ErrorDetails[];
413
+ }
414
+
415
+ /**
416
+ * Protobuf google.type.Date
417
+ * @public
418
+ */
419
+ declare interface Date_2 {
420
+ year: number;
421
+ month: number;
422
+ day: number;
423
+ }
424
+ export { Date_2 as Date }
425
+
426
+ /**
427
+ * Response object wrapped with helper methods.
428
+ *
429
+ * @public
430
+ */
431
+ export declare interface EnhancedGenerateContentResponse extends GenerateContentResponse {
432
+ /**
433
+ * Returns the text string from the response, if available.
434
+ * Throws if the prompt or candidate was blocked.
435
+ */
436
+ text: () => string;
437
+ /**
438
+ * Aggregates and returns all {@link InlineDataPart}s from the {@link GenerateContentResponse}'s
439
+ * first candidate.
440
+ *
441
+ * @returns An array of {@link InlineDataPart}s containing data from the response, if available.
442
+ *
443
+ * @throws If the prompt or candidate was blocked.
444
+ */
445
+ inlineDataParts: () => InlineDataPart[] | undefined;
446
+ functionCalls: () => FunctionCall[] | undefined;
447
+ }
448
+
449
+ /**
450
+ * Details object that may be included in an error response.
451
+ *
452
+ * @public
453
+ */
454
+ export declare interface ErrorDetails {
455
+ '@type'?: string;
456
+ /** The reason for the error. */
457
+ reason?: string;
458
+ /** The domain where the error occurred. */
459
+ domain?: string;
460
+ /** Additional metadata about the error. */
461
+ metadata?: Record<string, unknown>;
462
+ /** Any other relevant information about the error. */
463
+ [key: string]: unknown;
464
+ }
465
+
466
+ /**
467
+ * Data pointing to a file uploaded on Google Cloud Storage.
468
+ * @public
469
+ */
470
+ export declare interface FileData {
471
+ mimeType: string;
472
+ fileUri: string;
473
+ }
474
+
475
+ /**
476
+ * Content part interface if the part represents {@link FileData}
477
+ * @public
478
+ */
479
+ export declare interface FileDataPart {
480
+ text?: never;
481
+ inlineData?: never;
482
+ functionCall?: never;
483
+ functionResponse?: never;
484
+ fileData: FileData;
485
+ }
486
+
487
+ /**
488
+ * Reason that a candidate finished.
489
+ * @public
490
+ */
491
+ export declare enum FinishReason {
492
+ /**
493
+ * Natural stop point of the model or provided stop sequence.
494
+ */
495
+ STOP = "STOP",
496
+ /**
497
+ * The maximum number of tokens as specified in the request was reached.
498
+ */
499
+ MAX_TOKENS = "MAX_TOKENS",
500
+ /**
501
+ * The candidate content was flagged for safety reasons.
502
+ */
503
+ SAFETY = "SAFETY",
504
+ /**
505
+ * The candidate content was flagged for recitation reasons.
506
+ */
507
+ RECITATION = "RECITATION",
508
+ /**
509
+ * Unknown reason.
510
+ */
511
+ OTHER = "OTHER",
512
+ /**
513
+ * The candidate content contained forbidden terms.
514
+ */
515
+ BLOCKLIST = "BLOCKLIST",
516
+ /**
517
+ * The candidate content potentially contained prohibited content.
518
+ */
519
+ PROHIBITED_CONTENT = "PROHIBITED_CONTENT",
520
+ /**
521
+ * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
522
+ */
523
+ SPII = "SPII",
524
+ /**
525
+ * The function call generated by the model was invalid.
526
+ */
527
+ MALFORMED_FUNCTION_CALL = "MALFORMED_FUNCTION_CALL"
528
+ }
529
+
530
+ /**
531
+ * A predicted {@link FunctionCall} returned from the model
532
+ * that contains a string representing the {@link FunctionDeclaration.name}
533
+ * and a structured JSON object containing the parameters and their values.
534
+ * @public
535
+ */
536
+ export declare interface FunctionCall {
537
+ name: string;
538
+ args: object;
539
+ }
540
+
541
+ /**
542
+ * @public
543
+ */
544
+ export declare interface FunctionCallingConfig {
545
+ mode?: FunctionCallingMode;
546
+ allowedFunctionNames?: string[];
547
+ }
548
+
549
+ /**
550
+ * @public
551
+ */
552
+ export declare enum FunctionCallingMode {
553
+ /**
554
+ * Default model behavior; model decides to predict either a function call
555
+ * or a natural language response.
556
+ */
557
+ AUTO = "AUTO",
558
+ /**
559
+ * Model is constrained to always predicting a function call only.
560
+ * If `allowed_function_names` is set, the predicted function call will be
561
+ * limited to any one of `allowed_function_names`, else the predicted
562
+ * function call will be any one of the provided `function_declarations`.
563
+ */
564
+ ANY = "ANY",
565
+ /**
566
+ * Model will not predict any function call. Model behavior is same as when
567
+ * not passing any function declarations.
568
+ */
569
+ NONE = "NONE"
570
+ }
571
+
572
+ /**
573
+ * Content part interface if the part represents a {@link FunctionCall}.
574
+ * @public
575
+ */
576
+ export declare interface FunctionCallPart {
577
+ text?: never;
578
+ inlineData?: never;
579
+ functionCall: FunctionCall;
580
+ functionResponse?: never;
581
+ }
582
+
583
+ /**
584
+ * Structured representation of a function declaration as defined by the
585
+ * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
586
+ * Included
587
+ * in this declaration are the function name and parameters. This
588
+ * `FunctionDeclaration` is a representation of a block of code that can be used
589
+ * as a Tool by the model and executed by the client.
590
+ * @public
591
+ */
592
+ export declare interface FunctionDeclaration {
593
+ /**
594
+ * The name of the function to call. Must start with a letter or an
595
+ * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
596
+ * a max length of 64.
597
+ */
598
+ name: string;
599
+ /**
600
+ * Description and purpose of the function. Model uses it to decide
601
+ * how and whether to call the function.
602
+ */
603
+ description: string;
604
+ /**
605
+ * Optional. Describes the parameters to this function in JSON Schema Object
606
+ * format. Reflects the OpenAPI 3.0.3 Parameter Object. Parameter names are
607
+ * case-sensitive. For a function with no parameters, this can be left unset.
608
+ */
609
+ parameters?: ObjectSchemaInterface;
610
+ }
611
+
612
+ /**
613
+ * A `FunctionDeclarationsTool` is a piece of code that enables the system to
614
+ * interact with external systems to perform an action, or set of actions,
615
+ * outside of knowledge and scope of the model.
616
+ * @public
617
+ */
618
+ export declare interface FunctionDeclarationsTool {
619
+ /**
620
+ * Optional. One or more function declarations
621
+ * to be passed to the model along with the current user query. Model may
622
+ * decide to call a subset of these functions by populating
623
+ * {@link FunctionCall} in the response. User should
624
+ * provide a {@link FunctionResponse} for each
625
+ * function call in the next turn. Based on the function responses, the model will
626
+ * generate the final response back to the user. Maximum 64 function
627
+ * declarations can be provided.
628
+ */
629
+ functionDeclarations?: FunctionDeclaration[];
630
+ }
631
+
632
+ /**
633
+ * The result output from a {@link FunctionCall} that contains a string
634
+ * representing the {@link FunctionDeclaration.name}
635
+ * and a structured JSON object containing any output
636
+ * from the function is used as context to the model.
637
+ * This should contain the result of a {@link FunctionCall}
638
+ * made based on model prediction.
639
+ * @public
640
+ */
641
+ export declare interface FunctionResponse {
642
+ name: string;
643
+ response: object;
644
+ }
645
+
646
+ /**
647
+ * Content part interface if the part represents {@link FunctionResponse}.
648
+ * @public
649
+ */
650
+ export declare interface FunctionResponsePart {
651
+ text?: never;
652
+ inlineData?: never;
653
+ functionCall?: never;
654
+ functionResponse: FunctionResponse;
655
+ }
656
+
657
+ /**
658
+ * A candidate returned as part of a {@link GenerateContentResponse}.
659
+ * @public
660
+ */
661
+ export declare interface GenerateContentCandidate {
662
+ index: number;
663
+ content: Content;
664
+ finishReason?: FinishReason;
665
+ finishMessage?: string;
666
+ safetyRatings?: SafetyRating[];
667
+ citationMetadata?: CitationMetadata;
668
+ groundingMetadata?: GroundingMetadata;
669
+ }
670
+
671
+ /**
672
+ * Request sent through {@link GenerativeModel.generateContent}
673
+ * @public
674
+ */
675
+ export declare interface GenerateContentRequest extends BaseParams {
676
+ contents: Content[];
677
+ tools?: Tool[];
678
+ toolConfig?: ToolConfig;
679
+ systemInstruction?: string | Part | Content;
680
+ }
681
+
682
+ /**
683
+ * Individual response from {@link GenerativeModel.generateContent} and
684
+ * {@link GenerativeModel.generateContentStream}.
685
+ * `generateContentStream()` will return one in each chunk until
686
+ * the stream is done.
687
+ * @public
688
+ */
689
+ export declare interface GenerateContentResponse {
690
+ candidates?: GenerateContentCandidate[];
691
+ promptFeedback?: PromptFeedback;
692
+ usageMetadata?: UsageMetadata;
693
+ }
694
+
695
+ /**
696
+ * Result object returned from {@link GenerativeModel.generateContent} call.
697
+ *
698
+ * @public
699
+ */
700
+ export declare interface GenerateContentResult {
701
+ response: EnhancedGenerateContentResponse;
702
+ }
703
+
704
+ /**
705
+ * Result object returned from {@link GenerativeModel.generateContentStream} call.
706
+ * Iterate over `stream` to get chunks as they come in and/or
707
+ * use the `response` promise to get the aggregated response when
708
+ * the stream is done.
709
+ *
710
+ * @public
711
+ */
712
+ export declare interface GenerateContentStreamResult {
713
+ stream: AsyncGenerator<EnhancedGenerateContentResponse>;
714
+ response: Promise<EnhancedGenerateContentResponse>;
715
+ }
716
+
717
+ /**
718
+ * Config options for content-related requests
719
+ * @public
720
+ */
721
+ export declare interface GenerationConfig {
722
+ candidateCount?: number;
723
+ stopSequences?: string[];
724
+ maxOutputTokens?: number;
725
+ temperature?: number;
726
+ topP?: number;
727
+ topK?: number;
728
+ presencePenalty?: number;
729
+ frequencyPenalty?: number;
730
+ /**
731
+ * Output response MIME type of the generated candidate text.
732
+ * Supported MIME types are `text/plain` (default, text output),
733
+ * `application/json` (JSON response in the candidates), and
734
+ * `text/x.enum`.
735
+ */
736
+ responseMimeType?: string;
737
+ /**
738
+ * Output response schema of the generated candidate text. This
739
+ * value can be a class generated with a {@link Schema} static method
740
+ * like `Schema.string()` or `Schema.object()` or it can be a plain
741
+ * JS object matching the {@link SchemaRequest} interface.
742
+ * <br/>Note: This only applies when the specified `responseMIMEType` supports a schema; currently
743
+ * this is limited to `application/json` and `text/x.enum`.
744
+ */
745
+ responseSchema?: TypedSchema | SchemaRequest;
746
+ /**
747
+ * Generation modalities to be returned in generation responses.
748
+ *
749
+ * @remarks
750
+ * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}.
751
+ * - Only image generation (`ResponseModality.IMAGE`) is supported.
752
+ *
753
+ * @beta
754
+ */
755
+ responseModalities?: ResponseModality[];
756
+ }
757
+
758
+ /**
759
+ * Interface for sending an image.
760
+ * @public
761
+ */
762
+ export declare interface GenerativeContentBlob {
763
+ mimeType: string;
764
+ /**
765
+ * Image as a base64 string.
766
+ */
767
+ data: string;
768
+ }
769
+
770
+ /**
771
+ * Class for generative model APIs.
772
+ * @public
773
+ */
774
+ export declare class GenerativeModel extends AIModel {
775
+ generationConfig: GenerationConfig;
776
+ safetySettings: SafetySetting[];
777
+ requestOptions?: RequestOptions;
778
+ tools?: Tool[];
779
+ toolConfig?: ToolConfig;
780
+ systemInstruction?: Content;
781
+ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
782
+ /**
783
+ * Makes a single non-streaming call to the model
784
+ * and returns an object containing a single {@link GenerateContentResponse}.
785
+ */
786
+ generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>;
787
+ /**
788
+ * Makes a single streaming call to the model
789
+ * and returns an object containing an iterable stream that iterates
790
+ * over all chunks in the streaming response as well as
791
+ * a promise that returns the final aggregated response.
792
+ */
793
+ generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>;
794
+ /**
795
+ * Gets a new {@link ChatSession} instance which can be used for
796
+ * multi-turn chats.
797
+ */
798
+ startChat(startChatParams?: StartChatParams): ChatSession;
799
+ /**
800
+ * Counts the tokens in the provided request.
801
+ */
802
+ countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>;
803
+ }
804
+
805
+ /**
806
+ * Returns the default {@link AI} instance that is associated with the provided
807
+ * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
808
+ * default settings.
809
+ *
810
+ * @example
811
+ * ```javascript
812
+ * const ai = getAI(app);
813
+ * ```
814
+ *
815
+ * @example
816
+ * ```javascript
817
+ * // Get an AI instance configured to use the Gemini Developer API (via Google AI).
818
+ * const ai = getAI(app, { backend: new GoogleAIBackend() });
819
+ * ```
820
+ *
821
+ * @example
822
+ * ```javascript
823
+ * // Get an AI instance configured to use the Vertex AI Gemini API.
824
+ * const ai = getAI(app, { backend: new VertexAIBackend() });
825
+ * ```
826
+ *
827
+ * @param app - The {@link @firebase/app#FirebaseApp} to use.
828
+ * @param options - {@link AIOptions} that configure the AI instance.
829
+ * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
830
+ *
831
+ * @public
832
+ */
833
+ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
834
+
835
+ /**
836
+ * Returns a {@link GenerativeModel} class with methods for inference
837
+ * and other functionality.
838
+ *
839
+ * @public
840
+ */
841
+ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
842
+
843
+ /**
844
+ * Returns an {@link ImagenModel} class with methods for using Imagen.
845
+ *
846
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
847
+ *
848
+ * @param ai - An {@link AI} instance.
849
+ * @param modelParams - Parameters to use when making Imagen requests.
850
+ * @param requestOptions - Additional options to use when making requests.
851
+ *
852
+ * @throws If the `apiKey` or `projectId` fields are missing in your
853
+ * Firebase config.
854
+ *
855
+ * @beta
856
+ */
857
+ export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
858
+
859
+ /**
860
+ * @deprecated Use the new {@link getAI | getAI()} instead. The Vertex AI in Firebase SDK has been
861
+ * replaced with the Firebase AI SDK to accommodate the evolving set of supported features and
862
+ * services. For migration details, see the {@link https://firebase.google.com/docs/vertex-ai/migrate-to-latest-sdk | migration guide}.
863
+ *
864
+ * Returns a {@link VertexAI} instance for the given app, configured to use the
865
+ * Vertex AI Gemini API. This is equivalent to calling
866
+ * {@link getAI | getAI()} with `{ backend: new VertexAIBackend() }`.
867
+ *
868
+ * @param app - The {@link @firebase/app#FirebaseApp} to use.
869
+ * @param options - Options to configure the Vertex AI instance, including the location.
870
+ *
871
+ * @public
872
+ */
873
+ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions): VertexAI;
874
+
875
+ /**
876
+ * Configuration class for the Gemini Developer API.
877
+ *
878
+ * Use this with {@link AIOptions} when initializing the AI service via
879
+ * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
880
+ *
881
+ * @public
882
+ */
883
+ export declare class GoogleAIBackend extends Backend {
884
+ /**
885
+ * Creates a configuration object for the Gemini Developer API backend.
886
+ */
887
+ constructor();
888
+ }
889
+
890
+ /**
891
+ * @internal
892
+ */
893
+ export declare interface GoogleAICitationMetadata {
894
+ citationSources: Citation[];
895
+ }
896
+
897
+ /**
898
+ * @internal
899
+ */
900
+ export declare interface GoogleAICountTokensRequest {
901
+ generateContentRequest: {
902
+ model: string;
903
+ contents: Content[];
904
+ systemInstruction?: string | Part | Content;
905
+ tools?: Tool[];
906
+ generationConfig?: GenerationConfig;
907
+ };
908
+ }
909
+
910
+ /**
911
+ * @internal
912
+ */
913
+ export declare interface GoogleAIGenerateContentCandidate {
914
+ index: number;
915
+ content: Content;
916
+ finishReason?: FinishReason;
917
+ finishMessage?: string;
918
+ safetyRatings?: SafetyRating[];
919
+ citationMetadata?: GoogleAICitationMetadata;
920
+ groundingMetadata?: GroundingMetadata;
921
+ }
922
+
923
+ /**
924
+ * @internal
925
+ */
926
+ export declare interface GoogleAIGenerateContentResponse {
927
+ candidates?: GoogleAIGenerateContentCandidate[];
928
+ promptFeedback?: PromptFeedback;
929
+ usageMetadata?: UsageMetadata;
930
+ }
931
+
932
+ /**
933
+ * @deprecated
934
+ * @public
935
+ */
936
+ export declare interface GroundingAttribution {
937
+ segment: Segment;
938
+ confidenceScore?: number;
939
+ web?: WebAttribution;
940
+ retrievedContext?: RetrievedContextAttribution;
941
+ }
942
+
943
+ /**
944
+ * Metadata returned to client when grounding is enabled.
945
+ * @public
946
+ */
947
+ export declare interface GroundingMetadata {
948
+ webSearchQueries?: string[];
949
+ retrievalQueries?: string[];
950
+ /**
951
+ * @deprecated
952
+ */
953
+ groundingAttributions: GroundingAttribution[];
954
+ }
955
+
956
+ /**
957
+ * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
958
+ *
959
+ * @public
960
+ */
961
+ export declare enum HarmBlockMethod {
962
+ /**
963
+ * The harm block method uses both probability and severity scores.
964
+ */
965
+ SEVERITY = "SEVERITY",
966
+ /**
967
+ * The harm block method uses the probability score.
968
+ */
969
+ PROBABILITY = "PROBABILITY"
970
+ }
971
+
972
+ /**
973
+ * Threshold above which a prompt or candidate will be blocked.
974
+ * @public
975
+ */
976
+ export declare enum HarmBlockThreshold {
977
+ /**
978
+ * Content with `NEGLIGIBLE` will be allowed.
979
+ */
980
+ BLOCK_LOW_AND_ABOVE = "BLOCK_LOW_AND_ABOVE",
981
+ /**
982
+ * Content with `NEGLIGIBLE` and `LOW` will be allowed.
983
+ */
984
+ BLOCK_MEDIUM_AND_ABOVE = "BLOCK_MEDIUM_AND_ABOVE",
985
+ /**
986
+ * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
987
+ */
988
+ BLOCK_ONLY_HIGH = "BLOCK_ONLY_HIGH",
989
+ /**
990
+ * All content will be allowed.
991
+ */
992
+ BLOCK_NONE = "BLOCK_NONE"
993
+ }
994
+
995
+ /**
996
+ * Harm categories that would cause prompts or candidates to be blocked.
997
+ * @public
998
+ */
999
+ export declare enum HarmCategory {
1000
+ HARM_CATEGORY_HATE_SPEECH = "HARM_CATEGORY_HATE_SPEECH",
1001
+ HARM_CATEGORY_SEXUALLY_EXPLICIT = "HARM_CATEGORY_SEXUALLY_EXPLICIT",
1002
+ HARM_CATEGORY_HARASSMENT = "HARM_CATEGORY_HARASSMENT",
1003
+ HARM_CATEGORY_DANGEROUS_CONTENT = "HARM_CATEGORY_DANGEROUS_CONTENT"
1004
+ }
1005
+
1006
+ /**
1007
+ * Probability that a prompt or candidate matches a harm category.
1008
+ * @public
1009
+ */
1010
+ export declare enum HarmProbability {
1011
+ /**
1012
+ * Content has a negligible chance of being unsafe.
1013
+ */
1014
+ NEGLIGIBLE = "NEGLIGIBLE",
1015
+ /**
1016
+ * Content has a low chance of being unsafe.
1017
+ */
1018
+ LOW = "LOW",
1019
+ /**
1020
+ * Content has a medium chance of being unsafe.
1021
+ */
1022
+ MEDIUM = "MEDIUM",
1023
+ /**
1024
+ * Content has a high chance of being unsafe.
1025
+ */
1026
+ HIGH = "HIGH"
1027
+ }
1028
+
1029
+ /**
1030
+ * Harm severity levels.
1031
+ * @public
1032
+ */
1033
+ export declare enum HarmSeverity {
1034
+ /**
1035
+ * Negligible level of harm severity.
1036
+ */
1037
+ HARM_SEVERITY_NEGLIGIBLE = "HARM_SEVERITY_NEGLIGIBLE",
1038
+ /**
1039
+ * Low level of harm severity.
1040
+ */
1041
+ HARM_SEVERITY_LOW = "HARM_SEVERITY_LOW",
1042
+ /**
1043
+ * Medium level of harm severity.
1044
+ */
1045
+ HARM_SEVERITY_MEDIUM = "HARM_SEVERITY_MEDIUM",
1046
+ /**
1047
+ * High level of harm severity.
1048
+ */
1049
+ HARM_SEVERITY_HIGH = "HARM_SEVERITY_HIGH",
1050
+ /**
1051
+ * Harm severity is not supported.
1052
+ *
1053
+ * @remarks
1054
+ * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.
1055
+ */
1056
+ HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED"
1057
+ }
1058
+
1059
+ /**
1060
+ * Aspect ratios for Imagen images.
1061
+ *
1062
+ * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
1063
+ * {@link ImagenGenerationConfig}.
1064
+ *
1065
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1066
+ * for more details and examples of the supported aspect ratios.
1067
+ *
1068
+ * @beta
1069
+ */
1070
+ export declare enum ImagenAspectRatio {
1071
+ /**
1072
+ * Square (1:1) aspect ratio.
1073
+ */
1074
+ SQUARE = "1:1",
1075
+ /**
1076
+ * Landscape (3:4) aspect ratio. NOTE(review): a 3:4 width:height ratio is geometrically portrait — the LANDSCAPE_3x4/PORTRAIT_4x3 labels appear swapped relative to convention; confirm against the Imagen aspect-ratio docs.
1077
+ */
1078
+ LANDSCAPE_3x4 = "3:4",
1079
+ /**
1080
+ * Portrait (4:3) aspect ratio.
1081
+ */
1082
+ PORTRAIT_4x3 = "4:3",
1083
+ /**
1084
+ * Landscape (16:9) aspect ratio.
1085
+ */
1086
+ LANDSCAPE_16x9 = "16:9",
1087
+ /**
1088
+ * Portrait (9:16) aspect ratio.
1089
+ */
1090
+ PORTRAIT_9x16 = "9:16"
1091
+ }
1092
+
1093
+ /**
1094
+ * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
1095
+ *
1096
+ * This feature is not available yet.
1097
+ */
1098
+ export declare interface ImagenGCSImage {
1099
+ /**
1100
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
1101
+ *
1102
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
1103
+ */
1104
+ mimeType: string;
1105
+ /**
1106
+ * The URI of the file stored in a Cloud Storage for Firebase bucket.
1107
+ *
1108
+ * @example `"gs://bucket-name/path/sample_0.jpg"`.
1109
+ */
1110
+ gcsURI: string;
1111
+ }
1112
+
1113
+ /**
1114
+ * Configuration options for generating images with Imagen.
1115
+ *
1116
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for
1117
+ * more details.
1118
+ *
1119
+ * @beta
1120
+ */
1121
+ export declare interface ImagenGenerationConfig {
1122
+ /**
1123
+ * A description of what should be omitted from the generated images.
1124
+ *
1125
+ * Support for negative prompts depends on the Imagen model.
1126
+ *
1127
+ * See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.
1128
+ *
1129
+ * This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions
1130
+ * greater than `imagen-3.0-generate-002`.
1131
+ */
1132
+ negativePrompt?: string;
1133
+ /**
1134
+ * The number of images to generate. The default value is 1.
1135
+ *
1136
+ * The number of sample images that may be generated in each request depends on the model
1137
+ * (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a>
1138
+ * documentation for more details.
1139
+ */
1140
+ numberOfImages?: number;
1141
+ /**
1142
+ * The aspect ratio of the generated images. The default value is square 1:1.
1143
+ * Supported aspect ratios depend on the Imagen model, see {@link ImagenAspectRatio}
1144
+ * for more details.
1145
+ */
1146
+ aspectRatio?: ImagenAspectRatio;
1147
+ /**
1148
+ * The image format of the generated images. The default is PNG.
1149
+ *
1150
+ * See {@link ImagenImageFormat} for more details.
1151
+ */
1152
+ imageFormat?: ImagenImageFormat;
1153
+ /**
1154
+ * Whether to add an invisible watermark to generated images.
1155
+ *
1156
+ * If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate
1157
+ * that they are AI generated. If set to `false`, watermarking will be disabled.
1158
+ *
1159
+ * For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a>
1160
+ * documentation for more details.
1161
+ *
1162
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,
1163
+ * and cannot be turned off.
1164
+ */
1165
+ addWatermark?: boolean;
1166
+ }
1167
+
1168
+ /**
1169
+ * The response from a request to generate images with Imagen.
1170
+ *
1171
+ * @beta
1172
+ */
1173
+ export declare interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> {
1174
+ /**
1175
+ * The images generated by Imagen.
1176
+ *
1177
+ * The number of images generated may be fewer than the number requested if one or more were
1178
+ * filtered out; see `filteredReason`.
1179
+ */
1180
+ images: T[];
1181
+ /**
1182
+ * The reason that images were filtered out. This property will only be defined if one
1183
+ * or more images were filtered.
1184
+ *
1185
+ * Images may be filtered out due to the {@link ImagenSafetyFilterLevel},
1186
+ * {@link ImagenPersonFilterLevel}, or filtering included in the model.
1187
+ * The filter levels may be adjusted in your {@link ImagenSafetySettings}.
1188
+ *
1189
+ * See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen}
1190
+ * for more details.
1191
+ */
1192
+ filteredReason?: string;
1193
+ }
1194
+
1195
+ /**
1196
+ * @license
1197
+ * Copyright 2025 Google LLC
1198
+ *
1199
+ * Licensed under the Apache License, Version 2.0 (the "License");
1200
+ * you may not use this file except in compliance with the License.
1201
+ * You may obtain a copy of the License at
1202
+ *
1203
+ * http://www.apache.org/licenses/LICENSE-2.0
1204
+ *
1205
+ * Unless required by applicable law or agreed to in writing, software
1206
+ * distributed under the License is distributed on an "AS IS" BASIS,
1207
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1208
+ * See the License for the specific language governing permissions and
1209
+ * limitations under the License.
1210
+ */
1211
+ /**
1212
+ * Defines the image format for images generated by Imagen.
1213
+ *
1214
+ * Use this class to specify the desired format (JPEG or PNG) and compression quality
1215
+ * for images generated by Imagen. This is typically included as part of
1216
+ * {@link ImagenModelParams}.
1217
+ *
1218
+ * @example
1219
+ * ```javascript
1220
+ * const imagenModelParams = {
1221
+ * // ... other ImagenModelParams
1222
+ * imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.
1223
+ * }
1224
+ * ```
1225
+ *
1226
+ * @beta
1227
+ */
1228
+ export declare class ImagenImageFormat {
1229
+ /**
1230
+ * The MIME type.
1231
+ */
1232
+ mimeType: string;
1233
+ /**
1234
+ * The level of compression (a number between 0 and 100).
1235
+ */
1236
+ compressionQuality?: number;
1237
+ private constructor();
1238
+ /**
1239
+ * Creates an {@link ImagenImageFormat} for a JPEG image.
1240
+ *
1241
+ * @param compressionQuality - The level of compression (a number between 0 and 100).
1242
+ * @returns An {@link ImagenImageFormat} object for a JPEG image.
1243
+ *
1244
+ * @beta
1245
+ */
1246
+ static jpeg(compressionQuality?: number): ImagenImageFormat;
1247
+ /**
1248
+ * Creates an {@link ImagenImageFormat} for a PNG image.
1249
+ *
1250
+ * @returns An {@link ImagenImageFormat} object for a PNG image.
1251
+ *
1252
+ * @beta
1253
+ */
1254
+ static png(): ImagenImageFormat;
1255
+ }
1256
+
1257
+ /**
1258
+ * @license
1259
+ * Copyright 2025 Google LLC
1260
+ *
1261
+ * Licensed under the Apache License, Version 2.0 (the "License");
1262
+ * you may not use this file except in compliance with the License.
1263
+ * You may obtain a copy of the License at
1264
+ *
1265
+ * http://www.apache.org/licenses/LICENSE-2.0
1266
+ *
1267
+ * Unless required by applicable law or agreed to in writing, software
1268
+ * distributed under the License is distributed on an "AS IS" BASIS,
1269
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1270
+ * See the License for the specific language governing permissions and
1271
+ * limitations under the License.
1272
+ */
1273
+ /**
1274
+ * An image generated by Imagen, represented as inline data.
1275
+ *
1276
+ * @beta
1277
+ */
1278
+ export declare interface ImagenInlineImage {
1279
+ /**
1280
+ * The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
1281
+ *
1282
+ * To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
1283
+ */
1284
+ mimeType: string;
1285
+ /**
1286
+ * The base64-encoded image data.
1287
+ */
1288
+ bytesBase64Encoded: string;
1289
+ }
1290
+
1291
+ /**
1292
+ * Class for Imagen model APIs.
1293
+ *
1294
+ * This class provides methods for generating images using the Imagen model.
1295
+ *
1296
+ * @example
1297
+ * ```javascript
1298
+ * const imagen = new ImagenModel(
1299
+ * ai,
1300
+ * {
1301
+ * model: 'imagen-3.0-generate-002'
1302
+ * }
1303
+ * );
1304
+ *
1305
+ * const response = await imagen.generateImages('A photo of a cat');
1306
+ * if (response.images.length > 0) {
1307
+ * console.log(response.images[0].bytesBase64Encoded);
1308
+ * }
1309
+ * ```
1310
+ *
1311
+ * @beta
1312
+ */
1313
+ export declare class ImagenModel extends AIModel {
1314
+ requestOptions?: RequestOptions | undefined;
1315
+ /**
1316
+ * The Imagen generation configuration.
1317
+ */
1318
+ generationConfig?: ImagenGenerationConfig;
1319
+ /**
1320
+ * Safety settings for filtering inappropriate content.
1321
+ */
1322
+ safetySettings?: ImagenSafetySettings;
1323
+ /**
1324
+ * Constructs a new instance of the {@link ImagenModel} class.
1325
+ *
1326
+ * @param ai - an {@link AI} instance.
1327
+ * @param modelParams - Parameters to use when making requests to Imagen.
1328
+ * @param requestOptions - Additional options to use when making requests.
1329
+ *
1330
+ * @throws If the `apiKey` or `projectId` fields are missing in your
1331
+ * Firebase config.
1332
+ */
1333
+ constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined);
1334
+ /**
1335
+ * Generates images using the Imagen model and returns them as
1336
+ * base64-encoded strings.
1337
+ *
1338
+ * @param prompt - A text prompt describing the image(s) to generate.
1339
+ * @returns A promise that resolves to an {@link ImagenGenerationResponse}
1340
+ * object containing the generated images.
1341
+ *
1342
+ * @throws If the request to generate images fails. This happens if the
1343
+ * prompt is blocked.
1344
+ *
1345
+ * @remarks
1346
+ * If the prompt was not blocked, but one or more of the generated images were filtered, the
1347
+ * returned object will have a `filteredReason` property.
1348
+ * If all images are filtered, the `images` array will be empty.
1349
+ *
1350
+ * @beta
1351
+ */
1352
+ generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
1353
+ /**
1354
+ * Generates images to Cloud Storage for Firebase using the Imagen model.
1355
+ *
1356
+ * @internal This method is temporarily internal.
1357
+ *
1358
+ * @param prompt - A text prompt describing the image(s) to generate.
1359
+ * @param gcsURI - The URI of file stored in a Cloud Storage for Firebase bucket.
1360
+ * This should be a directory. For example, `gs://my-bucket/my-directory/`.
1361
+ * @returns A promise that resolves to an {@link ImagenGenerationResponse}
1362
+ * object containing the URLs of the generated images.
1363
+ *
1364
+ * @throws If the request to generate images fails. This happens if
1365
+ * the prompt is blocked.
1366
+ *
1367
+ * @remarks
1368
+ * If the prompt was not blocked, but one or more of the generated images were filtered, the
1369
+ * returned object will have a `filteredReason` property.
1370
+ * If all images are filtered, the `images` array will be empty.
1371
+ */
1372
+ generateImagesGCS(prompt: string, gcsURI: string): Promise<ImagenGenerationResponse<ImagenGCSImage>>;
1373
+ }
1374
+
1375
+ /**
1376
+ * Parameters for configuring an {@link ImagenModel}.
1377
+ *
1378
+ * @beta
1379
+ */
1380
+ export declare interface ImagenModelParams {
1381
+ /**
1382
+ * The Imagen model to use for generating images.
1383
+ * For example: `imagen-3.0-generate-002`.
1384
+ *
1385
+ * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
1386
+ *
1387
+ * See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}
1388
+ * for a full list of supported Imagen 3 models.
1389
+ */
1390
+ model: string;
1391
+ /**
1392
+ * Configuration options for generating images with Imagen.
1393
+ */
1394
+ generationConfig?: ImagenGenerationConfig;
1395
+ /**
1396
+ * Safety settings for filtering potentially inappropriate content.
1397
+ */
1398
+ safetySettings?: ImagenSafetySettings;
1399
+ }
1400
+
1401
+ /**
1402
+ * A filter level controlling whether generation of images containing people or faces is allowed.
1403
+ *
1404
+ * See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
1405
+ * documentation for more details.
1406
+ *
1407
+ * @beta
1408
+ */
1409
+ export declare enum ImagenPersonFilterLevel {
1410
+ /**
1411
+ * Disallow generation of images containing people or faces; images of people are filtered out.
1412
+ */
1413
+ BLOCK_ALL = "dont_allow",
1414
+ /**
1415
+ * Allow generation of images containing adults only; images of children are filtered out.
1416
+ *
1417
+ * Generation of images containing people or faces may require your use case to be
1418
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
1419
+ * for more details.
1420
+ */
1421
+ ALLOW_ADULT = "allow_adult",
1422
+ /**
1423
+ * Allow generation of images containing people of all ages.
1424
+ *
1425
+ * Generation of images containing people or faces may require your use case to be
1426
+ * reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
1427
+ * for more details.
1428
+ */
1429
+ ALLOW_ALL = "allow_all"
1430
+ }
1431
+
1432
+ /**
1433
+ * A filter level controlling how aggressively to filter sensitive content.
1434
+ *
1435
+ * Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
1436
+ * are assessed against a list of safety filters, which include 'harmful categories' (for example,
1437
+ * `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
1438
+ * filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1439
+ * and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
1440
+ * for more details.
1441
+ *
1442
+ * @beta
1443
+ */
1444
+ export declare enum ImagenSafetyFilterLevel {
1445
+ /**
1446
+ * The most aggressive filtering level; most strict blocking.
1447
+ */
1448
+ BLOCK_LOW_AND_ABOVE = "block_low_and_above",
1449
+ /**
1450
+ * Blocks some sensitive prompts and responses.
1451
+ */
1452
+ BLOCK_MEDIUM_AND_ABOVE = "block_medium_and_above",
1453
+ /**
1454
+ * Blocks few sensitive prompts and responses.
1455
+ */
1456
+ BLOCK_ONLY_HIGH = "block_only_high",
1457
+ /**
1458
+ * The least aggressive filtering level; blocks very few sensitive prompts and responses.
1459
+ *
1460
+ * Access to this feature is restricted and may require your case to be reviewed and approved by
1461
+ * Cloud support.
1462
+ */
1463
+ BLOCK_NONE = "block_none"
1464
+ }
1465
+
1466
+ /**
1467
+ * Settings for controlling the aggressiveness of filtering out sensitive content.
1468
+ *
1469
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
1470
+ * for more details.
1471
+ *
1472
+ * @beta
1473
+ */
1474
+ export declare interface ImagenSafetySettings {
1475
+ /**
1476
+ * A filter level controlling how aggressive to filter out sensitive content from generated
1477
+ * images.
1478
+ */
1479
+ safetyFilterLevel?: ImagenSafetyFilterLevel;
1480
+ /**
1481
+ * A filter level controlling whether generation of images containing people or faces is allowed.
1482
+ */
1483
+ personFilterLevel?: ImagenPersonFilterLevel;
1484
+ }
1485
+
1486
+ /**
1487
+ * Content part interface if the part represents an image.
1488
+ * @public
1489
+ */
1490
+ export declare interface InlineDataPart {
1491
+ text?: never;
1492
+ inlineData: GenerativeContentBlob;
1493
+ functionCall?: never;
1494
+ functionResponse?: never;
1495
+ /**
1496
+ * Applicable if `inlineData` is a video.
1497
+ */
1498
+ videoMetadata?: VideoMetadata;
1499
+ }
1500
+
1501
+ /**
1502
+ * Schema class for "integer" types.
1503
+ * @public
1504
+ */
1505
+ export declare class IntegerSchema extends Schema {
1506
+ constructor(schemaParams?: SchemaParams);
1507
+ }
1508
+
1509
+ /**
1510
+ * Content part modality.
1511
+ * @public
1512
+ */
1513
+ export declare enum Modality {
1514
+ /**
1515
+ * Unspecified modality.
1516
+ */
1517
+ MODALITY_UNSPECIFIED = "MODALITY_UNSPECIFIED",
1518
+ /**
1519
+ * Plain text.
1520
+ */
1521
+ TEXT = "TEXT",
1522
+ /**
1523
+ * Image.
1524
+ */
1525
+ IMAGE = "IMAGE",
1526
+ /**
1527
+ * Video.
1528
+ */
1529
+ VIDEO = "VIDEO",
1530
+ /**
1531
+ * Audio.
1532
+ */
1533
+ AUDIO = "AUDIO",
1534
+ /**
1535
+ * Document (for example, PDF).
1536
+ */
1537
+ DOCUMENT = "DOCUMENT"
1538
+ }
1539
+
1540
+ /**
1541
+ * Represents token counting info for a single modality.
1542
+ *
1543
+ * @public
1544
+ */
1545
+ export declare interface ModalityTokenCount {
1546
+ /** The modality associated with this token count. */
1547
+ modality: Modality;
1548
+ /** The number of tokens counted. */
1549
+ tokenCount: number;
1550
+ }
1551
+
1552
+ /**
1553
+ * Params passed to {@link getGenerativeModel}.
1554
+ * @public
1555
+ */
1556
+ export declare interface ModelParams extends BaseParams {
1557
+ model: string;
1558
+ tools?: Tool[];
1559
+ toolConfig?: ToolConfig;
1560
+ systemInstruction?: string | Part | Content;
1561
+ }
1562
+
1563
+ /**
1564
+ * Schema class for "number" types.
1565
+ * @public
1566
+ */
1567
+ export declare class NumberSchema extends Schema {
1568
+ constructor(schemaParams?: SchemaParams);
1569
+ }
1570
+
1571
+ /**
1572
+ * Schema class for "object" types.
1573
+ * The `properties` param must be a map of `Schema` objects.
1574
+ * @public
1575
+ */
1576
+ export declare class ObjectSchema extends Schema {
1577
+ properties: {
1578
+ [k: string]: TypedSchema;
1579
+ };
1580
+ optionalProperties: string[];
1581
+ constructor(schemaParams: SchemaParams, properties: {
1582
+ [k: string]: TypedSchema;
1583
+ }, optionalProperties?: string[]);
1584
+ /**
1585
+ * @internal
1586
+ */
1587
+ toJSON(): SchemaRequest;
1588
+ }
1589
+
1590
+ /**
1591
+ * Interface for {@link ObjectSchema} class.
1592
+ * @public
1593
+ */
1594
+ export declare interface ObjectSchemaInterface extends SchemaInterface {
1595
+ type: SchemaType.OBJECT;
1596
+ optionalProperties?: string[];
1597
+ }
1598
+
1599
+ /**
1600
+ * Content part - includes text, image/video, or function call/response
1601
+ * part types.
1602
+ * @public
1603
+ */
1604
+ export declare type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart;
1605
+
1606
+ /**
1607
+ * Possible roles.
1608
+ * @public
1609
+ */
1610
+ export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"];
1611
+
1612
+ /**
1613
+ * If the prompt was blocked, this will be populated with `blockReason` and
1614
+ * the relevant `safetyRatings`.
1615
+ * @public
1616
+ */
1617
+ export declare interface PromptFeedback {
1618
+ blockReason?: BlockReason;
1619
+ safetyRatings: SafetyRating[];
1620
+ /**
1621
+ * A human-readable description of the `blockReason`.
1622
+ *
1623
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
1624
+ */
1625
+ blockReasonMessage?: string;
1626
+ }
1627
+
1628
+ /**
1629
+ * Params passed to {@link getGenerativeModel}.
1630
+ * @public
1631
+ */
1632
+ export declare interface RequestOptions {
1633
+ /**
1634
+ * Request timeout in milliseconds. Defaults to 180 seconds (180000ms).
1635
+ */
1636
+ timeout?: number;
1637
+ /**
1638
+ * Base url for endpoint. Defaults to https://firebasevertexai.googleapis.com
1639
+ */
1640
+ baseUrl?: string;
1641
+ }
1642
+
1643
+ /**
1644
+ * Generation modalities to be returned in generation responses.
1645
+ *
1646
+ * @beta
1647
+ */
1648
+ export declare const ResponseModality: {
1649
+ /**
1650
+ * Text.
1651
+ * @beta
1652
+ */
1653
+ readonly TEXT: "TEXT";
1654
+ /**
1655
+ * Image.
1656
+ * @beta
1657
+ */
1658
+ readonly IMAGE: "IMAGE";
1659
+ };
1660
+
1661
+ /**
1662
+ * Generation modalities to be returned in generation responses.
1663
+ *
1664
+ * @beta
1665
+ */
1666
+ export declare type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];
1667
+
1668
+ /**
1669
+ * @public
1670
+ */
1671
+ export declare interface RetrievedContextAttribution {
1672
+ uri: string;
1673
+ title: string;
1674
+ }
1675
+
1676
+ /**
1677
+ * @license
1678
+ * Copyright 2024 Google LLC
1679
+ *
1680
+ * Licensed under the Apache License, Version 2.0 (the "License");
1681
+ * you may not use this file except in compliance with the License.
1682
+ * You may obtain a copy of the License at
1683
+ *
1684
+ * http://www.apache.org/licenses/LICENSE-2.0
1685
+ *
1686
+ * Unless required by applicable law or agreed to in writing, software
1687
+ * distributed under the License is distributed on an "AS IS" BASIS,
1688
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1689
+ * See the License for the specific language governing permissions and
1690
+ * limitations under the License.
1691
+ */
1692
+ /**
1693
+ * Role is the producer of the content.
1694
+ * @public
1695
+ */
1696
+ export declare type Role = (typeof POSSIBLE_ROLES)[number];
1697
+
1698
+ /**
1699
+ * A safety rating associated with a {@link GenerateContentCandidate}
1700
+ * @public
1701
+ */
1702
+ export declare interface SafetyRating {
1703
+ category: HarmCategory;
1704
+ probability: HarmProbability;
1705
+ /**
1706
+ * The harm severity level.
1707
+ *
1708
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
1709
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.HARM_SEVERITY_UNSUPPORTED`.
1710
+ */
1711
+ severity: HarmSeverity;
1712
+ /**
1713
+ * The probability score of the harm category.
1714
+ *
1715
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
1716
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
1717
+ */
1718
+ probabilityScore: number;
1719
+ /**
1720
+ * The severity score of the harm category.
1721
+ *
1722
+ * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
1723
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
1724
+ */
1725
+ severityScore: number;
1726
+ blocked: boolean;
1727
+ }
1728
+
1729
+ /**
1730
+ * Safety setting that can be sent as part of request parameters.
1731
+ * @public
1732
+ */
1733
+ export declare interface SafetySetting {
1734
+ category: HarmCategory;
1735
+ threshold: HarmBlockThreshold;
1736
+ /**
1737
+ * The harm block method.
1738
+ *
1739
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
1740
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be
1741
+ * thrown if this property is defined.
1742
+ */
1743
+ method?: HarmBlockMethod;
1744
+ }
1745
+
1746
/**
 * Parent class encompassing all Schema types, with static methods that
 * allow building specific Schema types. This class can be converted with
 * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.
 * (This string conversion is automatically done when calling SDK methods.)
 * @public
 */
export declare abstract class Schema implements SchemaInterface {
    /** Optional. The type of the property. {@link SchemaType}. */
    type: SchemaType;
    /**
     * Optional. The format of the property.
     * Supported formats:<br/>
     * <ul>
     *  <li>for NUMBER type: "float", "double"</li>
     *  <li>for INTEGER type: "int32", "int64"</li>
     *  <li>for STRING type: "email", "byte", etc</li>
     * </ul>
     */
    format?: string;
    /** Optional. The description of the property. */
    description?: string;
    /** Optional. Whether the property is nullable. Defaults to false. */
    nullable: boolean;
    /** Optional. The example of the property. */
    example?: unknown;
    /**
     * Allows user to add other schema properties that have not yet
     * been officially added to the SDK.
     */
    [key: string]: unknown;
    constructor(schemaParams: SchemaInterface);
    /**
     * Defines how this Schema should be serialized as JSON.
     * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#tojson_behavior
     * @internal
     */
    toJSON(): SchemaRequest;
    /** Builds an {@link ArraySchema} whose elements are described by `items`. */
    static array(arrayParams: SchemaParams & {
        items: Schema;
    }): ArraySchema;
    /** Builds an {@link ObjectSchema} from named property schemas. */
    static object(objectParams: SchemaParams & {
        properties: {
            [k: string]: Schema;
        };
        optionalProperties?: string[];
    }): ObjectSchema;
    /** Builds a {@link StringSchema}. */
    static string(stringParams?: SchemaParams): StringSchema;
    /** Builds a {@link StringSchema} restricted to the values listed in `enum`. */
    static enumString(stringParams: SchemaParams & {
        enum: string[];
    }): StringSchema;
    /** Builds an {@link IntegerSchema}. */
    static integer(integerParams?: SchemaParams): IntegerSchema;
    /** Builds a {@link NumberSchema}. */
    static number(numberParams?: SchemaParams): NumberSchema;
    /** Builds a {@link BooleanSchema}. */
    static boolean(booleanParams?: SchemaParams): BooleanSchema;
}
1803
+
1804
/**
 * Interface for the {@link Schema} class.
 * @public
 */
export declare interface SchemaInterface extends SchemaShared<SchemaInterface> {
    /** The type of the property. {@link SchemaType}. */
    type: SchemaType;
}
1815
+
1816
/**
 * Params passed to {@link Schema} static methods to create specific
 * {@link Schema} classes.
 *
 * The body is intentionally empty: all accepted fields come from
 * {@link SchemaShared}; unlike {@link SchemaInterface}, `type` is not
 * required here because each static builder supplies it.
 * @public
 */
export declare interface SchemaParams extends SchemaShared<SchemaInterface> {
}
1823
+
1824
/**
 * Final format for {@link Schema} params passed to backend requests.
 * @public
 */
export declare interface SchemaRequest extends SchemaShared<SchemaRequest> {
    /** The type of the property. {@link SchemaType}. */
    type: SchemaType;
    /** Optional. Array of required properties. */
    required?: string[];
}
1837
+
1838
/**
 * Basic {@link Schema} properties shared across several Schema-related
 * types.
 * @public
 */
export declare interface SchemaShared<T> {
    /**
     * Optional. The format of the property.
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or
     * `'date-time'`, otherwise requests will fail.
     */
    format?: string;
    /** Optional. The description of the property. */
    description?: string;
    /** Optional. The items of the property. */
    items?: T;
    /** Optional. Map of `Schema` objects. */
    properties?: {
        [k: string]: T;
    };
    /** Optional. The enum of the property. */
    enum?: string[];
    /** Optional. The example of the property. */
    example?: unknown;
    /** Optional. Whether the property is nullable. */
    nullable?: boolean;
    /** Escape hatch for schema fields not yet officially modeled by the SDK. */
    [key: string]: unknown;
}
1865
+
1866
+ /**
1867
+ * @license
1868
+ * Copyright 2024 Google LLC
1869
+ *
1870
+ * Licensed under the Apache License, Version 2.0 (the "License");
1871
+ * you may not use this file except in compliance with the License.
1872
+ * You may obtain a copy of the License at
1873
+ *
1874
+ * http://www.apache.org/licenses/LICENSE-2.0
1875
+ *
1876
+ * Unless required by applicable law or agreed to in writing, software
1877
+ * distributed under the License is distributed on an "AS IS" BASIS,
1878
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1879
+ * See the License for the specific language governing permissions and
1880
+ * limitations under the License.
1881
+ */
1882
/**
 * Contains the list of OpenAPI data types
 * as defined by the
 * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}.
 * The string values are the lowercase OpenAPI type names sent in requests.
 * @public
 */
export declare enum SchemaType {
    /** String type. */
    STRING = "string",
    /** Number type. */
    NUMBER = "number",
    /** Integer type. */
    INTEGER = "integer",
    /** Boolean type. */
    BOOLEAN = "boolean",
    /** Array type. */
    ARRAY = "array",
    /** Object type. */
    OBJECT = "object"
}
1902
+
1903
/**
 * Identifies a span of content by its position within a part.
 * @public
 */
export declare interface Segment {
    /** Index of the part this segment refers to — presumably into the containing `Content.parts` array; confirm against backend docs. */
    partIndex: number;
    /** Start offset of the segment within the part. Unit (character vs. byte) is not specified here. */
    startIndex: number;
    /** End offset of the segment within the part. Unit and inclusivity are not specified here. */
    endIndex: number;
}
1911
+
1912
/**
 * Params for {@link GenerativeModel.startChat}.
 * @public
 */
export declare interface StartChatParams extends BaseParams {
    /** Optional prior conversation content used to seed the chat session. */
    history?: Content[];
    /** Optional tools the model may call during the chat. */
    tools?: Tool[];
    /** Optional configuration shared by all tools in the request. */
    toolConfig?: ToolConfig;
    /** Optional system instruction; a plain string is accepted as shorthand for a single text part. */
    systemInstruction?: string | Part | Content;
}
1922
+
1923
/**
 * Schema class for "string" types. Can be used with or without
 * enum values.
 * @public
 */
export declare class StringSchema extends Schema {
    /** Optional. When present, restricts the string to one of these values. */
    enum?: string[];
    constructor(schemaParams?: SchemaParams, enumValues?: string[]);
    /**
     * Serializes this schema for backend requests.
     * @internal
     */
    toJSON(): SchemaRequest;
}
1936
+
1937
/**
 * Content part interface if the part represents a text string.
 * @public
 */
export declare interface TextPart {
    text: string;
    /**
     * The remaining part fields are typed `never` so that `TextPart` is
     * mutually exclusive with the other `Part` variants.
     */
    inlineData?: never;
    functionCall?: never;
    functionResponse?: never;
}
1947
+
1948
/**
 * Defines a tool that model can call to access external knowledge.
 * Currently the only supported tool kind is {@link FunctionDeclarationsTool}.
 * @public
 */
export declare type Tool = FunctionDeclarationsTool;
1953
+
1954
/**
 * Tool config. This config is shared for all tools provided in the request.
 * @public
 */
export declare interface ToolConfig {
    /** Optional configuration controlling how the model calls declared functions. */
    functionCallingConfig?: FunctionCallingConfig;
}
1961
+
1962
/**
 * A type that includes all specific {@link Schema} types.
 * @public
 */
export declare type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema;
1967
+
1968
/**
 * Usage metadata about a {@link GenerateContentResponse}.
 *
 * @public
 */
export declare interface UsageMetadata {
    /** The number of tokens in the request prompt. */
    promptTokenCount: number;
    /** The total number of tokens across the generated response candidates. */
    candidatesTokenCount: number;
    /** The total number of tokens in both the request and the response. */
    totalTokenCount: number;
    /** Optional. Per-modality breakdown of the prompt token count. */
    promptTokensDetails?: ModalityTokenCount[];
    /** Optional. Per-modality breakdown of the candidates token count. */
    candidatesTokensDetails?: ModalityTokenCount[];
}
1980
+
1981
/**
 * @deprecated Use the new {@link AI | AI} instead. The Vertex AI in Firebase SDK has been
 * replaced with the Firebase AI SDK to accommodate the evolving set of supported features and
 * services. For migration details, see the {@link https://firebase.google.com/docs/vertex-ai/migrate-to-latest-sdk | migration guide}.
 *
 * An instance of the Firebase AI SDK.
 *
 * This is a pure type alias of {@link AI}, retained for backward compatibility.
 *
 * @public
 */
export declare type VertexAI = AI;
1991
+
1992
/**
 * Configuration class for the Vertex AI Gemini API.
 *
 * Use this with {@link AIOptions} when initializing the AI service via
 * {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
 *
 * @public
 */
export declare class VertexAIBackend extends Backend {
    /**
     * The region identifier.
     * See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    readonly location: string;
    /**
     * Creates a configuration object for the Vertex AI backend.
     *
     * @param location - The region identifier, defaulting to `us-central1`;
     * see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    constructor(location?: string);
}
2016
+
2017
/**
 * @deprecated Use the new {@link AIError} instead. The Vertex AI in Firebase SDK has been
 * replaced with the Firebase AI SDK to accommodate the evolving set of supported features and
 * services. For migration details, see the {@link https://firebase.google.com/docs/vertex-ai/migrate-to-latest-sdk | migration guide}.
 *
 * Error class for the Firebase AI SDK.
 *
 * This is a runtime alias of the {@link AIError} constructor, retained for backward compatibility.
 *
 * @public
 */
export declare const VertexAIError: typeof AIError;
2027
+
2028
/**
 * @deprecated Use the new {@link AIModel} instead. The Vertex AI in Firebase SDK has been
 * replaced with the Firebase AI SDK to accommodate the evolving set of supported features and
 * services. For migration details, see the {@link https://firebase.google.com/docs/vertex-ai/migrate-to-latest-sdk | migration guide}.
 *
 * Base class for Firebase AI model APIs.
 *
 * This is a runtime alias of the {@link AIModel} constructor, retained for backward compatibility.
 *
 * @public
 */
export declare const VertexAIModel: typeof AIModel;
2038
+
2039
/**
 * Options when initializing the Firebase AI SDK.
 *
 * @public
 */
export declare interface VertexAIOptions {
    /**
     * Optional region identifier (e.g. `us-central1`); see
     * {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
     * for a list of supported locations.
     */
    location?: string;
}
2047
+
2048
/**
 * Describes the input video content.
 * @public
 */
export declare interface VideoMetadata {
    /**
     * The start offset of the video in
     * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
     */
    startOffset: string;
    /**
     * The end offset of the video in
     * protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
     */
    endOffset: string;
}
2064
+
2065
/**
 * Attribution for a web source.
 * @public
 */
export declare interface WebAttribution {
    /** URI of the attributed web source. */
    uri: string;
    /** Title of the attributed web source. */
    title: string;
}
2072
+
2073
+ export { }