@depup/firebase__ai 2.9.0-depup.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +31 -0
- package/changes.json +10 -0
- package/dist/ai-public.d.ts +3472 -0
- package/dist/ai.d.ts +3712 -0
- package/dist/esm/index.esm.js +4765 -0
- package/dist/esm/index.esm.js.map +1 -0
- package/dist/esm/package.json +1 -0
- package/dist/esm/src/api.d.ts +121 -0
- package/dist/esm/src/backend.d.ts +98 -0
- package/dist/esm/src/constants.d.ts +29 -0
- package/dist/esm/src/errors.d.ts +35 -0
- package/dist/esm/src/factory-browser.d.ts +19 -0
- package/dist/esm/src/factory-node.d.ts +19 -0
- package/dist/esm/src/googleai-mappers.d.ts +73 -0
- package/dist/esm/src/helpers.d.ts +30 -0
- package/dist/esm/src/index.d.ts +13 -0
- package/dist/esm/src/index.node.d.ts +7 -0
- package/dist/esm/src/logger.d.ts +18 -0
- package/dist/esm/src/methods/chat-session-helpers.d.ts +18 -0
- package/dist/esm/src/methods/chat-session.d.ts +77 -0
- package/dist/esm/src/methods/chrome-adapter.d.ts +124 -0
- package/dist/esm/src/methods/count-tokens.d.ts +21 -0
- package/dist/esm/src/methods/generate-content.d.ts +25 -0
- package/dist/esm/src/methods/live-session-helpers.d.ts +154 -0
- package/dist/esm/src/methods/live-session.d.ts +154 -0
- package/dist/esm/src/models/ai-model.d.ts +72 -0
- package/dist/esm/src/models/generative-model.d.ts +56 -0
- package/dist/esm/src/models/imagen-model.d.ts +102 -0
- package/dist/esm/src/models/index.d.ts +20 -0
- package/dist/esm/src/models/live-generative-model.d.ts +55 -0
- package/dist/esm/src/models/template-generative-model.d.ts +64 -0
- package/dist/esm/src/models/template-imagen-model.d.ts +51 -0
- package/dist/esm/src/models/utils.d.ts +26 -0
- package/dist/esm/src/public-types.d.ts +97 -0
- package/dist/esm/src/requests/hybrid-helpers.d.ts +33 -0
- package/dist/esm/src/requests/imagen-image-format.d.ts +61 -0
- package/dist/esm/src/requests/request-helpers.d.ts +28 -0
- package/dist/esm/src/requests/request.d.ts +69 -0
- package/dist/esm/src/requests/response-helpers.d.ts +57 -0
- package/dist/esm/src/requests/schema-builder.d.ts +170 -0
- package/dist/esm/src/requests/stream-reader.d.ts +39 -0
- package/dist/esm/src/service.d.ts +35 -0
- package/dist/esm/src/types/chrome-adapter.d.ts +61 -0
- package/dist/esm/src/types/content.d.ts +266 -0
- package/dist/esm/src/types/enums.d.ts +419 -0
- package/dist/esm/src/types/error.d.ts +89 -0
- package/dist/esm/src/types/googleai.d.ts +57 -0
- package/dist/esm/src/types/imagen/index.d.ts +18 -0
- package/dist/esm/src/types/imagen/internal.d.ts +134 -0
- package/dist/esm/src/types/imagen/requests.d.ts +245 -0
- package/dist/esm/src/types/imagen/responses.d.ts +79 -0
- package/dist/esm/src/types/index.d.ts +26 -0
- package/dist/esm/src/types/internal.d.ts +35 -0
- package/dist/esm/src/types/language-model.d.ts +107 -0
- package/dist/esm/src/types/live-responses.d.ts +79 -0
- package/dist/esm/src/types/requests.d.ts +543 -0
- package/dist/esm/src/types/responses.d.ts +607 -0
- package/dist/esm/src/types/schema.d.ts +139 -0
- package/dist/esm/src/websocket.d.ts +67 -0
- package/dist/index.cjs.js +4820 -0
- package/dist/index.cjs.js.map +1 -0
- package/dist/index.node.cjs.js +4512 -0
- package/dist/index.node.cjs.js.map +1 -0
- package/dist/index.node.mjs +4457 -0
- package/dist/index.node.mjs.map +1 -0
- package/dist/src/api.d.ts +121 -0
- package/dist/src/backend.d.ts +98 -0
- package/dist/src/constants.d.ts +29 -0
- package/dist/src/errors.d.ts +35 -0
- package/dist/src/factory-browser.d.ts +19 -0
- package/dist/src/factory-node.d.ts +19 -0
- package/dist/src/googleai-mappers.d.ts +73 -0
- package/dist/src/helpers.d.ts +30 -0
- package/dist/src/index.d.ts +13 -0
- package/dist/src/index.node.d.ts +7 -0
- package/dist/src/logger.d.ts +18 -0
- package/dist/src/methods/chat-session-helpers.d.ts +18 -0
- package/dist/src/methods/chat-session.d.ts +77 -0
- package/dist/src/methods/chrome-adapter.d.ts +124 -0
- package/dist/src/methods/count-tokens.d.ts +21 -0
- package/dist/src/methods/generate-content.d.ts +25 -0
- package/dist/src/methods/live-session-helpers.d.ts +154 -0
- package/dist/src/methods/live-session.d.ts +154 -0
- package/dist/src/models/ai-model.d.ts +72 -0
- package/dist/src/models/generative-model.d.ts +56 -0
- package/dist/src/models/imagen-model.d.ts +102 -0
- package/dist/src/models/index.d.ts +20 -0
- package/dist/src/models/live-generative-model.d.ts +55 -0
- package/dist/src/models/template-generative-model.d.ts +64 -0
- package/dist/src/models/template-imagen-model.d.ts +51 -0
- package/dist/src/models/utils.d.ts +26 -0
- package/dist/src/public-types.d.ts +97 -0
- package/dist/src/requests/hybrid-helpers.d.ts +33 -0
- package/dist/src/requests/imagen-image-format.d.ts +61 -0
- package/dist/src/requests/request-helpers.d.ts +28 -0
- package/dist/src/requests/request.d.ts +69 -0
- package/dist/src/requests/response-helpers.d.ts +57 -0
- package/dist/src/requests/schema-builder.d.ts +170 -0
- package/dist/src/requests/stream-reader.d.ts +39 -0
- package/dist/src/service.d.ts +35 -0
- package/dist/src/tsdoc-metadata.json +11 -0
- package/dist/src/types/chrome-adapter.d.ts +61 -0
- package/dist/src/types/content.d.ts +266 -0
- package/dist/src/types/enums.d.ts +419 -0
- package/dist/src/types/error.d.ts +89 -0
- package/dist/src/types/googleai.d.ts +57 -0
- package/dist/src/types/imagen/index.d.ts +18 -0
- package/dist/src/types/imagen/internal.d.ts +134 -0
- package/dist/src/types/imagen/requests.d.ts +245 -0
- package/dist/src/types/imagen/responses.d.ts +79 -0
- package/dist/src/types/index.d.ts +26 -0
- package/dist/src/types/internal.d.ts +35 -0
- package/dist/src/types/language-model.d.ts +107 -0
- package/dist/src/types/live-responses.d.ts +79 -0
- package/dist/src/types/requests.d.ts +543 -0
- package/dist/src/types/responses.d.ts +607 -0
- package/dist/src/types/schema.d.ts +139 -0
- package/dist/src/websocket.d.ts +67 -0
- package/package.json +106 -0
|
@@ -0,0 +1,3472 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* The Firebase AI Web SDK.
|
|
3
|
+
*
|
|
4
|
+
* @packageDocumentation
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
|
|
8
|
+
import { FirebaseApp } from '@firebase/app';
|
|
9
|
+
import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
|
|
10
|
+
import { FirebaseError } from '@firebase/util';
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* An instance of the Firebase AI SDK.
|
|
14
|
+
*
|
|
15
|
+
* Do not create this instance directly. Instead, use {@link getAI | getAI()}.
|
|
16
|
+
*
|
|
17
|
+
* @public
|
|
18
|
+
*/
|
|
19
|
+
export declare interface AI {
|
|
20
|
+
/**
|
|
21
|
+
* The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with.
|
|
22
|
+
*/
|
|
23
|
+
app: FirebaseApp;
|
|
24
|
+
/**
|
|
25
|
+
* A {@link Backend} instance that specifies the configuration for the target backend,
|
|
26
|
+
* either the Gemini Developer API (using {@link GoogleAIBackend}) or the
|
|
27
|
+
* Vertex AI Gemini API (using {@link VertexAIBackend}).
|
|
28
|
+
*/
|
|
29
|
+
backend: Backend;
|
|
30
|
+
/**
|
|
31
|
+
* Options applied to this {@link AI} instance.
|
|
32
|
+
*/
|
|
33
|
+
options?: AIOptions;
|
|
34
|
+
/**
|
|
35
|
+
* @deprecated use `AI.backend.location` instead.
|
|
36
|
+
*
|
|
37
|
+
* The location configured for this AI service instance, relevant for Vertex AI backends.
|
|
38
|
+
*/
|
|
39
|
+
location: string;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
/**
|
|
43
|
+
* Error class for the Firebase AI SDK.
|
|
44
|
+
*
|
|
45
|
+
* @public
|
|
46
|
+
*/
|
|
47
|
+
export declare class AIError extends FirebaseError {
|
|
48
|
+
readonly code: AIErrorCode;
|
|
49
|
+
readonly customErrorData?: CustomErrorData | undefined;
|
|
50
|
+
/**
|
|
51
|
+
* Constructs a new instance of the `AIError` class.
|
|
52
|
+
*
|
|
53
|
+
* @param code - The error code from {@link (AIErrorCode:type)}.
|
|
54
|
+
* @param message - A human-readable message describing the error.
|
|
55
|
+
* @param customErrorData - Optional error data.
|
|
56
|
+
*/
|
|
57
|
+
constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Standardized error codes that {@link AIError} can have.
|
|
62
|
+
*
|
|
63
|
+
* @public
|
|
64
|
+
*/
|
|
65
|
+
export declare const AIErrorCode: {
|
|
66
|
+
/** A generic error occurred. */
|
|
67
|
+
readonly ERROR: "error";
|
|
68
|
+
/** An error occurred in a request. */
|
|
69
|
+
readonly REQUEST_ERROR: "request-error";
|
|
70
|
+
/** An error occurred in a response. */
|
|
71
|
+
readonly RESPONSE_ERROR: "response-error";
|
|
72
|
+
/** An error occurred while performing a fetch. */
|
|
73
|
+
readonly FETCH_ERROR: "fetch-error";
|
|
74
|
+
/** An error occurred because an operation was attempted on a closed session. */
|
|
75
|
+
readonly SESSION_CLOSED: "session-closed";
|
|
76
|
+
/** An error associated with a Content object. */
|
|
77
|
+
readonly INVALID_CONTENT: "invalid-content";
|
|
78
|
+
/** An error due to the Firebase API not being enabled in the Console. */
|
|
79
|
+
readonly API_NOT_ENABLED: "api-not-enabled";
|
|
80
|
+
/** An error due to invalid Schema input. */
|
|
81
|
+
readonly INVALID_SCHEMA: "invalid-schema";
|
|
82
|
+
/** An error occurred due to a missing Firebase API key. */
|
|
83
|
+
readonly NO_API_KEY: "no-api-key";
|
|
84
|
+
/** An error occurred due to a missing Firebase app ID. */
|
|
85
|
+
readonly NO_APP_ID: "no-app-id";
|
|
86
|
+
/** An error occurred due to a model name not being specified during initialization. */
|
|
87
|
+
readonly NO_MODEL: "no-model";
|
|
88
|
+
/** An error occurred due to a missing project ID. */
|
|
89
|
+
readonly NO_PROJECT_ID: "no-project-id";
|
|
90
|
+
/** An error occurred while parsing. */
|
|
91
|
+
readonly PARSE_FAILED: "parse-failed";
|
|
92
|
+
/** An error occurred due an attempt to use an unsupported feature. */
|
|
93
|
+
readonly UNSUPPORTED: "unsupported";
|
|
94
|
+
};
|
|
95
|
+
|
|
96
|
+
/**
|
|
97
|
+
* Standardized error codes that {@link AIError} can have.
|
|
98
|
+
*
|
|
99
|
+
* @public
|
|
100
|
+
*/
|
|
101
|
+
export declare type AIErrorCode = (typeof AIErrorCode)[keyof typeof AIErrorCode];
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* Base class for Firebase AI model APIs.
|
|
105
|
+
*
|
|
106
|
+
* Instances of this class are associated with a specific Firebase AI {@link Backend}
|
|
107
|
+
* and provide methods for interacting with the configured generative model.
|
|
108
|
+
*
|
|
109
|
+
* @public
|
|
110
|
+
*/
|
|
111
|
+
export declare abstract class AIModel {
|
|
112
|
+
/**
|
|
113
|
+
* The fully qualified model resource name to use for generating images
|
|
114
|
+
* (for example, `publishers/google/models/imagen-3.0-generate-002`).
|
|
115
|
+
*/
|
|
116
|
+
readonly model: string;
|
|
117
|
+
/* Excluded from this release type: _apiSettings */
|
|
118
|
+
/* Excluded from this release type: __constructor */
|
|
119
|
+
/* Excluded from this release type: normalizeModelName */
|
|
120
|
+
/* Excluded from this release type: normalizeGoogleAIModelName */
|
|
121
|
+
/* Excluded from this release type: normalizeVertexAIModelName */
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
/**
|
|
125
|
+
* Options for initializing the AI service using {@link getAI | getAI()}.
|
|
126
|
+
* This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API)
|
|
127
|
+
* and configuring its specific options (like location for Vertex AI).
|
|
128
|
+
*
|
|
129
|
+
* @public
|
|
130
|
+
*/
|
|
131
|
+
export declare interface AIOptions {
|
|
132
|
+
/**
|
|
133
|
+
* The backend configuration to use for the AI service instance.
|
|
134
|
+
* Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
|
|
135
|
+
*/
|
|
136
|
+
backend?: Backend;
|
|
137
|
+
/**
|
|
138
|
+
* Whether to use App Check limited use tokens. Defaults to false.
|
|
139
|
+
*/
|
|
140
|
+
useLimitedUseAppCheckTokens?: boolean;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
/**
|
|
144
|
+
* Schema class representing a value that can conform to any of the provided sub-schemas. This is
|
|
145
|
+
* useful when a field can accept multiple distinct types or structures.
|
|
146
|
+
* @public
|
|
147
|
+
*/
|
|
148
|
+
export declare class AnyOfSchema extends Schema {
|
|
149
|
+
anyOf: TypedSchema[];
|
|
150
|
+
constructor(schemaParams: SchemaParams & {
|
|
151
|
+
anyOf: TypedSchema[];
|
|
152
|
+
});
|
|
153
|
+
/* Excluded from this release type: toJSON */
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
declare interface ApiSettings {
|
|
157
|
+
apiKey: string;
|
|
158
|
+
project: string;
|
|
159
|
+
appId: string;
|
|
160
|
+
automaticDataCollectionEnabled?: boolean;
|
|
161
|
+
/**
|
|
162
|
+
* @deprecated Use `backend.location` instead.
|
|
163
|
+
*/
|
|
164
|
+
location: string;
|
|
165
|
+
backend: Backend;
|
|
166
|
+
getAuthToken?: () => Promise<FirebaseAuthTokenData | null>;
|
|
167
|
+
getAppCheckToken?: () => Promise<AppCheckTokenResult>;
|
|
168
|
+
inferenceMode?: InferenceMode;
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/**
|
|
172
|
+
* Schema class for "array" types.
|
|
173
|
+
* The `items` param should refer to the type of item that can be a member
|
|
174
|
+
* of the array.
|
|
175
|
+
* @public
|
|
176
|
+
*/
|
|
177
|
+
export declare class ArraySchema extends Schema {
|
|
178
|
+
items: TypedSchema;
|
|
179
|
+
constructor(schemaParams: SchemaParams, items: TypedSchema);
|
|
180
|
+
/* Excluded from this release type: toJSON */
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
/**
|
|
184
|
+
* A controller for managing an active audio conversation.
|
|
185
|
+
*
|
|
186
|
+
* @beta
|
|
187
|
+
*/
|
|
188
|
+
export declare interface AudioConversationController {
|
|
189
|
+
/**
|
|
190
|
+
* Stops the audio conversation, closes the microphone connection, and
|
|
191
|
+
* cleans up resources. Returns a promise that resolves when cleanup is complete.
|
|
192
|
+
*/
|
|
193
|
+
stop: () => Promise<void>;
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
/**
|
|
197
|
+
* The audio transcription configuration.
|
|
198
|
+
*/
|
|
199
|
+
export declare interface AudioTranscriptionConfig {
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
* Abstract base class representing the configuration for an AI service backend.
|
|
204
|
+
* This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
|
|
205
|
+
* the Gemini Developer API (via {@link https://ai.google/ | Google AI}), and
|
|
206
|
+
* {@link VertexAIBackend} for the Vertex AI Gemini API.
|
|
207
|
+
*
|
|
208
|
+
* @public
|
|
209
|
+
*/
|
|
210
|
+
export declare abstract class Backend {
|
|
211
|
+
/**
|
|
212
|
+
* Specifies the backend type.
|
|
213
|
+
*/
|
|
214
|
+
readonly backendType: BackendType;
|
|
215
|
+
/**
|
|
216
|
+
* Protected constructor for use by subclasses.
|
|
217
|
+
* @param type - The backend type.
|
|
218
|
+
*/
|
|
219
|
+
protected constructor(type: BackendType);
|
|
220
|
+
/* Excluded from this release type: _getModelPath */
|
|
221
|
+
/* Excluded from this release type: _getTemplatePath */
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
/**
|
|
225
|
+
* An enum-like object containing constants that represent the supported backends
|
|
226
|
+
* for the Firebase AI SDK.
|
|
227
|
+
* This determines which backend service (Vertex AI Gemini API or Gemini Developer API)
|
|
228
|
+
* the SDK will communicate with.
|
|
229
|
+
*
|
|
230
|
+
* These values are assigned to the `backendType` property within the specific backend
|
|
231
|
+
* configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify
|
|
232
|
+
* which service to target.
|
|
233
|
+
*
|
|
234
|
+
* @public
|
|
235
|
+
*/
|
|
236
|
+
export declare const BackendType: {
|
|
237
|
+
/**
|
|
238
|
+
* Identifies the backend service for the Vertex AI Gemini API provided through Google Cloud.
|
|
239
|
+
* Use this constant when creating a {@link VertexAIBackend} configuration.
|
|
240
|
+
*/
|
|
241
|
+
readonly VERTEX_AI: "VERTEX_AI";
|
|
242
|
+
/**
|
|
243
|
+
* Identifies the backend service for the Gemini Developer API ({@link https://ai.google/ | Google AI}).
|
|
244
|
+
* Use this constant when creating a {@link GoogleAIBackend} configuration.
|
|
245
|
+
*/
|
|
246
|
+
readonly GOOGLE_AI: "GOOGLE_AI";
|
|
247
|
+
};
|
|
248
|
+
|
|
249
|
+
/**
|
|
250
|
+
* Type alias representing valid backend types.
|
|
251
|
+
* It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`.
|
|
252
|
+
*
|
|
253
|
+
* @public
|
|
254
|
+
*/
|
|
255
|
+
export declare type BackendType = (typeof BackendType)[keyof typeof BackendType];
|
|
256
|
+
|
|
257
|
+
/**
|
|
258
|
+
* Base parameters for a number of methods.
|
|
259
|
+
* @public
|
|
260
|
+
*/
|
|
261
|
+
export declare interface BaseParams {
|
|
262
|
+
safetySettings?: SafetySetting[];
|
|
263
|
+
generationConfig?: GenerationConfig;
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
/**
|
|
267
|
+
* Reason that a prompt was blocked.
|
|
268
|
+
* @public
|
|
269
|
+
*/
|
|
270
|
+
export declare const BlockReason: {
|
|
271
|
+
/**
|
|
272
|
+
* Content was blocked by safety settings.
|
|
273
|
+
*/
|
|
274
|
+
readonly SAFETY: "SAFETY";
|
|
275
|
+
/**
|
|
276
|
+
* Content was blocked, but the reason is uncategorized.
|
|
277
|
+
*/
|
|
278
|
+
readonly OTHER: "OTHER";
|
|
279
|
+
/**
|
|
280
|
+
* Content was blocked because it contained terms from the terminology blocklist.
|
|
281
|
+
*/
|
|
282
|
+
readonly BLOCKLIST: "BLOCKLIST";
|
|
283
|
+
/**
|
|
284
|
+
* Content was blocked due to prohibited content.
|
|
285
|
+
*/
|
|
286
|
+
readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
|
|
287
|
+
};
|
|
288
|
+
|
|
289
|
+
/**
|
|
290
|
+
* Reason that a prompt was blocked.
|
|
291
|
+
* @public
|
|
292
|
+
*/
|
|
293
|
+
export declare type BlockReason = (typeof BlockReason)[keyof typeof BlockReason];
|
|
294
|
+
|
|
295
|
+
/**
|
|
296
|
+
* Schema class for "boolean" types.
|
|
297
|
+
* @public
|
|
298
|
+
*/
|
|
299
|
+
export declare class BooleanSchema extends Schema {
|
|
300
|
+
constructor(schemaParams?: SchemaParams);
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
/**
|
|
304
|
+
* ChatSession class that enables sending chat messages and stores
|
|
305
|
+
* history of sent and received messages so far.
|
|
306
|
+
*
|
|
307
|
+
* @public
|
|
308
|
+
*/
|
|
309
|
+
export declare class ChatSession {
|
|
310
|
+
model: string;
|
|
311
|
+
private chromeAdapter?;
|
|
312
|
+
params?: StartChatParams | undefined;
|
|
313
|
+
requestOptions?: RequestOptions | undefined;
|
|
314
|
+
private _apiSettings;
|
|
315
|
+
private _history;
|
|
316
|
+
/**
|
|
317
|
+
* Ensures sequential execution of chat messages to maintain history order.
|
|
318
|
+
* Each call waits for the previous one to settle before proceeding.
|
|
319
|
+
*/
|
|
320
|
+
private _sendPromise;
|
|
321
|
+
constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
|
|
322
|
+
/**
|
|
323
|
+
* Gets the chat history so far. Blocked prompts are not added to history.
|
|
324
|
+
* Neither blocked candidates nor the prompts that generated them are added
|
|
325
|
+
* to history.
|
|
326
|
+
*/
|
|
327
|
+
getHistory(): Promise<Content[]>;
|
|
328
|
+
/* Excluded from this release type: _formatRequest */
|
|
329
|
+
/**
|
|
330
|
+
* Sends a chat message and receives a non-streaming
|
|
331
|
+
* {@link GenerateContentResult}
|
|
332
|
+
*/
|
|
333
|
+
sendMessage(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
|
|
334
|
+
/**
|
|
335
|
+
* Sends a chat message and receives the response as a
|
|
336
|
+
* {@link GenerateContentStreamResult} containing an iterable stream
|
|
337
|
+
* and a response promise.
|
|
338
|
+
*/
|
|
339
|
+
sendMessageStream(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
|
|
340
|
+
/* Excluded from this release type: _getCallableFunctionCalls */
|
|
341
|
+
/* Excluded from this release type: _callFunctionsAsNeeded */
|
|
342
|
+
}
|
|
343
|
+
|
|
344
|
+
/**
|
|
345
|
+
* Defines an inference "backend" that uses Chrome's on-device model,
|
|
346
|
+
* and encapsulates logic for detecting when on-device inference is
|
|
347
|
+
* possible.
|
|
348
|
+
*
|
|
349
|
+
* These methods should not be called directly by the user.
|
|
350
|
+
*
|
|
351
|
+
* @beta
|
|
352
|
+
*/
|
|
353
|
+
export declare interface ChromeAdapter {
|
|
354
|
+
/* Excluded from this release type: mode */
|
|
355
|
+
/**
|
|
356
|
+
* Checks if the on-device model is capable of handling a given
|
|
357
|
+
* request.
|
|
358
|
+
* @param request - A potential request to be passed to the model.
|
|
359
|
+
*/
|
|
360
|
+
isAvailable(request: GenerateContentRequest): Promise<boolean>;
|
|
361
|
+
/**
|
|
362
|
+
* Generates content using on-device inference.
|
|
363
|
+
*
|
|
364
|
+
* @remarks
|
|
365
|
+
* This is comparable to {@link GenerativeModel.generateContent} for generating
|
|
366
|
+
* content using in-cloud inference.
|
|
367
|
+
* @param request - a standard Firebase AI {@link GenerateContentRequest}
|
|
368
|
+
*/
|
|
369
|
+
generateContent(request: GenerateContentRequest): Promise<Response>;
|
|
370
|
+
/**
|
|
371
|
+
* Generates a content stream using on-device inference.
|
|
372
|
+
*
|
|
373
|
+
* @remarks
|
|
374
|
+
* This is comparable to {@link GenerativeModel.generateContentStream} for generating
|
|
375
|
+
* a content stream using in-cloud inference.
|
|
376
|
+
* @param request - a standard Firebase AI {@link GenerateContentRequest}
|
|
377
|
+
*/
|
|
378
|
+
generateContentStream(request: GenerateContentRequest): Promise<Response>;
|
|
379
|
+
/* Excluded from this release type: countTokens */
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
/**
|
|
383
|
+
* A single citation.
|
|
384
|
+
* @public
|
|
385
|
+
*/
|
|
386
|
+
export declare interface Citation {
|
|
387
|
+
startIndex?: number;
|
|
388
|
+
endIndex?: number;
|
|
389
|
+
uri?: string;
|
|
390
|
+
license?: string;
|
|
391
|
+
/**
|
|
392
|
+
* The title of the cited source, if available.
|
|
393
|
+
*
|
|
394
|
+
* This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
|
|
395
|
+
*/
|
|
396
|
+
title?: string;
|
|
397
|
+
/**
|
|
398
|
+
* The publication date of the cited source, if available.
|
|
399
|
+
*
|
|
400
|
+
* This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
|
|
401
|
+
*/
|
|
402
|
+
publicationDate?: Date_2;
|
|
403
|
+
}
|
|
404
|
+
|
|
405
|
+
/**
|
|
406
|
+
* Citation metadata that may be found on a {@link GenerateContentCandidate}.
|
|
407
|
+
* @public
|
|
408
|
+
*/
|
|
409
|
+
export declare interface CitationMetadata {
|
|
410
|
+
citations: Citation[];
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
/**
|
|
414
|
+
* The results of code execution run by the model.
|
|
415
|
+
*
|
|
416
|
+
* @public
|
|
417
|
+
*/
|
|
418
|
+
export declare interface CodeExecutionResult {
|
|
419
|
+
/**
|
|
420
|
+
* The result of the code execution.
|
|
421
|
+
*/
|
|
422
|
+
outcome?: Outcome;
|
|
423
|
+
/**
|
|
424
|
+
* The output from the code execution, or an error message
|
|
425
|
+
* if it failed.
|
|
426
|
+
*/
|
|
427
|
+
output?: string;
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
/**
|
|
431
|
+
* Represents the code execution result from the model.
|
|
432
|
+
*
|
|
433
|
+
* @public
|
|
434
|
+
*/
|
|
435
|
+
export declare interface CodeExecutionResultPart {
|
|
436
|
+
text?: never;
|
|
437
|
+
inlineData?: never;
|
|
438
|
+
functionCall?: never;
|
|
439
|
+
functionResponse?: never;
|
|
440
|
+
fileData: never;
|
|
441
|
+
thought?: never;
|
|
442
|
+
/* Excluded from this release type: thoughtSignature */
|
|
443
|
+
executableCode?: never;
|
|
444
|
+
codeExecutionResult?: CodeExecutionResult;
|
|
445
|
+
}
|
|
446
|
+
|
|
447
|
+
/**
|
|
448
|
+
* A tool that enables the model to use code execution.
|
|
449
|
+
*
|
|
450
|
+
* @beta
|
|
451
|
+
*/
|
|
452
|
+
export declare interface CodeExecutionTool {
|
|
453
|
+
/**
|
|
454
|
+
* Specifies the Google Search configuration.
|
|
455
|
+
* Currently, this is an empty object, but it's reserved for future configuration options.
|
|
456
|
+
*/
|
|
457
|
+
codeExecution: {};
|
|
458
|
+
}
|
|
459
|
+
|
|
460
|
+
/**
|
|
461
|
+
* Content type for both prompts and response candidates.
|
|
462
|
+
* @public
|
|
463
|
+
*/
|
|
464
|
+
export declare interface Content {
|
|
465
|
+
role: Role;
|
|
466
|
+
parts: Part[];
|
|
467
|
+
}
|
|
468
|
+
|
|
469
|
+
/**
|
|
470
|
+
* Params for calling {@link GenerativeModel.countTokens}
|
|
471
|
+
* @public
|
|
472
|
+
*/
|
|
473
|
+
export declare interface CountTokensRequest {
|
|
474
|
+
contents: Content[];
|
|
475
|
+
/**
|
|
476
|
+
* Instructions that direct the model to behave a certain way.
|
|
477
|
+
*/
|
|
478
|
+
systemInstruction?: string | Part | Content;
|
|
479
|
+
/**
|
|
480
|
+
* {@link Tool} configuration.
|
|
481
|
+
*/
|
|
482
|
+
tools?: Tool[];
|
|
483
|
+
/**
|
|
484
|
+
* Configuration options that control how the model generates a response.
|
|
485
|
+
*/
|
|
486
|
+
generationConfig?: GenerationConfig;
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
/**
|
|
490
|
+
* Response from calling {@link GenerativeModel.countTokens}.
|
|
491
|
+
* @public
|
|
492
|
+
*/
|
|
493
|
+
export declare interface CountTokensResponse {
|
|
494
|
+
/**
|
|
495
|
+
* The total number of tokens counted across all instances from the request.
|
|
496
|
+
*/
|
|
497
|
+
totalTokens: number;
|
|
498
|
+
/**
|
|
499
|
+
* @deprecated Use `totalTokens` instead. This property is undefined when using models greater than `gemini-1.5-*`.
|
|
500
|
+
*
|
|
501
|
+
* The total number of billable characters counted across all instances
|
|
502
|
+
* from the request.
|
|
503
|
+
*/
|
|
504
|
+
totalBillableCharacters?: number;
|
|
505
|
+
/**
|
|
506
|
+
* The breakdown, by modality, of how many tokens are consumed by the prompt.
|
|
507
|
+
*/
|
|
508
|
+
promptTokensDetails?: ModalityTokenCount[];
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
/**
|
|
512
|
+
* Details object that contains data originating from a bad HTTP response.
|
|
513
|
+
*
|
|
514
|
+
* @public
|
|
515
|
+
*/
|
|
516
|
+
export declare interface CustomErrorData {
|
|
517
|
+
/** HTTP status code of the error response. */
|
|
518
|
+
status?: number;
|
|
519
|
+
/** HTTP status text of the error response. */
|
|
520
|
+
statusText?: string;
|
|
521
|
+
/** Response from a {@link GenerateContentRequest} */
|
|
522
|
+
response?: GenerateContentResponse;
|
|
523
|
+
/** Optional additional details about the error. */
|
|
524
|
+
errorDetails?: ErrorDetails[];
|
|
525
|
+
}
|
|
526
|
+
|
|
527
|
+
/**
|
|
528
|
+
* Protobuf google.type.Date
|
|
529
|
+
* @public
|
|
530
|
+
*/
|
|
531
|
+
declare interface Date_2 {
|
|
532
|
+
year: number;
|
|
533
|
+
month: number;
|
|
534
|
+
day: number;
|
|
535
|
+
}
|
|
536
|
+
export { Date_2 as Date }
|
|
537
|
+
|
|
538
|
+
/**
|
|
539
|
+
* Response object wrapped with helper methods.
|
|
540
|
+
*
|
|
541
|
+
* @public
|
|
542
|
+
*/
|
|
543
|
+
export declare interface EnhancedGenerateContentResponse extends GenerateContentResponse {
|
|
544
|
+
/**
|
|
545
|
+
* Returns the text string from the response, if available.
|
|
546
|
+
* Throws if the prompt or candidate was blocked.
|
|
547
|
+
*/
|
|
548
|
+
text: () => string;
|
|
549
|
+
/**
|
|
550
|
+
* Aggregates and returns every {@link InlineDataPart} from the first candidate of
|
|
551
|
+
* {@link GenerateContentResponse}.
|
|
552
|
+
*
|
|
553
|
+
* @throws If the prompt or candidate was blocked.
|
|
554
|
+
*/
|
|
555
|
+
inlineDataParts: () => InlineDataPart[] | undefined;
|
|
556
|
+
/**
|
|
557
|
+
* Aggregates and returns every {@link FunctionCall} from the first candidate of
|
|
558
|
+
* {@link GenerateContentResponse}.
|
|
559
|
+
*
|
|
560
|
+
* @throws If the prompt or candidate was blocked.
|
|
561
|
+
*/
|
|
562
|
+
functionCalls: () => FunctionCall[] | undefined;
|
|
563
|
+
/**
|
|
564
|
+
* Aggregates and returns every {@link TextPart} with their `thought` property set
|
|
565
|
+
* to `true` from the first candidate of {@link GenerateContentResponse}.
|
|
566
|
+
*
|
|
567
|
+
* @throws If the prompt or candidate was blocked.
|
|
568
|
+
*
|
|
569
|
+
* @remarks
|
|
570
|
+
* Thought summaries provide a brief overview of the model's internal thinking process,
|
|
571
|
+
* offering insight into how it arrived at the final answer. This can be useful for
|
|
572
|
+
* debugging, understanding the model's reasoning, and verifying its accuracy.
|
|
573
|
+
*
|
|
574
|
+
* Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is
|
|
575
|
+
* set to `true`.
|
|
576
|
+
*/
|
|
577
|
+
thoughtSummary: () => string | undefined;
|
|
578
|
+
/**
|
|
579
|
+
* Indicates whether inference happened on-device or in-cloud.
|
|
580
|
+
*
|
|
581
|
+
* @beta
|
|
582
|
+
*/
|
|
583
|
+
inferenceSource?: InferenceSource;
|
|
584
|
+
}
|
|
585
|
+
|
|
586
|
+
/**
|
|
587
|
+
* Details object that may be included in an error response.
|
|
588
|
+
*
|
|
589
|
+
* @public
|
|
590
|
+
*/
|
|
591
|
+
export declare interface ErrorDetails {
|
|
592
|
+
'@type'?: string;
|
|
593
|
+
/** The reason for the error. */
|
|
594
|
+
reason?: string;
|
|
595
|
+
/** The domain where the error occurred. */
|
|
596
|
+
domain?: string;
|
|
597
|
+
/** Additional metadata about the error. */
|
|
598
|
+
metadata?: Record<string, unknown>;
|
|
599
|
+
/** Any other relevant information about the error. */
|
|
600
|
+
[key: string]: unknown;
|
|
601
|
+
}
|
|
602
|
+
|
|
603
|
+
/**
|
|
604
|
+
* An interface for executable code returned by the model.
|
|
605
|
+
*
|
|
606
|
+
* @public
|
|
607
|
+
*/
|
|
608
|
+
export declare interface ExecutableCode {
|
|
609
|
+
/**
|
|
610
|
+
* The programming language of the code.
|
|
611
|
+
*/
|
|
612
|
+
language?: Language;
|
|
613
|
+
/**
|
|
614
|
+
* The source code to be executed.
|
|
615
|
+
*/
|
|
616
|
+
code?: string;
|
|
617
|
+
}
|
|
618
|
+
|
|
619
|
+
/**
|
|
620
|
+
* Represents the code that is executed by the model.
|
|
621
|
+
*
|
|
622
|
+
* @public
|
|
623
|
+
*/
|
|
624
|
+
export declare interface ExecutableCodePart {
|
|
625
|
+
text?: never;
|
|
626
|
+
inlineData?: never;
|
|
627
|
+
functionCall?: never;
|
|
628
|
+
functionResponse?: never;
|
|
629
|
+
fileData: never;
|
|
630
|
+
thought?: never;
|
|
631
|
+
/* Excluded from this release type: thoughtSignature */
|
|
632
|
+
executableCode?: ExecutableCode;
|
|
633
|
+
codeExecutionResult?: never;
|
|
634
|
+
}
|
|
635
|
+
|
|
636
|
+
/**
|
|
637
|
+
* Data pointing to a file uploaded on Google Cloud Storage.
|
|
638
|
+
* @public
|
|
639
|
+
*/
|
|
640
|
+
export declare interface FileData {
|
|
641
|
+
mimeType: string;
|
|
642
|
+
fileUri: string;
|
|
643
|
+
}
|
|
644
|
+
|
|
645
|
+
/**
|
|
646
|
+
* Content part interface if the part represents {@link FileData}
|
|
647
|
+
* @public
|
|
648
|
+
*/
|
|
649
|
+
export declare interface FileDataPart {
|
|
650
|
+
text?: never;
|
|
651
|
+
inlineData?: never;
|
|
652
|
+
functionCall?: never;
|
|
653
|
+
functionResponse?: never;
|
|
654
|
+
fileData: FileData;
|
|
655
|
+
thought?: boolean;
|
|
656
|
+
/* Excluded from this release type: thoughtSignature */
|
|
657
|
+
executableCode?: never;
|
|
658
|
+
codeExecutionResult?: never;
|
|
659
|
+
}
|
|
660
|
+
|
|
661
|
+
/**
 * Reason that a candidate finished.
 *
 * Declared as a const object of string constants with a companion union type
 * (the `FinishReason` type alias below) rather than a TypeScript `enum`.
 * @public
 */
export declare const FinishReason: {
  /**
   * Natural stop point of the model or provided stop sequence.
   */
  readonly STOP: "STOP";
  /**
   * The maximum number of tokens as specified in the request was reached.
   */
  readonly MAX_TOKENS: "MAX_TOKENS";
  /**
   * The candidate content was flagged for safety reasons.
   */
  readonly SAFETY: "SAFETY";
  /**
   * The candidate content was flagged for recitation reasons.
   */
  readonly RECITATION: "RECITATION";
  /**
   * Unknown reason.
   */
  readonly OTHER: "OTHER";
  /**
   * The candidate content contained forbidden terms.
   */
  readonly BLOCKLIST: "BLOCKLIST";
  /**
   * The candidate content potentially contained prohibited content.
   */
  readonly PROHIBITED_CONTENT: "PROHIBITED_CONTENT";
  /**
   * The candidate content potentially contained Sensitive Personally Identifiable Information (SPII).
   */
  readonly SPII: "SPII";
  /**
   * The function call generated by the model was invalid.
   */
  readonly MALFORMED_FUNCTION_CALL: "MALFORMED_FUNCTION_CALL";
};

/**
 * Reason that a candidate finished.
 *
 * Union of the string values of the {@link FinishReason} constant object.
 * @public
 */
export declare type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
|
|
709
|
+
|
|
710
|
+
/**
 * A predicted {@link FunctionCall} returned from the model
 * that contains a string representing the {@link FunctionDeclaration.name}
 * and a structured JSON object containing the parameters and their values.
 * @public
 */
export declare interface FunctionCall {
  /**
   * The id of the function call. This must be sent back in the associated {@link FunctionResponse}.
   *
   * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
   * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
   * `undefined`.
   */
  id?: string;
  /**
   * The name of the function to call; matches a {@link FunctionDeclaration.name}.
   */
  name: string;
  /**
   * A structured JSON object containing the function's parameters and their values.
   */
  args: object;
}
|
|
729
|
+
|
|
730
|
+
/**
 * Configuration that controls how the model may call the provided functions.
 * @public
 */
export declare interface FunctionCallingConfig {
  /**
   * The function-calling mode; see {@link (FunctionCallingMode:type)}.
   */
  mode?: FunctionCallingMode;
  /**
   * When `mode` is `ANY`, limits the predicted function calls to this set of
   * function names.
   */
  allowedFunctionNames?: string[];
}

/**
 * Modes that control whether and how the model predicts function calls.
 * @public
 */
export declare const FunctionCallingMode: {
  /**
   * Default model behavior; model decides to predict either a function call
   * or a natural language response.
   */
  readonly AUTO: "AUTO";
  /**
   * Model is constrained to always predicting a function call only.
   * If `allowed_function_names` is set, the predicted function call will be
   * limited to any one of `allowed_function_names`, else the predicted
   * function call will be any one of the provided `function_declarations`.
   */
  readonly ANY: "ANY";
  /**
   * Model will not predict any function call. Model behavior is same as when
   * not passing any function declarations.
   */
  readonly NONE: "NONE";
};

/**
 * Union of the string values of the {@link FunctionCallingMode} constant object.
 * @public
 */
export declare type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];

/**
 * Content part interface if the part represents a {@link FunctionCall}.
 * All other part-variant fields are typed `never` so a part can only carry
 * one kind of payload.
 * @public
 */
export declare interface FunctionCallPart {
  text?: never;
  inlineData?: never;
  functionCall: FunctionCall;
  functionResponse?: never;
  /**
   * Presumably marks this part as model "thinking" output (see
   * {@link ThinkingConfig}) — confirm against the SDK documentation.
   */
  thought?: boolean;
  /* Excluded from this release type: thoughtSignature */
  executableCode?: never;
  codeExecutionResult?: never;
}
|
|
780
|
+
|
|
781
|
+
/**
 * Structured representation of a function declaration as defined by the
 * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
 * Included
 * in this declaration are the function name and parameters. This
 * `FunctionDeclaration` is a representation of a block of code that can be used
 * as a Tool by the model and executed by the client.
 * @public
 */
export declare interface FunctionDeclaration {
  /**
   * The name of the function to call. Must start with a letter or an
   * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
   * a max length of 64.
   */
  name: string;
  /**
   * Description and purpose of the function. Model uses it to decide
   * how and whether to call the function.
   */
  description: string;
  /**
   * Optional. Describes the parameters to this function in JSON Schema Object
   * format. Reflects the Open API 3.03 Parameter Object. Parameter names are
   * case-sensitive. For a function with no parameters, this can be left unset.
   */
  parameters?: ObjectSchema | ObjectSchemaRequest;
  /**
   * Reference to an actual function to call. Specifying this will cause the
   * function to be called automatically when requested by the model.
   *
   * NOTE(review): typed with the broad built-in `Function` type. Kept as-is
   * because it is part of the published API surface, though a precise callable
   * signature would be type-safer.
   */
  functionReference?: Function;
}

/**
 * A `FunctionDeclarationsTool` is a piece of code that enables the system to
 * interact with external systems to perform an action, or set of actions,
 * outside of knowledge and scope of the model.
 * @public
 */
export declare interface FunctionDeclarationsTool {
  /**
   * Optional. One or more function declarations
   * to be passed to the model along with the current user query. Model may
   * decide to call a subset of these functions by populating
   * {@link FunctionCall} in the response. User should
   * provide a {@link FunctionResponse} for each
   * function call in the next turn. Based on the function responses, the model will
   * generate the final response back to the user. Maximum 64 function
   * declarations can be provided.
   */
  functionDeclarations?: FunctionDeclaration[];
}
|
|
834
|
+
|
|
835
|
+
/**
 * The result output from a {@link FunctionCall} that contains a string
 * representing the {@link FunctionDeclaration.name}
 * and a structured JSON object containing any output
 * from the function is used as context to the model.
 * This should contain the result of a {@link FunctionCall}
 * made based on model prediction.
 * @public
 */
export declare interface FunctionResponse {
  /**
   * The id of the {@link FunctionCall}.
   *
   * @remarks This property is only supported in the Gemini Developer API ({@link GoogleAIBackend}).
   * When using the Vertex AI Gemini API ({@link VertexAIBackend}), this property will be
   * `undefined`.
   */
  id?: string;
  /**
   * The name of the function that was called; matches {@link FunctionCall.name}.
   */
  name: string;
  /**
   * A structured JSON object containing the function's output.
   */
  response: object;
  /**
   * Additional response parts.
   * NOTE(review): the semantics of `parts` are not visible from this
   * declaration — confirm against the SDK documentation.
   */
  parts?: Part[];
}
|
|
857
|
+
|
|
858
|
+
/**
 * Content part interface if the part represents {@link FunctionResponse}.
 * All other part-variant fields are typed `never` so a part can only carry
 * one kind of payload.
 * @public
 */
export declare interface FunctionResponsePart {
  text?: never;
  inlineData?: never;
  functionCall?: never;
  functionResponse: FunctionResponse;
  /**
   * Presumably marks this part as model "thinking" output (see
   * {@link ThinkingConfig}) — confirm against the SDK documentation.
   */
  thought?: boolean;
  /* Excluded from this release type: thoughtSignature */
  executableCode?: never;
  codeExecutionResult?: never;
}
|
|
872
|
+
|
|
873
|
+
/**
 * A candidate returned as part of a {@link GenerateContentResponse}.
 * @public
 */
export declare interface GenerateContentCandidate {
  /**
   * Index of this candidate within the response's candidate list.
   */
  index: number;
  /**
   * The content generated for this candidate.
   */
  content: Content;
  /**
   * The reason generation stopped, if the candidate finished; see
   * {@link (FinishReason:type)}.
   */
  finishReason?: FinishReason;
  /**
   * Message accompanying `finishReason`, when provided by the backend.
   */
  finishMessage?: string;
  safetyRatings?: SafetyRating[];
  citationMetadata?: CitationMetadata;
  /**
   * Present when grounding was used; see {@link GroundingMetadata}.
   */
  groundingMetadata?: GroundingMetadata;
  urlContextMetadata?: URLContextMetadata;
}

/**
 * Request sent through {@link GenerativeModel.generateContent}
 * @public
 */
export declare interface GenerateContentRequest extends BaseParams {
  /**
   * The content to send to the model.
   */
  contents: Content[];
  /**
   * Tools the model may use while generating; see {@link Tool}.
   */
  tools?: Tool[];
  /**
   * Configuration for how the model may use the provided tools.
   */
  toolConfig?: ToolConfig;
  /**
   * System instruction for the model; may be given as a plain string, a
   * single {@link Part}, or a full {@link Content} object.
   */
  systemInstruction?: string | Part | Content;
}

/**
 * Individual response from {@link GenerativeModel.generateContent} and
 * {@link GenerativeModel.generateContentStream}.
 * `generateContentStream()` will return one in each chunk until
 * the stream is done.
 * @public
 */
export declare interface GenerateContentResponse {
  candidates?: GenerateContentCandidate[];
  promptFeedback?: PromptFeedback;
  usageMetadata?: UsageMetadata;
}

/**
 * Result object returned from {@link GenerativeModel.generateContent} call.
 *
 * @public
 */
export declare interface GenerateContentResult {
  response: EnhancedGenerateContentResponse;
}

/**
 * Result object returned from {@link GenerativeModel.generateContentStream} call.
 * Iterate over `stream` to get chunks as they come in and/or
 * use the `response` promise to get the aggregated response when
 * the stream is done.
 *
 * @public
 */
export declare interface GenerateContentStreamResult {
  /**
   * Async iterable of per-chunk responses.
   */
  stream: AsyncGenerator<EnhancedGenerateContentResponse>;
  /**
   * Promise that resolves with the aggregated response once the stream ends.
   */
  response: Promise<EnhancedGenerateContentResponse>;
}
|
|
933
|
+
|
|
934
|
+
/**
 * Config options for content-related requests
 * @public
 */
export declare interface GenerationConfig {
  /**
   * Number of candidate responses to generate (see
   * {@link GenerateContentResponse.candidates}).
   */
  candidateCount?: number;
  /**
   * Sequences at which the model stops generating.
   */
  stopSequences?: string[];
  /**
   * Maximum number of tokens in a generated candidate.
   */
  maxOutputTokens?: number;
  /**
   * Sampling temperature — valid range is backend-specific; confirm against
   * the model documentation.
   */
  temperature?: number;
  topP?: number;
  topK?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
  /**
   * Output response MIME type of the generated candidate text.
   * Supported MIME types are `text/plain` (default, text output),
   * `application/json` (JSON response in the candidates), and
   * `text/x.enum`.
   */
  responseMimeType?: string;
  /**
   * Output response schema of the generated candidate text. This
   * value can be a class generated with a {@link Schema} static method
   * like `Schema.string()` or `Schema.object()` or it can be a plain
   * JS object matching the {@link SchemaRequest} interface.
   * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently
   * this is limited to `application/json` and `text/x.enum`.
   */
  responseSchema?: TypedSchema | SchemaRequest;
  /**
   * Generation modalities to be returned in generation responses.
   *
   * @remarks
   * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}.
   * - Only image generation (`ResponseModality.IMAGE`) is supported.
   *
   * @beta
   */
  responseModalities?: ResponseModality[];
  /**
   * Configuration for "thinking" behavior of compatible Gemini models.
   */
  thinkingConfig?: ThinkingConfig;
}
|
|
978
|
+
|
|
979
|
+
/**
 * Interface for sending an image.
 * @public
 */
export declare interface GenerativeContentBlob {
  /**
   * The MIME type of the image data.
   */
  mimeType: string;
  /**
   * Image as a base64 string.
   */
  data: string;
}
|
|
990
|
+
|
|
991
|
+
/**
 * Class for generative model APIs.
 * @public
 */
export declare class GenerativeModel extends AIModel {
  // Optional adapter used for on-device (Chrome) inference; only its
  // declaration is visible here.
  private chromeAdapter?;
  /**
   * Generation config applied to this model's requests.
   */
  generationConfig: GenerationConfig;
  /**
   * Safety settings applied to this model's requests.
   */
  safetySettings: SafetySetting[];
  requestOptions?: RequestOptions;
  tools?: Tool[];
  toolConfig?: ToolConfig;
  systemInstruction?: Content;
  constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined);
  /**
   * Makes a single non-streaming call to the model
   * and returns an object containing a single {@link GenerateContentResponse}.
   */
  generateContent(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
  /**
   * Makes a single streaming call to the model
   * and returns an object containing an iterable stream that iterates
   * over all chunks in the streaming response as well as
   * a promise that returns the final aggregated response.
   */
  generateContentStream(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
  /**
   * Gets a new {@link ChatSession} instance which can be used for
   * multi-turn chats.
   */
  startChat(startChatParams?: StartChatParams): ChatSession;
  /**
   * Counts the tokens in the provided request.
   */
  countTokens(request: CountTokensRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
}
|
|
1026
|
+
|
|
1027
|
+
/**
 * Returns the default {@link AI} instance that is associated with the provided
 * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the
 * default settings.
 *
 * @example
 * ```javascript
 * const ai = getAI(app);
 * ```
 *
 * @example
 * ```javascript
 * // Get an AI instance configured to use the Gemini Developer API (via Google AI).
 * const ai = getAI(app, { backend: new GoogleAIBackend() });
 * ```
 *
 * @example
 * ```javascript
 * // Get an AI instance configured to use the Vertex AI Gemini API.
 * const ai = getAI(app, { backend: new VertexAIBackend() });
 * ```
 *
 * @param app - The {@link @firebase/app#FirebaseApp} to use.
 * @param options - {@link AIOptions} that configure the AI instance.
 * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}.
 *
 * @public
 */
export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
|
|
1056
|
+
|
|
1057
|
+
/**
 * Returns a {@link GenerativeModel} class with methods for inference
 * and other functionality.
 *
 * @param ai - An {@link AI} instance.
 * @param modelParams - In-cloud {@link ModelParams}, or {@link HybridParams}
 * for hybrid (on-device / in-cloud) inference.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @public
 */
export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;

/**
 * Returns an {@link ImagenModel} class with methods for using Imagen.
 *
 * Only Imagen 3 models (named `imagen-3.0-*`) are supported.
 *
 * @param ai - An {@link AI} instance.
 * @param modelParams - Parameters to use when making Imagen requests.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @throws If the `apiKey` or `projectId` fields are missing in your
 * Firebase config.
 *
 * @public
 */
export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;

/**
 * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
 *
 * The Live API is only supported in modern browser windows and Node >= 22.
 *
 * @param ai - An {@link AI} instance.
 * @param modelParams - Parameters to use when setting up a {@link LiveSession}.
 * @throws If the `apiKey` or `projectId` fields are missing in your
 * Firebase config.
 *
 * @beta
 */
export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel;

/**
 * Returns a {@link TemplateGenerativeModel} class for executing server-side
 * templates.
 *
 * @param ai - An {@link AI} instance.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @beta
 */
export declare function getTemplateGenerativeModel(ai: AI, requestOptions?: RequestOptions): TemplateGenerativeModel;

/**
 * Returns a {@link TemplateImagenModel} class for executing server-side
 * Imagen templates.
 *
 * @param ai - An {@link AI} instance.
 * @param requestOptions - Additional options to use when making requests.
 *
 * @beta
 */
export declare function getTemplateImagenModel(ai: AI, requestOptions?: RequestOptions): TemplateImagenModel;
|
|
1116
|
+
|
|
1117
|
+
/**
 * Configuration class for the Gemini Developer API.
 *
 * Use this with {@link AIOptions} when initializing the AI service via
 * {@link getAI | getAI()} to specify the Gemini Developer API as the backend.
 *
 * @public
 */
export declare class GoogleAIBackend extends Backend {
  /**
   * Creates a configuration object for the Gemini Developer API backend.
   */
  constructor();
  /* Excluded from this release type: _getModelPath */
  /* Excluded from this release type: _getTemplatePath */
}
|
|
1133
|
+
|
|
1134
|
+
/* Excluded from this release type: GoogleAICitationMetadata */
|
|
1135
|
+
|
|
1136
|
+
/* Excluded from this release type: GoogleAICountTokensRequest */
|
|
1137
|
+
|
|
1138
|
+
/* Excluded from this release type: GoogleAIGenerateContentCandidate */
|
|
1139
|
+
|
|
1140
|
+
/* Excluded from this release type: GoogleAIGenerateContentResponse */
|
|
1141
|
+
|
|
1142
|
+
/**
 * Specifies the Google Search configuration.
 *
 * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
 *
 * @public
 */
export declare interface GoogleSearch {
}

/**
 * A tool that allows a Gemini model to connect to Google Search to access and incorporate
 * up-to-date information from the web into its responses.
 *
 * Important: If using Grounding with Google Search, you are required to comply with the
 * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
 * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
 * section within the Service Specific Terms).
 *
 * @public
 */
export declare interface GoogleSearchTool {
  /**
   * Specifies the Google Search configuration.
   * Currently, this is an empty object, but it's reserved for future configuration options.
   *
   * When using this feature, you are required to comply with the "Grounding with Google Search"
   * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
   * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
   * section within the Service Specific Terms).
   */
  googleSearch: GoogleSearch;
}
|
|
1175
|
+
|
|
1176
|
+
/**
 * Represents a chunk of retrieved data that supports a claim in the model's response. This is part
 * of the grounding information provided when grounding is enabled.
 *
 * @public
 */
export declare interface GroundingChunk {
  /**
   * Contains details if the grounding chunk is from a web source.
   */
  web?: WebGroundingChunk;
}

/**
 * Metadata returned when grounding is enabled.
 *
 * Currently, only Grounding with Google Search is supported (see {@link GoogleSearchTool}).
 *
 * Important: If using Grounding with Google Search, you are required to comply with the
 * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
 * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
 * section within the Service Specific Terms).
 *
 * @public
 */
export declare interface GroundingMetadata {
  /**
   * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
   * embedded in an app to display a Google Search entry point for follow-up web searches related to
   * a model's "Grounded Response".
   */
  searchEntryPoint?: SearchEntrypoint;
  /**
   * A list of {@link GroundingChunk} objects. Each chunk represents a piece of retrieved content
   * (for example, from a web page) that the model used to ground its response.
   */
  groundingChunks?: GroundingChunk[];
  /**
   * A list of {@link GroundingSupport} objects. Each object details how specific segments of the
   * model's response are supported by the `groundingChunks`.
   */
  groundingSupports?: GroundingSupport[];
  /**
   * A list of web search queries that the model performed to gather the grounding information.
   * These can be used to allow users to explore the search results themselves.
   */
  webSearchQueries?: string[];
  /**
   * @deprecated Use {@link GroundingSupport} instead.
   */
  retrievalQueries?: string[];
}

/**
 * Provides information about how a specific segment of the model's response is supported by the
 * retrieved grounding chunks.
 *
 * @public
 */
export declare interface GroundingSupport {
  /**
   * Specifies the segment of the model's response content that this grounding support pertains to.
   */
  segment?: Segment;
  /**
   * A list of indices that refer to specific {@link GroundingChunk} objects within the
   * {@link GroundingMetadata.groundingChunks} array. These referenced chunks
   * are the sources that support the claim made in the associated `segment` of the response.
   * For example, an array `[1, 3, 4]` means that `groundingChunks[1]`, `groundingChunks[3]`,
   * and `groundingChunks[4]` are the retrieved content supporting this part of the response.
   */
  groundingChunkIndices?: number[];
}
|
|
1249
|
+
|
|
1250
|
+
/**
 * Method used to decide whether to block content.
 *
 * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
 *
 * @public
 */
export declare const HarmBlockMethod: {
  /**
   * The harm block method uses both probability and severity scores.
   */
  readonly SEVERITY: "SEVERITY";
  /**
   * The harm block method uses the probability score.
   */
  readonly PROBABILITY: "PROBABILITY";
};

/**
 * Union of the string values of the {@link HarmBlockMethod} constant object.
 *
 * This property is not supported in the Gemini Developer API ({@link GoogleAIBackend}).
 *
 * @public
 */
export declare type HarmBlockMethod = (typeof HarmBlockMethod)[keyof typeof HarmBlockMethod];

/**
 * Threshold above which a prompt or candidate will be blocked.
 * @public
 */
export declare const HarmBlockThreshold: {
  /**
   * Content with `NEGLIGIBLE` will be allowed.
   */
  readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
  /**
   * Content with `NEGLIGIBLE` and `LOW` will be allowed.
   */
  readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
  /**
   * Content with `NEGLIGIBLE`, `LOW`, and `MEDIUM` will be allowed.
   */
  readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
  /**
   * All content will be allowed.
   */
  readonly BLOCK_NONE: "BLOCK_NONE";
  /**
   * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
   * to the {@link (HarmCategory:type)} will not be present in the response.
   */
  readonly OFF: "OFF";
};

/**
 * Threshold above which a prompt or candidate will be blocked.
 *
 * Union of the string values of the {@link HarmBlockThreshold} constant object.
 * @public
 */
export declare type HarmBlockThreshold = (typeof HarmBlockThreshold)[keyof typeof HarmBlockThreshold];
|
|
1306
|
+
|
|
1307
|
+
/**
 * Harm categories that would cause prompts or candidates to be blocked.
 * @public
 */
export declare const HarmCategory: {
  readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
  readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
  readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
  readonly HARM_CATEGORY_DANGEROUS_CONTENT: "HARM_CATEGORY_DANGEROUS_CONTENT";
};

/**
 * Harm categories that would cause prompts or candidates to be blocked.
 *
 * Union of the string values of the {@link HarmCategory} constant object.
 * @public
 */
export declare type HarmCategory = (typeof HarmCategory)[keyof typeof HarmCategory];

/**
 * Probability that a prompt or candidate matches a harm category.
 * @public
 */
export declare const HarmProbability: {
  /**
   * Content has a negligible chance of being unsafe.
   */
  readonly NEGLIGIBLE: "NEGLIGIBLE";
  /**
   * Content has a low chance of being unsafe.
   */
  readonly LOW: "LOW";
  /**
   * Content has a medium chance of being unsafe.
   */
  readonly MEDIUM: "MEDIUM";
  /**
   * Content has a high chance of being unsafe.
   */
  readonly HIGH: "HIGH";
};

/**
 * Probability that a prompt or candidate matches a harm category.
 *
 * Union of the string values of the {@link HarmProbability} constant object.
 * @public
 */
export declare type HarmProbability = (typeof HarmProbability)[keyof typeof HarmProbability];

/**
 * Harm severity levels.
 * @public
 */
export declare const HarmSeverity: {
  /**
   * Negligible level of harm severity.
   */
  readonly HARM_SEVERITY_NEGLIGIBLE: "HARM_SEVERITY_NEGLIGIBLE";
  /**
   * Low level of harm severity.
   */
  readonly HARM_SEVERITY_LOW: "HARM_SEVERITY_LOW";
  /**
   * Medium level of harm severity.
   */
  readonly HARM_SEVERITY_MEDIUM: "HARM_SEVERITY_MEDIUM";
  /**
   * High level of harm severity.
   */
  readonly HARM_SEVERITY_HIGH: "HARM_SEVERITY_HIGH";
  /**
   * Harm severity is not supported.
   *
   * @remarks
   * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback.
   */
  readonly HARM_SEVERITY_UNSUPPORTED: "HARM_SEVERITY_UNSUPPORTED";
};

/**
 * Harm severity levels.
 *
 * Union of the string values of the {@link HarmSeverity} constant object.
 * @public
 */
export declare type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];
|
|
1388
|
+
|
|
1389
|
+
/**
 * Configures hybrid inference.
 * @beta
 */
export declare interface HybridParams {
  /**
   * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
   */
  mode: InferenceMode;
  /**
   * Optional. Specifies advanced params for on-device inference.
   */
  onDeviceParams?: OnDeviceParams;
  /**
   * Optional. Specifies advanced params for in-cloud inference.
   */
  inCloudParams?: ModelParams;
}
|
|
1407
|
+
|
|
1408
|
+
/**
 * Aspect ratios for Imagen images.
 *
 * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
 * {@link ImagenGenerationConfig}.
 *
 * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * for more details and examples of the supported aspect ratios.
 *
 * NOTE(review): the LANDSCAPE_3x4 / PORTRAIT_4x3 key-to-ratio pairing looks
 * inverted relative to the usual width:height reading ("3:4" is typically
 * portrait). The keys and values are preserved here because they are part of
 * the published API surface — confirm against the upstream SDK before changing.
 *
 * @public
 */
export declare const ImagenAspectRatio: {
  /**
   * Square (1:1) aspect ratio.
   */
  readonly SQUARE: "1:1";
  /**
   * Landscape (3:4) aspect ratio.
   */
  readonly LANDSCAPE_3x4: "3:4";
  /**
   * Portrait (4:3) aspect ratio.
   */
  readonly PORTRAIT_4x3: "4:3";
  /**
   * Landscape (16:9) aspect ratio.
   */
  readonly LANDSCAPE_16x9: "16:9";
  /**
   * Portrait (9:16) aspect ratio.
   */
  readonly PORTRAIT_9x16: "9:16";
};

/**
 * Aspect ratios for Imagen images.
 *
 * Union of the string values of the {@link ImagenAspectRatio} constant object.
 *
 * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
 * {@link ImagenGenerationConfig}.
 *
 * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
 * for more details and examples of the supported aspect ratios.
 *
 * @public
 */
export declare type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof ImagenAspectRatio];
|
|
1454
|
+
|
|
1455
|
+
/**
|
|
1456
|
+
* An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
|
|
1457
|
+
*
|
|
1458
|
+
* This feature is not available yet.
|
|
1459
|
+
* @public
|
|
1460
|
+
*/
|
|
1461
|
+
export declare interface ImagenGCSImage {
|
|
1462
|
+
/**
|
|
1463
|
+
* The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
|
|
1464
|
+
*
|
|
1465
|
+
* To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
|
|
1466
|
+
*/
|
|
1467
|
+
mimeType: string;
|
|
1468
|
+
/**
|
|
1469
|
+
* The URI of the file stored in a Cloud Storage for Firebase bucket.
|
|
1470
|
+
*
|
|
1471
|
+
* @example `"gs://bucket-name/path/sample_0.jpg"`.
|
|
1472
|
+
*/
|
|
1473
|
+
gcsURI: string;
|
|
1474
|
+
}
|
|
1475
|
+
|
|
1476
|
+
/**
|
|
1477
|
+
* Configuration options for generating images with Imagen.
|
|
1478
|
+
*
|
|
1479
|
+
* See the {@link http://firebase.google.com/docs/vertex-ai/generate-images-imagen | documentation} for
|
|
1480
|
+
* more details.
|
|
1481
|
+
*
|
|
1482
|
+
* @public
|
|
1483
|
+
*/
|
|
1484
|
+
export declare interface ImagenGenerationConfig {
|
|
1485
|
+
/**
|
|
1486
|
+
* A description of what should be omitted from the generated images.
|
|
1487
|
+
*
|
|
1488
|
+
* Support for negative prompts depends on the Imagen model.
|
|
1489
|
+
*
|
|
1490
|
+
* See the {@link http://firebase.google.com/docs/vertex-ai/model-parameters#imagen | documentation} for more details.
|
|
1491
|
+
*
|
|
1492
|
+
* This is no longer supported in the Gemini Developer API ({@link GoogleAIBackend}) in versions
|
|
1493
|
+
* greater than `imagen-3.0-generate-002`.
|
|
1494
|
+
*/
|
|
1495
|
+
negativePrompt?: string;
|
|
1496
|
+
/**
|
|
1497
|
+
* The number of images to generate. The default value is 1.
|
|
1498
|
+
*
|
|
1499
|
+
* The number of sample images that may be generated in each request depends on the model
|
|
1500
|
+
* (typically up to 4); see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">sampleCount</a>
|
|
1501
|
+
* documentation for more details.
|
|
1502
|
+
*/
|
|
1503
|
+
numberOfImages?: number;
|
|
1504
|
+
/**
|
|
1505
|
+
* The aspect ratio of the generated images. The default value is square 1:1.
|
|
1506
|
+
* Supported aspect ratios depend on the Imagen model, see {@link (ImagenAspectRatio:type)}
|
|
1507
|
+
* for more details.
|
|
1508
|
+
*/
|
|
1509
|
+
aspectRatio?: ImagenAspectRatio;
|
|
1510
|
+
/**
|
|
1511
|
+
* The image format of the generated images. The default is PNG.
|
|
1512
|
+
*
|
|
1513
|
+
* See {@link ImagenImageFormat} for more details.
|
|
1514
|
+
*/
|
|
1515
|
+
imageFormat?: ImagenImageFormat;
|
|
1516
|
+
/**
|
|
1517
|
+
* Whether to add an invisible watermark to generated images.
|
|
1518
|
+
*
|
|
1519
|
+
* If set to `true`, an invisible SynthID watermark is embedded in generated images to indicate
|
|
1520
|
+
* that they are AI generated. If set to `false`, watermarking will be disabled.
|
|
1521
|
+
*
|
|
1522
|
+
* For Imagen 3 models, the default value is `true`; see the <a href="http://firebase.google.com/docs/vertex-ai/model-parameters#imagen">addWatermark</a>
|
|
1523
|
+
* documentation for more details.
|
|
1524
|
+
*
|
|
1525
|
+
* When using the Gemini Developer API ({@link GoogleAIBackend}), this will default to true,
|
|
1526
|
+
* and cannot be turned off.
|
|
1527
|
+
*/
|
|
1528
|
+
addWatermark?: boolean;
|
|
1529
|
+
}
|
|
1530
|
+
|
|
1531
|
+
/**
|
|
1532
|
+
* The response from a request to generate images with Imagen.
|
|
1533
|
+
*
|
|
1534
|
+
* @public
|
|
1535
|
+
*/
|
|
1536
|
+
export declare interface ImagenGenerationResponse<T extends ImagenInlineImage | ImagenGCSImage> {
|
|
1537
|
+
/**
|
|
1538
|
+
* The images generated by Imagen.
|
|
1539
|
+
*
|
|
1540
|
+
* The number of images generated may be fewer than the number requested if one or more were
|
|
1541
|
+
* filtered out; see `filteredReason`.
|
|
1542
|
+
*/
|
|
1543
|
+
images: T[];
|
|
1544
|
+
/**
|
|
1545
|
+
* The reason that images were filtered out. This property will only be defined if one
|
|
1546
|
+
* or more images were filtered.
|
|
1547
|
+
*
|
|
1548
|
+
* Images may be filtered out due to the {@link (ImagenSafetyFilterLevel:type)},
|
|
1549
|
+
* {@link (ImagenPersonFilterLevel:type)}, or filtering included in the model.
|
|
1550
|
+
* The filter levels may be adjusted in your {@link ImagenSafetySettings}.
|
|
1551
|
+
*
|
|
1552
|
+
* See the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen | Responsible AI and usage guidelines for Imagen}
|
|
1553
|
+
* for more details.
|
|
1554
|
+
*/
|
|
1555
|
+
filteredReason?: string;
|
|
1556
|
+
}
|
|
1557
|
+
|
|
1558
|
+
/**
|
|
1559
|
+
* @license
|
|
1560
|
+
* Copyright 2025 Google LLC
|
|
1561
|
+
*
|
|
1562
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
1563
|
+
* you may not use this file except in compliance with the License.
|
|
1564
|
+
* You may obtain a copy of the License at
|
|
1565
|
+
*
|
|
1566
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
1567
|
+
*
|
|
1568
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
1569
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
1570
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
1571
|
+
* See the License for the specific language governing permissions and
|
|
1572
|
+
* limitations under the License.
|
|
1573
|
+
*/
|
|
1574
|
+
/**
|
|
1575
|
+
* Defines the image format for images generated by Imagen.
|
|
1576
|
+
*
|
|
1577
|
+
* Use this class to specify the desired format (JPEG or PNG) and compression quality
|
|
1578
|
+
* for images generated by Imagen. This is typically included as part of
|
|
1579
|
+
* {@link ImagenModelParams}.
|
|
1580
|
+
*
|
|
1581
|
+
* @example
|
|
1582
|
+
* ```javascript
|
|
1583
|
+
* const imagenModelParams = {
|
|
1584
|
+
* // ... other ImagenModelParams
|
|
1585
|
+
* imageFormat: ImagenImageFormat.jpeg(75) // JPEG with a compression level of 75.
|
|
1586
|
+
* }
|
|
1587
|
+
* ```
|
|
1588
|
+
*
|
|
1589
|
+
* @public
|
|
1590
|
+
*/
|
|
1591
|
+
export declare class ImagenImageFormat {
|
|
1592
|
+
/**
|
|
1593
|
+
* The MIME type.
|
|
1594
|
+
*/
|
|
1595
|
+
mimeType: string;
|
|
1596
|
+
/**
|
|
1597
|
+
* The level of compression (a number between 0 and 100).
|
|
1598
|
+
*/
|
|
1599
|
+
compressionQuality?: number;
|
|
1600
|
+
private constructor();
|
|
1601
|
+
/**
|
|
1602
|
+
* Creates an {@link ImagenImageFormat} for a JPEG image.
|
|
1603
|
+
*
|
|
1604
|
+
* @param compressionQuality - The level of compression (a number between 0 and 100).
|
|
1605
|
+
* @returns An {@link ImagenImageFormat} object for a JPEG image.
|
|
1606
|
+
*
|
|
1607
|
+
* @public
|
|
1608
|
+
*/
|
|
1609
|
+
static jpeg(compressionQuality?: number): ImagenImageFormat;
|
|
1610
|
+
/**
|
|
1611
|
+
* Creates an {@link ImagenImageFormat} for a PNG image.
|
|
1612
|
+
*
|
|
1613
|
+
* @returns An {@link ImagenImageFormat} object for a PNG image.
|
|
1614
|
+
*
|
|
1615
|
+
* @public
|
|
1616
|
+
*/
|
|
1617
|
+
static png(): ImagenImageFormat;
|
|
1618
|
+
}
|
|
1619
|
+
|
|
1620
|
+
/**
|
|
1621
|
+
* @license
|
|
1622
|
+
* Copyright 2025 Google LLC
|
|
1623
|
+
*
|
|
1624
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
1625
|
+
* you may not use this file except in compliance with the License.
|
|
1626
|
+
* You may obtain a copy of the License at
|
|
1627
|
+
*
|
|
1628
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
1629
|
+
*
|
|
1630
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
1631
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
1632
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
1633
|
+
* See the License for the specific language governing permissions and
|
|
1634
|
+
* limitations under the License.
|
|
1635
|
+
*/
|
|
1636
|
+
/**
|
|
1637
|
+
* An image generated by Imagen, represented as inline data.
|
|
1638
|
+
*
|
|
1639
|
+
* @public
|
|
1640
|
+
*/
|
|
1641
|
+
export declare interface ImagenInlineImage {
|
|
1642
|
+
/**
|
|
1643
|
+
* The MIME type of the image; either `"image/png"` or `"image/jpeg"`.
|
|
1644
|
+
*
|
|
1645
|
+
* To request a different format, set the `imageFormat` property in your {@link ImagenGenerationConfig}.
|
|
1646
|
+
*/
|
|
1647
|
+
mimeType: string;
|
|
1648
|
+
/**
|
|
1649
|
+
* The base64-encoded image data.
|
|
1650
|
+
*/
|
|
1651
|
+
bytesBase64Encoded: string;
|
|
1652
|
+
}
|
|
1653
|
+
|
|
1654
|
+
/**
|
|
1655
|
+
* Class for Imagen model APIs.
|
|
1656
|
+
*
|
|
1657
|
+
* This class provides methods for generating images using the Imagen model.
|
|
1658
|
+
*
|
|
1659
|
+
* @example
|
|
1660
|
+
* ```javascript
|
|
1661
|
+
* const imagen = new ImagenModel(
|
|
1662
|
+
* ai,
|
|
1663
|
+
* {
|
|
1664
|
+
* model: 'imagen-3.0-generate-002'
|
|
1665
|
+
* }
|
|
1666
|
+
* );
|
|
1667
|
+
*
|
|
1668
|
+
* const response = await imagen.generateImages('A photo of a cat');
|
|
1669
|
+
* if (response.images.length > 0) {
|
|
1670
|
+
* console.log(response.images[0].bytesBase64Encoded);
|
|
1671
|
+
* }
|
|
1672
|
+
* ```
|
|
1673
|
+
*
|
|
1674
|
+
* @public
|
|
1675
|
+
*/
|
|
1676
|
+
export declare class ImagenModel extends AIModel {
|
|
1677
|
+
requestOptions?: RequestOptions | undefined;
|
|
1678
|
+
/**
|
|
1679
|
+
* The Imagen generation configuration.
|
|
1680
|
+
*/
|
|
1681
|
+
generationConfig?: ImagenGenerationConfig;
|
|
1682
|
+
/**
|
|
1683
|
+
* Safety settings for filtering inappropriate content.
|
|
1684
|
+
*/
|
|
1685
|
+
safetySettings?: ImagenSafetySettings;
|
|
1686
|
+
/**
|
|
1687
|
+
* Constructs a new instance of the {@link ImagenModel} class.
|
|
1688
|
+
*
|
|
1689
|
+
* @param ai - an {@link AI} instance.
|
|
1690
|
+
* @param modelParams - Parameters to use when making requests to Imagen.
|
|
1691
|
+
* @param requestOptions - Additional options to use when making requests.
|
|
1692
|
+
*
|
|
1693
|
+
* @throws If the `apiKey` or `projectId` fields are missing in your
|
|
1694
|
+
* Firebase config.
|
|
1695
|
+
*/
|
|
1696
|
+
constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined);
|
|
1697
|
+
/**
|
|
1698
|
+
* Generates images using the Imagen model and returns them as
|
|
1699
|
+
* base64-encoded strings.
|
|
1700
|
+
*
|
|
1701
|
+
* @param prompt - A text prompt describing the image(s) to generate.
|
|
1702
|
+
* @returns A promise that resolves to an {@link ImagenGenerationResponse}
|
|
1703
|
+
* object containing the generated images.
|
|
1704
|
+
*
|
|
1705
|
+
* @throws If the request to generate images fails. This happens if the
|
|
1706
|
+
* prompt is blocked.
|
|
1707
|
+
*
|
|
1708
|
+
* @remarks
|
|
1709
|
+
* If the prompt was not blocked, but one or more of the generated images were filtered, the
|
|
1710
|
+
* returned object will have a `filteredReason` property.
|
|
1711
|
+
* If all images are filtered, the `images` array will be empty.
|
|
1712
|
+
*
|
|
1713
|
+
* @public
|
|
1714
|
+
*/
|
|
1715
|
+
generateImages(prompt: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
|
|
1716
|
+
/* Excluded from this release type: generateImagesGCS */
|
|
1717
|
+
}
|
|
1718
|
+
|
|
1719
|
+
/**
|
|
1720
|
+
* Parameters for configuring an {@link ImagenModel}.
|
|
1721
|
+
*
|
|
1722
|
+
* @public
|
|
1723
|
+
*/
|
|
1724
|
+
export declare interface ImagenModelParams {
|
|
1725
|
+
/**
|
|
1726
|
+
* The Imagen model to use for generating images.
|
|
1727
|
+
* For example: `imagen-3.0-generate-002`.
|
|
1728
|
+
*
|
|
1729
|
+
* Only Imagen 3 models (named `imagen-3.0-*`) are supported.
|
|
1730
|
+
*
|
|
1731
|
+
* See {@link https://firebase.google.com/docs/vertex-ai/models | model versions}
|
|
1732
|
+
* for a full list of supported Imagen 3 models.
|
|
1733
|
+
*/
|
|
1734
|
+
model: string;
|
|
1735
|
+
/**
|
|
1736
|
+
* Configuration options for generating images with Imagen.
|
|
1737
|
+
*/
|
|
1738
|
+
generationConfig?: ImagenGenerationConfig;
|
|
1739
|
+
/**
|
|
1740
|
+
* Safety settings for filtering potentially inappropriate content.
|
|
1741
|
+
*/
|
|
1742
|
+
safetySettings?: ImagenSafetySettings;
|
|
1743
|
+
}
|
|
1744
|
+
|
|
1745
|
+
/**
|
|
1746
|
+
* A filter level controlling whether generation of images containing people or faces is allowed.
|
|
1747
|
+
*
|
|
1748
|
+
* See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
|
|
1749
|
+
* documentation for more details.
|
|
1750
|
+
*
|
|
1751
|
+
* @public
|
|
1752
|
+
*/
|
|
1753
|
+
export declare const ImagenPersonFilterLevel: {
|
|
1754
|
+
/**
|
|
1755
|
+
* Disallow generation of images containing people or faces; images of people are filtered out.
|
|
1756
|
+
*/
|
|
1757
|
+
readonly BLOCK_ALL: "dont_allow";
|
|
1758
|
+
/**
|
|
1759
|
+
* Allow generation of images containing adults only; images of children are filtered out.
|
|
1760
|
+
*
|
|
1761
|
+
* Generation of images containing people or faces may require your use case to be
|
|
1762
|
+
* reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
|
|
1763
|
+
* for more details.
|
|
1764
|
+
*/
|
|
1765
|
+
readonly ALLOW_ADULT: "allow_adult";
|
|
1766
|
+
/**
|
|
1767
|
+
* Allow generation of images containing adults only; images of children are filtered out.
|
|
1768
|
+
*
|
|
1769
|
+
* Generation of images containing people or faces may require your use case to be
|
|
1770
|
+
* reviewed and approved by Cloud support; see the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#person-face-gen | Responsible AI and usage guidelines}
|
|
1771
|
+
* for more details.
|
|
1772
|
+
*/
|
|
1773
|
+
readonly ALLOW_ALL: "allow_all";
|
|
1774
|
+
};
|
|
1775
|
+
|
|
1776
|
+
/**
|
|
1777
|
+
* A filter level controlling whether generation of images containing people or faces is allowed.
|
|
1778
|
+
*
|
|
1779
|
+
* See the <a href="http://firebase.google.com/docs/vertex-ai/generate-images">personGeneration</a>
|
|
1780
|
+
* documentation for more details.
|
|
1781
|
+
*
|
|
1782
|
+
* @public
|
|
1783
|
+
*/
|
|
1784
|
+
export declare type ImagenPersonFilterLevel = (typeof ImagenPersonFilterLevel)[keyof typeof ImagenPersonFilterLevel];
|
|
1785
|
+
|
|
1786
|
+
/**
|
|
1787
|
+
* A filter level controlling how aggressively to filter sensitive content.
|
|
1788
|
+
*
|
|
1789
|
+
* Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
|
|
1790
|
+
* are assessed against a list of safety filters, which include 'harmful categories' (for example,
|
|
1791
|
+
* `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
|
|
1792
|
+
* filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
|
|
1793
|
+
* and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
|
|
1794
|
+
* for more details.
|
|
1795
|
+
*
|
|
1796
|
+
* @public
|
|
1797
|
+
*/
|
|
1798
|
+
export declare const ImagenSafetyFilterLevel: {
|
|
1799
|
+
/**
|
|
1800
|
+
* The most aggressive filtering level; most strict blocking.
|
|
1801
|
+
*/
|
|
1802
|
+
readonly BLOCK_LOW_AND_ABOVE: "block_low_and_above";
|
|
1803
|
+
/**
|
|
1804
|
+
* Blocks some sensitive prompts and responses.
|
|
1805
|
+
*/
|
|
1806
|
+
readonly BLOCK_MEDIUM_AND_ABOVE: "block_medium_and_above";
|
|
1807
|
+
/**
|
|
1808
|
+
* Blocks few sensitive prompts and responses.
|
|
1809
|
+
*/
|
|
1810
|
+
readonly BLOCK_ONLY_HIGH: "block_only_high";
|
|
1811
|
+
/**
|
|
1812
|
+
* The least aggressive filtering level; blocks very few sensitive prompts and responses.
|
|
1813
|
+
*
|
|
1814
|
+
* Access to this feature is restricted and may require your case to be reviewed and approved by
|
|
1815
|
+
* Cloud support.
|
|
1816
|
+
*/
|
|
1817
|
+
readonly BLOCK_NONE: "block_none";
|
|
1818
|
+
};
|
|
1819
|
+
|
|
1820
|
+
/**
|
|
1821
|
+
* A filter level controlling how aggressively to filter sensitive content.
|
|
1822
|
+
*
|
|
1823
|
+
* Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI
|
|
1824
|
+
* are assessed against a list of safety filters, which include 'harmful categories' (for example,
|
|
1825
|
+
* `violence`, `sexual`, `derogatory`, and `toxic`). This filter level controls how aggressively to
|
|
1826
|
+
* filter out potentially harmful content from responses. See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
|
|
1827
|
+
* and the {@link https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters | Responsible AI and usage guidelines}
|
|
1828
|
+
* for more details.
|
|
1829
|
+
*
|
|
1830
|
+
* @public
|
|
1831
|
+
*/
|
|
1832
|
+
export declare type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typeof ImagenSafetyFilterLevel];
|
|
1833
|
+
|
|
1834
|
+
/**
|
|
1835
|
+
* Settings for controlling the aggressiveness of filtering out sensitive content.
|
|
1836
|
+
*
|
|
1837
|
+
* See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
|
|
1838
|
+
* for more details.
|
|
1839
|
+
*
|
|
1840
|
+
* @public
|
|
1841
|
+
*/
|
|
1842
|
+
export declare interface ImagenSafetySettings {
|
|
1843
|
+
/**
|
|
1844
|
+
* A filter level controlling how aggressive to filter out sensitive content from generated
|
|
1845
|
+
* images.
|
|
1846
|
+
*/
|
|
1847
|
+
safetyFilterLevel?: ImagenSafetyFilterLevel;
|
|
1848
|
+
/**
|
|
1849
|
+
* A filter level controlling whether generation of images containing people or faces is allowed.
|
|
1850
|
+
*/
|
|
1851
|
+
personFilterLevel?: ImagenPersonFilterLevel;
|
|
1852
|
+
}
|
|
1853
|
+
|
|
1854
|
+
/**
|
|
1855
|
+
* Determines whether inference happens on-device or in-cloud.
|
|
1856
|
+
*
|
|
1857
|
+
* @remarks
|
|
1858
|
+
* <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
|
|
1859
|
+
* on-device model. If on-device inference is not available, the SDK
|
|
1860
|
+
* will fall back to using a cloud-hosted model.
|
|
1861
|
+
* <br/>
|
|
1862
|
+
* <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
|
|
1863
|
+
* on-device model. The SDK will not fall back to a cloud-hosted model.
|
|
1864
|
+
* If on-device inference is not available, inference methods will throw.
|
|
1865
|
+
* <br/>
|
|
1866
|
+
* <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
|
|
1867
|
+
* cloud-hosted model. The SDK will not fall back to an on-device model.
|
|
1868
|
+
* <br/>
|
|
1869
|
+
* <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
|
|
1870
|
+
* cloud-hosted model. If not available, the SDK will fall back to an
|
|
1871
|
+
* on-device model.
|
|
1872
|
+
*
|
|
1873
|
+
* @beta
|
|
1874
|
+
*/
|
|
1875
|
+
export declare const InferenceMode: {
|
|
1876
|
+
readonly PREFER_ON_DEVICE: "prefer_on_device";
|
|
1877
|
+
readonly ONLY_ON_DEVICE: "only_on_device";
|
|
1878
|
+
readonly ONLY_IN_CLOUD: "only_in_cloud";
|
|
1879
|
+
readonly PREFER_IN_CLOUD: "prefer_in_cloud";
|
|
1880
|
+
};
|
|
1881
|
+
|
|
1882
|
+
/**
|
|
1883
|
+
* Determines whether inference happens on-device or in-cloud.
|
|
1884
|
+
*
|
|
1885
|
+
* @beta
|
|
1886
|
+
*/
|
|
1887
|
+
export declare type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
|
|
1888
|
+
|
|
1889
|
+
/**
|
|
1890
|
+
* Indicates whether inference happened on-device or in-cloud.
|
|
1891
|
+
*
|
|
1892
|
+
* @beta
|
|
1893
|
+
*/
|
|
1894
|
+
export declare const InferenceSource: {
|
|
1895
|
+
readonly ON_DEVICE: "on_device";
|
|
1896
|
+
readonly IN_CLOUD: "in_cloud";
|
|
1897
|
+
};
|
|
1898
|
+
|
|
1899
|
+
/**
|
|
1900
|
+
* Indicates whether inference happened on-device or in-cloud.
|
|
1901
|
+
*
|
|
1902
|
+
* @beta
|
|
1903
|
+
*/
|
|
1904
|
+
export declare type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource];
|
|
1905
|
+
|
|
1906
|
+
/**
|
|
1907
|
+
* Content part interface if the part represents an image.
|
|
1908
|
+
* @public
|
|
1909
|
+
*/
|
|
1910
|
+
export declare interface InlineDataPart {
|
|
1911
|
+
text?: never;
|
|
1912
|
+
inlineData: GenerativeContentBlob;
|
|
1913
|
+
functionCall?: never;
|
|
1914
|
+
functionResponse?: never;
|
|
1915
|
+
/**
|
|
1916
|
+
* Applicable if `inlineData` is a video.
|
|
1917
|
+
*/
|
|
1918
|
+
videoMetadata?: VideoMetadata;
|
|
1919
|
+
thought?: boolean;
|
|
1920
|
+
/* Excluded from this release type: thoughtSignature */
|
|
1921
|
+
executableCode?: never;
|
|
1922
|
+
codeExecutionResult?: never;
|
|
1923
|
+
}
|
|
1924
|
+
|
|
1925
|
+
/**
|
|
1926
|
+
* Schema class for "integer" types.
|
|
1927
|
+
* @public
|
|
1928
|
+
*/
|
|
1929
|
+
export declare class IntegerSchema extends Schema {
|
|
1930
|
+
constructor(schemaParams?: SchemaParams);
|
|
1931
|
+
}
|
|
1932
|
+
|
|
1933
|
+
/**
|
|
1934
|
+
* The programming language of the code.
|
|
1935
|
+
*
|
|
1936
|
+
* @public
|
|
1937
|
+
*/
|
|
1938
|
+
export declare const Language: {
|
|
1939
|
+
UNSPECIFIED: string;
|
|
1940
|
+
PYTHON: string;
|
|
1941
|
+
};
|
|
1942
|
+
|
|
1943
|
+
/**
|
|
1944
|
+
* The programming language of the code.
|
|
1945
|
+
*
|
|
1946
|
+
* @public
|
|
1947
|
+
*/
|
|
1948
|
+
export declare type Language = (typeof Language)[keyof typeof Language];
|
|
1949
|
+
|
|
1950
|
+
/**
|
|
1951
|
+
* Configures the creation of an on-device language model session.
|
|
1952
|
+
* @beta
|
|
1953
|
+
*/
|
|
1954
|
+
export declare interface LanguageModelCreateCoreOptions {
|
|
1955
|
+
topK?: number;
|
|
1956
|
+
temperature?: number;
|
|
1957
|
+
expectedInputs?: LanguageModelExpected[];
|
|
1958
|
+
}
|
|
1959
|
+
|
|
1960
|
+
/**
|
|
1961
|
+
* Configures the creation of an on-device language model session.
|
|
1962
|
+
* @beta
|
|
1963
|
+
*/
|
|
1964
|
+
export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
|
|
1965
|
+
signal?: AbortSignal;
|
|
1966
|
+
initialPrompts?: LanguageModelMessage[];
|
|
1967
|
+
}
|
|
1968
|
+
|
|
1969
|
+
/**
|
|
1970
|
+
* Options for the expected inputs for an on-device language model.
|
|
1971
|
+
* @beta
|
|
1972
|
+
*/ export declare interface LanguageModelExpected {
|
|
1973
|
+
type: LanguageModelMessageType;
|
|
1974
|
+
languages?: string[];
|
|
1975
|
+
}
|
|
1976
|
+
|
|
1977
|
+
/**
|
|
1978
|
+
* An on-device language model message.
|
|
1979
|
+
* @beta
|
|
1980
|
+
*/
|
|
1981
|
+
export declare interface LanguageModelMessage {
|
|
1982
|
+
role: LanguageModelMessageRole;
|
|
1983
|
+
content: LanguageModelMessageContent[];
|
|
1984
|
+
}
|
|
1985
|
+
|
|
1986
|
+
/**
|
|
1987
|
+
* An on-device language model content object.
|
|
1988
|
+
* @beta
|
|
1989
|
+
*/
|
|
1990
|
+
export declare interface LanguageModelMessageContent {
|
|
1991
|
+
type: LanguageModelMessageType;
|
|
1992
|
+
value: LanguageModelMessageContentValue;
|
|
1993
|
+
}
|
|
1994
|
+
|
|
1995
|
+
/**
|
|
1996
|
+
* Content formats that can be provided as on-device message content.
|
|
1997
|
+
* @beta
|
|
1998
|
+
*/
|
|
1999
|
+
export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
|
|
2000
|
+
|
|
2001
|
+
/**
|
|
2002
|
+
* Allowable roles for on-device language model usage.
|
|
2003
|
+
* @beta
|
|
2004
|
+
*/
|
|
2005
|
+
export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
|
|
2006
|
+
|
|
2007
|
+
/**
|
|
2008
|
+
* Allowable types for on-device language model messages.
|
|
2009
|
+
* @beta
|
|
2010
|
+
*/
|
|
2011
|
+
export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
|
|
2012
|
+
|
|
2013
|
+
/**
|
|
2014
|
+
* Options for an on-device language model prompt.
|
|
2015
|
+
* @beta
|
|
2016
|
+
*/
|
|
2017
|
+
export declare interface LanguageModelPromptOptions {
|
|
2018
|
+
responseConstraint?: object;
|
|
2019
|
+
}
|
|
2020
|
+
|
|
2021
|
+
/**
|
|
2022
|
+
* Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
|
|
2023
|
+
*
|
|
2024
|
+
* @beta
|
|
2025
|
+
*/
|
|
2026
|
+
export declare interface LiveGenerationConfig {
|
|
2027
|
+
/**
|
|
2028
|
+
* Configuration for speech synthesis.
|
|
2029
|
+
*/
|
|
2030
|
+
speechConfig?: SpeechConfig;
|
|
2031
|
+
/**
|
|
2032
|
+
* Specifies the maximum number of tokens that can be generated in the response. The number of
|
|
2033
|
+
* tokens per word varies depending on the language outputted. Is unbounded by default.
|
|
2034
|
+
*/
|
|
2035
|
+
maxOutputTokens?: number;
|
|
2036
|
+
/**
|
|
2037
|
+
* Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
|
|
2038
|
+
* probability tokens are always selected. In this case, responses for a given prompt are mostly
|
|
2039
|
+
* deterministic, but a small amount of variation is still possible.
|
|
2040
|
+
*/
|
|
2041
|
+
temperature?: number;
|
|
2042
|
+
/**
|
|
2043
|
+
* Changes how the model selects tokens for output. Tokens are
|
|
2044
|
+
* selected from the most to least probable until the sum of their probabilities equals the `topP`
|
|
2045
|
+
* value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
|
|
2046
|
+
* and the `topP` value is 0.5, then the model will select either A or B as the next token by using
|
|
2047
|
+
* the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset.
|
|
2048
|
+
*/
|
|
2049
|
+
topP?: number;
|
|
2050
|
+
/**
|
|
2051
|
+
* Changes how the model selects token for output. A `topK` value of 1 means the select token is
|
|
2052
|
+
* the most probable among all tokens in the model's vocabulary, while a `topK` value 3 means that
|
|
2053
|
+
* the next token is selected from among the 3 most probably using probabilities sampled. Tokens
|
|
2054
|
+
* are then further filtered with the highest selected `temperature` sampling. Defaults to 40
|
|
2055
|
+
* if unspecified.
|
|
2056
|
+
*/
|
|
2057
|
+
topK?: number;
|
|
2058
|
+
/**
|
|
2059
|
+
* Positive penalties.
|
|
2060
|
+
*/
|
|
2061
|
+
presencePenalty?: number;
|
|
2062
|
+
/**
|
|
2063
|
+
* Frequency penalties.
|
|
2064
|
+
*/
|
|
2065
|
+
frequencyPenalty?: number;
|
|
2066
|
+
/**
|
|
2067
|
+
* The modalities of the response.
|
|
2068
|
+
*/
|
|
2069
|
+
responseModalities?: ResponseModality[];
|
|
2070
|
+
/**
|
|
2071
|
+
* Enables transcription of audio input.
|
|
2072
|
+
*
|
|
2073
|
+
* When enabled, the model will respond with transcriptions of your audio input in the `inputTranscriptions` property
|
|
2074
|
+
* in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
|
|
2075
|
+
* messages, so you may only receive small amounts of text per message. For example, if you ask the model
|
|
2076
|
+
* "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
|
|
2077
|
+
*/
|
|
2078
|
+
inputAudioTranscription?: AudioTranscriptionConfig;
|
|
2079
|
+
/**
|
|
2080
|
+
* Enables transcription of audio input.
|
|
2081
|
+
*
|
|
2082
|
+
* When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
|
|
2083
|
+
* in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
|
|
2084
|
+
* messages, so you may only receive small amounts of text per message. For example, if the model says
|
|
2085
|
+
* "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
|
|
2086
|
+
*/
|
|
2087
|
+
outputAudioTranscription?: AudioTranscriptionConfig;
|
|
2088
|
+
}
|
|
2089
|
+
|
|
2090
|
+
/**
|
|
2091
|
+
* Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
|
|
2092
|
+
* interactions with Gemini.
|
|
2093
|
+
*
|
|
2094
|
+
* This class should only be instantiated with {@link getLiveGenerativeModel}.
|
|
2095
|
+
*
|
|
2096
|
+
* @beta
|
|
2097
|
+
*/
|
|
2098
|
+
export declare class LiveGenerativeModel extends AIModel {
|
|
2099
|
+
/* Excluded from this release type: _webSocketHandler */
|
|
2100
|
+
generationConfig: LiveGenerationConfig;
|
|
2101
|
+
tools?: Tool[];
|
|
2102
|
+
toolConfig?: ToolConfig;
|
|
2103
|
+
systemInstruction?: Content;
|
|
2104
|
+
/* Excluded from this release type: __constructor */
|
|
2105
|
+
/**
|
|
2106
|
+
* Starts a {@link LiveSession}.
|
|
2107
|
+
*
|
|
2108
|
+
* @returns A {@link LiveSession}.
|
|
2109
|
+
* @throws If the connection failed to be established with the server.
|
|
2110
|
+
*
|
|
2111
|
+
* @beta
|
|
2112
|
+
*/
|
|
2113
|
+
connect(): Promise<LiveSession>;
|
|
2114
|
+
}
|
|
2115
|
+
|
|
2116
|
+
/**
|
|
2117
|
+
* Params passed to {@link getLiveGenerativeModel}.
|
|
2118
|
+
* @beta
|
|
2119
|
+
*/
|
|
2120
|
+
export declare interface LiveModelParams {
|
|
2121
|
+
model: string;
|
|
2122
|
+
generationConfig?: LiveGenerationConfig;
|
|
2123
|
+
tools?: Tool[];
|
|
2124
|
+
toolConfig?: ToolConfig;
|
|
2125
|
+
systemInstruction?: string | Part | Content;
|
|
2126
|
+
}
|
|
2127
|
+
|
|
2128
|
+
/**
|
|
2129
|
+
* The types of responses that can be returned by {@link LiveSession.receive}.
|
|
2130
|
+
*
|
|
2131
|
+
* @beta
|
|
2132
|
+
*/
|
|
2133
|
+
export declare const LiveResponseType: {
|
|
2134
|
+
SERVER_CONTENT: string;
|
|
2135
|
+
TOOL_CALL: string;
|
|
2136
|
+
TOOL_CALL_CANCELLATION: string;
|
|
2137
|
+
GOING_AWAY_NOTICE: string;
|
|
2138
|
+
};
|
|
2139
|
+
|
|
2140
|
+
/**
|
|
2141
|
+
* The types of responses that can be returned by {@link LiveSession.receive}.
|
|
2142
|
+
* This is a property on all messages that can be used for type narrowing. This property is not
|
|
2143
|
+
* returned by the server, it is assigned to a server message object once it's parsed.
|
|
2144
|
+
*
|
|
2145
|
+
* @beta
|
|
2146
|
+
*/
|
|
2147
|
+
export declare type LiveResponseType = (typeof LiveResponseType)[keyof typeof LiveResponseType];
|
|
2148
|
+
|
|
2149
|
+
/**
 * An incremental content update from the model.
 *
 * @beta
 */
export declare interface LiveServerContent {
    /** Discriminant assigned by the SDK when the server message is parsed; use it to narrow messages yielded by {@link LiveSession.receive}. */
    type: 'serverContent';
    /**
     * The content that the model has generated as part of the current conversation with the user.
     */
    modelTurn?: Content;
    /**
     * Indicates whether the turn is complete. This is `undefined` if the turn is not complete.
     */
    turnComplete?: boolean;
    /**
     * Indicates whether the model was interrupted by the client. An interruption occurs when
     * the client sends a message before the model finishes its turn. This is `undefined` if the
     * model was not interrupted.
     */
    interrupted?: boolean;
    /**
     * Transcription of the audio that was input to the model.
     */
    inputTranscription?: Transcription;
    /**
     * Transcription of the audio output from the model.
     */
    outputTranscription?: Transcription;
}

/**
 * Notification that the server will not be able to service the client soon.
 *
 * @beta
 */
export declare interface LiveServerGoingAwayNotice {
    /** Discriminant assigned by the SDK when the server message is parsed; use it to narrow messages yielded by {@link LiveSession.receive}. */
    type: 'goingAwayNotice';
    /**
     * The remaining time (in seconds) before the connection will be terminated.
     */
    timeLeft: number;
}

/**
 * A request from the model for the client to execute one or more functions.
 *
 * @beta
 */
export declare interface LiveServerToolCall {
    /** Discriminant assigned by the SDK when the server message is parsed; use it to narrow messages yielded by {@link LiveSession.receive}. */
    type: 'toolCall';
    /**
     * An array of function calls to run.
     */
    functionCalls: FunctionCall[];
}

/**
 * Notification to cancel a previous function call triggered by {@link LiveServerToolCall}.
 *
 * @beta
 */
export declare interface LiveServerToolCallCancellation {
    /** Discriminant assigned by the SDK when the server message is parsed; use it to narrow messages yielded by {@link LiveSession.receive}. */
    type: 'toolCallCancellation';
    /**
     * IDs of function calls that were cancelled. These refer to the `id` property of a {@link FunctionCall}.
     */
    functionIds: string[];
}
|
|
2218
|
+
|
|
2219
|
+
/**
 * Represents an active, real-time, bidirectional conversation with the model.
 *
 * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
 *
 * @beta
 */
export declare class LiveSession {
    /* NOTE(review): implementations elided from this release type; the names suggest the
       realtime WebSocket transport and the stream of parsed server messages — confirm in source. */
    private webSocketHandler;
    private serverMessages;
    /**
     * Indicates whether this Live session is closed.
     *
     * @beta
     */
    isClosed: boolean;
    /**
     * Indicates whether this Live session is being controlled by an `AudioConversationController`.
     *
     * @beta
     */
    inConversation: boolean;
    /* Excluded from this release type: __constructor */
    /**
     * Sends content to the server.
     *
     * @param request - The message to send to the model.
     * @param turnComplete - Indicates if the turn is complete. Defaults to false.
     * @throws If this session has been closed.
     *
     * @beta
     */
    send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>;
    /**
     * Sends text to the server in realtime.
     *
     * @example
     * ```javascript
     * liveSession.sendTextRealtime("Hello, how are you?");
     * ```
     *
     * @param text - The text data to send.
     * @throws If this session has been closed.
     *
     * @beta
     */
    sendTextRealtime(text: string): Promise<void>;
    /**
     * Sends audio data to the server in realtime.
     *
     * @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
     * little-endian.
     *
     * @example
     * ```javascript
     * // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
     * const blob = { mimeType: "audio/pcm", data: pcmData };
     * liveSession.sendAudioRealtime(blob);
     * ```
     *
     * @param blob - The base64-encoded PCM data to send to the server in realtime.
     * @throws If this session has been closed.
     *
     * @beta
     */
    sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>;
    /**
     * Sends video data to the server in realtime.
     *
     * @remarks The server requires that the video is sent as individual video frames at 1 FPS. It
     * is recommended to set `mimeType` to `image/jpeg`.
     *
     * @example
     * ```javascript
     * // const videoFrame = ... base64-encoded JPEG data
     * const blob = { mimeType: "image/jpeg", data: videoFrame };
     * liveSession.sendVideoRealtime(blob);
     * ```
     * @param blob - The base64-encoded video data to send to the server in realtime.
     * @throws If this session has been closed.
     *
     * @beta
     */
    sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>;
    /**
     * Sends function responses to the server.
     *
     * @param functionResponses - The function responses to send.
     * @throws If this session has been closed.
     *
     * @beta
     */
    sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>;
    /**
     * Yields messages received from the server.
     * This can only be used by one consumer at a time.
     *
     * @returns An `AsyncGenerator` that yields server messages as they arrive.
     * @throws If the session is already closed, or if we receive a response that we don't support.
     *
     * @beta
     */
    receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation | LiveServerGoingAwayNotice>;
    /**
     * Closes this session.
     * All methods on this session will throw an error once this resolves.
     *
     * @beta
     */
    close(): Promise<void>;
    /**
     * Sends realtime input to the server.
     *
     * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
     *
     * @param mediaChunks - The media chunks to send.
     * @throws If this session has been closed.
     *
     * @beta
     */
    sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>;
    /**
     * @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
     *
     * Sends a stream of {@link GenerativeContentBlob}.
     *
     * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
     * @throws If this session has been closed.
     *
     * @beta
     */
    sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>;
}
|
|
2352
|
+
|
|
2353
|
+
/**
 * Content part modality.
 * @public
 */
export declare const Modality: {
    /**
     * Unspecified modality.
     */
    readonly MODALITY_UNSPECIFIED: "MODALITY_UNSPECIFIED";
    /**
     * Plain text.
     */
    readonly TEXT: "TEXT";
    /**
     * Image.
     */
    readonly IMAGE: "IMAGE";
    /**
     * Video.
     */
    readonly VIDEO: "VIDEO";
    /**
     * Audio.
     */
    readonly AUDIO: "AUDIO";
    /**
     * Document (for example, PDF).
     */
    readonly DOCUMENT: "DOCUMENT";
};

/**
 * Content part modality.
 * @public
 */
export declare type Modality = (typeof Modality)[keyof typeof Modality];

/**
 * Represents token counting info for a single modality.
 *
 * @public
 */
export declare interface ModalityTokenCount {
    /** The modality associated with this token count. */
    modality: Modality;
    /** The number of tokens counted. */
    tokenCount: number;
}

/**
 * Params passed to {@link getGenerativeModel}.
 * @public
 */
export declare interface ModelParams extends BaseParams {
    /** The name of the model to use. */
    model: string;
    /** Optional. Tools provided to the model. */
    tools?: Tool[];
    /** Optional. Configuration governing how the provided tools may be used. */
    toolConfig?: ToolConfig;
    /** Optional. Instructions that steer model behavior. */
    systemInstruction?: string | Part | Content;
}
|
|
2412
|
+
|
|
2413
|
+
/**
 * Schema class for "number" types.
 * @public
 */
export declare class NumberSchema extends Schema {
    constructor(schemaParams?: SchemaParams);
}

/**
 * Schema class for "object" types.
 * The `properties` param must be a map of `Schema` objects.
 * @public
 */
export declare class ObjectSchema extends Schema {
    /** Map of property names to the `Schema` describing each property. */
    properties: {
        [k: string]: TypedSchema;
    };
    /** Names of keys in `properties` that are not required in the generated object. */
    optionalProperties: string[];
    constructor(schemaParams: SchemaParams, properties: {
        [k: string]: TypedSchema;
    }, optionalProperties?: string[]);
    /* Excluded from this release type: toJSON */
}

/**
 * Interface for JSON parameters in a schema of {@link (SchemaType:type)}
 * "object" when not using the `Schema.object()` helper.
 * @public
 */
export declare interface ObjectSchemaRequest extends SchemaRequest {
    type: 'object';
    /**
     * This is not a property accepted in the final request to the backend, but is
     * a client-side convenience property that is only usable by constructing
     * a schema through the `Schema.object()` helper method. Populating this
     * property will cause response errors if the object is not wrapped with
     * `Schema.object()`.
     */
    optionalProperties?: never;
}
|
|
2453
|
+
|
|
2454
|
+
/**
 * Encapsulates configuration for on-device inference.
 *
 * @beta
 */
export declare interface OnDeviceParams {
    /** Options applied when creating the on-device language model. */
    createOptions?: LanguageModelCreateOptions;
    /** Options applied when prompting the on-device language model. */
    promptOptions?: LanguageModelPromptOptions;
}

/**
 * Represents the result of the code execution.
 *
 * @public
 */
export declare const Outcome: {
    // NOTE(review): these members are typed as plain `string`, so the derived
    // `Outcome` union below collapses to `string` (unlike `Modality`/`SchemaType`,
    // which use readonly literal members). Consider narrowing to literal types —
    // confirm the runtime values first.
    UNSPECIFIED: string;
    OK: string;
    FAILED: string;
    DEADLINE_EXCEEDED: string;
};

/**
 * Represents the result of the code execution.
 *
 * @public
 */
export declare type Outcome = (typeof Outcome)[keyof typeof Outcome];

/**
 * Content part - includes text, image/video, or function call/response
 * part types.
 * @public
 */
export declare type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart;

/**
 * Possible roles.
 * @public
 */
export declare const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"];
|
|
2495
|
+
|
|
2496
|
+
/**
 * Configuration for a pre-built voice.
 *
 * @beta
 */
export declare interface PrebuiltVoiceConfig {
    /**
     * The voice name to use for speech synthesis.
     *
     * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}.
     */
    voiceName?: string;
}

/**
 * If the prompt was blocked, this will be populated with `blockReason` and
 * the relevant `safetyRatings`.
 * @public
 */
export declare interface PromptFeedback {
    /** The reason the prompt was blocked, if it was blocked. */
    blockReason?: BlockReason;
    /** Safety ratings for the prompt. */
    safetyRatings: SafetyRating[];
    /**
     * A human-readable description of the `blockReason`.
     *
     * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
     */
    blockReasonMessage?: string;
}

/**
 * Params passed to {@link getGenerativeModel}.
 * @public
 */
export declare interface RequestOptions {
    /**
     * Request timeout in milliseconds. Defaults to 180 seconds (180000ms).
     */
    timeout?: number;
    /**
     * Base url for endpoint. Defaults to
     * https://firebasevertexai.googleapis.com, which is the
     * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API}
     * (used regardless of your chosen Gemini API provider).
     */
    baseUrl?: string;
    /**
     * Limits amount of sequential function calls the SDK can make during automatic
     * function calling, in order to prevent infinite loops. If not specified,
     * this value defaults to 10.
     *
     * When it reaches this limit, it will return the last response received
     * from the model, whether it is a text response or further function calls.
     */
    // NOTE(review): the property name is misspelled ("Sequental"); kept as-is
    // because renaming a published API property would break callers.
    maxSequentalFunctionCalls?: number;
}
|
|
2552
|
+
|
|
2553
|
+
/**
 * Generation modalities to be returned in generation responses.
 *
 * @beta
 */
export declare const ResponseModality: {
    /**
     * Text.
     * @beta
     */
    readonly TEXT: "TEXT";
    /**
     * Image.
     * @beta
     */
    readonly IMAGE: "IMAGE";
    /**
     * Audio.
     * @beta
     */
    readonly AUDIO: "AUDIO";
};

/**
 * Generation modalities to be returned in generation responses.
 *
 * @beta
 */
export declare type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];

/**
 * An attribution to a retrieved context.
 * @public
 */
export declare interface RetrievedContextAttribution {
    /** URI of the retrieved context this attribution refers to. */
    uri: string;
    /** Title of the retrieved context this attribution refers to. */
    title: string;
}
|
|
2590
|
+
|
|
2591
|
+
/**
|
|
2592
|
+
* @license
|
|
2593
|
+
* Copyright 2024 Google LLC
|
|
2594
|
+
*
|
|
2595
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
2596
|
+
* you may not use this file except in compliance with the License.
|
|
2597
|
+
* You may obtain a copy of the License at
|
|
2598
|
+
*
|
|
2599
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
|
2600
|
+
*
|
|
2601
|
+
* Unless required by applicable law or agreed to in writing, software
|
|
2602
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
2603
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
2604
|
+
* See the License for the specific language governing permissions and
|
|
2605
|
+
* limitations under the License.
|
|
2606
|
+
*/
|
|
2607
|
+
/**
 * Role is the producer of the content.
 * @public
 */
export declare type Role = (typeof POSSIBLE_ROLES)[number];

/**
 * A safety rating associated with a {@link GenerateContentCandidate}
 * @public
 */
export declare interface SafetyRating {
    /** The harm category this rating applies to. */
    category: HarmCategory;
    /** The probability level that the content falls into `category`. */
    probability: HarmProbability;
    /**
     * The harm severity level.
     *
     * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to `HarmSeverity.UNSUPPORTED`.
     */
    severity: HarmSeverity;
    /**
     * The probability score of the harm category.
     *
     * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
     */
    probabilityScore: number;
    /**
     * The severity score of the harm category.
     *
     * This property is only supported when using the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this property is not supported and will default to 0.
     */
    severityScore: number;
    /** Whether the content was blocked for this rating — presumably when `probability` meets the configured threshold; verify against backend docs. */
    blocked: boolean;
}

/**
 * Safety setting that can be sent as part of request parameters.
 * @public
 */
export declare interface SafetySetting {
    /** The harm category this setting applies to. */
    category: HarmCategory;
    /** The blocking threshold for `category`. */
    threshold: HarmBlockThreshold;
    /**
     * The harm block method.
     *
     * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
     * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be
     * thrown if this property is defined.
     */
    method?: HarmBlockMethod;
}
|
|
2660
|
+
|
|
2661
|
+
/**
 * Parent class encompassing all Schema types, with static methods that
 * allow building specific Schema types. This class can be converted with
 * `JSON.stringify()` into a JSON string accepted by Vertex AI REST endpoints.
 * (This string conversion is automatically done when calling SDK methods.)
 * @public
 */
export declare abstract class Schema implements SchemaInterface {
    /**
     * Optional. The type of the property.
     * This can only be undefined when using `anyOf` schemas, which do not have an
     * explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification}.
     */
    type?: SchemaType;
    /** Optional. The format of the property.
     * Supported formats:<br/>
     * <ul>
     *  <li>for NUMBER type: "float", "double"</li>
     *  <li>for INTEGER type: "int32", "int64"</li>
     *  <li>for STRING type: "email", "byte", etc</li>
     * </ul>
     */
    format?: string;
    /** Optional. The description of the property. */
    description?: string;
    /** Optional. The items of the property. */
    items?: SchemaInterface;
    /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
    minItems?: number;
    /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
    maxItems?: number;
    /** Optional. Whether the property is nullable. Defaults to false. */
    nullable: boolean;
    /** Optional. The example of the property. */
    example?: unknown;
    /**
     * Allows user to add other schema properties that have not yet
     * been officially added to the SDK.
     */
    [key: string]: unknown;
    constructor(schemaParams: SchemaInterface);
    /* Excluded from this release type: toJSON */
    /** Builds an {@link ArraySchema} whose elements are described by `items`. */
    static array(arrayParams: SchemaParams & {
        items: Schema;
    }): ArraySchema;
    /** Builds an {@link ObjectSchema} from a map of property schemas. */
    static object(objectParams: SchemaParams & {
        properties: {
            [k: string]: Schema;
        };
        optionalProperties?: string[];
    }): ObjectSchema;
    /** Builds a {@link StringSchema}. */
    static string(stringParams?: SchemaParams): StringSchema;
    /** Builds a {@link StringSchema} restricted to the given `enum` values. */
    static enumString(stringParams: SchemaParams & {
        enum: string[];
    }): StringSchema;
    /** Builds an {@link IntegerSchema}. */
    static integer(integerParams?: SchemaParams): IntegerSchema;
    /** Builds a {@link NumberSchema}. */
    static number(numberParams?: SchemaParams): NumberSchema;
    /** Builds a {@link BooleanSchema}. */
    static boolean(booleanParams?: SchemaParams): BooleanSchema;
    /** Builds an {@link AnyOfSchema}; generated data must be valid against at least one schema in `anyOf`. */
    static anyOf(anyOfParams: SchemaParams & {
        anyOf: TypedSchema[];
    }): AnyOfSchema;
}

/**
 * Interface for {@link Schema} class.
 * @public
 */
export declare interface SchemaInterface extends SchemaShared<SchemaInterface> {
    /**
     * The type of the property. This can only be undefined when using `anyOf` schemas,
     * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI Specification}.
     */
    type?: SchemaType;
}

/**
 * Params passed to {@link Schema} static methods to create specific
 * {@link Schema} classes.
 * @public
 */
export declare interface SchemaParams extends SchemaShared<SchemaInterface> {
}

/**
 * Final format for {@link Schema} params passed to backend requests.
 * @public
 */
export declare interface SchemaRequest extends SchemaShared<SchemaRequest> {
    /**
     * The type of the property. This can only be undefined when using `anyOf` schemas,
     * which do not have an explicit type in the {@link https://swagger.io/docs/specification/v3_0/data-models/data-types/#any-type | OpenAPI specification }.
     */
    type?: SchemaType;
    /** Optional. Array of required property names. */
    required?: string[];
}

/**
 * Basic {@link Schema} properties shared across several Schema-related
 * types.
 * @public
 */
export declare interface SchemaShared<T> {
    /**
     * An array of {@link Schema}. The generated data must be valid against any of the schemas
     * listed in this array. This allows specifying multiple possible structures or types for a
     * single field.
     */
    anyOf?: T[];
    /** Optional. The format of the property.
     * When using the Gemini Developer API ({@link GoogleAIBackend}), this must be either `'enum'` or
     * `'date-time'`, otherwise requests will fail.
     */
    format?: string;
    /** Optional. The description of the property. */
    description?: string;
    /**
     * The title of the property. This helps document the schema's purpose but does not typically
     * constrain the generated value. It can subtly guide the model by clarifying the intent of a
     * field.
     */
    title?: string;
    /** Optional. The items of the property. */
    items?: T;
    /** The minimum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
    minItems?: number;
    /** The maximum number of items (elements) in a schema of {@link (SchemaType:type)} `array`. */
    maxItems?: number;
    /** Optional. Map of `Schema` objects. */
    properties?: {
        [k: string]: T;
    };
    /** A hint suggesting the order in which the keys should appear in the generated JSON string. */
    propertyOrdering?: string[];
    /** Optional. The enum of the property. */
    enum?: string[];
    /** Optional. The example of the property. */
    example?: unknown;
    /** Optional. Whether the property is nullable. */
    nullable?: boolean;
    /** The minimum value of a numeric type. */
    minimum?: number;
    /** The maximum value of a numeric type. */
    maximum?: number;
    /** Allows additional schema properties not yet officially added to the SDK. */
    [key: string]: unknown;
}

/**
 * Contains the list of OpenAPI data types
 * as defined by the
 * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
 * @public
 */
export declare const SchemaType: {
    /** String type. */
    readonly STRING: "string";
    /** Number type. */
    readonly NUMBER: "number";
    /** Integer type. */
    readonly INTEGER: "integer";
    /** Boolean type. */
    readonly BOOLEAN: "boolean";
    /** Array type. */
    readonly ARRAY: "array";
    /** Object type. */
    readonly OBJECT: "object";
};

/**
 * Contains the list of OpenAPI data types
 * as defined by the
 * {@link https://swagger.io/docs/specification/data-models/data-types/ | OpenAPI specification}
 * @public
 */
export declare type SchemaType = (typeof SchemaType)[keyof typeof SchemaType];
|
|
2836
|
+
|
|
2837
|
+
/**
 * Google search entry point.
 *
 * @public
 */
export declare interface SearchEntrypoint {
    /**
     * HTML/CSS snippet that must be embedded in a web page. The snippet is designed to avoid
     * undesired interaction with the rest of the page's CSS.
     *
     * To ensure proper rendering and prevent CSS conflicts, it is recommended
     * to encapsulate this `renderedContent` within a shadow DOM when embedding it
     * into a webpage. See {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_components/Using_shadow_DOM | MDN: Using shadow DOM}.
     *
     * @example
     * ```javascript
     * const container = document.createElement('div');
     * document.body.appendChild(container);
     * container.attachShadow({ mode: 'open' }).innerHTML = renderedContent;
     * ```
     */
    renderedContent?: string;
}

/**
 * Represents a specific segment within a {@link Content} object, often used to
 * pinpoint the exact location of text or data that grounding information refers to.
 *
 * @public
 */
export declare interface Segment {
    /**
     * The zero-based index of the {@link Part} object within the `parts` array
     * of its parent {@link Content} object. This identifies which part of the
     * content the segment belongs to.
     */
    partIndex: number;
    /**
     * The zero-based start index of the segment within the specified `Part`,
     * measured in UTF-8 bytes. This offset is inclusive, starting from 0 at the
     * beginning of the part's content (e.g., `Part.text`).
     */
    startIndex: number;
    /**
     * The zero-based end index of the segment within the specified `Part`,
     * measured in UTF-8 bytes. This offset is exclusive, meaning the character
     * at this index is not included in the segment.
     */
    endIndex: number;
    /**
     * The text corresponding to the segment from the response.
     */
    text: string;
}

/**
 * Options that can be provided per-request.
 * Extends the base {@link RequestOptions} (like `timeout` and `baseUrl`)
 * with request-specific controls like cancellation via `AbortSignal`.
 *
 * Options specified here will override any default {@link RequestOptions}
 * configured on a model (for example, {@link GenerativeModel}).
 *
 * @public
 */
export declare interface SingleRequestOptions extends RequestOptions {
    /**
     * An `AbortSignal` instance that allows cancelling ongoing requests (like `generateContent` or
     * `generateImages`).
     *
     * If provided, calling `abort()` on the corresponding `AbortController`
     * will attempt to cancel the underlying HTTP request. An `AbortError` will be thrown
     * if cancellation is successful.
     *
     * Note that this will not cancel the request in the backend, so any applicable billing charges
     * will still be applied despite cancellation.
     *
     * @example
     * ```javascript
     * const controller = new AbortController();
     * const model = getGenerativeModel({
     *   // ...
     * });
     * model.generateContent(
     *   "Write a story about a magic backpack.",
     *   { signal: controller.signal }
     * );
     *
     * // To cancel request:
     * controller.abort();
     * ```
     * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
     */
    signal?: AbortSignal;
}

/**
 * Configures speech synthesis.
 *
 * @beta
 */
export declare interface SpeechConfig {
    /**
     * Configures the voice to be used in speech synthesis.
     */
    voiceConfig?: VoiceConfig;
}
|
|
2944
|
+
|
|
2945
|
+
/**
|
|
2946
|
+
* Starts a real-time, bidirectional audio conversation with the model. This helper function manages
|
|
2947
|
+
* the complexities of microphone access, audio recording, playback, and interruptions.
|
|
2948
|
+
*
|
|
2949
|
+
* @remarks Important: This function must be called in response to a user gesture
|
|
2950
|
+
* (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
|
|
2951
|
+
*
|
|
2952
|
+
* @example
|
|
2953
|
+
* ```javascript
|
|
2954
|
+
* const liveSession = await model.connect();
|
|
2955
|
+
* let conversationController;
|
|
2956
|
+
*
|
|
2957
|
+
* // This function must be called from within a click handler.
|
|
2958
|
+
* async function startConversation() {
|
|
2959
|
+
* try {
|
|
2960
|
+
* conversationController = await startAudioConversation(liveSession);
|
|
2961
|
+
* } catch (e) {
|
|
2962
|
+
* // Handle AI-specific errors
|
|
2963
|
+
* if (e instanceof AIError) {
|
|
2964
|
+
* console.error("AI Error:", e.message);
|
|
2965
|
+
* }
|
|
2966
|
+
* // Handle microphone permission and hardware errors
|
|
2967
|
+
* else if (e instanceof DOMException) {
|
|
2968
|
+
* console.error("Microphone Error:", e.message);
|
|
2969
|
+
* }
|
|
2970
|
+
* // Handle other unexpected errors
|
|
2971
|
+
* else {
|
|
2972
|
+
* console.error("An unexpected error occurred:", e);
|
|
2973
|
+
* }
|
|
2974
|
+
* }
|
|
2975
|
+
* }
|
|
2976
|
+
*
|
|
2977
|
+
* // Later, to stop the conversation:
|
|
2978
|
+
* // if (conversationController) {
|
|
2979
|
+
* // await conversationController.stop();
|
|
2980
|
+
* // }
|
|
2981
|
+
* ```
|
|
2982
|
+
*
|
|
2983
|
+
* @param liveSession - An active {@link LiveSession} instance.
|
|
2984
|
+
* @param options - Configuration options for the audio conversation.
|
|
2985
|
+
* @returns A `Promise` that resolves with an {@link AudioConversationController}.
|
|
2986
|
+
* @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
|
|
2987
|
+
* @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
|
|
2988
|
+
*
|
|
2989
|
+
* @beta
|
|
2990
|
+
*/
|
|
2991
|
+
export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>;
|
|
2992
|
+
|
|
2993
|
+
/**
|
|
2994
|
+
* Options for {@link startAudioConversation}.
|
|
2995
|
+
*
|
|
2996
|
+
* @beta
|
|
2997
|
+
*/
|
|
2998
|
+
export declare interface StartAudioConversationOptions {
|
|
2999
|
+
/**
|
|
3000
|
+
* An async handler that is called when the model requests a function to be executed.
|
|
3001
|
+
* The handler should perform the function call and return the result as a `Part`,
|
|
3002
|
+
* which will then be sent back to the model.
|
|
3003
|
+
*/
|
|
3004
|
+
functionCallingHandler?: (functionCalls: FunctionCall[]) => Promise<FunctionResponse>;
|
|
3005
|
+
}
|
|
3006
|
+
|
|
3007
|
+
/**
|
|
3008
|
+
* Params for {@link GenerativeModel.startChat}.
|
|
3009
|
+
* @public
|
|
3010
|
+
*/
|
|
3011
|
+
export declare interface StartChatParams extends BaseParams {
|
|
3012
|
+
history?: Content[];
|
|
3013
|
+
tools?: Tool[];
|
|
3014
|
+
toolConfig?: ToolConfig;
|
|
3015
|
+
systemInstruction?: string | Part | Content;
|
|
3016
|
+
}
|
|
3017
|
+
|
|
3018
|
+
/**
|
|
3019
|
+
* Schema class for "string" types. Can be used with or without
|
|
3020
|
+
* enum values.
|
|
3021
|
+
* @public
|
|
3022
|
+
*/
|
|
3023
|
+
export declare class StringSchema extends Schema {
|
|
3024
|
+
enum?: string[];
|
|
3025
|
+
constructor(schemaParams?: SchemaParams, enumValues?: string[]);
|
|
3026
|
+
/* Excluded from this release type: toJSON */
|
|
3027
|
+
}
|
|
3028
|
+
|
|
3029
|
+
/**
|
|
3030
|
+
* {@link GenerativeModel} APIs that execute on a server-side template.
|
|
3031
|
+
*
|
|
3032
|
+
* This class should only be instantiated with {@link getTemplateGenerativeModel}.
|
|
3033
|
+
*
|
|
3034
|
+
* @beta
|
|
3035
|
+
*/
|
|
3036
|
+
export declare class TemplateGenerativeModel {
|
|
3037
|
+
/* Excluded from this release type: _apiSettings */
|
|
3038
|
+
/**
|
|
3039
|
+
* Additional options to use when making requests.
|
|
3040
|
+
*/
|
|
3041
|
+
requestOptions?: RequestOptions;
|
|
3042
|
+
/**
|
|
3043
|
+
* @hideconstructor
|
|
3044
|
+
*/
|
|
3045
|
+
constructor(ai: AI, requestOptions?: RequestOptions);
|
|
3046
|
+
/**
|
|
3047
|
+
* Makes a single non-streaming call to the model and returns an object
|
|
3048
|
+
* containing a single {@link GenerateContentResponse}.
|
|
3049
|
+
*
|
|
3050
|
+
* @param templateId - The ID of the server-side template to execute.
|
|
3051
|
+
* @param templateVariables - A key-value map of variables to populate the
|
|
3052
|
+
* template with.
|
|
3053
|
+
*
|
|
3054
|
+
* @beta
|
|
3055
|
+
*/
|
|
3056
|
+
generateContent(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
|
|
3057
|
+
/**
|
|
3058
|
+
* Makes a single streaming call to the model and returns an object
|
|
3059
|
+
* containing an iterable stream that iterates over all chunks in the
|
|
3060
|
+
* streaming response as well as a promise that returns the final aggregated
|
|
3061
|
+
* response.
|
|
3062
|
+
*
|
|
3063
|
+
* @param templateId - The ID of the server-side template to execute.
|
|
3064
|
+
* @param templateVariables - A key-value map of variables to populate the
|
|
3065
|
+
* template with.
|
|
3066
|
+
*
|
|
3067
|
+
* @beta
|
|
3068
|
+
*/
|
|
3069
|
+
generateContentStream(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
|
|
3070
|
+
}
|
|
3071
|
+
|
|
3072
|
+
/**
|
|
3073
|
+
* Class for Imagen model APIs that execute on a server-side template.
|
|
3074
|
+
*
|
|
3075
|
+
* This class should only be instantiated with {@link getTemplateImagenModel}.
|
|
3076
|
+
*
|
|
3077
|
+
* @beta
|
|
3078
|
+
*/
|
|
3079
|
+
export declare class TemplateImagenModel {
|
|
3080
|
+
/* Excluded from this release type: _apiSettings */
|
|
3081
|
+
/**
|
|
3082
|
+
* Additional options to use when making requests.
|
|
3083
|
+
*/
|
|
3084
|
+
requestOptions?: RequestOptions;
|
|
3085
|
+
/**
|
|
3086
|
+
* @hideconstructor
|
|
3087
|
+
*/
|
|
3088
|
+
constructor(ai: AI, requestOptions?: RequestOptions);
|
|
3089
|
+
/**
|
|
3090
|
+
* Makes a single call to the model and returns an object containing a single
|
|
3091
|
+
* {@link ImagenGenerationResponse}.
|
|
3092
|
+
*
|
|
3093
|
+
* @param templateId - The ID of the server-side template to execute.
|
|
3094
|
+
* @param templateVariables - A key-value map of variables to populate the
|
|
3095
|
+
* template with.
|
|
3096
|
+
*
|
|
3097
|
+
* @beta
|
|
3098
|
+
*/
|
|
3099
|
+
generateImages(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
|
|
3100
|
+
}
|
|
3101
|
+
|
|
3102
|
+
/**
|
|
3103
|
+
* Content part interface if the part represents a text string.
|
|
3104
|
+
* @public
|
|
3105
|
+
*/
|
|
3106
|
+
export declare interface TextPart {
|
|
3107
|
+
text: string;
|
|
3108
|
+
inlineData?: never;
|
|
3109
|
+
functionCall?: never;
|
|
3110
|
+
functionResponse?: never;
|
|
3111
|
+
thought?: boolean;
|
|
3112
|
+
/* Excluded from this release type: thoughtSignature */
|
|
3113
|
+
executableCode?: never;
|
|
3114
|
+
codeExecutionResult?: never;
|
|
3115
|
+
}
|
|
3116
|
+
|
|
3117
|
+
/**
|
|
3118
|
+
* Configuration for "thinking" behavior of compatible Gemini models.
|
|
3119
|
+
*
|
|
3120
|
+
* Certain models utilize a thinking process before generating a response. This allows them to
|
|
3121
|
+
* reason through complex problems and plan a more coherent and accurate answer.
|
|
3122
|
+
*
|
|
3123
|
+
* @public
|
|
3124
|
+
*/
|
|
3125
|
+
export declare interface ThinkingConfig {
|
|
3126
|
+
/**
|
|
3127
|
+
* The thinking budget, in tokens.
|
|
3128
|
+
*
|
|
3129
|
+
* @remarks
|
|
3130
|
+
* This parameter sets an upper limit on the number of tokens the model can use for its internal
|
|
3131
|
+
* "thinking" process. A higher budget may result in higher quality responses for complex tasks
|
|
3132
|
+
* but can also increase latency and cost.
|
|
3133
|
+
*
|
|
3134
|
+
* The range of supported thinking budget values depends on the model.
|
|
3135
|
+
*
|
|
3136
|
+
* <ul>
|
|
3137
|
+
* <li>To use the default thinking budget for a model, leave
|
|
3138
|
+
* this value undefined.</li>
|
|
3139
|
+
*
|
|
3140
|
+
* <li>To disable thinking, when supported by the model, set this value
|
|
3141
|
+
* to `0`.</li>
|
|
3142
|
+
*
|
|
3143
|
+
* <li>To use dynamic thinking, which allows the model to decide on the thinking
|
|
3144
|
+
* budget based on the task, set this value to `-1`.</li>
|
|
3145
|
+
* </ul>
|
|
3146
|
+
*
|
|
3147
|
+
* An error will be thrown if you set a thinking budget for a model that does not support this
|
|
3148
|
+
* feature or if the specified budget is not within the model's supported range.
|
|
3149
|
+
*
|
|
3150
|
+
* The model will also error if `thinkingLevel` and `thinkingBudget` are
|
|
3151
|
+
* both set.
|
|
3152
|
+
*/
|
|
3153
|
+
thinkingBudget?: number;
|
|
3154
|
+
/**
|
|
3155
|
+
* If not specified, Gemini will use the model's default dynamic thinking level.
|
|
3156
|
+
*
|
|
3157
|
+
* @remarks
|
|
3158
|
+
* Note: The model will error if `thinkingLevel` and `thinkingBudget` are
|
|
3159
|
+
* both set.
|
|
3160
|
+
*
|
|
3161
|
+
* Important: Gemini 2.5 series models do not support thinking levels; use
|
|
3162
|
+
* `thinkingBudget` to set a thinking budget instead.
|
|
3163
|
+
*/
|
|
3164
|
+
thinkingLevel?: ThinkingLevel;
|
|
3165
|
+
/**
|
|
3166
|
+
* Whether to include "thought summaries" in the model's response.
|
|
3167
|
+
*
|
|
3168
|
+
* @remarks
|
|
3169
|
+
* Thought summaries provide a brief overview of the model's internal thinking process,
|
|
3170
|
+
* offering insight into how it arrived at the final answer. This can be useful for
|
|
3171
|
+
* debugging, understanding the model's reasoning, and verifying its accuracy.
|
|
3172
|
+
*/
|
|
3173
|
+
includeThoughts?: boolean;
|
|
3174
|
+
}
|
|
3175
|
+
|
|
3176
|
+
/**
|
|
3177
|
+
* A preset that controls the model's "thinking" process. Use
|
|
3178
|
+
* `ThinkingLevel.LOW` for faster responses on less complex tasks, and
|
|
3179
|
+
* `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
|
|
3180
|
+
*
|
|
3181
|
+
* @public
|
|
3182
|
+
*/
|
|
3183
|
+
export declare const ThinkingLevel: {
|
|
3184
|
+
MINIMAL: string;
|
|
3185
|
+
LOW: string;
|
|
3186
|
+
MEDIUM: string;
|
|
3187
|
+
HIGH: string;
|
|
3188
|
+
};
|
|
3189
|
+
|
|
3190
|
+
/**
|
|
3191
|
+
* A preset that controls the model's "thinking" process. Use
|
|
3192
|
+
* `ThinkingLevel.LOW` for faster responses on less complex tasks, and
|
|
3193
|
+
* `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
|
|
3194
|
+
*
|
|
3195
|
+
* @public
|
|
3196
|
+
*/
|
|
3197
|
+
export declare type ThinkingLevel = (typeof ThinkingLevel)[keyof typeof ThinkingLevel];
|
|
3198
|
+
|
|
3199
|
+
/**
|
|
3200
|
+
* Defines a tool that model can call to access external knowledge.
|
|
3201
|
+
* @public
|
|
3202
|
+
*/
|
|
3203
|
+
export declare type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool;
|
|
3204
|
+
|
|
3205
|
+
/**
|
|
3206
|
+
* Tool config. This config is shared for all tools provided in the request.
|
|
3207
|
+
* @public
|
|
3208
|
+
*/
|
|
3209
|
+
export declare interface ToolConfig {
|
|
3210
|
+
functionCallingConfig?: FunctionCallingConfig;
|
|
3211
|
+
}
|
|
3212
|
+
|
|
3213
|
+
/**
|
|
3214
|
+
* Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription
|
|
3215
|
+
* is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on
|
|
3216
|
+
* the {@link LiveGenerationConfig}.
|
|
3217
|
+
*
|
|
3218
|
+
* @beta
|
|
3219
|
+
*/
|
|
3220
|
+
export declare interface Transcription {
|
|
3221
|
+
/**
|
|
3222
|
+
* The text transcription of the audio.
|
|
3223
|
+
*/
|
|
3224
|
+
text?: string;
|
|
3225
|
+
}
|
|
3226
|
+
|
|
3227
|
+
/**
|
|
3228
|
+
* A type that includes all specific Schema types.
|
|
3229
|
+
* @public
|
|
3230
|
+
*/
|
|
3231
|
+
export declare type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema | AnyOfSchema;
|
|
3232
|
+
|
|
3233
|
+
/**
|
|
3234
|
+
* Specifies the URL Context configuration.
|
|
3235
|
+
*
|
|
3236
|
+
* @beta
|
|
3237
|
+
*/
|
|
3238
|
+
export declare interface URLContext {
|
|
3239
|
+
}
|
|
3240
|
+
|
|
3241
|
+
/**
|
|
3242
|
+
* Metadata related to {@link URLContextTool}.
|
|
3243
|
+
*
|
|
3244
|
+
* @public
|
|
3245
|
+
*/
|
|
3246
|
+
export declare interface URLContextMetadata {
|
|
3247
|
+
/**
|
|
3248
|
+
* List of URL metadata used to provide context to the Gemini model.
|
|
3249
|
+
*/
|
|
3250
|
+
urlMetadata: URLMetadata[];
|
|
3251
|
+
}
|
|
3252
|
+
|
|
3253
|
+
/**
|
|
3254
|
+
* A tool that allows you to provide additional context to the models in the form of public web
|
|
3255
|
+
* URLs. By including URLs in your request, the Gemini model will access the content from those
|
|
3256
|
+
* pages to inform and enhance its response.
|
|
3257
|
+
*
|
|
3258
|
+
* @beta
|
|
3259
|
+
*/
|
|
3260
|
+
export declare interface URLContextTool {
|
|
3261
|
+
/**
|
|
3262
|
+
* Specifies the URL Context configuration.
|
|
3263
|
+
*/
|
|
3264
|
+
urlContext: URLContext;
|
|
3265
|
+
}
|
|
3266
|
+
|
|
3267
|
+
/**
|
|
3268
|
+
* Metadata for a single URL retrieved by the {@link URLContextTool} tool.
|
|
3269
|
+
*
|
|
3270
|
+
* @public
|
|
3271
|
+
*/
|
|
3272
|
+
export declare interface URLMetadata {
|
|
3273
|
+
/**
|
|
3274
|
+
* The retrieved URL.
|
|
3275
|
+
*/
|
|
3276
|
+
retrievedUrl?: string;
|
|
3277
|
+
/**
|
|
3278
|
+
* The status of the URL retrieval.
|
|
3279
|
+
*/
|
|
3280
|
+
urlRetrievalStatus?: URLRetrievalStatus;
|
|
3281
|
+
}
|
|
3282
|
+
|
|
3283
|
+
/**
|
|
3284
|
+
* The status of a URL retrieval.
|
|
3285
|
+
*
|
|
3286
|
+
* @remarks
|
|
3287
|
+
* <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
|
|
3288
|
+
* <br/>
|
|
3289
|
+
* <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
|
|
3290
|
+
* <br/>
|
|
3291
|
+
* <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
|
|
3292
|
+
* <br/>
|
|
3293
|
+
* <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
|
|
3294
|
+
* <br/>
|
|
3295
|
+
* <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
|
|
3296
|
+
* <br/>
|
|
3297
|
+
*
|
|
3298
|
+
* @public
|
|
3299
|
+
*/
|
|
3300
|
+
export declare const URLRetrievalStatus: {
|
|
3301
|
+
/**
|
|
3302
|
+
* Unspecified retrieval status.
|
|
3303
|
+
*/
|
|
3304
|
+
URL_RETRIEVAL_STATUS_UNSPECIFIED: string;
|
|
3305
|
+
/**
|
|
3306
|
+
* The URL retrieval was successful.
|
|
3307
|
+
*/
|
|
3308
|
+
URL_RETRIEVAL_STATUS_SUCCESS: string;
|
|
3309
|
+
/**
|
|
3310
|
+
* The URL retrieval failed.
|
|
3311
|
+
*/
|
|
3312
|
+
URL_RETRIEVAL_STATUS_ERROR: string;
|
|
3313
|
+
/**
|
|
3314
|
+
* The URL retrieval failed because the content is behind a paywall.
|
|
3315
|
+
*/
|
|
3316
|
+
URL_RETRIEVAL_STATUS_PAYWALL: string;
|
|
3317
|
+
/**
|
|
3318
|
+
* The URL retrieval failed because the content is unsafe.
|
|
3319
|
+
*/
|
|
3320
|
+
URL_RETRIEVAL_STATUS_UNSAFE: string;
|
|
3321
|
+
};
|
|
3322
|
+
|
|
3323
|
+
/**
|
|
3324
|
+
* The status of a URL retrieval.
|
|
3325
|
+
*
|
|
3326
|
+
* @remarks
|
|
3327
|
+
* <b>URL_RETRIEVAL_STATUS_UNSPECIFIED:</b> Unspecified retrieval status.
|
|
3328
|
+
* <br/>
|
|
3329
|
+
* <b>URL_RETRIEVAL_STATUS_SUCCESS:</b> The URL retrieval was successful.
|
|
3330
|
+
* <br/>
|
|
3331
|
+
* <b>URL_RETRIEVAL_STATUS_ERROR:</b> The URL retrieval failed.
|
|
3332
|
+
* <br/>
|
|
3333
|
+
* <b>URL_RETRIEVAL_STATUS_PAYWALL:</b> The URL retrieval failed because the content is behind a paywall.
|
|
3334
|
+
* <br/>
|
|
3335
|
+
* <b>URL_RETRIEVAL_STATUS_UNSAFE:</b> The URL retrieval failed because the content is unsafe.
|
|
3336
|
+
* <br/>
|
|
3337
|
+
*
|
|
3338
|
+
* @public
|
|
3339
|
+
*/
|
|
3340
|
+
export declare type URLRetrievalStatus = (typeof URLRetrievalStatus)[keyof typeof URLRetrievalStatus];
|
|
3341
|
+
|
|
3342
|
+
/**
|
|
3343
|
+
* Usage metadata about a {@link GenerateContentResponse}.
|
|
3344
|
+
*
|
|
3345
|
+
* @public
|
|
3346
|
+
*/
|
|
3347
|
+
export declare interface UsageMetadata {
|
|
3348
|
+
promptTokenCount: number;
|
|
3349
|
+
candidatesTokenCount: number;
|
|
3350
|
+
/**
|
|
3351
|
+
* The number of tokens used by the model's internal "thinking" process.
|
|
3352
|
+
*/
|
|
3353
|
+
thoughtsTokenCount?: number;
|
|
3354
|
+
totalTokenCount: number;
|
|
3355
|
+
/**
|
|
3356
|
+
* The number of tokens used by tools.
|
|
3357
|
+
*/
|
|
3358
|
+
toolUsePromptTokenCount?: number;
|
|
3359
|
+
promptTokensDetails?: ModalityTokenCount[];
|
|
3360
|
+
candidatesTokensDetails?: ModalityTokenCount[];
|
|
3361
|
+
/**
|
|
3362
|
+
* A list of tokens used by tools, broken down by modality.
|
|
3363
|
+
*/
|
|
3364
|
+
toolUsePromptTokensDetails?: ModalityTokenCount[];
|
|
3365
|
+
/**
|
|
3366
|
+
* The number of tokens in the prompt that were served from the cache.
|
|
3367
|
+
* If implicit caching is not active or no content was cached,
|
|
3368
|
+
* this will be 0.
|
|
3369
|
+
*/
|
|
3370
|
+
cachedContentTokenCount?: number;
|
|
3371
|
+
/**
|
|
3372
|
+
* Detailed breakdown of the cached tokens by modality (for example, text or
|
|
3373
|
+
* image). This list provides granular insight into which parts of
|
|
3374
|
+
* the content were cached.
|
|
3375
|
+
*/
|
|
3376
|
+
cacheTokensDetails?: ModalityTokenCount[];
|
|
3377
|
+
}
|
|
3378
|
+
|
|
3379
|
+
/**
|
|
3380
|
+
* Configuration class for the Vertex AI Gemini API.
|
|
3381
|
+
*
|
|
3382
|
+
* Use this with {@link AIOptions} when initializing the AI service via
|
|
3383
|
+
* {@link getAI | getAI()} to specify the Vertex AI Gemini API as the backend.
|
|
3384
|
+
*
|
|
3385
|
+
* @public
|
|
3386
|
+
*/
|
|
3387
|
+
export declare class VertexAIBackend extends Backend {
|
|
3388
|
+
/**
|
|
3389
|
+
* The region identifier.
|
|
3390
|
+
* See {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
|
|
3391
|
+
* for a list of supported locations.
|
|
3392
|
+
*/
|
|
3393
|
+
readonly location: string;
|
|
3394
|
+
/**
|
|
3395
|
+
* Creates a configuration object for the Vertex AI backend.
|
|
3396
|
+
*
|
|
3397
|
+
* @param location - The region identifier, defaulting to `us-central1`;
|
|
3398
|
+
* see {@link https://firebase.google.com/docs/vertex-ai/locations#available-locations | Vertex AI locations}
|
|
3399
|
+
* for a list of supported locations.
|
|
3400
|
+
*/
|
|
3401
|
+
constructor(location?: string);
|
|
3402
|
+
/* Excluded from this release type: _getModelPath */
|
|
3403
|
+
/* Excluded from this release type: _getTemplatePath */
|
|
3404
|
+
}
|
|
3405
|
+
|
|
3406
|
+
/**
|
|
3407
|
+
* Describes the input video content.
|
|
3408
|
+
* @public
|
|
3409
|
+
*/
|
|
3410
|
+
export declare interface VideoMetadata {
|
|
3411
|
+
/**
|
|
3412
|
+
* The start offset of the video in
|
|
3413
|
+
* protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
|
|
3414
|
+
*/
|
|
3415
|
+
startOffset: string;
|
|
3416
|
+
/**
|
|
3417
|
+
* The end offset of the video in
|
|
3418
|
+
* protobuf {@link https://cloud.google.com/ruby/docs/reference/google-cloud-workflows-v1/latest/Google-Protobuf-Duration#json-mapping | Duration} format.
|
|
3419
|
+
*/
|
|
3420
|
+
endOffset: string;
|
|
3421
|
+
}
|
|
3422
|
+
|
|
3423
|
+
/**
|
|
3424
|
+
* Configuration for the voice to used in speech synthesis.
|
|
3425
|
+
*
|
|
3426
|
+
* @beta
|
|
3427
|
+
*/
|
|
3428
|
+
export declare interface VoiceConfig {
|
|
3429
|
+
/**
|
|
3430
|
+
* Configures the voice using a pre-built voice configuration.
|
|
3431
|
+
*/
|
|
3432
|
+
prebuiltVoiceConfig?: PrebuiltVoiceConfig;
|
|
3433
|
+
}
|
|
3434
|
+
|
|
3435
|
+
/**
|
|
3436
|
+
* @public
|
|
3437
|
+
*/
|
|
3438
|
+
export declare interface WebAttribution {
|
|
3439
|
+
uri: string;
|
|
3440
|
+
title: string;
|
|
3441
|
+
}
|
|
3442
|
+
|
|
3443
|
+
/**
|
|
3444
|
+
* A grounding chunk from the web.
|
|
3445
|
+
*
|
|
3446
|
+
* Important: If using Grounding with Google Search, you are required to comply with the
|
|
3447
|
+
* {@link https://cloud.google.com/terms/service-terms | Service Specific Terms} for "Grounding with Google Search".
|
|
3448
|
+
*
|
|
3449
|
+
* @public
|
|
3450
|
+
*/
|
|
3451
|
+
export declare interface WebGroundingChunk {
|
|
3452
|
+
/**
|
|
3453
|
+
* The URI of the retrieved web page.
|
|
3454
|
+
*/
|
|
3455
|
+
uri?: string;
|
|
3456
|
+
/**
|
|
3457
|
+
* The title of the retrieved web page.
|
|
3458
|
+
*/
|
|
3459
|
+
title?: string;
|
|
3460
|
+
/**
|
|
3461
|
+
* The domain of the original URI from which the content was retrieved.
|
|
3462
|
+
*
|
|
3463
|
+
* This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
|
|
3464
|
+
* When using the Gemini Developer API ({@link GoogleAIBackend}), this property will be
|
|
3465
|
+
* `undefined`.
|
|
3466
|
+
*/
|
|
3467
|
+
domain?: string;
|
|
3468
|
+
}
|
|
3469
|
+
|
|
3470
|
+
/* Excluded from this release type: WebSocketHandler */
|
|
3471
|
+
|
|
3472
|
+
export { }
|