@depup/firebase__ai 2.9.0-depup.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. package/README.md +31 -0
  2. package/changes.json +10 -0
  3. package/dist/ai-public.d.ts +3472 -0
  4. package/dist/ai.d.ts +3712 -0
  5. package/dist/esm/index.esm.js +4765 -0
  6. package/dist/esm/index.esm.js.map +1 -0
  7. package/dist/esm/package.json +1 -0
  8. package/dist/esm/src/api.d.ts +121 -0
  9. package/dist/esm/src/backend.d.ts +98 -0
  10. package/dist/esm/src/constants.d.ts +29 -0
  11. package/dist/esm/src/errors.d.ts +35 -0
  12. package/dist/esm/src/factory-browser.d.ts +19 -0
  13. package/dist/esm/src/factory-node.d.ts +19 -0
  14. package/dist/esm/src/googleai-mappers.d.ts +73 -0
  15. package/dist/esm/src/helpers.d.ts +30 -0
  16. package/dist/esm/src/index.d.ts +13 -0
  17. package/dist/esm/src/index.node.d.ts +7 -0
  18. package/dist/esm/src/logger.d.ts +18 -0
  19. package/dist/esm/src/methods/chat-session-helpers.d.ts +18 -0
  20. package/dist/esm/src/methods/chat-session.d.ts +77 -0
  21. package/dist/esm/src/methods/chrome-adapter.d.ts +124 -0
  22. package/dist/esm/src/methods/count-tokens.d.ts +21 -0
  23. package/dist/esm/src/methods/generate-content.d.ts +25 -0
  24. package/dist/esm/src/methods/live-session-helpers.d.ts +154 -0
  25. package/dist/esm/src/methods/live-session.d.ts +154 -0
  26. package/dist/esm/src/models/ai-model.d.ts +72 -0
  27. package/dist/esm/src/models/generative-model.d.ts +56 -0
  28. package/dist/esm/src/models/imagen-model.d.ts +102 -0
  29. package/dist/esm/src/models/index.d.ts +20 -0
  30. package/dist/esm/src/models/live-generative-model.d.ts +55 -0
  31. package/dist/esm/src/models/template-generative-model.d.ts +64 -0
  32. package/dist/esm/src/models/template-imagen-model.d.ts +51 -0
  33. package/dist/esm/src/models/utils.d.ts +26 -0
  34. package/dist/esm/src/public-types.d.ts +97 -0
  35. package/dist/esm/src/requests/hybrid-helpers.d.ts +33 -0
  36. package/dist/esm/src/requests/imagen-image-format.d.ts +61 -0
  37. package/dist/esm/src/requests/request-helpers.d.ts +28 -0
  38. package/dist/esm/src/requests/request.d.ts +69 -0
  39. package/dist/esm/src/requests/response-helpers.d.ts +57 -0
  40. package/dist/esm/src/requests/schema-builder.d.ts +170 -0
  41. package/dist/esm/src/requests/stream-reader.d.ts +39 -0
  42. package/dist/esm/src/service.d.ts +35 -0
  43. package/dist/esm/src/types/chrome-adapter.d.ts +61 -0
  44. package/dist/esm/src/types/content.d.ts +266 -0
  45. package/dist/esm/src/types/enums.d.ts +419 -0
  46. package/dist/esm/src/types/error.d.ts +89 -0
  47. package/dist/esm/src/types/googleai.d.ts +57 -0
  48. package/dist/esm/src/types/imagen/index.d.ts +18 -0
  49. package/dist/esm/src/types/imagen/internal.d.ts +134 -0
  50. package/dist/esm/src/types/imagen/requests.d.ts +245 -0
  51. package/dist/esm/src/types/imagen/responses.d.ts +79 -0
  52. package/dist/esm/src/types/index.d.ts +26 -0
  53. package/dist/esm/src/types/internal.d.ts +35 -0
  54. package/dist/esm/src/types/language-model.d.ts +107 -0
  55. package/dist/esm/src/types/live-responses.d.ts +79 -0
  56. package/dist/esm/src/types/requests.d.ts +543 -0
  57. package/dist/esm/src/types/responses.d.ts +607 -0
  58. package/dist/esm/src/types/schema.d.ts +139 -0
  59. package/dist/esm/src/websocket.d.ts +67 -0
  60. package/dist/index.cjs.js +4820 -0
  61. package/dist/index.cjs.js.map +1 -0
  62. package/dist/index.node.cjs.js +4512 -0
  63. package/dist/index.node.cjs.js.map +1 -0
  64. package/dist/index.node.mjs +4457 -0
  65. package/dist/index.node.mjs.map +1 -0
  66. package/dist/src/api.d.ts +121 -0
  67. package/dist/src/backend.d.ts +98 -0
  68. package/dist/src/constants.d.ts +29 -0
  69. package/dist/src/errors.d.ts +35 -0
  70. package/dist/src/factory-browser.d.ts +19 -0
  71. package/dist/src/factory-node.d.ts +19 -0
  72. package/dist/src/googleai-mappers.d.ts +73 -0
  73. package/dist/src/helpers.d.ts +30 -0
  74. package/dist/src/index.d.ts +13 -0
  75. package/dist/src/index.node.d.ts +7 -0
  76. package/dist/src/logger.d.ts +18 -0
  77. package/dist/src/methods/chat-session-helpers.d.ts +18 -0
  78. package/dist/src/methods/chat-session.d.ts +77 -0
  79. package/dist/src/methods/chrome-adapter.d.ts +124 -0
  80. package/dist/src/methods/count-tokens.d.ts +21 -0
  81. package/dist/src/methods/generate-content.d.ts +25 -0
  82. package/dist/src/methods/live-session-helpers.d.ts +154 -0
  83. package/dist/src/methods/live-session.d.ts +154 -0
  84. package/dist/src/models/ai-model.d.ts +72 -0
  85. package/dist/src/models/generative-model.d.ts +56 -0
  86. package/dist/src/models/imagen-model.d.ts +102 -0
  87. package/dist/src/models/index.d.ts +20 -0
  88. package/dist/src/models/live-generative-model.d.ts +55 -0
  89. package/dist/src/models/template-generative-model.d.ts +64 -0
  90. package/dist/src/models/template-imagen-model.d.ts +51 -0
  91. package/dist/src/models/utils.d.ts +26 -0
  92. package/dist/src/public-types.d.ts +97 -0
  93. package/dist/src/requests/hybrid-helpers.d.ts +33 -0
  94. package/dist/src/requests/imagen-image-format.d.ts +61 -0
  95. package/dist/src/requests/request-helpers.d.ts +28 -0
  96. package/dist/src/requests/request.d.ts +69 -0
  97. package/dist/src/requests/response-helpers.d.ts +57 -0
  98. package/dist/src/requests/schema-builder.d.ts +170 -0
  99. package/dist/src/requests/stream-reader.d.ts +39 -0
  100. package/dist/src/service.d.ts +35 -0
  101. package/dist/src/tsdoc-metadata.json +11 -0
  102. package/dist/src/types/chrome-adapter.d.ts +61 -0
  103. package/dist/src/types/content.d.ts +266 -0
  104. package/dist/src/types/enums.d.ts +419 -0
  105. package/dist/src/types/error.d.ts +89 -0
  106. package/dist/src/types/googleai.d.ts +57 -0
  107. package/dist/src/types/imagen/index.d.ts +18 -0
  108. package/dist/src/types/imagen/internal.d.ts +134 -0
  109. package/dist/src/types/imagen/requests.d.ts +245 -0
  110. package/dist/src/types/imagen/responses.d.ts +79 -0
  111. package/dist/src/types/index.d.ts +26 -0
  112. package/dist/src/types/internal.d.ts +35 -0
  113. package/dist/src/types/language-model.d.ts +107 -0
  114. package/dist/src/types/live-responses.d.ts +79 -0
  115. package/dist/src/types/requests.d.ts +543 -0
  116. package/dist/src/types/responses.d.ts +607 -0
  117. package/dist/src/types/schema.d.ts +139 -0
  118. package/dist/src/websocket.d.ts +67 -0
  119. package/package.json +106 -0
@@ -0,0 +1,79 @@
1
+ /**
2
+ * @license
3
+ * Copyright 2025 Google LLC
4
+ *
5
+ * Licensed under the Apache License, Version 2.0 (the "License");
6
+ * you may not use this file except in compliance with the License.
7
+ * You may obtain a copy of the License at
8
+ *
9
+ * http://www.apache.org/licenses/LICENSE-2.0
10
+ *
11
+ * Unless required by applicable law or agreed to in writing, software
12
+ * distributed under the License is distributed on an "AS IS" BASIS,
13
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ * See the License for the specific language governing permissions and
15
+ * limitations under the License.
16
+ */
17
+ import { Content, FunctionResponse, GenerativeContentBlob, Part } from './content';
18
+ import { AudioTranscriptionConfig, LiveGenerationConfig, Tool, ToolConfig } from './requests';
19
+ import { Transcription } from './responses';
20
+ /**
21
+ * User input that is sent to the model.
22
+ *
23
+ * @internal
24
+ */
25
+ export interface _LiveClientContent {
26
+ clientContent: {
27
+ turns: [Content];
28
+ turnComplete: boolean;
29
+ inputTranscription?: Transcription;
30
+ outputTranscription?: Transcription;
31
+ };
32
+ }
33
+ /**
34
+ * User input that is sent to the model in real time.
35
+ *
36
+ * @internal
37
+ */
38
+ export interface _LiveClientRealtimeInput {
39
+ realtimeInput: {
40
+ text?: string;
41
+ audio?: GenerativeContentBlob;
42
+ video?: GenerativeContentBlob;
43
+ /**
44
+ * @deprecated Use `text`, `audio`, and `video` instead.
45
+ */
46
+ mediaChunks?: GenerativeContentBlob[];
47
+ };
48
+ }
49
+ /**
50
+ * Function responses that are sent to the model in real time.
51
+ */
52
+ export interface _LiveClientToolResponse {
53
+ toolResponse: {
54
+ functionResponses: FunctionResponse[];
55
+ };
56
+ }
57
+ /**
58
+ * The first message in a Live session, used to configure generation options.
59
+ *
60
+ * @internal
61
+ */
62
+ export interface _LiveClientSetup {
63
+ setup: {
64
+ model: string;
65
+ generationConfig?: _LiveGenerationConfig;
66
+ tools?: Tool[];
67
+ toolConfig?: ToolConfig;
68
+ systemInstruction?: string | Part | Content;
69
+ inputAudioTranscription?: AudioTranscriptionConfig;
70
+ outputAudioTranscription?: AudioTranscriptionConfig;
71
+ };
72
+ }
73
+ /**
74
+ * The Live Generation Config.
75
+ *
76
+ * The public API ({@link LiveGenerationConfig}) has `inputAudioTranscription` and `outputAudioTranscription`,
77
+ * but the server expects these fields to be in the top-level `setup` message. This was a conscious API decision.
78
+ */
79
+ export type _LiveGenerationConfig = Omit<LiveGenerationConfig, 'inputAudioTranscription' | 'outputAudioTranscription'>;
@@ -0,0 +1,543 @@
1
+ /**
2
+ * @license
3
+ * Copyright 2024 Google LLC
4
+ *
5
+ * Licensed under the Apache License, Version 2.0 (the "License");
6
+ * you may not use this file except in compliance with the License.
7
+ * You may obtain a copy of the License at
8
+ *
9
+ * http://www.apache.org/licenses/LICENSE-2.0
10
+ *
11
+ * Unless required by applicable law or agreed to in writing, software
12
+ * distributed under the License is distributed on an "AS IS" BASIS,
13
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ * See the License for the specific language governing permissions and
15
+ * limitations under the License.
16
+ */
17
+ import { ObjectSchema, TypedSchema } from '../requests/schema-builder';
18
+ import { Content, Part } from './content';
19
+ import { LanguageModelCreateOptions, LanguageModelPromptOptions } from './language-model';
20
+ import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, InferenceMode, ResponseModality, ThinkingLevel } from './enums';
21
+ import { ObjectSchemaRequest, SchemaRequest } from './schema';
22
+ /**
23
+ * Base parameters for a number of methods.
24
+ * @public
25
+ */
26
+ export interface BaseParams {
27
+ safetySettings?: SafetySetting[];
28
+ generationConfig?: GenerationConfig;
29
+ }
30
+ /**
31
+ * Params passed to {@link getGenerativeModel}.
32
+ * @public
33
+ */
34
+ export interface ModelParams extends BaseParams {
35
+ model: string;
36
+ tools?: Tool[];
37
+ toolConfig?: ToolConfig;
38
+ systemInstruction?: string | Part | Content;
39
+ }
40
+ /**
41
+ * Params passed to {@link getLiveGenerativeModel}.
42
+ * @beta
43
+ */
44
+ export interface LiveModelParams {
45
+ model: string;
46
+ generationConfig?: LiveGenerationConfig;
47
+ tools?: Tool[];
48
+ toolConfig?: ToolConfig;
49
+ systemInstruction?: string | Part | Content;
50
+ }
51
+ /**
52
+ * Request sent through {@link GenerativeModel.generateContent}
53
+ * @public
54
+ */
55
+ export interface GenerateContentRequest extends BaseParams {
56
+ contents: Content[];
57
+ tools?: Tool[];
58
+ toolConfig?: ToolConfig;
59
+ systemInstruction?: string | Part | Content;
60
+ }
61
+ /**
62
+ * Safety setting that can be sent as part of request parameters.
63
+ * @public
64
+ */
65
+ export interface SafetySetting {
66
+ category: HarmCategory;
67
+ threshold: HarmBlockThreshold;
68
+ /**
69
+ * The harm block method.
70
+ *
71
+ * This property is only supported in the Vertex AI Gemini API ({@link VertexAIBackend}).
72
+ * When using the Gemini Developer API ({@link GoogleAIBackend}), an {@link AIError} will be
73
+ * thrown if this property is defined.
74
+ */
75
+ method?: HarmBlockMethod;
76
+ }
77
+ /**
78
+ * Config options for content-related requests
79
+ * @public
80
+ */
81
+ export interface GenerationConfig {
82
+ candidateCount?: number;
83
+ stopSequences?: string[];
84
+ maxOutputTokens?: number;
85
+ temperature?: number;
86
+ topP?: number;
87
+ topK?: number;
88
+ presencePenalty?: number;
89
+ frequencyPenalty?: number;
90
+ /**
91
+ * Output response MIME type of the generated candidate text.
92
+ * Supported MIME types are `text/plain` (default, text output),
93
+ * `application/json` (JSON response in the candidates), and
94
+ * `text/x.enum`.
95
+ */
96
+ responseMimeType?: string;
97
+ /**
98
+ * Output response schema of the generated candidate text. This
99
+ * value can be a class generated with a {@link Schema} static method
100
+ * like `Schema.string()` or `Schema.object()` or it can be a plain
101
+ * JS object matching the {@link SchemaRequest} interface.
102
+ * <br/>Note: This only applies when the specified `responseMimeType` supports a schema; currently
103
+ * this is limited to `application/json` and `text/x.enum`.
104
+ */
105
+ responseSchema?: TypedSchema | SchemaRequest;
106
+ /**
107
+ * Generation modalities to be returned in generation responses.
108
+ *
109
+ * @remarks
110
+ * - Multimodal response generation is only supported by some Gemini models and versions; see {@link https://firebase.google.com/docs/vertex-ai/models | model versions}.
111
+ * - Only image generation (`ResponseModality.IMAGE`) is supported.
112
+ *
113
+ * @beta
114
+ */
115
+ responseModalities?: ResponseModality[];
116
+ /**
117
+ * Configuration for "thinking" behavior of compatible Gemini models.
118
+ */
119
+ thinkingConfig?: ThinkingConfig;
120
+ }
121
+ /**
122
+ * Configuration parameters used by {@link LiveGenerativeModel} to control live content generation.
123
+ *
124
+ * @beta
125
+ */
126
+ export interface LiveGenerationConfig {
127
+ /**
128
+ * Configuration for speech synthesis.
129
+ */
130
+ speechConfig?: SpeechConfig;
131
+ /**
132
+ * Specifies the maximum number of tokens that can be generated in the response. The number of
133
+ * tokens per word varies depending on the language outputted. Is unbounded by default.
134
+ */
135
+ maxOutputTokens?: number;
136
+ /**
137
+ * Controls the degree of randomness in token selection. A `temperature` value of 0 means that the highest
138
+ * probability tokens are always selected. In this case, responses for a given prompt are mostly
139
+ * deterministic, but a small amount of variation is still possible.
140
+ */
141
+ temperature?: number;
142
+ /**
143
+ * Changes how the model selects tokens for output. Tokens are
144
+ * selected from the most to least probable until the sum of their probabilities equals the `topP`
145
+ * value. For example, if tokens A, B, and C have probabilities of 0.3, 0.2, and 0.1 respectively
146
+ * and the `topP` value is 0.5, then the model will select either A or B as the next token by using
147
+ * the `temperature` and exclude C as a candidate. Defaults to 0.95 if unset.
148
+ */
149
+ topP?: number;
150
+ /**
151
+ * Changes how the model selects tokens for output. A `topK` value of 1 means the selected token is
152
+ * the most probable among all tokens in the model's vocabulary, while a `topK` value of 3 means that
153
+ * the next token is selected from among the 3 most probable, using probabilities sampled. Tokens
154
+ * are then further filtered with the highest selected `temperature` sampling. Defaults to 40
155
+ * if unspecified.
156
+ */
157
+ topK?: number;
158
+ /**
159
+ * Positive penalties.
160
+ */
161
+ presencePenalty?: number;
162
+ /**
163
+ * Frequency penalties.
164
+ */
165
+ frequencyPenalty?: number;
166
+ /**
167
+ * The modalities of the response.
168
+ */
169
+ responseModalities?: ResponseModality[];
170
+ /**
171
+ * Enables transcription of audio input.
172
+ *
173
+ * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property
174
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
175
+ * messages, so you may only receive small amounts of text per message. For example, if you ask the model
176
+ * "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
177
+ */
178
+ inputAudioTranscription?: AudioTranscriptionConfig;
179
+ /**
180
+ * Enables transcription of audio output.
181
+ *
182
+ * When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
183
+ * in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
184
+ * messages, so you may only receive small amounts of text per message. For example, if the model says
185
+ * "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
186
+ */
187
+ outputAudioTranscription?: AudioTranscriptionConfig;
188
+ }
189
+ /**
190
+ * Params for {@link GenerativeModel.startChat}.
191
+ * @public
192
+ */
193
+ export interface StartChatParams extends BaseParams {
194
+ history?: Content[];
195
+ tools?: Tool[];
196
+ toolConfig?: ToolConfig;
197
+ systemInstruction?: string | Part | Content;
198
+ }
199
+ /**
200
+ * Params for calling {@link GenerativeModel.countTokens}
201
+ * @public
202
+ */
203
+ export interface CountTokensRequest {
204
+ contents: Content[];
205
+ /**
206
+ * Instructions that direct the model to behave a certain way.
207
+ */
208
+ systemInstruction?: string | Part | Content;
209
+ /**
210
+ * {@link Tool} configuration.
211
+ */
212
+ tools?: Tool[];
213
+ /**
214
+ * Configuration options that control how the model generates a response.
215
+ */
216
+ generationConfig?: GenerationConfig;
217
+ }
218
+ /**
219
+ * Params passed to {@link getGenerativeModel}.
220
+ * @public
221
+ */
222
+ export interface RequestOptions {
223
+ /**
224
+ * Request timeout in milliseconds. Defaults to 180 seconds (180000ms).
225
+ */
226
+ timeout?: number;
227
+ /**
228
+ * Base url for endpoint. Defaults to
229
+ * https://firebasevertexai.googleapis.com, which is the
230
+ * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API}
231
+ * (used regardless of your chosen Gemini API provider).
232
+ */
233
+ baseUrl?: string;
234
+ /**
235
+ * Limits amount of sequential function calls the SDK can make during automatic
236
+ * function calling, in order to prevent infinite loops. If not specified,
237
+ * this value defaults to 10.
238
+ *
239
+ * When it reaches this limit, it will return the last response received
240
+ * from the model, whether it is a text response or further function calls.
241
+ */
242
+ maxSequentalFunctionCalls?: number;
243
+ }
244
+ /**
245
+ * Options that can be provided per-request.
246
+ * Extends the base {@link RequestOptions} (like `timeout` and `baseUrl`)
247
+ * with request-specific controls like cancellation via `AbortSignal`.
248
+ *
249
+ * Options specified here will override any default {@link RequestOptions}
250
+ * configured on a model (for example, {@link GenerativeModel}).
251
+ *
252
+ * @public
253
+ */
254
+ export interface SingleRequestOptions extends RequestOptions {
255
+ /**
256
+ * An `AbortSignal` instance that allows cancelling ongoing requests (like `generateContent` or
257
+ * `generateImages`).
258
+ *
259
+ * If provided, calling `abort()` on the corresponding `AbortController`
260
+ * will attempt to cancel the underlying HTTP request. An `AbortError` will be thrown
261
+ * if cancellation is successful.
262
+ *
263
+ * Note that this will not cancel the request in the backend, so any applicable billing charges
264
+ * will still be applied despite cancellation.
265
+ *
266
+ * @example
267
+ * ```javascript
268
+ * const controller = new AbortController();
269
+ * const model = getGenerativeModel({
270
+ * // ...
271
+ * });
272
+ * model.generateContent(
273
+ * "Write a story about a magic backpack.",
274
+ * { signal: controller.signal }
275
+ * );
276
+ *
277
+ * // To cancel request:
278
+ * controller.abort();
279
+ * ```
280
+ * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
281
+ */
282
+ signal?: AbortSignal;
283
+ }
284
+ /**
285
+ * Defines a tool that model can call to access external knowledge.
286
+ * @public
287
+ */
288
+ export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool | URLContextTool;
289
+ /**
290
+ * Structured representation of a function declaration as defined by the
291
+ * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
292
+ * Included
293
+ * in this declaration are the function name and parameters. This
294
+ * `FunctionDeclaration` is a representation of a block of code that can be used
295
+ * as a Tool by the model and executed by the client.
296
+ * @public
297
+ */
298
+ export interface FunctionDeclaration {
299
+ /**
300
+ * The name of the function to call. Must start with a letter or an
301
+ * underscore. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
302
+ * a max length of 64.
303
+ */
304
+ name: string;
305
+ /**
306
+ * Description and purpose of the function. Model uses it to decide
307
+ * how and whether to call the function.
308
+ */
309
+ description: string;
310
+ /**
311
+ * Optional. Describes the parameters to this function in JSON Schema Object
312
+ * format. Reflects the Open API 3.03 Parameter Object. Parameter names are
313
+ * case-sensitive. For a function with no parameters, this can be left unset.
314
+ */
315
+ parameters?: ObjectSchema | ObjectSchemaRequest;
316
+ /**
317
+ * Reference to an actual function to call. Specifying this will cause the
318
+ * function to be called automatically when requested by the model.
319
+ */
320
+ functionReference?: Function;
321
+ }
322
+ /**
323
+ * A tool that allows a Gemini model to connect to Google Search to access and incorporate
324
+ * up-to-date information from the web into its responses.
325
+ *
326
+ * Important: If using Grounding with Google Search, you are required to comply with the
327
+ * "Grounding with Google Search" usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
328
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
329
+ * section within the Service Specific Terms).
330
+ *
331
+ * @public
332
+ */
333
+ export interface GoogleSearchTool {
334
+ /**
335
+ * Specifies the Google Search configuration.
336
+ * Currently, this is an empty object, but it's reserved for future configuration options.
337
+ *
338
+ * When using this feature, you are required to comply with the "Grounding with Google Search"
339
+ * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
340
+ * or Vertex AI Gemini API (see {@link https://cloud.google.com/terms/service-terms | Service Terms}
341
+ * section within the Service Specific Terms).
342
+ */
343
+ googleSearch: GoogleSearch;
344
+ }
345
+ /**
346
+ * A tool that enables the model to use code execution.
347
+ *
348
+ * @beta
349
+ */
350
+ export interface CodeExecutionTool {
351
+ /**
352
+ * Specifies the code execution configuration.
353
+ * Currently, this is an empty object, but it's reserved for future configuration options.
354
+ */
355
+ codeExecution: {};
356
+ }
357
+ /**
358
+ * Specifies the Google Search configuration.
359
+ *
360
+ * @remarks Currently, this is an empty object, but it's reserved for future configuration options.
361
+ *
362
+ * @public
363
+ */
364
+ export interface GoogleSearch {
365
+ }
366
+ /**
367
+ * A tool that allows you to provide additional context to the models in the form of public web
368
+ * URLs. By including URLs in your request, the Gemini model will access the content from those
369
+ * pages to inform and enhance its response.
370
+ *
371
+ * @beta
372
+ */
373
+ export interface URLContextTool {
374
+ /**
375
+ * Specifies the URL Context configuration.
376
+ */
377
+ urlContext: URLContext;
378
+ }
379
+ /**
380
+ * Specifies the URL Context configuration.
381
+ *
382
+ * @beta
383
+ */
384
+ export interface URLContext {
385
+ }
386
+ /**
387
+ * A `FunctionDeclarationsTool` is a piece of code that enables the system to
388
+ * interact with external systems to perform an action, or set of actions,
389
+ * outside of knowledge and scope of the model.
390
+ * @public
391
+ */
392
+ export interface FunctionDeclarationsTool {
393
+ /**
394
+ * Optional. One or more function declarations
395
+ * to be passed to the model along with the current user query. Model may
396
+ * decide to call a subset of these functions by populating
397
+ * {@link FunctionCall} in the response. User should
398
+ * provide a {@link FunctionResponse} for each
399
+ * function call in the next turn. Based on the function responses, the model will
400
+ * generate the final response back to the user. Maximum 64 function
401
+ * declarations can be provided.
402
+ */
403
+ functionDeclarations?: FunctionDeclaration[];
404
+ }
405
+ /**
406
+ * Tool config. This config is shared for all tools provided in the request.
407
+ * @public
408
+ */
409
+ export interface ToolConfig {
410
+ functionCallingConfig?: FunctionCallingConfig;
411
+ }
412
+ /**
413
+ * @public
414
+ */
415
+ export interface FunctionCallingConfig {
416
+ mode?: FunctionCallingMode;
417
+ allowedFunctionNames?: string[];
418
+ }
419
+ /**
420
+ * Encapsulates configuration for on-device inference.
421
+ *
422
+ * @beta
423
+ */
424
+ export interface OnDeviceParams {
425
+ createOptions?: LanguageModelCreateOptions;
426
+ promptOptions?: LanguageModelPromptOptions;
427
+ }
428
+ /**
429
+ * Configures hybrid inference.
430
+ * @beta
431
+ */
432
+ export interface HybridParams {
433
+ /**
434
+ * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
435
+ */
436
+ mode: InferenceMode;
437
+ /**
438
+ * Optional. Specifies advanced params for on-device inference.
439
+ */
440
+ onDeviceParams?: OnDeviceParams;
441
+ /**
442
+ * Optional. Specifies advanced params for in-cloud inference.
443
+ */
444
+ inCloudParams?: ModelParams;
445
+ }
446
+ /**
447
+ * Configuration for "thinking" behavior of compatible Gemini models.
448
+ *
449
+ * Certain models utilize a thinking process before generating a response. This allows them to
450
+ * reason through complex problems and plan a more coherent and accurate answer.
451
+ *
452
+ * @public
453
+ */
454
+ export interface ThinkingConfig {
455
+ /**
456
+ * The thinking budget, in tokens.
457
+ *
458
+ * @remarks
459
+ * This parameter sets an upper limit on the number of tokens the model can use for its internal
460
+ * "thinking" process. A higher budget may result in higher quality responses for complex tasks
461
+ * but can also increase latency and cost.
462
+ *
463
+ * The range of supported thinking budget values depends on the model.
464
+ *
465
+ * <ul>
466
+ * <li>To use the default thinking budget for a model, leave
467
+ * this value undefined.</li>
468
+ *
469
+ * <li>To disable thinking, when supported by the model, set this value
470
+ * to `0`.</li>
471
+ *
472
+ * <li>To use dynamic thinking, which allows the model to decide on the thinking
473
+ * budget based on the task, set this value to `-1`.</li>
474
+ * </ul>
475
+ *
476
+ * An error will be thrown if you set a thinking budget for a model that does not support this
477
+ * feature or if the specified budget is not within the model's supported range.
478
+ *
479
+ * The model will also error if `thinkingLevel` and `thinkingBudget` are
480
+ * both set.
481
+ */
482
+ thinkingBudget?: number;
483
+ /**
484
+ * If not specified, Gemini will use the model's default dynamic thinking level.
485
+ *
486
+ * @remarks
487
+ * Note: The model will error if `thinkingLevel` and `thinkingBudget` are
488
+ * both set.
489
+ *
490
+ * Important: Gemini 2.5 series models do not support thinking levels; use
491
+ * `thinkingBudget` to set a thinking budget instead.
492
+ */
493
+ thinkingLevel?: ThinkingLevel;
494
+ /**
495
+ * Whether to include "thought summaries" in the model's response.
496
+ *
497
+ * @remarks
498
+ * Thought summaries provide a brief overview of the model's internal thinking process,
499
+ * offering insight into how it arrived at the final answer. This can be useful for
500
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
501
+ */
502
+ includeThoughts?: boolean;
503
+ }
504
+ /**
505
+ * Configuration for a pre-built voice.
506
+ *
507
+ * @beta
508
+ */
509
+ export interface PrebuiltVoiceConfig {
510
+ /**
511
+ * The voice name to use for speech synthesis.
512
+ *
513
+ * For a full list of names and demos of what each voice sounds like, see {@link https://cloud.google.com/text-to-speech/docs/chirp3-hd | Chirp 3: HD Voices}.
514
+ */
515
+ voiceName?: string;
516
+ }
517
+ /**
518
+ * Configuration for the voice to be used in speech synthesis.
519
+ *
520
+ * @beta
521
+ */
522
+ export interface VoiceConfig {
523
+ /**
524
+ * Configures the voice using a pre-built voice configuration.
525
+ */
526
+ prebuiltVoiceConfig?: PrebuiltVoiceConfig;
527
+ }
528
+ /**
529
+ * Configures speech synthesis.
530
+ *
531
+ * @beta
532
+ */
533
+ export interface SpeechConfig {
534
+ /**
535
+ * Configures the voice to be used in speech synthesis.
536
+ */
537
+ voiceConfig?: VoiceConfig;
538
+ }
539
+ /**
540
+ * The audio transcription configuration.
541
+ */
542
+ export interface AudioTranscriptionConfig {
543
+ }