@firebase/ai 2.2.1-canary.a4848b401 → 2.2.1-canary.c1237662e

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,28 @@
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ import { GenerateContentRequest, ChromeAdapter } from '../types';
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ export declare function callCloudOrDevice<Response>(request: GenerateContentRequest, chromeAdapter: ChromeAdapter | undefined, onDeviceCall: () => Promise<Response>, inCloudCall: () => Promise<Response>): Promise<Response>;
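The helper declared above is internal plumbing, but its signature is easy to exercise in isolation. A minimal sketch, assuming nothing beyond the declaration itself; callCloudOrDeviceSketch is a local stand-in, not the package export, and serves only to show how the Response type parameter is inferred from the two callbacks:

    // Local stand-in with the same shape as the declaration above.
    async function callCloudOrDeviceSketch<Response>(
      request: unknown,
      chromeAdapter: { isAvailable(r: unknown): Promise<boolean> } | undefined,
      onDeviceCall: () => Promise<Response>,
      inCloudCall: () => Promise<Response>
    ): Promise<Response> {
      // Without an adapter there is nothing to dispatch to on-device.
      if (chromeAdapter && (await chromeAdapter.isAvailable(request))) {
        return onDeviceCall();
      }
      return inCloudCall();
    }

    void (async () => {
      // Response is inferred as string from the callbacks; no type argument needed.
      const answer = await callCloudOrDeviceSketch(
        { contents: [] },
        undefined,
        async () => 'from the on-device model',
        async () => 'from the cloud model'
      );
      console.log(answer); // "from the cloud model"
    })();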
@@ -16,13 +16,13 @@
  */
  import { CountTokensRequest, GenerateContentRequest } from './requests';
  /**
- * <b>(EXPERIMENTAL)</b> Defines an inference "backend" that uses Chrome's on-device model,
+ * Defines an inference "backend" that uses Chrome's on-device model,
  * and encapsulates logic for detecting when on-device inference is
  * possible.
  *
  * These methods should not be called directly by the user.
  *
- * @public
+ * @beta
  */
  export interface ChromeAdapter {
  /**
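The hunk above cuts off before the interface body. As a rough orientation only, the members below are inferred from how the adapter is used later in this diff (mode, isAvailable, generateContent, generateContentStream); this is not the published interface, and the field types are guesses:

    // Inferred-from-usage sketch; not the actual ChromeAdapter declaration.
    interface ChromeAdapterLike {
      // Matches the InferenceMode string values added in this release.
      mode: 'prefer_on_device' | 'only_on_device' | 'only_in_cloud' | 'prefer_in_cloud';
      // Used to gate on-device dispatch in callCloudOrDevice.
      isAvailable(request: unknown): Promise<boolean>;
      // Both return fetch-style Response objects that the SDK post-processes.
      generateContent(request: unknown): Promise<Response>;
      generateContentStream(request: unknown): Promise<Response>;
    }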
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
- import { Role } from './enums';
+ import { Language, Outcome, Role } from './enums';
  /**
  * Content type for both prompts and response candidates.
  * @public
@@ -28,7 +28,7 @@ export interface Content {
  * part types.
  * @public
  */
- export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart;
+ export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart;
  /**
  * Content part interface if the part represents a text string.
  * @public
@@ -43,6 +43,8 @@ export interface TextPart {
  * @internal
  */
  thoughtSignature?: string;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Content part interface if the part represents an image.
@@ -62,6 +64,8 @@ export interface InlineDataPart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Describes the input video content.
@@ -93,6 +97,8 @@ export interface FunctionCallPart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Content part interface if the part represents {@link FunctionResponse}.
@@ -108,6 +114,8 @@ export interface FunctionResponsePart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Content part interface if the part represents {@link FileData}
@@ -124,6 +132,77 @@ export interface FileDataPart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+ }
+ /**
+ * Represents the code that is executed by the model.
+ *
+ * @public
+ */
+ export interface ExecutableCodePart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData?: never;
+ thought?: never;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: ExecutableCode;
+ codeExecutionResult?: never;
+ }
+ /**
+ * Represents the code execution result from the model.
+ *
+ * @public
+ */
+ export interface CodeExecutionResultPart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData?: never;
+ thought?: never;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: CodeExecutionResult;
+ }
+ /**
+ * An interface for executable code returned by the model.
+ *
+ * @public
+ */
+ export interface ExecutableCode {
+ /**
+ * The programming language of the code.
+ */
+ language?: Language;
+ /**
+ * The source code to be executed.
+ */
+ code?: string;
+ }
+ /**
+ * The results of code execution run by the model.
+ *
+ * @public
+ */
+ export interface CodeExecutionResult {
+ /**
+ * The result of the code execution.
+ */
+ outcome?: Outcome;
+ /**
+ * The output from the code execution, or an error message
+ * if it failed.
+ */
+ output?: string;
  }
  /**
  * A predicted {@link FunctionCall} returned from the model
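With the widened Part union, callers can branch on the new fields when reading candidates. A minimal consumption sketch; the Part import path is assumed, and `parts` would typically be `response.candidates[0].content.parts`:

    import { Part } from '@firebase/ai';

    // Logs any code the model wrote and the result of running it.
    function logCodeExecution(parts: Part[]): void {
      for (const part of parts) {
        if (part.executableCode) {
          console.log(`Model wrote ${part.executableCode.language ?? 'unknown'} code:`);
          console.log(part.executableCode.code);
        } else if (part.codeExecutionResult) {
          console.log(`Execution outcome: ${part.codeExecutionResult.outcome}`);
          console.log(part.codeExecutionResult.output);
        }
      }
    }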
@@ -317,18 +317,67 @@ export declare const ResponseModality: {
  */
  export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];
  /**
- * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
- * @public
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
+ * @beta
  */
  export declare const InferenceMode: {
  readonly PREFER_ON_DEVICE: "prefer_on_device";
  readonly ONLY_ON_DEVICE: "only_on_device";
  readonly ONLY_IN_CLOUD: "only_in_cloud";
+ readonly PREFER_IN_CLOUD: "prefer_in_cloud";
  };
  /**
- * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
- * @public
+ *
+ * @beta
  */
  export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ export declare const Outcome: {
+ UNSPECIFIED: string;
+ OK: string;
+ FAILED: string;
+ DEADLINE_EXCEEDED: string;
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ export type Outcome = (typeof Outcome)[keyof typeof Outcome];
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ export declare const Language: {
+ UNSPECIFIED: string;
+ PYTHON: string;
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ export type Language = (typeof Language)[keyof typeof Language];
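A minimal usage sketch for the modes described in the remarks above. It assumes the hybrid-inference overload of getGenerativeModel that accepts a `mode` (see the HybridParams hunk further down); the Firebase app bootstrap is illustrative:

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel, InferenceMode } from '@firebase/ai';

    const app = initializeApp({ /* your Firebase config */ });
    const ai = getAI(app);

    // Try the cloud first; fall back to Chrome's on-device model when the
    // cloud call fails with a fallback-eligible error (see PREFER_IN_CLOUD above).
    const model = getGenerativeModel(ai, { mode: InferenceMode.PREFER_IN_CLOUD });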
@@ -39,9 +39,8 @@ export declare enum Availability {
  'AVAILABLE' = "available"
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Configures the creation of an on-device language model session.
- * @public
+ * @beta
  */
  export interface LanguageModelCreateCoreOptions {
  topK?: number;
@@ -49,69 +48,60 @@ export interface LanguageModelCreateCoreOptions {
  expectedInputs?: LanguageModelExpected[];
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Configures the creation of an on-device language model session.
- * @public
+ * @beta
  */
  export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
  signal?: AbortSignal;
  initialPrompts?: LanguageModelMessage[];
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Options for an on-device language model prompt.
- * @public
+ * @beta
  */
  export interface LanguageModelPromptOptions {
  responseConstraint?: object;
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Options for the expected inputs for an on-device language model.
- * @public
+ * @beta
  */ export interface LanguageModelExpected {
  type: LanguageModelMessageType;
  languages?: string[];
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * An on-device language model prompt.
- * @public
+ * @beta
  */
  export type LanguageModelPrompt = LanguageModelMessage[];
  /**
- * <b>(EXPERIMENTAL)</b>
  * An on-device language model message.
- * @public
+ * @beta
  */
  export interface LanguageModelMessage {
  role: LanguageModelMessageRole;
  content: LanguageModelMessageContent[];
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * An on-device language model content object.
- * @public
+ * @beta
  */
  export interface LanguageModelMessageContent {
  type: LanguageModelMessageType;
  value: LanguageModelMessageContentValue;
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Allowable roles for on-device language model usage.
- * @public
+ * @beta
  */
  export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
  /**
- * <b>(EXPERIMENTAL)</b>
  * Allowable types for on-device language model messages.
- * @public
+ * @beta
  */
  export type LanguageModelMessageType = 'text' | 'image' | 'audio';
  /**
- * <b>(EXPERIMENTAL)</b>
  * Content formats that can be provided as on-device message content.
- * @public
+ * @beta
  */
  export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
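The on-device option types above plug into OnDeviceParams (shown later in this diff). A sketch of plausible values, using only the fields visible in these hunks; the import path and the concrete responseConstraint shape are assumptions:

    import {
      LanguageModelCreateOptions,
      LanguageModelMessage,
      LanguageModelPromptOptions
    } from '@firebase/ai';

    // A system prompt expressed as an on-device message.
    const systemPrompt: LanguageModelMessage = {
      role: 'system',
      content: [{ type: 'text', value: 'You are a concise assistant.' }]
    };

    // Session creation options: sampling, expected inputs, and initial prompts.
    const createOptions: LanguageModelCreateOptions = {
      topK: 3,
      expectedInputs: [{ type: 'text', languages: ['en'] }],
      initialPrompts: [systemPrompt]
    };

    // Per-prompt options; the constraint object is passed through as-is.
    const promptOptions: LanguageModelPromptOptions = {
      responseConstraint: { type: 'object', properties: { answer: { type: 'string' } } }
    };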
@@ -218,7 +218,7 @@ export interface RequestOptions {
  * Defines a tool that model can call to access external knowledge.
  * @public
  */
- export type Tool = FunctionDeclarationsTool | GoogleSearchTool;
+ export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool;
  /**
  * Structured representation of a function declaration as defined by the
  * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
@@ -262,8 +262,6 @@ export interface GoogleSearchTool {
  /**
  * Specifies the Google Search configuration.
  * Currently, this is an empty object, but it's reserved for future configuration options.
- * Specifies the Google Search configuration. Currently, this is an empty object, but it's
- * reserved for future configuration options.
  *
  * When using this feature, you are required to comply with the "Grounding with Google Search"
  * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
@@ -272,6 +270,18 @@ export interface GoogleSearchTool {
  */
  googleSearch: GoogleSearch;
  }
+ /**
+ * A tool that enables the model to use code execution.
+ *
+ * @public
+ */
+ export interface CodeExecutionTool {
+ /**
+ * Specifies the code execution configuration.
+ * Currently, this is an empty object, but it's reserved for future configuration options.
+ */
+ codeExecution: {};
+ }
  /**
  * Specifies the Google Search configuration.
  *
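A short sketch of turning the new tool on. The model name and app bootstrap are placeholders; passing a CodeExecutionTool in `tools` follows directly from the widened Tool union above:

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel } from '@firebase/ai';

    const ai = getAI(initializeApp({ /* your Firebase config */ }));

    const model = getGenerativeModel(ai, {
      model: 'gemini-2.5-flash', // illustrative model name
      tools: [{ codeExecution: {} }] // empty object today, reserved for future options
    });

    void (async () => {
      const result = await model.generateContent(
        'Compute the 50th Fibonacci number by writing and running code.'
      );
      // ExecutableCodePart / CodeExecutionResultPart entries may appear here.
      console.log(result.response.candidates?.[0]?.content.parts);
    })();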
@@ -315,19 +325,17 @@ export interface FunctionCallingConfig {
  allowedFunctionNames?: string[];
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Encapsulates configuration for on-device inference.
  *
- * @public
+ * @beta
  */
  export interface OnDeviceParams {
  createOptions?: LanguageModelCreateOptions;
  promptOptions?: LanguageModelPromptOptions;
  }
  /**
- * <b>(EXPERIMENTAL)</b>
  * Configures hybrid inference.
- * @public
+ * @beta
  */
  export interface HybridParams {
  /**
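The HybridParams body is cut off by the hunk, so the sketch below assumes it carries the inference `mode` plus an `onDeviceParams` field for the options above; those field names are assumptions, not confirmed by this diff:

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel, InferenceMode, OnDeviceParams } from '@firebase/ai';

    // Tune the Chrome on-device session; only fields shown in this diff are used.
    const onDeviceParams: OnDeviceParams = {
      createOptions: { topK: 3 },
      promptOptions: { responseConstraint: { type: 'object' } }
    };

    const ai = getAI(initializeApp({ /* your Firebase config */ }));
    const model = getGenerativeModel(ai, {
      mode: InferenceMode.PREFER_ON_DEVICE, // dispatch strategy
      onDeviceParams                        // assumed HybridParams field name
    });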
package/dist/index.cjs.js CHANGED
@@ -8,7 +8,7 @@ var util = require('@firebase/util');
  var logger$1 = require('@firebase/logger');

  var name = "@firebase/ai";
- var version = "2.2.1-canary.a4848b401";
+ var version = "2.2.1-canary.c1237662e";

  /**
  * @license
@@ -357,14 +357,51 @@ const ResponseModality = {
  AUDIO: 'AUDIO'
  };
  /**
- * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
- * @public
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
+ * @beta
  */
  const InferenceMode = {
  'PREFER_ON_DEVICE': 'prefer_on_device',
  'ONLY_ON_DEVICE': 'only_on_device',
- 'ONLY_IN_CLOUD': 'only_in_cloud'
+ 'ONLY_IN_CLOUD': 'only_in_cloud',
+ 'PREFER_IN_CLOUD': 'prefer_in_cloud'
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ const Outcome = {
+ UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
+ OK: 'OUTCOME_OK',
+ FAILED: 'OUTCOME_FAILED',
+ DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ const Language = {
+ UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
+ PYTHON: 'PYTHON'
  };

  /**
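Note that the runtime values above are the raw backend enum strings (OUTCOME_OK, LANGUAGE_UNSPECIFIED, and so on), so comparisons should go through the exported consts rather than hard-coded literals. A small sketch; the import path is assumed:

    import { Outcome, Language } from '@firebase/ai';

    // Turns a CodeExecutionResult-style outcome/language pair into a readable line.
    function describeExecution(outcome: string | undefined, language: string | undefined): string {
      const lang = language === Language.PYTHON ? 'Python' : 'unknown language';
      if (outcome === Outcome.OK) {
        return `The ${lang} code ran successfully.`;
      }
      if (outcome === Outcome.DEADLINE_EXCEEDED) {
        return `The ${lang} code timed out.`;
      }
      return `The ${lang} code failed or its outcome was unspecified.`;
    }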
@@ -2061,6 +2098,72 @@ function aggregateResponses(responses) {
  return aggregatedResponse;
  }

+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const errorsCausingFallback = [
+ // most network errors
+ AIErrorCode.FETCH_ERROR,
+ // fallback code for all other errors in makeRequest
+ AIErrorCode.ERROR,
+ // error due to API not being enabled in project
+ AIErrorCode.API_NOT_ENABLED
+ ];
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+ if (!chromeAdapter) {
+ return inCloudCall();
+ }
+ switch (chromeAdapter.mode) {
+ case InferenceMode.ONLY_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+ case InferenceMode.ONLY_IN_CLOUD:
+ return inCloudCall();
+ case InferenceMode.PREFER_IN_CLOUD:
+ try {
+ return await inCloudCall();
+ }
+ catch (e) {
+ if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+ return onDeviceCall();
+ }
+ throw e;
+ }
+ case InferenceMode.PREFER_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ return inCloudCall();
+ default:
+ throw new AIError(AIErrorCode.ERROR, `Unexpected inference mode: ${chromeAdapter.mode}`);
+ }
+ }
+

  /**
  * @license
@@ -2085,13 +2188,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
  /* stream */ true, JSON.stringify(params), requestOptions);
  }
  async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContentStream(params);
- }
- else {
- response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
  return processStream(response, apiSettings); // TODO: Map streaming responses
  }
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -2102,13 +2199,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
  /* stream */ false, JSON.stringify(params), requestOptions);
  }
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContent(params);
- }
- else {
- response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
  return {
@@ -2318,7 +2409,9 @@ function validateChatHistory(history) {
  functionCall: 0,
  functionResponse: 0,
  thought: 0,
- thoughtSignature: 0
+ thoughtSignature: 0,
+ executableCode: 0,
+ codeExecutionResult: 0
  };
  for (const part of parts) {
  for (const key of VALID_PART_FIELDS) {
@@ -2519,8 +2612,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
  return response.json();
  }
  async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- return (await chromeAdapter.countTokens(params)).json();
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
  }
  return countTokensOnCloud(apiSettings, model, params, requestOptions);
  }
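With this change, token counting always goes to the cloud backend; under ONLY_ON_DEVICE it now throws rather than returning counts from a different tokenizer. A hedged sketch of what callers should expect; model setup mirrors the earlier examples:

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel, InferenceMode, AIError } from '@firebase/ai';

    const ai = getAI(initializeApp({ /* your Firebase config */ }));
    const onDeviceOnly = getGenerativeModel(ai, { mode: InferenceMode.ONLY_ON_DEVICE });

    void (async () => {
      try {
        const { totalTokens } = await onDeviceOnly.countTokens('How many tokens is this?');
        console.log(totalTokens);
      } catch (e) {
        // Expected after this change:
        // "countTokens() is not supported for on-device models."
        if (e instanceof AIError) {
          console.warn(e.message);
        }
      }
    })();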
@@ -3972,12 +4065,14 @@ exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel;
  exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel;
  exports.InferenceMode = InferenceMode;
  exports.IntegerSchema = IntegerSchema;
+ exports.Language = Language;
  exports.LiveGenerativeModel = LiveGenerativeModel;
  exports.LiveResponseType = LiveResponseType;
  exports.LiveSession = LiveSession;
  exports.Modality = Modality;
  exports.NumberSchema = NumberSchema;
  exports.ObjectSchema = ObjectSchema;
+ exports.Outcome = Outcome;
  exports.POSSIBLE_ROLES = POSSIBLE_ROLES;
  exports.ResponseModality = ResponseModality;
  exports.Schema = Schema;