@firebase/ai 2.2.1-canary.55f3f83a7 → 2.2.1-canary.9b8ab02c5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,28 @@
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ import { GenerateContentRequest, ChromeAdapter } from '../types';
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ export declare function callCloudOrDevice<Response>(request: GenerateContentRequest, chromeAdapter: ChromeAdapter | undefined, onDeviceCall: () => Promise<Response>, inCloudCall: () => Promise<Response>): Promise<Response>;
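
To make the calling convention concrete, here is a minimal, hedged usage sketch. It is not SDK code: callCloudOrDevice is an internal helper, so the relative import path, the two backend functions, and the wrapper below are illustrative placeholders; only the parameter shape comes from the declaration above.

    // Usage sketch (placeholders marked). Both callbacks are handed over
    // unevaluated; only the branch chosen from the adapter's inference mode runs.
    import { callCloudOrDevice } from './hybrid-helpers';             // hypothetical path
    import { GenerateContentRequest, ChromeAdapter } from '../types'; // as in the new file

    declare function runOnDevice(): Promise<Response>; // placeholder on-device backend
    declare function runInCloud(): Promise<Response>;  // placeholder in-cloud backend

    export async function generate(
      request: GenerateContentRequest,
      adapter: ChromeAdapter | undefined // undefined => always in-cloud
    ): Promise<Response> {
      return callCloudOrDevice<Response>(request, adapter, runOnDevice, runInCloud);
    }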
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
- import { Role } from './enums';
+ import { Language, Outcome, Role } from './enums';
  /**
  * Content type for both prompts and response candidates.
  * @public
@@ -28,7 +28,7 @@ export interface Content {
  * part types.
  * @public
  */
- export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart;
+ export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart | ExecutableCodePart | CodeExecutionResultPart;
  /**
  * Content part interface if the part represents a text string.
  * @public
@@ -43,6 +43,8 @@ export interface TextPart {
  * @internal
  */
  thoughtSignature?: string;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Content part interface if the part represents an image.
@@ -62,6 +64,8 @@ export interface InlineDataPart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Describes the input video content.
@@ -93,6 +97,8 @@ export interface FunctionCallPart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Content part interface if the part represents {@link FunctionResponse}.
@@ -108,6 +114,8 @@ export interface FunctionResponsePart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
  }
  /**
  * Content part interface if the part represents {@link FileData}
@@ -124,6 +132,77 @@ export interface FileDataPart {
  * @internal
  */
  thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: never;
+ }
+ /**
+ * Represents the code that is executed by the model.
+ *
+ * @public
+ */
+ export interface ExecutableCodePart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData: never;
+ thought?: never;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: ExecutableCode;
+ codeExecutionResult?: never;
+ }
+ /**
+ * Represents the code execution result from the model.
+ *
+ * @public
+ */
+ export interface CodeExecutionResultPart {
+ text?: never;
+ inlineData?: never;
+ functionCall?: never;
+ functionResponse?: never;
+ fileData: never;
+ thought?: never;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
+ executableCode?: never;
+ codeExecutionResult?: CodeExecutionResult;
+ }
+ /**
+ * An interface for executable code returned by the model.
+ *
+ * @public
+ */
+ export interface ExecutableCode {
+ /**
+ * The programming language of the code.
+ */
+ language?: Language;
+ /**
+ * The source code to be executed.
+ */
+ code?: string;
+ }
+ /**
+ * The results of code execution run by the model.
+ *
+ * @public
+ */
+ export interface CodeExecutionResult {
+ /**
+ * The result of the code execution.
+ */
+ outcome?: Outcome;
+ /**
+ * The output from the code execution, or an error message
+ * if it failed.
+ */
+ output?: string;
  }
  /**
  * A predicted {@link FunctionCall} returned from the model
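
Because every Part variant now declares mutually exclusive executableCode and codeExecutionResult fields, a candidate's parts can be narrowed by checking which field is populated. A small hedged sketch (the helper is illustrative; Part and the new Outcome export are assumed to be available from this package version):

    import { Part, Outcome } from '@firebase/ai';

    // Illustrative helper: logs any code the model executed and its result.
    function logCodeExecution(parts: Part[]): void {
      for (const part of parts) {
        if (part.executableCode) {
          // ExecutableCodePart: source the model generated and ran.
          console.log(`[${part.executableCode.language ?? 'unknown'}]`, part.executableCode.code);
        } else if (part.codeExecutionResult) {
          // CodeExecutionResultPart: stdout, or an error message on failure.
          const ok = part.codeExecutionResult.outcome === Outcome.OK;
          console.log(ok ? 'output:' : 'error:', part.codeExecutionResult.output);
        }
      }
    }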
@@ -319,16 +319,67 @@ export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseMo
  /**
  * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
  * @public
  */
  export declare const InferenceMode: {
  readonly PREFER_ON_DEVICE: "prefer_on_device";
  readonly ONLY_ON_DEVICE: "only_on_device";
  readonly ONLY_IN_CLOUD: "only_in_cloud";
+ readonly PREFER_IN_CLOUD: "prefer_in_cloud";
  };
  /**
  * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
+ *
  * @public
  */
  export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ export declare const Outcome: {
+ UNSPECIFIED: string;
+ OK: string;
+ FAILED: string;
+ DEADLINE_EXCEEDED: string;
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ export type Outcome = (typeof Outcome)[keyof typeof Outcome];
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ export declare const Language: {
+ UNSPECIFIED: string;
+ PYTHON: string;
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ export type Language = (typeof Language)[keyof typeof Language];
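
The new PREFER_IN_CLOUD value slots into the existing hybrid-inference setup. A hedged sketch of selecting it when creating a model; the hybrid-params shape (a mode field plus optional inCloudParams) is an assumption based on the SDK's existing on-device modes, and the model name is illustrative:

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel, InferenceMode } from '@firebase/ai';

    const ai = getAI(initializeApp({ /* your Firebase config */ }));

    // Assumption: hybrid inference is configured via a `mode` option, as with
    // the existing PREFER_ON_DEVICE / ONLY_ON_DEVICE modes.
    const model = getGenerativeModel(ai, {
      mode: InferenceMode.PREFER_IN_CLOUD,          // cloud first, device as fallback
      inCloudParams: { model: 'gemini-2.0-flash' }, // illustrative cloud model
    });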
@@ -218,7 +218,7 @@ export interface RequestOptions {
  * Defines a tool that model can call to access external knowledge.
  * @public
  */
- export type Tool = FunctionDeclarationsTool | GoogleSearchTool;
+ export type Tool = FunctionDeclarationsTool | GoogleSearchTool | CodeExecutionTool;
  /**
  * Structured representation of a function declaration as defined by the
  * {@link https://spec.openapis.org/oas/v3.0.3 | OpenAPI 3.0 specification}.
@@ -262,8 +262,6 @@ export interface GoogleSearchTool {
  /**
  * Specifies the Google Search configuration.
  * Currently, this is an empty object, but it's reserved for future configuration options.
- * Specifies the Google Search configuration. Currently, this is an empty object, but it's
- * reserved for future configuration options.
  *
  * When using this feature, you are required to comply with the "Grounding with Google Search"
  * usage requirements for your chosen API provider: {@link https://ai.google.dev/gemini-api/terms#grounding-with-google-search | Gemini Developer API}
@@ -272,6 +270,18 @@ export interface GoogleSearchTool {
  */
  googleSearch: GoogleSearch;
  }
+ /**
+ * A tool that enables the model to use code execution.
+ *
+ * @public
+ */
+ export interface CodeExecutionTool {
+ /**
+ * Specifies the Google Search configuration.
+ * Currently, this is an empty object, but it's reserved for future configuration options.
+ */
+ codeExecution: {};
+ }
  /**
  * Specifies the Google Search configuration.
  *
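
With CodeExecutionTool added to the Tool union, enabling the feature should only require passing an empty codeExecution object in the model's tools array. A hedged sketch (model name and prompt are illustrative):

    import { initializeApp } from '@firebase/app';
    import { getAI, getGenerativeModel } from '@firebase/ai';

    const ai = getAI(initializeApp({ /* your Firebase config */ }));

    const model = getGenerativeModel(ai, {
      model: 'gemini-2.5-flash',        // illustrative model name
      tools: [{ codeExecution: {} }],   // CodeExecutionTool: empty object for now
    });

    const result = await model.generateContent(
      'Compute the 50th Fibonacci number by running Python code.'
    );
    // The returned candidates may now contain executableCode and
    // codeExecutionResult parts alongside the usual text parts.
    console.log(result.response.text());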
package/dist/index.cjs.js CHANGED
@@ -8,7 +8,7 @@ var util = require('@firebase/util');
  var logger$1 = require('@firebase/logger');

  var name = "@firebase/ai";
- var version = "2.2.1-canary.55f3f83a7";
+ var version = "2.2.1-canary.9b8ab02c5";

  /**
  * @license
@@ -359,12 +359,50 @@ const ResponseModality = {
  /**
  * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
  * @public
  */
  const InferenceMode = {
  'PREFER_ON_DEVICE': 'prefer_on_device',
  'ONLY_ON_DEVICE': 'only_on_device',
- 'ONLY_IN_CLOUD': 'only_in_cloud'
+ 'ONLY_IN_CLOUD': 'only_in_cloud',
+ 'PREFER_IN_CLOUD': 'prefer_in_cloud'
+ };
+ /**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+ const Outcome = {
+ UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
+ OK: 'OUTCOME_OK',
+ FAILED: 'OUTCOME_FAILED',
+ DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
+ };
+ /**
+ * The programming language of the code.
+ *
+ * @public
+ */
+ const Language = {
+ UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
+ PYTHON: 'PYTHON'
  };

  /**
@@ -2061,6 +2099,72 @@ function aggregateResponses(responses) {
  return aggregatedResponse;
  }

+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const errorsCausingFallback = [
+ // most network errors
+ AIErrorCode.FETCH_ERROR,
+ // fallback code for all other errors in makeRequest
+ AIErrorCode.ERROR,
+ // error due to API not being enabled in project
+ AIErrorCode.API_NOT_ENABLED
+ ];
+ /**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+ async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+ if (!chromeAdapter) {
+ return inCloudCall();
+ }
+ switch (chromeAdapter.mode) {
+ case InferenceMode.ONLY_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+ case InferenceMode.ONLY_IN_CLOUD:
+ return inCloudCall();
+ case InferenceMode.PREFER_IN_CLOUD:
+ try {
+ return await inCloudCall();
+ }
+ catch (e) {
+ if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+ return onDeviceCall();
+ }
+ throw e;
+ }
+ case InferenceMode.PREFER_ON_DEVICE:
+ if (await chromeAdapter.isAvailable(request)) {
+ return onDeviceCall();
+ }
+ return inCloudCall();
+ default:
+ throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
+ }
+ }
+
  /**
  * @license
  * Copyright 2024 Google LLC
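
The fallback rules can be read directly off the switch above: PREFER_IN_CLOUD retries on-device only for the three codes in errorsCausingFallback. A hedged sketch of exercising that path with a stub adapter; only the two fields the dispatcher may read (mode, isAvailable) are stubbed, and callCloudOrDevice, AIError, AIErrorCode, and InferenceMode are assumed to be in scope as in the compiled module above:

    // Sketch only: not a real ChromeAdapter, just enough for the dispatcher.
    const stubAdapter = {
      mode: InferenceMode.PREFER_IN_CLOUD,
      isAvailable: async () => true,
    };

    const result = await callCloudOrDevice(
      { contents: [] },                  // placeholder request
      stubAdapter,
      async () => 'on-device answer',    // fallback path
      async () => {
        // FETCH_ERROR is listed in errorsCausingFallback, so this failure
        // triggers the on-device retry instead of propagating to the caller.
        throw new AIError(AIErrorCode.FETCH_ERROR, 'network unreachable');
      }
    );
    // result === 'on-device answer'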
@@ -2085,13 +2189,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
  /* stream */ true, JSON.stringify(params), requestOptions);
  }
  async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContentStream(params);
- }
- else {
- response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
  return processStream(response, apiSettings); // TODO: Map streaming responses
  }
  async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
@@ -2102,13 +2200,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
  /* stream */ false, JSON.stringify(params), requestOptions);
  }
  async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
- let response;
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- response = await chromeAdapter.generateContent(params);
- }
- else {
- response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
- }
+ const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
  return {
@@ -2318,7 +2410,9 @@ function validateChatHistory(history) {
  functionCall: 0,
  functionResponse: 0,
  thought: 0,
- thoughtSignature: 0
+ thoughtSignature: 0,
+ executableCode: 0,
+ codeExecutionResult: 0
  };
  for (const part of parts) {
  for (const key of VALID_PART_FIELDS) {
@@ -2519,8 +2613,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
  return response.json();
  }
  async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
- if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
- return (await chromeAdapter.countTokens(params)).json();
+ if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
  }
  return countTokensOnCloud(apiSettings, model, params, requestOptions);
  }
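
Note the behavioral change: with ONLY_ON_DEVICE, countTokens no longer routes to chromeAdapter.countTokens() and instead throws an UNSUPPORTED error, while every other mode counts tokens in the cloud. A hedged sketch of what callers should now expect (the model is assumed to have been created with mode: InferenceMode.ONLY_ON_DEVICE):

    try {
      const { totalTokens } = await model.countTokens('How long is this prompt?');
      console.log('total tokens:', totalTokens);
    } catch (e) {
      // As of this version, ONLY_ON_DEVICE models reject token counting with
      // AIErrorCode.UNSUPPORTED rather than calling the on-device adapter.
      console.warn('countTokens is cloud-only:', (e as Error).message);
    }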
@@ -3972,12 +4066,14 @@ exports.ImagenPersonFilterLevel = ImagenPersonFilterLevel;
  exports.ImagenSafetyFilterLevel = ImagenSafetyFilterLevel;
  exports.InferenceMode = InferenceMode;
  exports.IntegerSchema = IntegerSchema;
+ exports.Language = Language;
  exports.LiveGenerativeModel = LiveGenerativeModel;
  exports.LiveResponseType = LiveResponseType;
  exports.LiveSession = LiveSession;
  exports.Modality = Modality;
  exports.NumberSchema = NumberSchema;
  exports.ObjectSchema = ObjectSchema;
+ exports.Outcome = Outcome;
  exports.POSSIBLE_ROLES = POSSIBLE_ROLES;
  exports.ResponseModality = ResponseModality;
  exports.Schema = Schema;