@firebase/ai 2.2.1 → 2.3.0-canary.cb3bdd812
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-public.d.ts +162 -32
- package/dist/ai.d.ts +168 -32
- package/dist/esm/index.esm.js +618 -508
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/factory-browser.d.ts +19 -0
- package/dist/esm/src/requests/hybrid-helpers.d.ts +28 -0
- package/dist/esm/src/types/chrome-adapter.d.ts +2 -2
- package/dist/esm/src/types/content.d.ts +81 -2
- package/dist/esm/src/types/enums.d.ts +53 -4
- package/dist/esm/src/types/language-model.d.ts +10 -20
- package/dist/esm/src/types/requests.d.ts +15 -7
- package/dist/index.cjs.js +619 -507
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +116 -21
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +115 -22
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/factory-browser.d.ts +19 -0
- package/dist/src/requests/hybrid-helpers.d.ts +28 -0
- package/dist/src/types/chrome-adapter.d.ts +2 -2
- package/dist/src/types/content.d.ts +81 -2
- package/dist/src/types/enums.d.ts +53 -4
- package/dist/src/types/language-model.d.ts +10 -20
- package/dist/src/types/requests.d.ts +15 -7
- package/package.json +10 -9
package/dist/index.node.mjs
CHANGED
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
 import { Logger } from '@firebase/logger';
 
 var name = "@firebase/ai";
-var version = "2.2.1";
+var version = "2.3.0-canary.cb3bdd812";
 
 /**
  * @license
@@ -297,14 +297,51 @@ const ResponseModality = {
     AUDIO: 'AUDIO'
 };
 /**
- * <b>(EXPERIMENTAL)</b>
  * Determines whether inference happens on-device or in-cloud.
- *
+ *
+ * @remarks
+ * <b>PREFER_ON_DEVICE:</b> Attempt to make inference calls using an
+ * on-device model. If on-device inference is not available, the SDK
+ * will fall back to using a cloud-hosted model.
+ * <br/>
+ * <b>ONLY_ON_DEVICE:</b> Only attempt to make inference calls using an
+ * on-device model. The SDK will not fall back to a cloud-hosted model.
+ * If on-device inference is not available, inference methods will throw.
+ * <br/>
+ * <b>ONLY_IN_CLOUD:</b> Only attempt to make inference calls using a
+ * cloud-hosted model. The SDK will not fall back to an on-device model.
+ * <br/>
+ * <b>PREFER_IN_CLOUD:</b> Attempt to make inference calls to a
+ * cloud-hosted model. If not available, the SDK will fall back to an
+ * on-device model.
+ *
+ * @beta
  */
 const InferenceMode = {
     'PREFER_ON_DEVICE': 'prefer_on_device',
     'ONLY_ON_DEVICE': 'only_on_device',
-    'ONLY_IN_CLOUD': 'only_in_cloud'
+    'ONLY_IN_CLOUD': 'only_in_cloud',
+    'PREFER_IN_CLOUD': 'prefer_in_cloud'
+};
+/**
+ * Represents the result of the code execution.
+ *
+ * @public
+ */
+const Outcome = {
+    UNSPECIFIED: 'OUTCOME_UNSPECIFIED',
+    OK: 'OUTCOME_OK',
+    FAILED: 'OUTCOME_FAILED',
+    DEADLINE_EXCEEDED: 'OUTCOME_DEADLINE_EXCEEDED'
+};
+/**
+ * The programming language of the code.
+ *
+ * @public
+ */
+const Language = {
+    UNSPECIFIED: 'LANGUAGE_UNSPECIFIED',
+    PYTHON: 'PYTHON'
 };
 
 /**
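The expanded `InferenceMode` JSDoc above is the user-facing contract for hybrid inference. As a rough illustration only, here is a sketch of selecting the new `PREFER_IN_CLOUD` mode when creating a model; the hybrid-options shape (`mode`, `inCloudParams`) and the model name are assumptions not shown in this diff.

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, InferenceMode } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app);

// Assumed hybrid options: `mode` picks the dispatch strategy described above,
// `inCloudParams` configures the cloud-hosted model used when dispatch goes to the cloud.
const model = getGenerativeModel(ai, {
  mode: InferenceMode.PREFER_IN_CLOUD,
  inCloudParams: { model: 'gemini-2.0-flash' } // hypothetical model name
});

const result = await model.generateContent('Say hello.');
console.log(result.response.text());
```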
@@ -1734,6 +1771,72 @@ function aggregateResponses(responses) {
     return aggregatedResponse;
 }
 
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+const errorsCausingFallback = [
+    // most network errors
+    AIErrorCode.FETCH_ERROR,
+    // fallback code for all other errors in makeRequest
+    AIErrorCode.ERROR,
+    // error due to API not being enabled in project
+    AIErrorCode.API_NOT_ENABLED
+];
+/**
+ * Dispatches a request to the appropriate backend (on-device or in-cloud)
+ * based on the inference mode.
+ *
+ * @param request - The request to be sent.
+ * @param chromeAdapter - The on-device model adapter.
+ * @param onDeviceCall - The function to call for on-device inference.
+ * @param inCloudCall - The function to call for in-cloud inference.
+ * @returns The response from the backend.
+ */
+async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
+    if (!chromeAdapter) {
+        return inCloudCall();
+    }
+    switch (chromeAdapter.mode) {
+        case InferenceMode.ONLY_ON_DEVICE:
+            if (await chromeAdapter.isAvailable(request)) {
+                return onDeviceCall();
+            }
+            throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
+        case InferenceMode.ONLY_IN_CLOUD:
+            return inCloudCall();
+        case InferenceMode.PREFER_IN_CLOUD:
+            try {
+                return await inCloudCall();
+            }
+            catch (e) {
+                if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
+                    return onDeviceCall();
+                }
+                throw e;
+            }
+        case InferenceMode.PREFER_ON_DEVICE:
+            if (await chromeAdapter.isAvailable(request)) {
+                return onDeviceCall();
+            }
+            return inCloudCall();
+        default:
+            throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
+    }
+}
+
 /**
  * @license
  * Copyright 2024 Google LLC
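`callCloudOrDevice` is internal, but its error behavior is observable: with `ONLY_ON_DEVICE` and no available on-device model it throws `AIErrorCode.UNSUPPORTED`, and with `PREFER_IN_CLOUD` it falls back to the device only for the error codes in `errorsCausingFallback`. A hedged sketch of handling that error in application code; the hybrid-options shape passed to `getGenerativeModel` is assumed, as in the earlier sketch.

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, AIError, AIErrorCode, InferenceMode } from 'firebase/ai';

const ai = getAI(initializeApp({ /* your Firebase config */ }));
// Assumed hybrid-options shape: on-device only, no cloud fallback.
const onDeviceOnlyModel = getGenerativeModel(ai, { mode: InferenceMode.ONLY_ON_DEVICE });

try {
  const result = await onDeviceOnlyModel.generateContent('Summarize this page.');
  console.log(result.response.text());
} catch (e) {
  if (e instanceof AIError && e.code === AIErrorCode.UNSUPPORTED) {
    // With ONLY_ON_DEVICE, the dispatcher throws instead of falling back to the cloud.
  } else {
    throw e;
  }
}
```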
@@ -1758,13 +1861,7 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
     /* stream */ true, JSON.stringify(params), requestOptions);
 }
 async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
-    let response;
-    if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
-        response = await chromeAdapter.generateContentStream(params);
-    }
-    else {
-        response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
-    }
+    const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
     return processStream(response, apiSettings); // TODO: Map streaming responses
 }
 async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
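`generateContentStream` now routes through the same dispatcher as `generateContent`. For reference, a brief usage sketch of consuming the stream; the model name is a hypothetical placeholder.

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

const ai = getAI(initializeApp({ /* your Firebase config */ }));
const model = getGenerativeModel(ai, { model: 'gemini-2.0-flash' }); // hypothetical model name

// generateContentStream resolves to an object exposing an async-iterable `stream`
// plus a promise for the aggregated `response`.
const streamingResult = await model.generateContentStream('Write a haiku about version bumps.');
for await (const chunk of streamingResult.stream) {
  process.stdout.write(chunk.text());
}
const aggregated = await streamingResult.response;
console.log('\n' + aggregated.text());
```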
@@ -1775,13 +1872,7 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
     /* stream */ false, JSON.stringify(params), requestOptions);
 }
 async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
-    let response;
-    if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
-        response = await chromeAdapter.generateContent(params);
-    }
-    else {
-        response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
-    }
+    const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
     const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
     const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
     return {
@@ -1991,7 +2082,9 @@ function validateChatHistory(history) {
             functionCall: 0,
             functionResponse: 0,
             thought: 0,
-            thoughtSignature: 0
+            thoughtSignature: 0,
+            executableCode: 0,
+            codeExecutionResult: 0
         };
         for (const part of parts) {
            for (const key of VALID_PART_FIELDS) {
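Chat-history validation now counts `executableCode` and `codeExecutionResult` parts, matching the new `Outcome` and `Language` constants. As an illustration only, a sketch of what such parts might look like in a history; the exact `ExecutableCodePart` / `CodeExecutionResultPart` field shapes are assumed from the Gemini API's code-execution types and are not shown in this diff.

```ts
import { Language, Outcome, type Content } from 'firebase/ai';

// Assumed part shapes for code-execution content in a model turn.
const history: Content[] = [
  { role: 'user', parts: [{ text: 'Compute 7 factorial with code.' }] },
  {
    role: 'model',
    parts: [
      { executableCode: { language: Language.PYTHON, code: 'import math\nprint(math.factorial(7))' } },
      { codeExecutionResult: { outcome: Outcome.OK, output: '5040' } },
      { text: '7! = 5040.' }
    ]
  }
];
```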
@@ -2192,8 +2285,8 @@ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
     return response.json();
 }
 async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
-    if (chromeAdapter
-
+    if (chromeAdapter?.mode === InferenceMode.ONLY_ON_DEVICE) {
+        throw new AIError(AIErrorCode.UNSUPPORTED, 'countTokens() is not supported for on-device models.');
     }
     return countTokensOnCloud(apiSettings, model, params, requestOptions);
 }
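With this change, `countTokens()` always goes to the cloud and throws `AIErrorCode.UNSUPPORTED` when the model was configured as `ONLY_ON_DEVICE`. A hedged sketch of handling that; the hybrid-options shape is again an assumption.

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, AIError, AIErrorCode, InferenceMode } from 'firebase/ai';

const ai = getAI(initializeApp({ /* your Firebase config */ }));
// Assumed hybrid-options shape; token counting is only available for cloud-hosted models.
const model = getGenerativeModel(ai, { mode: InferenceMode.ONLY_ON_DEVICE });

try {
  const { totalTokens } = await model.countTokens('How many tokens am I?');
  console.log(totalTokens);
} catch (e) {
  if (e instanceof AIError && e.code === AIErrorCode.UNSUPPORTED) {
    // countTokens() is not supported for on-device models.
  }
}
```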
@@ -3629,5 +3722,5 @@ function registerAI() {
 }
 registerAI();
 
-export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
 //# sourceMappingURL=index.node.mjs.map