@firebase/ai 2.4.0-canary.261508183 → 2.4.0-canary.91c218db2
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/ai-public.d.ts +23 -0
- package/dist/ai.d.ts +23 -0
- package/dist/esm/index.esm.js +74 -30
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/methods/chrome-adapter.d.ts +1 -1
- package/dist/esm/src/requests/hybrid-helpers.d.ts +7 -2
- package/dist/esm/src/requests/response-helpers.d.ts +2 -2
- package/dist/esm/src/requests/stream-reader.d.ts +2 -1
- package/dist/esm/src/types/enums.d.ts +15 -0
- package/dist/esm/src/types/responses.d.ts +7 -1
- package/dist/index.cjs.js +74 -29
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +54 -22
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +54 -23
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/methods/chrome-adapter.d.ts +1 -1
- package/dist/src/requests/hybrid-helpers.d.ts +7 -2
- package/dist/src/requests/response-helpers.d.ts +2 -2
- package/dist/src/requests/stream-reader.d.ts +2 -1
- package/dist/src/types/enums.d.ts +15 -0
- package/dist/src/types/responses.d.ts +7 -1
- package/package.json +8 -8
package/dist/index.node.mjs
CHANGED
```diff
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
 import { Logger } from '@firebase/logger';
 
 var name = "@firebase/ai";
-var version = "2.4.0-canary.261508183";
+var version = "2.4.0-canary.91c218db2";
 
 /**
  * @license
```
```diff
@@ -323,6 +323,15 @@ const InferenceMode = {
     'ONLY_IN_CLOUD': 'only_in_cloud',
     'PREFER_IN_CLOUD': 'prefer_in_cloud'
 };
+/**
+ * Indicates whether inference happened on-device or in-cloud.
+ *
+ * @beta
+ */
+const InferenceSource = {
+    'ON_DEVICE': 'on_device',
+    'IN_CLOUD': 'in_cloud'
+};
 /**
  * Represents the result of the code execution.
  *
```
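The new `InferenceSource` constant follows the same frozen-object enum pattern as the existing `InferenceMode`. A minimal sketch of the matching type declaration, inferred from the runtime object above rather than copied from the package's `enums.d.ts` (which grows by 15 lines in this release):

```ts
// Sketch only: names and values taken from the runtime object in the hunk above.
export const InferenceSource = {
  ON_DEVICE: 'on_device',
  IN_CLOUD: 'in_cloud'
} as const;

// The union 'on_device' | 'in_cloud' that consumers would annotate with.
export type InferenceSource = (typeof InferenceSource)[keyof typeof InferenceSource];
```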
```diff
@@ -1269,7 +1278,7 @@ function hasValidCandidates(response) {
  * Creates an EnhancedGenerateContentResponse object that has helper functions and
  * other modifications that improve usability.
  */
-function createEnhancedContentResponse(response) {
+function createEnhancedContentResponse(response, inferenceSource = InferenceSource.IN_CLOUD) {
     /**
      * The Vertex AI backend omits default values.
      * This causes the `index` property to be omitted from the first candidate in the
@@ -1280,6 +1289,7 @@ function createEnhancedContentResponse(response) {
         response.candidates[0].index = 0;
     }
     const responseWithHelpers = addHelpers(response);
+    responseWithHelpers.inferenceSource = inferenceSource;
     return responseWithHelpers;
 }
 /**
```
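With the default parameter, every enhanced response now carries an `inferenceSource`, falling back to `InferenceSource.IN_CLOUD` when no source is supplied. A hedged sketch of the field the public response type would plausibly gain (the actual declaration lives in `responses.d.ts`, which changes +7 -1 in this release; names here are inferred from the assignment above, not copied from that file):

```ts
// Hypothetical declaration, for illustration only.
interface EnhancedGenerateContentResponse extends GenerateContentResponse {
  /** Whether this response came from the on-device model or the cloud. @beta */
  inferenceSource: InferenceSource;
}
```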
```diff
@@ -1656,16 +1666,16 @@ const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/;
  *
  * @param response - Response from a fetch call
  */
-function processStream(response, apiSettings) {
+function processStream(response, apiSettings, inferenceSource) {
     const inputStream = response.body.pipeThrough(new TextDecoderStream('utf8', { fatal: true }));
     const responseStream = getResponseStream(inputStream);
     const [stream1, stream2] = responseStream.tee();
     return {
-        stream: generateResponseSequence(stream1, apiSettings),
-        response: getResponsePromise(stream2, apiSettings)
+        stream: generateResponseSequence(stream1, apiSettings, inferenceSource),
+        response: getResponsePromise(stream2, apiSettings, inferenceSource)
     };
 }
-async function getResponsePromise(stream, apiSettings) {
+async function getResponsePromise(stream, apiSettings, inferenceSource) {
     const allResponses = [];
     const reader = stream.getReader();
     while (true) {
```
```diff
@@ -1675,12 +1685,12 @@ async function getResponsePromise(stream, apiSettings) {
             if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
                 generateContentResponse = mapGenerateContentResponse(generateContentResponse);
             }
-            return createEnhancedContentResponse(generateContentResponse);
+            return createEnhancedContentResponse(generateContentResponse, inferenceSource);
         }
         allResponses.push(value);
     }
 }
-async function* generateResponseSequence(stream, apiSettings) {
+async function* generateResponseSequence(stream, apiSettings, inferenceSource) {
     const reader = stream.getReader();
     while (true) {
         const { value, done } = await reader.read();
```
```diff
@@ -1689,10 +1699,10 @@ async function* generateResponseSequence(stream, apiSettings) {
         }
         let enhancedResponse;
         if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
-            enhancedResponse = createEnhancedContentResponse(mapGenerateContentResponse(value));
+            enhancedResponse = createEnhancedContentResponse(mapGenerateContentResponse(value), inferenceSource);
         }
         else {
-            enhancedResponse = createEnhancedContentResponse(value);
+            enhancedResponse = createEnhancedContentResponse(value, inferenceSource);
         }
         const firstCandidate = enhancedResponse.candidates?.[0];
         // Don't yield a response with no useful data for the developer.
```
```diff
@@ -1862,31 +1872,52 @@ const errorsCausingFallback = [
  */
 async function callCloudOrDevice(request, chromeAdapter, onDeviceCall, inCloudCall) {
     if (!chromeAdapter) {
-        return inCloudCall();
+        return {
+            response: await inCloudCall(),
+            inferenceSource: InferenceSource.IN_CLOUD
+        };
     }
     switch (chromeAdapter.mode) {
         case InferenceMode.ONLY_ON_DEVICE:
             if (await chromeAdapter.isAvailable(request)) {
-                return onDeviceCall();
+                return {
+                    response: await onDeviceCall(),
+                    inferenceSource: InferenceSource.ON_DEVICE
+                };
             }
             throw new AIError(AIErrorCode.UNSUPPORTED, 'Inference mode is ONLY_ON_DEVICE, but an on-device model is not available.');
         case InferenceMode.ONLY_IN_CLOUD:
-            return inCloudCall();
+            return {
+                response: await inCloudCall(),
+                inferenceSource: InferenceSource.IN_CLOUD
+            };
         case InferenceMode.PREFER_IN_CLOUD:
             try {
-                return inCloudCall();
+                return {
+                    response: await inCloudCall(),
+                    inferenceSource: InferenceSource.IN_CLOUD
+                };
             }
             catch (e) {
                 if (e instanceof AIError && errorsCausingFallback.includes(e.code)) {
-                    return onDeviceCall();
+                    return {
+                        response: await onDeviceCall(),
+                        inferenceSource: InferenceSource.ON_DEVICE
+                    };
                 }
                 throw e;
             }
         case InferenceMode.PREFER_ON_DEVICE:
             if (await chromeAdapter.isAvailable(request)) {
-                return onDeviceCall();
+                return {
+                    response: await onDeviceCall(),
+                    inferenceSource: InferenceSource.ON_DEVICE
+                };
             }
-            return inCloudCall();
+            return {
+                response: await inCloudCall(),
+                inferenceSource: InferenceSource.IN_CLOUD
+            };
         default:
             throw new AIError(AIErrorCode.ERROR, `Unexpected infererence mode: ${chromeAdapter.mode}`);
     }
```
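Previously `callCloudOrDevice` resolved with the raw result of whichever call it chose; it now resolves with a wrapper so the routing decision travels alongside the response. The contract, written out as an illustrative TypeScript interface (the name `CallResult` is hypothetical; the runtime code just returns object literals of this shape):

```ts
// Illustrative only; mirrors the object literals returned in the hunk above.
interface CallResult<T> {
  response: T;                      // result of onDeviceCall() or inCloudCall()
  inferenceSource: InferenceSource; // 'on_device' | 'in_cloud'
}
```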
```diff
@@ -1916,8 +1947,8 @@ async function generateContentStreamOnCloud(apiSettings, model, params, requestO
     /* stream */ true, JSON.stringify(params), requestOptions);
 }
 async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
-    const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
-    return processStream(response, apiSettings); // TODO: Map streaming responses
+    const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContentStream(params), () => generateContentStreamOnCloud(apiSettings, model, params, requestOptions));
+    return processStream(callResult.response, apiSettings); // TODO: Map streaming responses
 }
 async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
     if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
```
```diff
@@ -1927,9 +1958,9 @@ async function generateContentOnCloud(apiSettings, model, params, requestOptions
     /* stream */ false, JSON.stringify(params), requestOptions);
 }
 async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
-    const response = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
-    const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
-    const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
+    const callResult = await callCloudOrDevice(params, chromeAdapter, () => chromeAdapter.generateContent(params), () => generateContentOnCloud(apiSettings, model, params, requestOptions));
+    const generateContentResponse = await processGenerateContentResponse(callResult.response, apiSettings);
+    const enhancedResponse = createEnhancedContentResponse(generateContentResponse, callResult.inferenceSource);
     return {
         response: enhancedResponse
     };
```
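Net effect for the non-streaming path: callers can now tell where a result was produced. A hedged usage sketch, assuming an initialized Firebase app and the existing hybrid-inference `mode` option of `getGenerativeModel`:

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, InferenceMode, InferenceSource } from '@firebase/ai';

// Assumes `firebaseConfig` is defined elsewhere; model options abbreviated.
const app = initializeApp(firebaseConfig);
const ai = getAI(app);
const model = getGenerativeModel(ai, { mode: InferenceMode.PREFER_ON_DEVICE });

const result = await model.generateContent('Say hello');
if (result.response.inferenceSource === InferenceSource.ON_DEVICE) {
  console.log('Generated by the on-device model');
}
```

Note that in this build `generateContentStream` does not forward `callResult.inferenceSource` to `processStream` (the streaming TODO remains), so streamed responses keep the `IN_CLOUD` default; only the non-streaming `generateContent` path threads the source through.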
```diff
@@ -3796,5 +3827,5 @@ function registerAI() {
 }
 registerAI();
 
-export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, InferenceSource, IntegerSchema, Language, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, Outcome, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, URLRetrievalStatus, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
 //# sourceMappingURL=index.node.mjs.map
```