@firebase/ai 2.2.1-20250829000033 → 2.2.1-canary.55f3f83a7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs.js CHANGED
@@ -8,7 +8,7 @@ var util = require('@firebase/util');
8
8
  var logger$1 = require('@firebase/logger');
9
9
 
10
10
  var name = "@firebase/ai";
11
- var version = "2.2.1-20250829000033";
11
+ var version = "2.2.1-canary.55f3f83a7";
12
12
 
13
13
  /**
14
14
  * @license
@@ -38,6 +38,62 @@ const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;
38
38
  */
39
39
  const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';
40
40
 
41
+ /**
42
+ * @license
43
+ * Copyright 2024 Google LLC
44
+ *
45
+ * Licensed under the Apache License, Version 2.0 (the "License");
46
+ * you may not use this file except in compliance with the License.
47
+ * You may obtain a copy of the License at
48
+ *
49
+ * http://www.apache.org/licenses/LICENSE-2.0
50
+ *
51
+ * Unless required by applicable law or agreed to in writing, software
52
+ * distributed under the License is distributed on an "AS IS" BASIS,
53
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
54
+ * See the License for the specific language governing permissions and
55
+ * limitations under the License.
56
+ */
57
+ /**
58
+ * Error class for the Firebase AI SDK.
59
+ *
60
+ * @public
61
+ */
62
+ class AIError extends util.FirebaseError {
63
+ /**
64
+ * Constructs a new instance of the `AIError` class.
65
+ *
66
+ * @param code - The error code from {@link (AIErrorCode:type)}.
67
+ * @param message - A human-readable message describing the error.
68
+ * @param customErrorData - Optional error data.
69
+ */
70
+ constructor(code, message, customErrorData) {
71
+ // Match error format used by FirebaseError from ErrorFactory
72
+ const service = AI_TYPE;
73
+ const fullCode = `${service}/${code}`;
74
+ const fullMessage = `${service}: ${message} (${fullCode})`;
75
+ super(code, fullMessage);
76
+ this.code = code;
77
+ this.customErrorData = customErrorData;
78
+ // FirebaseError initializes a stack trace, but it assumes the error is created from the error
79
+ // factory. Since we break this assumption, we set the stack trace to be originating from this
80
+ // constructor.
81
+ // This is only supported in V8.
82
+ if (Error.captureStackTrace) {
83
+ // Allows us to initialize the stack trace without including the constructor itself at the
84
+ // top level of the stack trace.
85
+ Error.captureStackTrace(this, AIError);
86
+ }
87
+ // Allows instanceof AIError in ES5/ES6
88
+ // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work
89
+ // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget
90
+ // which we can now use since we no longer target ES5.
91
+ Object.setPrototypeOf(this, AIError.prototype);
92
+ // Since Error is an interface, we don't inherit toString and so we define it ourselves.
93
+ this.toString = () => fullMessage;
94
+ }
95
+ }
96
+
41
97
  /**
42
98
  * @license
43
99
  * Copyright 2024 Google LLC
@@ -657,105 +713,6 @@ class VertexAIBackend extends Backend {
657
713
  }
658
714
  }
659
715
 
660
- /**
661
- * @license
662
- * Copyright 2024 Google LLC
663
- *
664
- * Licensed under the Apache License, Version 2.0 (the "License");
665
- * you may not use this file except in compliance with the License.
666
- * You may obtain a copy of the License at
667
- *
668
- * http://www.apache.org/licenses/LICENSE-2.0
669
- *
670
- * Unless required by applicable law or agreed to in writing, software
671
- * distributed under the License is distributed on an "AS IS" BASIS,
672
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
673
- * See the License for the specific language governing permissions and
674
- * limitations under the License.
675
- */
676
- class AIService {
677
- constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) {
678
- this.app = app;
679
- this.backend = backend;
680
- this.chromeAdapterFactory = chromeAdapterFactory;
681
- const appCheck = appCheckProvider?.getImmediate({ optional: true });
682
- const auth = authProvider?.getImmediate({ optional: true });
683
- this.auth = auth || null;
684
- this.appCheck = appCheck || null;
685
- if (backend instanceof VertexAIBackend) {
686
- this.location = backend.location;
687
- }
688
- else {
689
- this.location = '';
690
- }
691
- }
692
- _delete() {
693
- return Promise.resolve();
694
- }
695
- set options(optionsToSet) {
696
- this._options = optionsToSet;
697
- }
698
- get options() {
699
- return this._options;
700
- }
701
- }
702
-
703
- /**
704
- * @license
705
- * Copyright 2024 Google LLC
706
- *
707
- * Licensed under the Apache License, Version 2.0 (the "License");
708
- * you may not use this file except in compliance with the License.
709
- * You may obtain a copy of the License at
710
- *
711
- * http://www.apache.org/licenses/LICENSE-2.0
712
- *
713
- * Unless required by applicable law or agreed to in writing, software
714
- * distributed under the License is distributed on an "AS IS" BASIS,
715
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
716
- * See the License for the specific language governing permissions and
717
- * limitations under the License.
718
- */
719
- /**
720
- * Error class for the Firebase AI SDK.
721
- *
722
- * @public
723
- */
724
- class AIError extends util.FirebaseError {
725
- /**
726
- * Constructs a new instance of the `AIError` class.
727
- *
728
- * @param code - The error code from {@link (AIErrorCode:type)}.
729
- * @param message - A human-readable message describing the error.
730
- * @param customErrorData - Optional error data.
731
- */
732
- constructor(code, message, customErrorData) {
733
- // Match error format used by FirebaseError from ErrorFactory
734
- const service = AI_TYPE;
735
- const fullCode = `${service}/${code}`;
736
- const fullMessage = `${service}: ${message} (${fullCode})`;
737
- super(code, fullMessage);
738
- this.code = code;
739
- this.customErrorData = customErrorData;
740
- // FirebaseError initializes a stack trace, but it assumes the error is created from the error
741
- // factory. Since we break this assumption, we set the stack trace to be originating from this
742
- // constructor.
743
- // This is only supported in V8.
744
- if (Error.captureStackTrace) {
745
- // Allows us to initialize the stack trace without including the constructor itself at the
746
- // top level of the stack trace.
747
- Error.captureStackTrace(this, AIError);
748
- }
749
- // Allows instanceof AIError in ES5/ES6
750
- // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work
751
- // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget
752
- // which we can now use since we no longer target ES5.
753
- Object.setPrototypeOf(this, AIError.prototype);
754
- // Since Error is an interface, we don't inherit toString and so we define it ourselves.
755
- this.toString = () => fullMessage;
756
- }
757
- }
758
-
759
716
  /**
760
717
  * @license
761
718
  * Copyright 2025 Google LLC
@@ -816,7 +773,7 @@ function decodeInstanceIdentifier(instanceIdentifier) {
816
773
 
817
774
  /**
818
775
  * @license
819
- * Copyright 2025 Google LLC
776
+ * Copyright 2024 Google LLC
820
777
  *
821
778
  * Licensed under the Apache License, Version 2.0 (the "License");
822
779
  * you may not use this file except in compliance with the License.
@@ -830,133 +787,302 @@ function decodeInstanceIdentifier(instanceIdentifier) {
830
787
  * See the License for the specific language governing permissions and
831
788
  * limitations under the License.
832
789
  */
790
+ const logger = new logger$1.Logger('@firebase/vertexai');
791
+
833
792
  /**
834
- * Base class for Firebase AI model APIs.
793
+ * @internal
794
+ */
795
+ var Availability;
796
+ (function (Availability) {
797
+ Availability["UNAVAILABLE"] = "unavailable";
798
+ Availability["DOWNLOADABLE"] = "downloadable";
799
+ Availability["DOWNLOADING"] = "downloading";
800
+ Availability["AVAILABLE"] = "available";
801
+ })(Availability || (Availability = {}));
802
+
803
+ /**
804
+ * @license
805
+ * Copyright 2025 Google LLC
835
806
  *
836
- * Instances of this class are associated with a specific Firebase AI {@link Backend}
837
- * and provide methods for interacting with the configured generative model.
807
+ * Licensed under the Apache License, Version 2.0 (the "License");
808
+ * you may not use this file except in compliance with the License.
809
+ * You may obtain a copy of the License at
838
810
  *
839
- * @public
811
+ * http://www.apache.org/licenses/LICENSE-2.0
812
+ *
813
+ * Unless required by applicable law or agreed to in writing, software
814
+ * distributed under the License is distributed on an "AS IS" BASIS,
815
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
816
+ * See the License for the specific language governing permissions and
817
+ * limitations under the License.
840
818
  */
841
- class AIModel {
819
+ /**
820
+ * Defines an inference "backend" that uses Chrome's on-device model,
821
+ * and encapsulates logic for detecting when on-device inference is
822
+ * possible.
823
+ */
824
+ class ChromeAdapterImpl {
825
+ constructor(languageModelProvider, mode, onDeviceParams = {
826
+ createOptions: {
827
+ // Defaults to support image inputs for convenience.
828
+ expectedInputs: [{ type: 'image' }]
829
+ }
830
+ }) {
831
+ this.languageModelProvider = languageModelProvider;
832
+ this.mode = mode;
833
+ this.onDeviceParams = onDeviceParams;
834
+ this.isDownloading = false;
835
+ }
842
836
  /**
843
- * Constructs a new instance of the {@link AIModel} class.
844
- *
845
- * This constructor should only be called from subclasses that provide
846
- * a model API.
837
+ * Checks if a given request can be made on-device.
847
838
  *
848
- * @param ai - an {@link AI} instance.
849
- * @param modelName - The name of the model being used. It can be in one of the following formats:
850
- * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
851
- * - `models/my-model` (will resolve to `publishers/google/models/my-model`)
852
- * - `publishers/my-publisher/models/my-model` (fully qualified model name)
839
+ * Encapsulates a few concerns:
840
+ * the mode
841
+ * API existence
842
+ * prompt formatting
843
+ * model availability, including triggering download if necessary
853
844
  *
854
- * @throws If the `apiKey` or `projectId` fields are missing in your
855
- * Firebase config.
856
845
  *
857
- * @internal
846
+ * Pros: callers needn't be concerned with details of on-device availability.</p>
847
+ * Cons: this method spans a few concerns and splits request validation from usage.
848
+ * If instance variables weren't already part of the API, we could consider a better
849
+ * separation of concerns.
858
850
  */
859
- constructor(ai, modelName) {
860
- if (!ai.app?.options?.apiKey) {
861
- throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`);
851
+ async isAvailable(request) {
852
+ if (!this.mode) {
853
+ logger.debug(`On-device inference unavailable because mode is undefined.`);
854
+ return false;
862
855
  }
863
- else if (!ai.app?.options?.projectId) {
864
- throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`);
856
+ if (this.mode === InferenceMode.ONLY_IN_CLOUD) {
857
+ logger.debug(`On-device inference unavailable because mode is "only_in_cloud".`);
858
+ return false;
865
859
  }
866
- else if (!ai.app?.options?.appId) {
867
- throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`);
860
+ // Triggers out-of-band download so model will eventually become available.
861
+ const availability = await this.downloadIfAvailable();
862
+ if (this.mode === InferenceMode.ONLY_ON_DEVICE) {
863
+ // If it will never be available due to API inavailability, throw.
864
+ if (availability === Availability.UNAVAILABLE) {
865
+ throw new AIError(AIErrorCode.API_NOT_ENABLED, 'Local LanguageModel API not available in this environment.');
866
+ }
867
+ else if (availability === Availability.DOWNLOADABLE ||
868
+ availability === Availability.DOWNLOADING) {
869
+ // TODO(chholland): Better user experience during download - progress?
870
+ logger.debug(`Waiting for download of LanguageModel to complete.`);
871
+ await this.downloadPromise;
872
+ return true;
873
+ }
874
+ return true;
868
875
  }
869
- else {
870
- this._apiSettings = {
871
- apiKey: ai.app.options.apiKey,
872
- project: ai.app.options.projectId,
873
- appId: ai.app.options.appId,
874
- automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,
875
- location: ai.location,
876
- backend: ai.backend
877
- };
878
- if (app._isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {
879
- const token = ai.app.settings.appCheckToken;
880
- this._apiSettings.getAppCheckToken = () => {
881
- return Promise.resolve({ token });
882
- };
876
+ // Applies prefer_on_device logic.
877
+ if (availability !== Availability.AVAILABLE) {
878
+ logger.debug(`On-device inference unavailable because availability is "${availability}".`);
879
+ return false;
880
+ }
881
+ if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {
882
+ logger.debug(`On-device inference unavailable because request is incompatible.`);
883
+ return false;
884
+ }
885
+ return true;
886
+ }
887
+ /**
888
+ * Generates content on device.
889
+ *
890
+ * @remarks
891
+ * This is comparable to {@link GenerativeModel.generateContent} for generating content in
892
+ * Cloud.
893
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
894
+ * @returns {@link Response}, so we can reuse common response formatting.
895
+ */
896
+ async generateContent(request) {
897
+ const session = await this.createSession();
898
+ const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
899
+ const text = await session.prompt(contents, this.onDeviceParams.promptOptions);
900
+ return ChromeAdapterImpl.toResponse(text);
901
+ }
902
+ /**
903
+ * Generates content stream on device.
904
+ *
905
+ * @remarks
906
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
907
+ * Cloud.
908
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
909
+ * @returns {@link Response}, so we can reuse common response formatting.
910
+ */
911
+ async generateContentStream(request) {
912
+ const session = await this.createSession();
913
+ const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
914
+ const stream = session.promptStreaming(contents, this.onDeviceParams.promptOptions);
915
+ return ChromeAdapterImpl.toStreamResponse(stream);
916
+ }
917
+ async countTokens(_request) {
918
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'Count Tokens is not yet available for on-device model.');
919
+ }
920
+ /**
921
+ * Asserts inference for the given request can be performed by an on-device model.
922
+ */
923
+ static isOnDeviceRequest(request) {
924
+ // Returns false if the prompt is empty.
925
+ if (request.contents.length === 0) {
926
+ logger.debug('Empty prompt rejected for on-device inference.');
927
+ return false;
928
+ }
929
+ for (const content of request.contents) {
930
+ if (content.role === 'function') {
931
+ logger.debug(`"Function" role rejected for on-device inference.`);
932
+ return false;
883
933
  }
884
- else if (ai.appCheck) {
885
- if (ai.options?.useLimitedUseAppCheckTokens) {
886
- this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken();
887
- }
888
- else {
889
- this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken();
934
+ // Returns false if request contains an image with an unsupported mime type.
935
+ for (const part of content.parts) {
936
+ if (part.inlineData &&
937
+ ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(part.inlineData.mimeType) === -1) {
938
+ logger.debug(`Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`);
939
+ return false;
890
940
  }
891
941
  }
892
- if (ai.auth) {
893
- this._apiSettings.getAuthToken = () => ai.auth.getToken();
894
- }
895
- this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType);
896
942
  }
943
+ return true;
897
944
  }
898
945
  /**
899
- * Normalizes the given model name to a fully qualified model resource name.
946
+ * Encapsulates logic to get availability and download a model if one is downloadable.
947
+ */
948
+ async downloadIfAvailable() {
949
+ const availability = await this.languageModelProvider?.availability(this.onDeviceParams.createOptions);
950
+ if (availability === Availability.DOWNLOADABLE) {
951
+ this.download();
952
+ }
953
+ return availability;
954
+ }
955
+ /**
956
+ * Triggers out-of-band download of an on-device model.
900
957
  *
901
- * @param modelName - The model name to normalize.
902
- * @returns The fully qualified model resource name.
958
+ * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
959
+ * LanguageModel.create.
903
960
  *
904
- * @internal
961
+ * Since Chrome manages the download, the SDK can only avoid redundant download requests by
962
+ * tracking if a download has previously been requested.
905
963
  */
906
- static normalizeModelName(modelName, backendType) {
907
- if (backendType === BackendType.GOOGLE_AI) {
908
- return AIModel.normalizeGoogleAIModelName(modelName);
964
+ download() {
965
+ if (this.isDownloading) {
966
+ return;
909
967
  }
910
- else {
911
- return AIModel.normalizeVertexAIModelName(modelName);
968
+ this.isDownloading = true;
969
+ this.downloadPromise = this.languageModelProvider
970
+ ?.create(this.onDeviceParams.createOptions)
971
+ .finally(() => {
972
+ this.isDownloading = false;
973
+ });
974
+ }
975
+ /**
976
+ * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
977
+ */
978
+ static async toLanguageModelMessage(content) {
979
+ const languageModelMessageContents = await Promise.all(content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent));
980
+ return {
981
+ role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),
982
+ content: languageModelMessageContents
983
+ };
984
+ }
985
+ /**
986
+ * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
987
+ */
988
+ static async toLanguageModelMessageContent(part) {
989
+ if (part.text) {
990
+ return {
991
+ type: 'text',
992
+ value: part.text
993
+ };
994
+ }
995
+ else if (part.inlineData) {
996
+ const formattedImageContent = await fetch(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`);
997
+ const imageBlob = await formattedImageContent.blob();
998
+ const imageBitmap = await createImageBitmap(imageBlob);
999
+ return {
1000
+ type: 'image',
1001
+ value: imageBitmap
1002
+ };
912
1003
  }
1004
+ throw new AIError(AIErrorCode.REQUEST_ERROR, `Processing of this Part type is not currently supported.`);
913
1005
  }
914
1006
  /**
915
- * @internal
1007
+ * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
916
1008
  */
917
- static normalizeGoogleAIModelName(modelName) {
918
- return `models/${modelName}`;
1009
+ static toLanguageModelMessageRole(role) {
1010
+ // Assumes 'function' rule has been filtered by isOnDeviceRequest
1011
+ return role === 'model' ? 'assistant' : 'user';
919
1012
  }
920
1013
  /**
921
- * @internal
1014
+ * Abstracts Chrome session creation.
1015
+ *
1016
+ * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
1017
+ * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
1018
+ * inference.
1019
+ *
1020
+ * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
1021
+ * new session is created before an old session is destroyed.
922
1022
  */
923
- static normalizeVertexAIModelName(modelName) {
924
- let model;
925
- if (modelName.includes('/')) {
926
- if (modelName.startsWith('models/')) {
927
- // Add 'publishers/google' if the user is only passing in 'models/model-name'.
928
- model = `publishers/google/${modelName}`;
929
- }
930
- else {
931
- // Any other custom format (e.g. tuned models) must be passed in correctly.
932
- model = modelName;
933
- }
1023
+ async createSession() {
1024
+ if (!this.languageModelProvider) {
1025
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Chrome AI requested for unsupported browser version.');
934
1026
  }
935
- else {
936
- // If path is not included, assume it's a non-tuned model.
937
- model = `publishers/google/models/${modelName}`;
1027
+ const newSession = await this.languageModelProvider.create(this.onDeviceParams.createOptions);
1028
+ if (this.oldSession) {
1029
+ this.oldSession.destroy();
938
1030
  }
939
- return model;
1031
+ // Holds session reference, so model isn't unloaded from memory.
1032
+ this.oldSession = newSession;
1033
+ return newSession;
1034
+ }
1035
+ /**
1036
+ * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
1037
+ */
1038
+ static toResponse(text) {
1039
+ return {
1040
+ json: async () => ({
1041
+ candidates: [
1042
+ {
1043
+ content: {
1044
+ parts: [{ text }]
1045
+ }
1046
+ }
1047
+ ]
1048
+ })
1049
+ };
1050
+ }
1051
+ /**
1052
+ * Formats string stream returned by Chrome as SSE returned by Firebase AI.
1053
+ */
1054
+ static toStreamResponse(stream) {
1055
+ const encoder = new TextEncoder();
1056
+ return {
1057
+ body: stream.pipeThrough(new TransformStream({
1058
+ transform(chunk, controller) {
1059
+ const json = JSON.stringify({
1060
+ candidates: [
1061
+ {
1062
+ content: {
1063
+ role: 'model',
1064
+ parts: [{ text: chunk }]
1065
+ }
1066
+ }
1067
+ ]
1068
+ });
1069
+ controller.enqueue(encoder.encode(`data: ${json}\n\n`));
1070
+ }
1071
+ }))
1072
+ };
940
1073
  }
941
1074
  }
942
-
1075
+ // Visible for testing
1076
+ ChromeAdapterImpl.SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];
943
1077
  /**
944
- * @license
945
- * Copyright 2024 Google LLC
946
- *
947
- * Licensed under the Apache License, Version 2.0 (the "License");
948
- * you may not use this file except in compliance with the License.
949
- * You may obtain a copy of the License at
950
- *
951
- * http://www.apache.org/licenses/LICENSE-2.0
952
- *
953
- * Unless required by applicable law or agreed to in writing, software
954
- * distributed under the License is distributed on an "AS IS" BASIS,
955
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
956
- * See the License for the specific language governing permissions and
957
- * limitations under the License.
1078
+ * Creates a ChromeAdapterImpl on demand.
958
1079
  */
959
- const logger = new logger$1.Logger('@firebase/vertexai');
1080
+ function chromeAdapterFactory(mode, window, params) {
1081
+ // Do not initialize a ChromeAdapter if we are not in hybrid mode.
1082
+ if (typeof window !== 'undefined' && mode) {
1083
+ return new ChromeAdapterImpl(window.LanguageModel, mode, params);
1084
+ }
1085
+ }
960
1086
 
961
1087
  /**
962
1088
  * @license
@@ -974,8 +1100,205 @@ const logger = new logger$1.Logger('@firebase/vertexai');
974
1100
  * See the License for the specific language governing permissions and
975
1101
  * limitations under the License.
976
1102
  */
977
- var Task;
978
- (function (Task) {
1103
+ class AIService {
1104
+ constructor(app, backend, authProvider, appCheckProvider, chromeAdapterFactory) {
1105
+ this.app = app;
1106
+ this.backend = backend;
1107
+ this.chromeAdapterFactory = chromeAdapterFactory;
1108
+ const appCheck = appCheckProvider?.getImmediate({ optional: true });
1109
+ const auth = authProvider?.getImmediate({ optional: true });
1110
+ this.auth = auth || null;
1111
+ this.appCheck = appCheck || null;
1112
+ if (backend instanceof VertexAIBackend) {
1113
+ this.location = backend.location;
1114
+ }
1115
+ else {
1116
+ this.location = '';
1117
+ }
1118
+ }
1119
+ _delete() {
1120
+ return Promise.resolve();
1121
+ }
1122
+ set options(optionsToSet) {
1123
+ this._options = optionsToSet;
1124
+ }
1125
+ get options() {
1126
+ return this._options;
1127
+ }
1128
+ }
1129
+
1130
+ /**
1131
+ * @license
1132
+ * Copyright 2025 Google LLC
1133
+ *
1134
+ * Licensed under the Apache License, Version 2.0 (the "License");
1135
+ * you may not use this file except in compliance with the License.
1136
+ * You may obtain a copy of the License at
1137
+ *
1138
+ * http://www.apache.org/licenses/LICENSE-2.0
1139
+ *
1140
+ * Unless required by applicable law or agreed to in writing, software
1141
+ * distributed under the License is distributed on an "AS IS" BASIS,
1142
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1143
+ * See the License for the specific language governing permissions and
1144
+ * limitations under the License.
1145
+ */
1146
+ function factory(container, { instanceIdentifier }) {
1147
+ if (!instanceIdentifier) {
1148
+ throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.');
1149
+ }
1150
+ const backend = decodeInstanceIdentifier(instanceIdentifier);
1151
+ // getImmediate for FirebaseApp will always succeed
1152
+ const app = container.getProvider('app').getImmediate();
1153
+ const auth = container.getProvider('auth-internal');
1154
+ const appCheckProvider = container.getProvider('app-check-internal');
1155
+ return new AIService(app, backend, auth, appCheckProvider, chromeAdapterFactory);
1156
+ }
1157
+
1158
+ /**
1159
+ * @license
1160
+ * Copyright 2025 Google LLC
1161
+ *
1162
+ * Licensed under the Apache License, Version 2.0 (the "License");
1163
+ * you may not use this file except in compliance with the License.
1164
+ * You may obtain a copy of the License at
1165
+ *
1166
+ * http://www.apache.org/licenses/LICENSE-2.0
1167
+ *
1168
+ * Unless required by applicable law or agreed to in writing, software
1169
+ * distributed under the License is distributed on an "AS IS" BASIS,
1170
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1171
+ * See the License for the specific language governing permissions and
1172
+ * limitations under the License.
1173
+ */
1174
+ /**
1175
+ * Base class for Firebase AI model APIs.
1176
+ *
1177
+ * Instances of this class are associated with a specific Firebase AI {@link Backend}
1178
+ * and provide methods for interacting with the configured generative model.
1179
+ *
1180
+ * @public
1181
+ */
1182
+ class AIModel {
1183
+ /**
1184
+ * Constructs a new instance of the {@link AIModel} class.
1185
+ *
1186
+ * This constructor should only be called from subclasses that provide
1187
+ * a model API.
1188
+ *
1189
+ * @param ai - an {@link AI} instance.
1190
+ * @param modelName - The name of the model being used. It can be in one of the following formats:
1191
+ * - `my-model` (short name, will resolve to `publishers/google/models/my-model`)
1192
+ * - `models/my-model` (will resolve to `publishers/google/models/my-model`)
1193
+ * - `publishers/my-publisher/models/my-model` (fully qualified model name)
1194
+ *
1195
+ * @throws If the `apiKey` or `projectId` fields are missing in your
1196
+ * Firebase config.
1197
+ *
1198
+ * @internal
1199
+ */
1200
+ constructor(ai, modelName) {
1201
+ if (!ai.app?.options?.apiKey) {
1202
+ throw new AIError(AIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.`);
1203
+ }
1204
+ else if (!ai.app?.options?.projectId) {
1205
+ throw new AIError(AIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.`);
1206
+ }
1207
+ else if (!ai.app?.options?.appId) {
1208
+ throw new AIError(AIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid app ID.`);
1209
+ }
1210
+ else {
1211
+ this._apiSettings = {
1212
+ apiKey: ai.app.options.apiKey,
1213
+ project: ai.app.options.projectId,
1214
+ appId: ai.app.options.appId,
1215
+ automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled,
1216
+ location: ai.location,
1217
+ backend: ai.backend
1218
+ };
1219
+ if (app._isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) {
1220
+ const token = ai.app.settings.appCheckToken;
1221
+ this._apiSettings.getAppCheckToken = () => {
1222
+ return Promise.resolve({ token });
1223
+ };
1224
+ }
1225
+ else if (ai.appCheck) {
1226
+ if (ai.options?.useLimitedUseAppCheckTokens) {
1227
+ this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken();
1228
+ }
1229
+ else {
1230
+ this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken();
1231
+ }
1232
+ }
1233
+ if (ai.auth) {
1234
+ this._apiSettings.getAuthToken = () => ai.auth.getToken();
1235
+ }
1236
+ this.model = AIModel.normalizeModelName(modelName, this._apiSettings.backend.backendType);
1237
+ }
1238
+ }
1239
+ /**
1240
+ * Normalizes the given model name to a fully qualified model resource name.
1241
+ *
1242
+ * @param modelName - The model name to normalize.
1243
+ * @returns The fully qualified model resource name.
1244
+ *
1245
+ * @internal
1246
+ */
1247
+ static normalizeModelName(modelName, backendType) {
1248
+ if (backendType === BackendType.GOOGLE_AI) {
1249
+ return AIModel.normalizeGoogleAIModelName(modelName);
1250
+ }
1251
+ else {
1252
+ return AIModel.normalizeVertexAIModelName(modelName);
1253
+ }
1254
+ }
1255
+ /**
1256
+ * @internal
1257
+ */
1258
+ static normalizeGoogleAIModelName(modelName) {
1259
+ return `models/${modelName}`;
1260
+ }
1261
+ /**
1262
+ * @internal
1263
+ */
1264
+ static normalizeVertexAIModelName(modelName) {
1265
+ let model;
1266
+ if (modelName.includes('/')) {
1267
+ if (modelName.startsWith('models/')) {
1268
+ // Add 'publishers/google' if the user is only passing in 'models/model-name'.
1269
+ model = `publishers/google/${modelName}`;
1270
+ }
1271
+ else {
1272
+ // Any other custom format (e.g. tuned models) must be passed in correctly.
1273
+ model = modelName;
1274
+ }
1275
+ }
1276
+ else {
1277
+ // If path is not included, assume it's a non-tuned model.
1278
+ model = `publishers/google/models/${modelName}`;
1279
+ }
1280
+ return model;
1281
+ }
1282
+ }
1283
+
1284
+ /**
1285
+ * @license
1286
+ * Copyright 2024 Google LLC
1287
+ *
1288
+ * Licensed under the Apache License, Version 2.0 (the "License");
1289
+ * you may not use this file except in compliance with the License.
1290
+ * You may obtain a copy of the License at
1291
+ *
1292
+ * http://www.apache.org/licenses/LICENSE-2.0
1293
+ *
1294
+ * Unless required by applicable law or agreed to in writing, software
1295
+ * distributed under the License is distributed on an "AS IS" BASIS,
1296
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1297
+ * See the License for the specific language governing permissions and
1298
+ * limitations under the License.
1299
+ */
1300
+ var Task;
1301
+ (function (Task) {
979
1302
  Task["GENERATE_CONTENT"] = "generateContent";
980
1303
  Task["STREAM_GENERATE_CONTENT"] = "streamGenerateContent";
981
1304
  Task["COUNT_TOKENS"] = "countTokens";
@@ -3610,317 +3933,11 @@ function getLiveGenerativeModel(ai, modelParams) {
3610
3933
  return new LiveGenerativeModel(ai, modelParams, webSocketHandler);
3611
3934
  }
3612
3935
 
3613
/**
 * Availability states reported by Chrome's on-device LanguageModel API.
 *
 * @internal
 */
var Availability;
(function (target) {
    target["UNAVAILABLE"] = "unavailable";
    target["DOWNLOADABLE"] = "downloadable";
    target["DOWNLOADING"] = "downloading";
    target["AVAILABLE"] = "available";
})(Availability || (Availability = {}));
3623
-
3624
- /**
3625
- * @license
3626
- * Copyright 2025 Google LLC
3627
- *
3628
- * Licensed under the Apache License, Version 2.0 (the "License");
3629
- * you may not use this file except in compliance with the License.
3630
- * You may obtain a copy of the License at
3631
- *
3632
- * http://www.apache.org/licenses/LICENSE-2.0
3633
- *
3634
- * Unless required by applicable law or agreed to in writing, software
3635
- * distributed under the License is distributed on an "AS IS" BASIS,
3636
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3637
- * See the License for the specific language governing permissions and
3638
- * limitations under the License.
3639
- */
3640
/**
 * Defines an inference "backend" that uses Chrome's on-device model,
 * and encapsulates logic for detecting when on-device inference is
 * possible.
 */
class ChromeAdapterImpl {
    /**
     * @param languageModelProvider - the browser's `LanguageModel` global;
     * undefined on browsers without the on-device API (see createSession).
     * @param mode - the hybrid InferenceMode; undefined disables on-device inference.
     * @param onDeviceParams - options forwarded to the provider's create/prompt calls.
     */
    constructor(languageModelProvider, mode, onDeviceParams = {
        createOptions: {
            // Defaults to support image inputs for convenience.
            expectedInputs: [{ type: 'image' }]
        }
    }) {
        this.languageModelProvider = languageModelProvider;
        this.mode = mode;
        this.onDeviceParams = onDeviceParams;
        // Tracks whether a model download is in flight, to avoid redundant
        // download requests (see download()).
        this.isDownloading = false;
    }
    /**
     * Checks if a given request can be made on-device.
     *
     * Encapsulates a few concerns:
     * - the mode
     * - API existence
     * - prompt formatting
     * - model availability, including triggering download if necessary
     *
     * Pros: callers needn't be concerned with details of on-device availability.
     * Cons: this method spans a few concerns and splits request validation from usage.
     * If instance variables weren't already part of the API, we could consider a better
     * separation of concerns.
     */
    async isAvailable(request) {
        if (!this.mode) {
            logger.debug(`On-device inference unavailable because mode is undefined.`);
            return false;
        }
        if (this.mode === InferenceMode.ONLY_IN_CLOUD) {
            logger.debug(`On-device inference unavailable because mode is "only_in_cloud".`);
            return false;
        }
        // Triggers out-of-band download so model will eventually become available.
        const availability = await this.downloadIfAvailable();
        if (this.mode === InferenceMode.ONLY_ON_DEVICE) {
            // If it will never be available due to API unavailability, throw.
            if (availability === Availability.UNAVAILABLE) {
                throw new AIError(AIErrorCode.API_NOT_ENABLED, 'Local LanguageModel API not available in this environment.');
            }
            else if (availability === Availability.DOWNLOADABLE ||
                availability === Availability.DOWNLOADING) {
                // TODO(chholland): Better user experience during download - progress?
                logger.debug(`Waiting for download of LanguageModel to complete.`);
                await this.downloadPromise;
                return true;
            }
            // NOTE(review): any other availability value (including undefined when
            // the provider is missing) falls through to true in only_on_device mode.
            return true;
        }
        // Applies prefer_on_device logic.
        if (availability !== Availability.AVAILABLE) {
            logger.debug(`On-device inference unavailable because availability is "${availability}".`);
            return false;
        }
        if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {
            logger.debug(`On-device inference unavailable because request is incompatible.`);
            return false;
        }
        return true;
    }
    /**
     * Generates content on device.
     *
     * @remarks
     * This is comparable to {@link GenerativeModel.generateContent} for generating content in
     * Cloud.
     * @param request - a standard Firebase AI {@link GenerateContentRequest}
     * @returns {@link Response}, so we can reuse common response formatting.
     */
    async generateContent(request) {
        const session = await this.createSession();
        // Convert each Firebase AI Content entry to a Chrome LanguageModelMessage.
        const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
        const text = await session.prompt(contents, this.onDeviceParams.promptOptions);
        return ChromeAdapterImpl.toResponse(text);
    }
    /**
     * Generates content stream on device.
     *
     * @remarks
     * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
     * Cloud.
     * @param request - a standard Firebase AI {@link GenerateContentRequest}
     * @returns {@link Response}, so we can reuse common response formatting.
     */
    async generateContentStream(request) {
        const session = await this.createSession();
        const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
        const stream = session.promptStreaming(contents, this.onDeviceParams.promptOptions);
        return ChromeAdapterImpl.toStreamResponse(stream);
    }
    /**
     * Not supported on-device; always throws a REQUEST_ERROR AIError.
     */
    async countTokens(_request) {
        throw new AIError(AIErrorCode.REQUEST_ERROR, 'Count Tokens is not yet available for on-device model.');
    }
    /**
     * Asserts inference for the given request can be performed by an on-device model.
     * Returns false for empty prompts, "function" roles, and unsupported image mime types.
     */
    static isOnDeviceRequest(request) {
        // Returns false if the prompt is empty.
        if (request.contents.length === 0) {
            logger.debug('Empty prompt rejected for on-device inference.');
            return false;
        }
        for (const content of request.contents) {
            if (content.role === 'function') {
                logger.debug(`"Function" role rejected for on-device inference.`);
                return false;
            }
            // Returns false if request contains an image with an unsupported mime type.
            for (const part of content.parts) {
                if (part.inlineData &&
                    ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(part.inlineData.mimeType) === -1) {
                    logger.debug(`Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`);
                    return false;
                }
            }
        }
        return true;
    }
    /**
     * Encapsulates logic to get availability and download a model if one is downloadable.
     *
     * NOTE(review): resolves to undefined when languageModelProvider is absent
     * (optional chaining), which callers treat as "not available".
     */
    async downloadIfAvailable() {
        const availability = await this.languageModelProvider?.availability(this.onDeviceParams.createOptions);
        if (availability === Availability.DOWNLOADABLE) {
            this.download();
        }
        return availability;
    }
    /**
     * Triggers out-of-band download of an on-device model.
     *
     * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
     * LanguageModel.create.
     *
     * Since Chrome manages the download, the SDK can only avoid redundant download requests by
     * tracking if a download has previously been requested.
     */
    download() {
        if (this.isDownloading) {
            return;
        }
        this.isDownloading = true;
        // downloadPromise is awaited by isAvailable() in only_on_device mode.
        this.downloadPromise = this.languageModelProvider
            ?.create(this.onDeviceParams.createOptions)
            .finally(() => {
            this.isDownloading = false;
        });
    }
    /**
     * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
     */
    static async toLanguageModelMessage(content) {
        const languageModelMessageContents = await Promise.all(content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent));
        return {
            role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),
            content: languageModelMessageContents
        };
    }
    /**
     * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
     * Supports text and inlineData (base64 image) parts; any other Part type throws.
     */
    static async toLanguageModelMessageContent(part) {
        if (part.text) {
            return {
                type: 'text',
                value: part.text
            };
        }
        else if (part.inlineData) {
            // Decode the base64 payload via a data URL, then rasterize it to an
            // ImageBitmap, which is what the on-device API accepts.
            const formattedImageContent = await fetch(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`);
            const imageBlob = await formattedImageContent.blob();
            const imageBitmap = await createImageBitmap(imageBlob);
            return {
                type: 'image',
                value: imageBitmap
            };
        }
        throw new AIError(AIErrorCode.REQUEST_ERROR, `Processing of this Part type is not currently supported.`);
    }
    /**
     * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
     */
    static toLanguageModelMessageRole(role) {
        // Assumes 'function' role has been filtered by isOnDeviceRequest.
        return role === 'model' ? 'assistant' : 'user';
    }
    /**
     * Abstracts Chrome session creation.
     *
     * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
     * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
     * inference.
     *
     * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
     * new session is created before an old session is destroyed.
     */
    async createSession() {
        if (!this.languageModelProvider) {
            throw new AIError(AIErrorCode.UNSUPPORTED, 'Chrome AI requested for unsupported browser version.');
        }
        const newSession = await this.languageModelProvider.create(this.onDeviceParams.createOptions);
        if (this.oldSession) {
            this.oldSession.destroy();
        }
        // Holds session reference, so model isn't unloaded from memory.
        this.oldSession = newSession;
        return newSession;
    }
    /**
     * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
     */
    static toResponse(text) {
        return {
            json: async () => ({
                candidates: [
                    {
                        content: {
                            parts: [{ text }]
                        }
                    }
                ]
            })
        };
    }
    /**
     * Formats string stream returned by Chrome as SSE returned by Firebase AI.
     */
    static toStreamResponse(stream) {
        const encoder = new TextEncoder();
        return {
            // Each chunk becomes one `data: {...}\n\n` SSE event so the common
            // streaming response parser can consume it.
            body: stream.pipeThrough(new TransformStream({
                transform(chunk, controller) {
                    const json = JSON.stringify({
                        candidates: [
                            {
                                content: {
                                    role: 'model',
                                    parts: [{ text: chunk }]
                                }
                            }
                        ]
                    });
                    controller.enqueue(encoder.encode(`data: ${json}\n\n`));
                }
            }))
        };
    }
}
// Visible for testing
ChromeAdapterImpl.SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];
3898
/**
 * Creates a ChromeAdapterImpl on demand.
 *
 * Returns undefined unless a window object exists and a hybrid inference
 * mode was requested, so cloud-only setups never construct an adapter.
 */
function chromeAdapterFactory(mode, window, params) {
    const inBrowser = typeof window !== 'undefined';
    // Do not initialize a ChromeAdapter if we are not in hybrid mode.
    if (!inBrowser || !mode) {
        return undefined;
    }
    return new ChromeAdapterImpl(window.LanguageModel, mode, params);
}
3907
-
3908
3936
  /**
3909
3937
  * The Firebase AI Web SDK.
3910
3938
  *
3911
3939
  * @packageDocumentation
3912
3940
  */
3913
/**
 * Firebase component factory for the AI service.
 *
 * Decodes the backend from the component instance identifier and wires the
 * app, auth, and App Check providers into a new AIService.
 *
 * @throws AIError with code ERROR when no instance identifier is supplied.
 */
function factory(container, { instanceIdentifier }) {
    if (!instanceIdentifier) {
        throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.');
    }
    const backend = decodeInstanceIdentifier(instanceIdentifier);
    // getImmediate for FirebaseApp will always succeed.
    const firebaseApp = container.getProvider('app').getImmediate();
    const authProvider = container.getProvider('auth-internal');
    const appCheckProvider = container.getProvider('app-check-internal');
    return new AIService(firebaseApp, backend, authProvider, appCheckProvider, chromeAdapterFactory);
}
3941
  function registerAI() {
3925
3942
  app._registerComponent(new component.Component(AI_TYPE, factory, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true));
3926
3943
  app.registerVersion(name, version);