@firebase/ai 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/dist/ai-public.d.ts +178 -8
  2. package/dist/ai.d.ts +181 -8
  3. package/dist/esm/index.esm.js +359 -19
  4. package/dist/esm/index.esm.js.map +1 -1
  5. package/dist/esm/src/api.d.ts +2 -2
  6. package/dist/esm/src/constants.d.ts +4 -0
  7. package/dist/esm/src/methods/chat-session.d.ts +3 -1
  8. package/dist/esm/src/methods/chrome-adapter.d.ts +118 -0
  9. package/dist/esm/src/methods/count-tokens.d.ts +3 -1
  10. package/dist/esm/src/methods/generate-content.d.ts +3 -2
  11. package/dist/esm/src/models/generative-model.d.ts +3 -1
  12. package/dist/esm/src/types/chrome-adapter.d.ts +54 -0
  13. package/dist/esm/src/types/enums.d.ts +20 -1
  14. package/dist/esm/src/types/imagen/requests.d.ts +2 -2
  15. package/dist/esm/src/types/imagen/responses.d.ts +1 -0
  16. package/dist/esm/src/types/index.d.ts +2 -0
  17. package/dist/esm/src/types/language-model.d.ts +117 -0
  18. package/dist/esm/src/types/requests.d.ts +31 -1
  19. package/dist/esm/src/types/responses.d.ts +1 -1
  20. package/dist/esm/src/types/schema.d.ts +1 -1
  21. package/dist/index.cjs.js +359 -18
  22. package/dist/index.cjs.js.map +1 -1
  23. package/dist/index.node.cjs.js +359 -18
  24. package/dist/index.node.cjs.js.map +1 -1
  25. package/dist/index.node.mjs +359 -19
  26. package/dist/index.node.mjs.map +1 -1
  27. package/dist/src/api.d.ts +2 -2
  28. package/dist/src/constants.d.ts +4 -0
  29. package/dist/src/methods/chat-session.d.ts +3 -1
  30. package/dist/src/methods/chrome-adapter.d.ts +118 -0
  31. package/dist/src/methods/count-tokens.d.ts +3 -1
  32. package/dist/src/methods/generate-content.d.ts +3 -2
  33. package/dist/src/models/generative-model.d.ts +3 -1
  34. package/dist/src/types/chrome-adapter.d.ts +54 -0
  35. package/dist/src/types/enums.d.ts +20 -1
  36. package/dist/src/types/imagen/requests.d.ts +2 -2
  37. package/dist/src/types/imagen/responses.d.ts +1 -0
  38. package/dist/src/types/index.d.ts +2 -0
  39. package/dist/src/types/language-model.d.ts +117 -0
  40. package/dist/src/types/requests.d.ts +31 -1
  41. package/dist/src/types/responses.d.ts +1 -1
  42. package/dist/src/types/schema.d.ts +1 -1
  43. package/package.json +2 -2
package/dist/src/api.d.ts CHANGED
@@ -18,7 +18,7 @@ import { FirebaseApp } from '@firebase/app';
  import { AI_TYPE } from './constants';
  import { AIService } from './service';
  import { AI, AIOptions } from './public-types';
- import { ImagenModelParams, ModelParams, RequestOptions } from './types';
+ import { ImagenModelParams, HybridParams, ModelParams, RequestOptions } from './types';
  import { AIError } from './errors';
  import { AIModel, GenerativeModel, ImagenModel } from './models';
  export { ChatSession } from './methods/chat-session';
@@ -66,7 +66,7 @@ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
   *
   * @public
   */
- export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
  /**
   * Returns an {@link ImagenModel} class with methods for using Imagen.
   *
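The widened getGenerativeModel() signature above is the entry point for the new hybrid-inference support. A minimal usage sketch, assuming InferenceMode and the HybridParams shape introduced later in this diff are re-exported from the package entry point:

import { initializeApp } from '@firebase/app';
import { getAI, getGenerativeModel, InferenceMode } from '@firebase/ai';

async function run(): Promise<void> {
  const app = initializeApp({ /* your Firebase config */ });
  const ai = getAI(app);

  // Passing HybridParams instead of ModelParams opts into hybrid inference:
  // prefer Chrome's on-device model and fall back to in-cloud inference
  // (presumably DEFAULT_HYBRID_IN_CLOUD_MODEL when inCloudParams is omitted).
  const model = getGenerativeModel(ai, {
    mode: InferenceMode.PREFER_ON_DEVICE,
    inCloudParams: { model: 'gemini-2.0-flash-lite' },
  });

  const result = await model.generateContent('Say hello in five words.');
  console.log(result.response.text());
}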
package/dist/src/constants.d.ts CHANGED
@@ -21,3 +21,7 @@ export declare const DEFAULT_API_VERSION = "v1beta";
  export declare const PACKAGE_VERSION: string;
  export declare const LANGUAGE_TAG = "gl-js";
  export declare const DEFAULT_FETCH_TIMEOUT_MS: number;
+ /**
+  * Defines the name of the default in-cloud model to use for hybrid inference.
+  */
+ export declare const DEFAULT_HYBRID_IN_CLOUD_MODEL = "gemini-2.0-flash-lite";
package/dist/src/methods/chat-session.d.ts CHANGED
@@ -16,6 +16,7 @@
   */
  import { Content, GenerateContentResult, GenerateContentStreamResult, Part, RequestOptions, StartChatParams } from '../types';
  import { ApiSettings } from '../types/internal';
+ import { ChromeAdapter } from '../types/chrome-adapter';
  /**
   * ChatSession class that enables sending chat messages and stores
   * history of sent and received messages so far.
@@ -24,12 +25,13 @@ import { ApiSettings } from '../types/internal';
   */
  export declare class ChatSession {
      model: string;
+     private chromeAdapter?;
      params?: StartChatParams | undefined;
      requestOptions?: RequestOptions | undefined;
      private _apiSettings;
      private _history;
      private _sendPromise;
-     constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+     constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
      /**
       * Gets the chat history so far. Blocked prompts are not added to history.
       * Neither blocked candidates nor the prompts that generated them are added
package/dist/src/methods/chrome-adapter.d.ts CHANGED
@@ -0,0 +1,118 @@
+ /**
+  * @license
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *   http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ import { CountTokensRequest, GenerateContentRequest, InferenceMode, OnDeviceParams } from '../types';
+ import { ChromeAdapter } from '../types/chrome-adapter';
+ import { LanguageModel } from '../types/language-model';
+ /**
+  * Defines an inference "backend" that uses Chrome's on-device model,
+  * and encapsulates logic for detecting when on-device inference is
+  * possible.
+  */
+ export declare class ChromeAdapterImpl implements ChromeAdapter {
+     private languageModelProvider;
+     private mode;
+     private onDeviceParams;
+     static SUPPORTED_MIME_TYPES: string[];
+     private isDownloading;
+     private downloadPromise;
+     private oldSession;
+     constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
+     /**
+      * Checks if a given request can be made on-device.
+      *
+      * <ol>Encapsulates a few concerns:
+      * <li>the mode</li>
+      * <li>API existence</li>
+      * <li>prompt formatting</li>
+      * <li>model availability, including triggering download if necessary</li>
+      * </ol>
+      *
+      * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
+      * <p>Cons: this method spans a few concerns and splits request validation from usage.
+      * If instance variables weren't already part of the API, we could consider a better
+      * separation of concerns.</p>
+      */
+     isAvailable(request: GenerateContentRequest): Promise<boolean>;
+     /**
+      * Generates content on device.
+      *
+      * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
+      * Cloud.</p>
+      * @param request - a standard Firebase AI {@link GenerateContentRequest}
+      * @returns {@link Response}, so we can reuse common response formatting.
+      */
+     generateContent(request: GenerateContentRequest): Promise<Response>;
+     /**
+      * Generates content stream on device.
+      *
+      * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+      * Cloud.</p>
+      * @param request - a standard Firebase AI {@link GenerateContentRequest}
+      * @returns {@link Response}, so we can reuse common response formatting.
+      */
+     generateContentStream(request: GenerateContentRequest): Promise<Response>;
+     countTokens(_request: CountTokensRequest): Promise<Response>;
+     /**
+      * Asserts inference for the given request can be performed by an on-device model.
+      */
+     private static isOnDeviceRequest;
+     /**
+      * Encapsulates logic to get availability and download a model if one is downloadable.
+      */
+     private downloadIfAvailable;
+     /**
+      * Triggers out-of-band download of an on-device model.
+      *
+      * <p>Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+      * LanguageModel.create.</p>
+      *
+      * <p>Since Chrome manages the download, the SDK can only avoid redundant download requests by
+      * tracking if a download has previously been requested.</p>
+      */
+     private download;
+     /**
+      * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
+      */
+     private static toLanguageModelMessage;
+     /**
+      * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
+      */
+     private static toLanguageModelMessageContent;
+     /**
+      * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
+      */
+     private static toLanguageModelMessageRole;
+     /**
+      * Abstracts Chrome session creation.
+      *
+      * <p>Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
+      * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
+      * inference.</p>
+      *
+      * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+      * new session is created before an old session is destroyed.</p>
+      */
+     private createSession;
+     /**
+      * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
+      */
+     private static toResponse;
+     /**
+      * Formats string stream returned by Chrome as SSE returned by Firebase AI.
+      */
+     private static toStreamResponse;
+ }
package/dist/src/methods/count-tokens.d.ts CHANGED
@@ -16,4 +16,6 @@
   */
  import { CountTokensRequest, CountTokensResponse, RequestOptions } from '../types';
  import { ApiSettings } from '../types/internal';
- export declare function countTokens(apiSettings: ApiSettings, model: string, params: CountTokensRequest, requestOptions?: RequestOptions): Promise<CountTokensResponse>;
+ import { ChromeAdapter } from '../types/chrome-adapter';
+ export declare function countTokensOnCloud(apiSettings: ApiSettings, model: string, params: CountTokensRequest, requestOptions?: RequestOptions): Promise<CountTokensResponse>;
+ export declare function countTokens(apiSettings: ApiSettings, model: string, params: CountTokensRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<CountTokensResponse>;
package/dist/src/methods/generate-content.d.ts CHANGED
@@ -16,5 +16,6 @@
   */
  import { GenerateContentRequest, GenerateContentResult, GenerateContentStreamResult, RequestOptions } from '../types';
  import { ApiSettings } from '../types/internal';
- export declare function generateContentStream(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, requestOptions?: RequestOptions): Promise<GenerateContentStreamResult>;
- export declare function generateContent(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, requestOptions?: RequestOptions): Promise<GenerateContentResult>;
+ import { ChromeAdapter } from '../types/chrome-adapter';
+ export declare function generateContentStream(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<GenerateContentStreamResult>;
+ export declare function generateContent(apiSettings: ApiSettings, model: string, params: GenerateContentRequest, chromeAdapter?: ChromeAdapter, requestOptions?: RequestOptions): Promise<GenerateContentResult>;
package/dist/src/models/generative-model.d.ts CHANGED
@@ -18,18 +18,20 @@ import { Content, CountTokensRequest, CountTokensResponse, GenerateContentReques
  import { ChatSession } from '../methods/chat-session';
  import { AI } from '../public-types';
  import { AIModel } from './ai-model';
+ import { ChromeAdapter } from '../types/chrome-adapter';
  /**
   * Class for generative model APIs.
   * @public
   */
  export declare class GenerativeModel extends AIModel {
+     private chromeAdapter?;
      generationConfig: GenerationConfig;
      safetySettings: SafetySetting[];
      requestOptions?: RequestOptions;
      tools?: Tool[];
      toolConfig?: ToolConfig;
      systemInstruction?: Content;
-     constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+     constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined);
      /**
       * Makes a single non-streaming call to the model
       * and returns an object containing a single {@link GenerateContentResponse}.
package/dist/src/types/chrome-adapter.d.ts CHANGED
@@ -0,0 +1,54 @@
+ /**
+  * @license
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *   http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ import { CountTokensRequest, GenerateContentRequest } from './requests';
+ /**
+  * <b>(EXPERIMENTAL)</b> Defines an inference "backend" that uses Chrome's on-device model,
+  * and encapsulates logic for detecting when on-device inference is
+  * possible.
+  *
+  * These methods should not be called directly by the user.
+  *
+  * @public
+  */
+ export interface ChromeAdapter {
+     /**
+      * Checks if the on-device model is capable of handling a given
+      * request.
+      * @param request - A potential request to be passed to the model.
+      */
+     isAvailable(request: GenerateContentRequest): Promise<boolean>;
+     /**
+      * Generates content using on-device inference.
+      *
+      * <p>This is comparable to {@link GenerativeModel.generateContent} for generating
+      * content using in-cloud inference.</p>
+      * @param request - a standard Firebase AI {@link GenerateContentRequest}
+      */
+     generateContent(request: GenerateContentRequest): Promise<Response>;
+     /**
+      * Generates a content stream using on-device inference.
+      *
+      * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating
+      * a content stream using in-cloud inference.</p>
+      * @param request - a standard Firebase AI {@link GenerateContentRequest}
+      */
+     generateContentStream(request: GenerateContentRequest): Promise<Response>;
+     /**
+      * @internal
+      */
+     countTokens(request: CountTokensRequest): Promise<Response>;
+ }
package/dist/src/types/enums.d.ts CHANGED
@@ -62,7 +62,7 @@ export declare const HarmBlockThreshold: {
      readonly BLOCK_NONE: "BLOCK_NONE";
      /**
       * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
-      * to the {@link HarmCategory} will not be present in the response.
+      * to the {@link (HarmCategory:type)} will not be present in the response.
       */
      readonly OFF: "OFF";
  };
@@ -249,6 +249,9 @@ export declare const FunctionCallingMode: {
       */
      readonly NONE: "NONE";
  };
+ /**
+  * @public
+  */
  export type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];
  /**
   * Content part modality.
@@ -308,3 +311,19 @@ export declare const ResponseModality: {
   * @beta
   */
  export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality];
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Determines whether inference happens on-device or in-cloud.
+  * @public
+  */
+ export declare const InferenceMode: {
+     readonly PREFER_ON_DEVICE: "prefer_on_device";
+     readonly ONLY_ON_DEVICE: "only_on_device";
+     readonly ONLY_IN_CLOUD: "only_in_cloud";
+ };
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Determines whether inference happens on-device or in-cloud.
+  * @public
+  */
+ export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
package/dist/src/types/imagen/requests.d.ts CHANGED
@@ -204,7 +204,7 @@ export interface ImagenSafetySettings {
   * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
   * {@link ImagenGenerationConfig}.
   *
-  * See the the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+  * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
   * for more details and examples of the supported aspect ratios.
   *
   * @beta
@@ -237,7 +237,7 @@ export declare const ImagenAspectRatio: {
   * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
   * {@link ImagenGenerationConfig}.
   *
-  * See the the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
+  * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
   * for more details and examples of the supported aspect ratios.
   *
   * @beta
package/dist/src/types/imagen/responses.d.ts CHANGED
@@ -35,6 +35,7 @@ export interface ImagenInlineImage {
   * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
   *
   * This feature is not available yet.
+  * @beta
   */
  export interface ImagenGCSImage {
      /**
package/dist/src/types/index.d.ts CHANGED
@@ -22,3 +22,5 @@ export * from './error';
  export * from './schema';
  export * from './imagen';
  export * from './googleai';
+ export { LanguageModelCreateOptions, LanguageModelCreateCoreOptions, LanguageModelExpected, LanguageModelMessage, LanguageModelMessageContent, LanguageModelMessageContentValue, LanguageModelMessageRole, LanguageModelMessageType, LanguageModelPromptOptions } from './language-model';
+ export * from './chrome-adapter';
package/dist/src/types/language-model.d.ts CHANGED
@@ -0,0 +1,117 @@
+ /**
+  * @license
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *   http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ /**
+  * The subset of the Prompt API
+  * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl }
+  * required for hybrid functionality.
+  *
+  * @internal
+  */
+ export interface LanguageModel extends EventTarget {
+     create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
+     availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
+     prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
+     promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
+     measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
+     destroy(): undefined;
+ }
+ /**
+  * @internal
+  */
+ export declare enum Availability {
+     'UNAVAILABLE' = "unavailable",
+     'DOWNLOADABLE' = "downloadable",
+     'DOWNLOADING' = "downloading",
+     'AVAILABLE' = "available"
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Configures the creation of an on-device language model session.
+  * @public
+  */
+ export interface LanguageModelCreateCoreOptions {
+     topK?: number;
+     temperature?: number;
+     expectedInputs?: LanguageModelExpected[];
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Configures the creation of an on-device language model session.
+  * @public
+  */
+ export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+     signal?: AbortSignal;
+     initialPrompts?: LanguageModelMessage[];
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Options for an on-device language model prompt.
+  * @public
+  */
+ export interface LanguageModelPromptOptions {
+     responseConstraint?: object;
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Options for the expected inputs for an on-device language model.
+  * @public
+  */ export interface LanguageModelExpected {
+     type: LanguageModelMessageType;
+     languages?: string[];
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * An on-device language model prompt.
+  * @public
+  */
+ export type LanguageModelPrompt = LanguageModelMessage[];
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * An on-device language model message.
+  * @public
+  */
+ export interface LanguageModelMessage {
+     role: LanguageModelMessageRole;
+     content: LanguageModelMessageContent[];
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * An on-device language model content object.
+  * @public
+  */
+ export interface LanguageModelMessageContent {
+     type: LanguageModelMessageType;
+     value: LanguageModelMessageContentValue;
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Allowable roles for on-device language model usage.
+  * @public
+  */
+ export type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Allowable types for on-device language model messages.
+  * @public
+  */
+ export type LanguageModelMessageType = 'text' | 'image' | 'audio';
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Content formats that can be provided as on-device message content.
+  * @public
+  */
+ export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
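These Prompt API mirror types reach the public surface through OnDeviceParams (see the requests.d.ts change below). A short illustrative sketch, assuming LanguageModelMessage is re-exported from the package entry point, of an initialPrompts history in the shape these types describe:

import type { LanguageModelMessage } from '@firebase/ai';

// Seed conversation that could be forwarded to Chrome's on-device session
// via OnDeviceParams.createOptions.initialPrompts.
const initialPrompts: LanguageModelMessage[] = [
  { role: 'system', content: [{ type: 'text', value: 'Answer in one short sentence.' }] },
  { role: 'user', content: [{ type: 'text', value: 'What does hybrid inference mean here?' }] }
];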
package/dist/src/types/requests.d.ts CHANGED
@@ -16,7 +16,8 @@
   */
  import { ObjectSchema, TypedSchema } from '../requests/schema-builder';
  import { Content, Part } from './content';
- import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, ResponseModality } from './enums';
+ import { LanguageModelCreateOptions, LanguageModelPromptOptions } from './language-model';
+ import { FunctionCallingMode, HarmBlockMethod, HarmBlockThreshold, HarmCategory, InferenceMode, ResponseModality } from './enums';
  import { ObjectSchemaRequest, SchemaRequest } from './schema';
  /**
   * Base parameters for a number of methods.
@@ -249,6 +250,35 @@ export interface FunctionCallingConfig {
      mode?: FunctionCallingMode;
      allowedFunctionNames?: string[];
  }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Encapsulates configuration for on-device inference.
+  *
+  * @public
+  */
+ export interface OnDeviceParams {
+     createOptions?: LanguageModelCreateOptions;
+     promptOptions?: LanguageModelPromptOptions;
+ }
+ /**
+  * <b>(EXPERIMENTAL)</b>
+  * Configures hybrid inference.
+  * @public
+  */
+ export interface HybridParams {
+     /**
+      * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+      */
+     mode: InferenceMode;
+     /**
+      * Optional. Specifies advanced params for on-device inference.
+      */
+     onDeviceParams?: OnDeviceParams;
+     /**
+      * Optional. Specifies advanced params for in-cloud inference.
+      */
+     inCloudParams?: ModelParams;
+ }
  /**
   * Configuration for "thinking" behavior of compatible Gemini models.
   *
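Putting OnDeviceParams and HybridParams together, a hedged configuration sketch, again assuming these type names are exported from the package root alongside InferenceMode:

import { getAI, getGenerativeModel, InferenceMode } from '@firebase/ai';
import type { HybridParams } from '@firebase/ai';

const hybridParams: HybridParams = {
  // Route every request to the on-device model; PREFER_ON_DEVICE and
  // ONLY_IN_CLOUD are the other InferenceMode options.
  mode: InferenceMode.ONLY_ON_DEVICE,
  onDeviceParams: {
    // LanguageModelCreateOptions passed when the SDK creates a Chrome session.
    createOptions: { temperature: 0.3, topK: 3 },
    // LanguageModelPromptOptions, e.g. a JSON-schema response constraint.
    promptOptions: { responseConstraint: { type: 'object' } }
  }
};

const model = getGenerativeModel(getAI(), hybridParams);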
package/dist/src/types/responses.d.ts CHANGED
@@ -170,7 +170,7 @@ export interface GroundingMetadata {
      /**
       * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
       * embedded in an app to display a Google Search entry point for follow-up web searches related to
-      * a model's Grounded Response”.
+      * a model's "Grounded Response".
       */
      searchEntryPoint?: SearchEntrypoint;
      /**
package/dist/src/types/schema.d.ts CHANGED
@@ -122,7 +122,7 @@ export interface SchemaInterface extends SchemaShared<SchemaInterface> {
      type?: SchemaType;
  }
  /**
-  * Interface for JSON parameters in a schema of {@link SchemaType}
+  * Interface for JSON parameters in a schema of {@link (SchemaType:type)}
   * "object" when not using the `Schema.object()` helper.
   * @public
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@firebase/ai",
-   "version": "2.0.0",
+   "version": "2.1.0",
    "description": "The Firebase AI SDK",
    "author": "Firebase <firebase-support@google.com> (https://firebase.google.com/)",
    "engines": {
@@ -57,7 +57,7 @@
    },
    "license": "Apache-2.0",
    "devDependencies": {
-     "@firebase/app": "0.14.0",
+     "@firebase/app": "0.14.1",
      "@rollup/plugin-json": "6.1.0",
      "rollup": "2.79.2",
      "rollup-plugin-replace": "2.2.0",