@firebase/ai 2.1.0 → 2.2.0-canary.095c098de

This diff shows the content of publicly released package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (55)
  1. package/dist/ai-public.d.ts +443 -10
  2. package/dist/ai.d.ts +525 -11
  3. package/dist/esm/index.esm.js +1255 -368
  4. package/dist/esm/index.esm.js.map +1 -1
  5. package/dist/esm/src/api.d.ts +18 -3
  6. package/dist/esm/src/constants.d.ts +1 -1
  7. package/dist/esm/src/index.d.ts +2 -1
  8. package/dist/esm/src/methods/chrome-adapter.d.ts +30 -24
  9. package/dist/esm/src/methods/live-session-helpers.d.ts +154 -0
  10. package/dist/esm/src/methods/live-session.d.ts +90 -0
  11. package/dist/esm/src/models/ai-model.d.ts +1 -1
  12. package/dist/esm/src/models/index.d.ts +1 -0
  13. package/dist/esm/src/models/live-generative-model.d.ts +55 -0
  14. package/dist/esm/src/public-types.d.ts +10 -1
  15. package/dist/esm/src/requests/request.d.ts +6 -0
  16. package/dist/esm/src/requests/response-helpers.d.ts +9 -5
  17. package/dist/esm/src/service.d.ts +7 -2
  18. package/dist/esm/src/types/chrome-adapter.d.ts +6 -4
  19. package/dist/esm/src/types/content.d.ts +42 -0
  20. package/dist/esm/src/types/enums.d.ts +5 -0
  21. package/dist/esm/src/types/error.d.ts +2 -0
  22. package/dist/esm/src/types/imagen/internal.d.ts +10 -0
  23. package/dist/esm/src/types/live-responses.d.ts +53 -0
  24. package/dist/esm/src/types/requests.d.ts +109 -1
  25. package/dist/esm/src/types/responses.d.ts +87 -4
  26. package/dist/esm/src/websocket.d.ts +67 -0
  27. package/dist/index.cjs.js +1258 -366
  28. package/dist/index.cjs.js.map +1 -1
  29. package/dist/index.node.cjs.js +907 -311
  30. package/dist/index.node.cjs.js.map +1 -1
  31. package/dist/index.node.mjs +904 -313
  32. package/dist/index.node.mjs.map +1 -1
  33. package/dist/src/api.d.ts +18 -3
  34. package/dist/src/constants.d.ts +1 -1
  35. package/dist/src/index.d.ts +2 -1
  36. package/dist/src/methods/chrome-adapter.d.ts +30 -24
  37. package/dist/src/methods/live-session-helpers.d.ts +154 -0
  38. package/dist/src/methods/live-session.d.ts +90 -0
  39. package/dist/src/models/ai-model.d.ts +1 -1
  40. package/dist/src/models/index.d.ts +1 -0
  41. package/dist/src/models/live-generative-model.d.ts +55 -0
  42. package/dist/src/public-types.d.ts +10 -1
  43. package/dist/src/requests/request.d.ts +6 -0
  44. package/dist/src/requests/response-helpers.d.ts +9 -5
  45. package/dist/src/service.d.ts +7 -2
  46. package/dist/src/types/chrome-adapter.d.ts +6 -4
  47. package/dist/src/types/content.d.ts +42 -0
  48. package/dist/src/types/enums.d.ts +5 -0
  49. package/dist/src/types/error.d.ts +2 -0
  50. package/dist/src/types/imagen/internal.d.ts +10 -0
  51. package/dist/src/types/live-responses.d.ts +53 -0
  52. package/dist/src/types/requests.d.ts +109 -1
  53. package/dist/src/types/responses.d.ts +87 -4
  54. package/dist/src/websocket.d.ts +67 -0
  55. package/package.json +10 -8
package/dist/src/api.d.ts CHANGED
@@ -18,14 +18,16 @@ import { FirebaseApp } from '@firebase/app';
  import { AI_TYPE } from './constants';
  import { AIService } from './service';
  import { AI, AIOptions } from './public-types';
- import { ImagenModelParams, HybridParams, ModelParams, RequestOptions } from './types';
+ import { ImagenModelParams, HybridParams, ModelParams, RequestOptions, LiveModelParams } from './types';
  import { AIError } from './errors';
- import { AIModel, GenerativeModel, ImagenModel } from './models';
+ import { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel } from './models';
  export { ChatSession } from './methods/chat-session';
+ export { LiveSession } from './methods/live-session';
  export * from './requests/schema-builder';
  export { ImagenImageFormat } from './requests/imagen-image-format';
- export { AIModel, GenerativeModel, ImagenModel, AIError };
+ export { AIModel, GenerativeModel, LiveGenerativeModel, ImagenModel, AIError };
  export { Backend, VertexAIBackend, GoogleAIBackend } from './backend';
+ export { startAudioConversation, AudioConversationController, StartAudioConversationOptions } from './methods/live-session-helpers';
  declare module '@firebase/component' {
  interface NameServiceMapping {
  [AI_TYPE]: AIService;
@@ -82,3 +84,16 @@ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | Hy
  * @beta
  */
  export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel;
+ /**
+ * Returns a {@link LiveGenerativeModel} class for real-time, bidirectional communication.
+ *
+ * The Live API is only supported in modern browser windows and Node >= 22.
+ *
+ * @param ai - An {@link AI} instance.
+ * @param modelParams - Parameters to use when setting up a {@link LiveSession}.
+ * @throws If the `apiKey` or `projectId` fields are missing in your
+ * Firebase config.
+ *
+ * @beta
+ */
+ export declare function getLiveGenerativeModel(ai: AI, modelParams: LiveModelParams): LiveGenerativeModel;
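The new `getLiveGenerativeModel` entry point above pairs with `LiveGenerativeModel.connect()` further down in this diff. A minimal usage sketch, assuming the `firebase/ai` umbrella entry point (which re-exports this package) and an illustrative Live-capable model name; check the Firebase AI docs for currently supported models:

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getLiveGenerativeModel } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app);

// The model name below is an illustrative assumption, not taken from this diff.
const liveModel = getLiveGenerativeModel(ai, {
  model: 'gemini-2.0-flash-live-preview-04-09'
});

async function run(): Promise<void> {
  // Opens the bidirectional connection represented by the LiveSession class below.
  const session = await liveModel.connect();
  await session.send('Hello from the Live API!');
  await session.close();
}
```

Per the doc comment above, this path requires a modern browser window or Node >= 22.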
package/dist/src/constants.d.ts CHANGED
@@ -16,7 +16,7 @@
  */
  export declare const AI_TYPE = "AI";
  export declare const DEFAULT_LOCATION = "us-central1";
- export declare const DEFAULT_BASE_URL = "https://firebasevertexai.googleapis.com";
+ export declare const DEFAULT_DOMAIN = "firebasevertexai.googleapis.com";
  export declare const DEFAULT_API_VERSION = "v1beta";
  export declare const PACKAGE_VERSION: string;
  export declare const LANGUAGE_TAG = "gl-js";
package/dist/src/index.d.ts CHANGED
@@ -3,9 +3,10 @@
  *
  * @packageDocumentation
  */
+ import { LanguageModel } from './types/language-model';
  declare global {
  interface Window {
- [key: string]: unknown;
+ LanguageModel: LanguageModel;
  }
  }
  export * from './api';
package/dist/src/methods/chrome-adapter.d.ts CHANGED
@@ -23,9 +23,9 @@ import { LanguageModel } from '../types/language-model';
  * possible.
  */
  export declare class ChromeAdapterImpl implements ChromeAdapter {
- private languageModelProvider;
- private mode;
- private onDeviceParams;
+ languageModelProvider: LanguageModel;
+ mode: InferenceMode;
+ onDeviceParams: OnDeviceParams;
  static SUPPORTED_MIME_TYPES: string[];
  private isDownloading;
  private downloadPromise;
@@ -34,24 +34,25 @@ export declare class ChromeAdapterImpl implements ChromeAdapter {
  /**
  * Checks if a given request can be made on-device.
  *
- * <ol>Encapsulates a few concerns:
- * <li>the mode</li>
- * <li>API existence</li>
- * <li>prompt formatting</li>
- * <li>model availability, including triggering download if necessary</li>
- * </ol>
+ * Encapsulates a few concerns:
+ * the mode
+ * API existence
+ * prompt formatting
+ * model availability, including triggering download if necessary
  *
- * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
- * <p>Cons: this method spans a few concerns and splits request validation from usage.
+ *
+ * Pros: callers needn't be concerned with details of on-device availability.</p>
+ * Cons: this method spans a few concerns and splits request validation from usage.
  * If instance variables weren't already part of the API, we could consider a better
- * separation of concerns.</p>
+ * separation of concerns.
  */
  isAvailable(request: GenerateContentRequest): Promise<boolean>;
  /**
  * Generates content on device.
  *
- * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
- * Cloud.</p>
+ * @remarks
+ * This is comparable to {@link GenerativeModel.generateContent} for generating content in
+ * Cloud.
  * @param request - a standard Firebase AI {@link GenerateContentRequest}
  * @returns {@link Response}, so we can reuse common response formatting.
  */
@@ -59,8 +60,9 @@ export declare class ChromeAdapterImpl implements ChromeAdapter {
  /**
  * Generates content stream on device.
  *
- * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
- * Cloud.</p>
+ * @remarks
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+ * Cloud.
  * @param request - a standard Firebase AI {@link GenerateContentRequest}
  * @returns {@link Response}, so we can reuse common response formatting.
  */
@@ -77,11 +79,11 @@ export declare class ChromeAdapterImpl implements ChromeAdapter {
  /**
  * Triggers out-of-band download of an on-device model.
  *
- * <p>Chrome only downloads models as needed. Chrome knows a model is needed when code calls
- * LanguageModel.create.</p>
+ * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+ * LanguageModel.create.
  *
- * <p>Since Chrome manages the download, the SDK can only avoid redundant download requests by
- * tracking if a download has previously been requested.</p>
+ * Since Chrome manages the download, the SDK can only avoid redundant download requests by
+ * tracking if a download has previously been requested.
  */
  private download;
  /**
@@ -99,12 +101,12 @@ export declare class ChromeAdapterImpl implements ChromeAdapter {
  /**
  * Abstracts Chrome session creation.
  *
- * <p>Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
+ * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
  * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
- * inference.</p>
+ * inference.
  *
- * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
- * new session is created before an old session is destroyed.</p>
+ * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+ * new session is created before an old session is destroyed.
  */
  private createSession;
  /**
@@ -116,3 +118,7 @@ export declare class ChromeAdapterImpl implements ChromeAdapter {
  */
  private static toStreamResponse;
  }
+ /**
+ * Creates a ChromeAdapterImpl on demand.
+ */
+ export declare function chromeAdapterFactory(mode: InferenceMode, window?: Window, params?: OnDeviceParams): ChromeAdapterImpl | undefined;
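The `ChromeAdapterImpl` comments above describe how Chrome's built-in `LanguageModel` API is mapped onto the SDK's single-turn `generateContent` calls. The sketch below shows how an app might opt into that on-device path through `getGenerativeModel` with hybrid parameters; the `InferenceMode.PREFER_ON_DEVICE` constant, the `HybridParams` shape, and the fallback model name are assumptions drawn from the package's public types rather than from this diff:

```ts
import type { FirebaseApp } from 'firebase/app';
import { getAI, getGenerativeModel, InferenceMode } from 'firebase/ai';

declare const app: FirebaseApp; // an already-initialized FirebaseApp

// Prefer the Chrome on-device model when it is available and downloaded;
// otherwise fall back to in-cloud inference.
const hybridModel = getGenerativeModel(getAI(app), {
  mode: InferenceMode.PREFER_ON_DEVICE,
  inCloudParams: { model: 'gemini-2.0-flash' } // illustrative cloud fallback model
});

async function summarize(text: string): Promise<string> {
  const result = await hybridModel.generateContent(`Summarize in one sentence: ${text}`);
  return result.response.text();
}
```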
package/dist/src/methods/live-session-helpers.d.ts ADDED
@@ -0,0 +1,154 @@
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ import { LiveServerToolCall, Part } from '../types';
+ import { LiveSession } from './live-session';
+ /**
+ * A controller for managing an active audio conversation.
+ *
+ * @beta
+ */
+ export interface AudioConversationController {
+ /**
+ * Stops the audio conversation, closes the microphone connection, and
+ * cleans up resources. Returns a promise that resolves when cleanup is complete.
+ */
+ stop: () => Promise<void>;
+ }
+ /**
+ * Options for {@link startAudioConversation}.
+ *
+ * @beta
+ */
+ export interface StartAudioConversationOptions {
+ /**
+ * An async handler that is called when the model requests a function to be executed.
+ * The handler should perform the function call and return the result as a `Part`,
+ * which will then be sent back to the model.
+ */
+ functionCallingHandler?: (functionCalls: LiveServerToolCall['functionCalls']) => Promise<Part>;
+ }
+ /**
+ * Dependencies needed by the {@link AudioConversationRunner}.
+ *
+ * @internal
+ */
+ interface RunnerDependencies {
+ audioContext: AudioContext;
+ mediaStream: MediaStream;
+ sourceNode: MediaStreamAudioSourceNode;
+ workletNode: AudioWorkletNode;
+ }
+ /**
+ * Encapsulates the core logic of an audio conversation.
+ *
+ * @internal
+ */
+ export declare class AudioConversationRunner {
+ private readonly liveSession;
+ private readonly options;
+ private readonly deps;
+ /** A flag to indicate if the conversation has been stopped. */
+ private isStopped;
+ /** A deferred that contains a promise that is resolved when stop() is called, to unblock the receive loop. */
+ private readonly stopDeferred;
+ /** A promise that tracks the lifecycle of the main `runReceiveLoop`. */
+ private readonly receiveLoopPromise;
+ /** A FIFO queue of 24kHz, 16-bit PCM audio chunks received from the server. */
+ private readonly playbackQueue;
+ /** Tracks scheduled audio sources. Used to cancel scheduled audio when the model is interrupted. */
+ private scheduledSources;
+ /** A high-precision timeline pointer for scheduling gapless audio playback. */
+ private nextStartTime;
+ /** A mutex to prevent the playback processing loop from running multiple times concurrently. */
+ private isPlaybackLoopRunning;
+ constructor(liveSession: LiveSession, options: StartAudioConversationOptions, deps: RunnerDependencies);
+ /**
+ * Stops the conversation and unblocks the main receive loop.
+ */
+ stop(): Promise<void>;
+ /**
+ * Cleans up all audio resources (nodes, stream tracks, context) and marks the
+ * session as no longer in a conversation.
+ */
+ private cleanup;
+ /**
+ * Adds audio data to the queue and ensures the playback loop is running.
+ */
+ private enqueueAndPlay;
+ /**
+ * Stops all current and pending audio playback and clears the queue. This is
+ * called when the server indicates the model's speech was interrupted with
+ * `LiveServerContent.modelTurn.interrupted`.
+ */
+ private interruptPlayback;
+ /**
+ * Processes the playback queue in a loop, scheduling each chunk in a gapless sequence.
+ */
+ private processPlaybackQueue;
+ /**
+ * The main loop that listens for and processes messages from the server.
+ */
+ private runReceiveLoop;
+ }
+ /**
+ * Starts a real-time, bidirectional audio conversation with the model. This helper function manages
+ * the complexities of microphone access, audio recording, playback, and interruptions.
+ *
+ * @remarks Important: This function must be called in response to a user gesture
+ * (for example, a button click) to comply with {@link https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Best_practices#autoplay_policy | browser autoplay policies}.
+ *
+ * @example
+ * ```javascript
+ * const liveSession = await model.connect();
+ * let conversationController;
+ *
+ * // This function must be called from within a click handler.
+ * async function startConversation() {
+ * try {
+ * conversationController = await startAudioConversation(liveSession);
+ * } catch (e) {
+ * // Handle AI-specific errors
+ * if (e instanceof AIError) {
+ * console.error("AI Error:", e.message);
+ * }
+ * // Handle microphone permission and hardware errors
+ * else if (e instanceof DOMException) {
+ * console.error("Microphone Error:", e.message);
+ * }
+ * // Handle other unexpected errors
+ * else {
+ * console.error("An unexpected error occurred:", e);
+ * }
+ * }
+ * }
+ *
+ * // Later, to stop the conversation:
+ * // if (conversationController) {
+ * // await conversationController.stop();
+ * // }
+ * ```
+ *
+ * @param liveSession - An active {@link LiveSession} instance.
+ * @param options - Configuration options for the audio conversation.
+ * @returns A `Promise` that resolves with an {@link AudioConversationController}.
+ * @throws `AIError` if the environment does not support required Web APIs (`UNSUPPORTED`), if a conversation is already active (`REQUEST_ERROR`), the session is closed (`SESSION_CLOSED`), or if an unexpected initialization error occurs (`ERROR`).
+ * @throws `DOMException` Thrown by `navigator.mediaDevices.getUserMedia()` if issues occur with microphone access, such as permissions being denied (`NotAllowedError`) or no compatible hardware being found (`NotFoundError`). See the {@link https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia#exceptions | MDN documentation} for a full list of exceptions.
+ *
+ * @beta
+ */
+ export declare function startAudioConversation(liveSession: LiveSession, options?: StartAudioConversationOptions): Promise<AudioConversationController>;
+ export {};
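Beyond the basic example embedded in the doc comment above, `StartAudioConversationOptions.functionCallingHandler` is how tool calls surface during an audio conversation. A hedged sketch, where `lookUpWeather` is a hypothetical app helper and the `functionResponse` part shape follows the SDK's function-calling types as I understand them:

```ts
import { startAudioConversation } from 'firebase/ai';
import type { LiveSession, StartAudioConversationOptions } from 'firebase/ai';

declare const liveSession: LiveSession; // obtained from liveModel.connect()
declare function lookUpWeather(args: unknown): Promise<object>; // hypothetical app code

const options: StartAudioConversationOptions = {
  // Called when the model asks for a tool to run; the returned Part is sent back to the model.
  functionCallingHandler: async (functionCalls) => {
    const call = functionCalls?.[0];
    if (call && call.name === 'getWeather') {
      const weather = await lookUpWeather(call.args);
      return { functionResponse: { name: call.name, response: weather } };
    }
    return {
      functionResponse: { name: call?.name ?? 'unknown', response: { error: 'unhandled function' } }
    };
  }
};

// Must be called from a user gesture (see the autoplay-policy note above).
async function onStartButtonClick(): Promise<void> {
  const controller = await startAudioConversation(liveSession, options);
  void controller; // ...later: await controller.stop();
}
```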
package/dist/src/methods/live-session.d.ts ADDED
@@ -0,0 +1,90 @@
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ import { GenerativeContentBlob, LiveServerContent, LiveServerToolCall, LiveServerToolCallCancellation, Part } from '../public-types';
+ import { WebSocketHandler } from '../websocket';
+ /**
+ * Represents an active, real-time, bidirectional conversation with the model.
+ *
+ * This class should only be instantiated by calling {@link LiveGenerativeModel.connect}.
+ *
+ * @beta
+ */
+ export declare class LiveSession {
+ private webSocketHandler;
+ private serverMessages;
+ /**
+ * Indicates whether this Live session is closed.
+ *
+ * @beta
+ */
+ isClosed: boolean;
+ /**
+ * Indicates whether this Live session is being controlled by an `AudioConversationController`.
+ *
+ * @beta
+ */
+ inConversation: boolean;
+ /**
+ * @internal
+ */
+ constructor(webSocketHandler: WebSocketHandler, serverMessages: AsyncGenerator<unknown>);
+ /**
+ * Sends content to the server.
+ *
+ * @param request - The message to send to the model.
+ * @param turnComplete - Indicates if the turn is complete. Defaults to false.
+ * @throws If this session has been closed.
+ *
+ * @beta
+ */
+ send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>;
+ /**
+ * Sends realtime input to the server.
+ *
+ * @param mediaChunks - The media chunks to send.
+ * @throws If this session has been closed.
+ *
+ * @beta
+ */
+ sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>;
+ /**
+ * Sends a stream of {@link GenerativeContentBlob}.
+ *
+ * @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
+ * @throws If this session has been closed.
+ *
+ * @beta
+ */
+ sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>;
+ /**
+ * Yields messages received from the server.
+ * This can only be used by one consumer at a time.
+ *
+ * @returns An `AsyncGenerator` that yields server messages as they arrive.
+ * @throws If the session is already closed, or if we receive a response that we don't support.
+ *
+ * @beta
+ */
+ receive(): AsyncGenerator<LiveServerContent | LiveServerToolCall | LiveServerToolCallCancellation>;
+ /**
+ * Closes this session.
+ * All methods on this session will throw an error once this resolves.
+ *
+ * @beta
+ */
+ close(): Promise<void>;
+ }
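Taken together with `LiveGenerativeModel.connect()` below, the `LiveSession` surface supports a simple text turn loop. A sketch under the assumption that server messages can be distinguished structurally (`functionCalls` vs. `modelTurn`/`turnComplete`); the exact discriminator the SDK attaches to each message is not shown in this diff:

```ts
import type { LiveGenerativeModel } from 'firebase/ai';

declare const liveModel: LiveGenerativeModel; // from getLiveGenerativeModel()

async function askOverLiveSession(prompt: string): Promise<void> {
  const session = await liveModel.connect();

  // Send one complete user turn.
  await session.send(prompt, /* turnComplete */ true);

  // Only one consumer may iterate receive() at a time.
  for await (const message of session.receive()) {
    if ('functionCalls' in message && message.functionCalls) {
      console.log('Tool call requested:', message.functionCalls);
    } else if ('modelTurn' in message && message.modelTurn) {
      console.log('Model parts:', message.modelTurn.parts);
    }
    if ('turnComplete' in message && message.turnComplete) {
      break; // the model finished its reply
    }
  }

  await session.close();
}
```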
package/dist/src/models/ai-model.d.ts CHANGED
@@ -33,7 +33,7 @@ export declare abstract class AIModel {
  /**
  * @internal
  */
- protected _apiSettings: ApiSettings;
+ _apiSettings: ApiSettings;
  /**
  * Constructs a new instance of the {@link AIModel} class.
  *
package/dist/src/models/index.d.ts CHANGED
@@ -16,4 +16,5 @@
  */
  export * from './ai-model';
  export * from './generative-model';
+ export * from './live-generative-model';
  export * from './imagen-model';
package/dist/src/models/live-generative-model.d.ts ADDED
@@ -0,0 +1,55 @@
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ import { AIModel } from './ai-model';
+ import { LiveSession } from '../methods/live-session';
+ import { AI, Content, LiveGenerationConfig, LiveModelParams, Tool, ToolConfig } from '../public-types';
+ import { WebSocketHandler } from '../websocket';
+ /**
+ * Class for Live generative model APIs. The Live API enables low-latency, two-way multimodal
+ * interactions with Gemini.
+ *
+ * This class should only be instantiated with {@link getLiveGenerativeModel}.
+ *
+ * @beta
+ */
+ export declare class LiveGenerativeModel extends AIModel {
+ /**
+ * @internal
+ */
+ private _webSocketHandler;
+ generationConfig: LiveGenerationConfig;
+ tools?: Tool[];
+ toolConfig?: ToolConfig;
+ systemInstruction?: Content;
+ /**
+ * @internal
+ */
+ constructor(ai: AI, modelParams: LiveModelParams,
+ /**
+ * @internal
+ */
+ _webSocketHandler: WebSocketHandler);
+ /**
+ * Starts a {@link LiveSession}.
+ *
+ * @returns A {@link LiveSession}.
+ * @throws If the connection failed to be established with the server.
+ *
+ * @beta
+ */
+ connect(): Promise<LiveSession>;
+ }
package/dist/src/public-types.d.ts CHANGED
@@ -35,6 +35,10 @@ export interface AI {
  * Vertex AI Gemini API (using {@link VertexAIBackend}).
  */
  backend: Backend;
+ /**
+ * Options applied to this {@link AI} instance.
+ */
+ options?: AIOptions;
  /**
  * @deprecated use `AI.backend.location` instead.
  *
@@ -83,6 +87,11 @@ export type BackendType = (typeof BackendType)[keyof typeof BackendType];
  export interface AIOptions {
  /**
  * The backend configuration to use for the AI service instance.
+ * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
  */
- backend: Backend;
+ backend?: Backend;
+ /**
+ * Whether to use App Check limited use tokens. Defaults to false.
+ */
+ useLimitedUseAppCheckTokens?: boolean;
  }
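With `backend` now optional on `AIOptions` and the new `useLimitedUseAppCheckTokens` flag, initialization can look like the minimal sketch below (defaulting to the Gemini Developer API backend; the Vertex AI alternative is shown commented out):

```ts
import { initializeApp } from 'firebase/app';
import { getAI } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });

// Omitting `backend` now defaults to the Gemini Developer API backend.
const ai = getAI(app, {
  // backend: new VertexAIBackend('us-central1'), // import VertexAIBackend to opt into Vertex AI instead
  useLimitedUseAppCheckTokens: true // send limited-use App Check tokens with requests
});
```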
package/dist/src/requests/request.d.ts CHANGED
@@ -35,6 +35,12 @@ export declare class RequestUrl {
  private get modelPath();
  private get queryParams();
  }
+ export declare class WebSocketUrl {
+ apiSettings: ApiSettings;
+ constructor(apiSettings: ApiSettings);
+ toString(): string;
+ private get pathname();
+ }
  export declare function getHeaders(url: RequestUrl): Promise<Headers>;
  export declare function constructRequest(model: string, task: Task, apiSettings: ApiSettings, stream: boolean, body: string, requestOptions?: RequestOptions): Promise<{
  url: string;
package/dist/src/requests/response-helpers.d.ts CHANGED
@@ -14,7 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
- import { EnhancedGenerateContentResponse, FunctionCall, GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, InlineDataPart } from '../types';
+ import { EnhancedGenerateContentResponse, FunctionCall, GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, InlineDataPart, Part } from '../types';
  /**
  * Creates an EnhancedGenerateContentResponse object that has helper functions and
  * other modifications that improve usability.
@@ -26,15 +26,19 @@ export declare function createEnhancedContentResponse(response: GenerateContentR
  */
  export declare function addHelpers(response: GenerateContentResponse): EnhancedGenerateContentResponse;
  /**
- * Returns all text found in all parts of first candidate.
+ * Returns all text from the first candidate's parts, filtering by whether
+ * `partFilter()` returns true.
+ *
+ * @param response - The `GenerateContentResponse` from which to extract text.
+ * @param partFilter - Only return `Part`s for which this returns true
  */
- export declare function getText(response: GenerateContentResponse): string;
+ export declare function getText(response: GenerateContentResponse, partFilter: (part: Part) => boolean): string;
  /**
- * Returns {@link FunctionCall}s associated with first candidate.
+ * Returns every {@link FunctionCall} associated with first candidate.
  */
  export declare function getFunctionCalls(response: GenerateContentResponse): FunctionCall[] | undefined;
  /**
- * Returns {@link InlineDataPart}s in the first candidate if present.
+ * Returns every {@link InlineDataPart} in the first candidate if present.
  *
  * @internal
  */
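The `partFilter` parameter added to the internal `getText` helper lets the enhanced-response helpers choose which `Part`s contribute to the concatenated text. A small sketch of the kind of predicate this enables; the `thought` flag below is an assumption about how reasoning parts might be separated and is not confirmed by this diff:

```ts
import type { Part } from 'firebase/ai';

// Keep ordinary text parts only (a conservative, structural check).
const textPartsOnly = (part: Part): boolean => 'text' in part && typeof part.text === 'string';

// Hypothetical: split "thought" text from answer text, if Part carries such a flag.
const answerTextOnly = (part: Part): boolean =>
  textPartsOnly(part) && !(part as { thought?: boolean }).thought;
```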
package/dist/src/service.d.ts CHANGED
@@ -15,17 +15,22 @@
  * limitations under the License.
  */
  import { FirebaseApp, _FirebaseService } from '@firebase/app';
- import { AI } from './public-types';
+ import { AI, AIOptions, InferenceMode, OnDeviceParams } from './public-types';
  import { AppCheckInternalComponentName, FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
  import { Provider } from '@firebase/component';
  import { FirebaseAuthInternal, FirebaseAuthInternalName } from '@firebase/auth-interop-types';
  import { Backend } from './backend';
+ import { ChromeAdapterImpl } from './methods/chrome-adapter';
  export declare class AIService implements AI, _FirebaseService {
  app: FirebaseApp;
  backend: Backend;
+ chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined;
  auth: FirebaseAuthInternal | null;
  appCheck: FirebaseAppCheckInternal | null;
+ _options?: Omit<AIOptions, 'backend'>;
  location: string;
- constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>);
+ constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined);
  _delete(): Promise<void>;
+ set options(optionsToSet: AIOptions);
+ get options(): AIOptions | undefined;
  }
package/dist/src/types/chrome-adapter.d.ts CHANGED
@@ -34,16 +34,18 @@ export interface ChromeAdapter {
  /**
  * Generates content using on-device inference.
  *
- * <p>This is comparable to {@link GenerativeModel.generateContent} for generating
- * content using in-cloud inference.</p>
+ * @remarks
+ * This is comparable to {@link GenerativeModel.generateContent} for generating
+ * content using in-cloud inference.
  * @param request - a standard Firebase AI {@link GenerateContentRequest}
  */
  generateContent(request: GenerateContentRequest): Promise<Response>;
  /**
  * Generates a content stream using on-device inference.
  *
- * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating
- * a content stream using in-cloud inference.</p>
+ * @remarks
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating
+ * a content stream using in-cloud inference.
  * @param request - a standard Firebase AI {@link GenerateContentRequest}
  */
  generateContentStream(request: GenerateContentRequest): Promise<Response>;