@firebase/ai 2.2.0 → 2.2.1-canary.4d834deb2

This diff shows the changes between publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
@@ -4,17 +4,10 @@
  * @packageDocumentation
  */

- import { AppCheckInternalComponentName } from '@firebase/app-check-interop-types';
  import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
- import { ComponentContainer } from '@firebase/component';
  import { FirebaseApp } from '@firebase/app';
- import { FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
- import { FirebaseAuthInternal } from '@firebase/auth-interop-types';
- import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
  import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
  import { FirebaseError } from '@firebase/util';
- import { InstanceFactoryOptions } from '@firebase/component';
- import { Provider } from '@firebase/component';

  /**
  * An instance of the Firebase AI SDK.
@@ -147,20 +140,6 @@ export declare interface AIOptions {
  useLimitedUseAppCheckTokens?: boolean;
  }

- declare class AIService implements AI, _FirebaseService {
- app: FirebaseApp;
- backend: Backend;
- chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined;
- auth: FirebaseAuthInternal | null;
- appCheck: FirebaseAppCheckInternal | null;
- _options?: Omit<AIOptions, 'backend'>;
- location: string;
- constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined);
- _delete(): Promise<void>;
- set options(optionsToSet: AIOptions);
- get options(): AIOptions | undefined;
- }
-
  /**
  * Schema class representing a value that can conform to any of the provided sub-schemas. This is
  * useful when a field can accept multiple distinct types or structures.
@@ -213,8 +192,6 @@ export declare interface AudioConversationController {
  stop: () => Promise<void>;
  }

- /* Excluded from this release type: Availability */
-
  /**
  * Abstract base class representing the configuration for an AI service backend.
  * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
@@ -385,108 +362,6 @@ export declare interface ChromeAdapter {
  /* Excluded from this release type: countTokens */
  }

- /**
- * Defines an inference "backend" that uses Chrome's on-device model,
- * and encapsulates logic for detecting when on-device inference is
- * possible.
- */
- declare class ChromeAdapterImpl implements ChromeAdapter {
- languageModelProvider: LanguageModel;
- mode: InferenceMode;
- onDeviceParams: OnDeviceParams;
- static SUPPORTED_MIME_TYPES: string[];
- private isDownloading;
- private downloadPromise;
- private oldSession;
- constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
- /**
- * Checks if a given request can be made on-device.
- *
- * Encapsulates a few concerns:
- * the mode
- * API existence
- * prompt formatting
- * model availability, including triggering download if necessary
- *
- *
- * Pros: callers needn't be concerned with details of on-device availability.</p>
- * Cons: this method spans a few concerns and splits request validation from usage.
- * If instance variables weren't already part of the API, we could consider a better
- * separation of concerns.
- */
- isAvailable(request: GenerateContentRequest): Promise<boolean>;
- /**
- * Generates content on device.
- *
- * @remarks
- * This is comparable to {@link GenerativeModel.generateContent} for generating content in
- * Cloud.
- * @param request - a standard Firebase AI {@link GenerateContentRequest}
- * @returns {@link Response}, so we can reuse common response formatting.
- */
- generateContent(request: GenerateContentRequest): Promise<Response>;
- /**
- * Generates content stream on device.
- *
- * @remarks
- * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
- * Cloud.
- * @param request - a standard Firebase AI {@link GenerateContentRequest}
- * @returns {@link Response}, so we can reuse common response formatting.
- */
- generateContentStream(request: GenerateContentRequest): Promise<Response>;
- countTokens(_request: CountTokensRequest): Promise<Response>;
- /**
- * Asserts inference for the given request can be performed by an on-device model.
- */
- private static isOnDeviceRequest;
- /**
- * Encapsulates logic to get availability and download a model if one is downloadable.
- */
- private downloadIfAvailable;
- /**
- * Triggers out-of-band download of an on-device model.
- *
- * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
- * LanguageModel.create.
- *
- * Since Chrome manages the download, the SDK can only avoid redundant download requests by
- * tracking if a download has previously been requested.
- */
- private download;
- /**
- * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
- */
- private static toLanguageModelMessage;
- /**
- * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
- */
- private static toLanguageModelMessageContent;
- /**
- * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
- */
- private static toLanguageModelMessageRole;
- /**
- * Abstracts Chrome session creation.
- *
- * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
- * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
- * inference.
- *
- * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
- * new session is created before an old session is destroyed.
- */
- private createSession;
- /**
- * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
- */
- private static toResponse;
- /**
- * Formats string stream returned by Chrome as SSE returned by Firebase AI.
- */
- private static toStreamResponse;
- }
-
  /**
  * A single citation.
  * @public
@@ -655,8 +530,6 @@ export declare interface ErrorDetails {
  [key: string]: unknown;
  }

- export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;
-
  /**
  * Data pointing to a file uploaded on Google Cloud Storage.
  * @public
@@ -729,8 +602,6 @@ export declare const FinishReason: {
  */
  export declare type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];

- /* Excluded from this release type: _FirebaseService */
-
  /**
  * A predicted {@link FunctionCall} returned from the model
  * that contains a string representing the {@link FunctionDeclaration.name}
@@ -1886,8 +1757,6 @@ export declare class IntegerSchema extends Schema {
  constructor(schemaParams?: SchemaParams);
  }

- /* Excluded from this release type: LanguageModel */
-
  /**
  * <b>(EXPERIMENTAL)</b>
  * Configures the creation of an on-device language model session.
@@ -1959,13 +1828,6 @@ export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
  */
  export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';

- /**
- * <b>(EXPERIMENTAL)</b>
- * An on-device language model prompt.
- * @public
- */
- declare type LanguageModelPrompt = LanguageModelMessage[];
-
  /**
  * <b>(EXPERIMENTAL)</b>
  * Options for an on-device language model prompt.
package/dist/ai.d.ts CHANGED
@@ -4,18 +4,10 @@
  * @packageDocumentation
  */

- import { AppCheckInternalComponentName } from '@firebase/app-check-interop-types';
  import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
- import { ComponentContainer } from '@firebase/component';
  import { FirebaseApp } from '@firebase/app';
- import { FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
- import { FirebaseAuthInternal } from '@firebase/auth-interop-types';
- import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
  import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
  import { FirebaseError } from '@firebase/util';
- import { _FirebaseService } from '@firebase/app';
- import { InstanceFactoryOptions } from '@firebase/component';
- import { Provider } from '@firebase/component';

  /**
  * An instance of the Firebase AI SDK.
@@ -182,20 +174,6 @@ export declare interface AIOptions {
  useLimitedUseAppCheckTokens?: boolean;
  }

- declare class AIService implements AI, _FirebaseService {
- app: FirebaseApp;
- backend: Backend;
- chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined;
- auth: FirebaseAuthInternal | null;
- appCheck: FirebaseAppCheckInternal | null;
- _options?: Omit<AIOptions, 'backend'>;
- location: string;
- constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined);
- _delete(): Promise<void>;
- set options(optionsToSet: AIOptions);
- get options(): AIOptions | undefined;
- }
-
  /**
  * Schema class representing a value that can conform to any of the provided sub-schemas. This is
  * useful when a field can accept multiple distinct types or structures.
@@ -254,16 +232,6 @@ export declare interface AudioConversationController {
  stop: () => Promise<void>;
  }

- /**
- * @internal
- */
- declare enum Availability {
- 'UNAVAILABLE' = "unavailable",
- 'DOWNLOADABLE' = "downloadable",
- 'DOWNLOADING' = "downloading",
- 'AVAILABLE' = "available"
- }
-
  /**
  * Abstract base class representing the configuration for an AI service backend.
  * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
@@ -437,108 +405,6 @@ export declare interface ChromeAdapter {
  countTokens(request: CountTokensRequest): Promise<Response>;
  }

- /**
- * Defines an inference "backend" that uses Chrome's on-device model,
- * and encapsulates logic for detecting when on-device inference is
- * possible.
- */
- declare class ChromeAdapterImpl implements ChromeAdapter {
- languageModelProvider: LanguageModel;
- mode: InferenceMode;
- onDeviceParams: OnDeviceParams;
- static SUPPORTED_MIME_TYPES: string[];
- private isDownloading;
- private downloadPromise;
- private oldSession;
- constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
- /**
- * Checks if a given request can be made on-device.
- *
- * Encapsulates a few concerns:
- * the mode
- * API existence
- * prompt formatting
- * model availability, including triggering download if necessary
- *
- *
- * Pros: callers needn't be concerned with details of on-device availability.</p>
- * Cons: this method spans a few concerns and splits request validation from usage.
- * If instance variables weren't already part of the API, we could consider a better
- * separation of concerns.
- */
- isAvailable(request: GenerateContentRequest): Promise<boolean>;
- /**
- * Generates content on device.
- *
- * @remarks
- * This is comparable to {@link GenerativeModel.generateContent} for generating content in
- * Cloud.
- * @param request - a standard Firebase AI {@link GenerateContentRequest}
- * @returns {@link Response}, so we can reuse common response formatting.
- */
- generateContent(request: GenerateContentRequest): Promise<Response>;
- /**
- * Generates content stream on device.
- *
- * @remarks
- * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
- * Cloud.
- * @param request - a standard Firebase AI {@link GenerateContentRequest}
- * @returns {@link Response}, so we can reuse common response formatting.
- */
- generateContentStream(request: GenerateContentRequest): Promise<Response>;
- countTokens(_request: CountTokensRequest): Promise<Response>;
- /**
- * Asserts inference for the given request can be performed by an on-device model.
- */
- private static isOnDeviceRequest;
- /**
- * Encapsulates logic to get availability and download a model if one is downloadable.
- */
- private downloadIfAvailable;
- /**
- * Triggers out-of-band download of an on-device model.
- *
- * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
- * LanguageModel.create.
- *
- * Since Chrome manages the download, the SDK can only avoid redundant download requests by
- * tracking if a download has previously been requested.
- */
- private download;
- /**
- * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
- */
- private static toLanguageModelMessage;
- /**
- * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
- */
- private static toLanguageModelMessageContent;
- /**
- * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
- */
- private static toLanguageModelMessageRole;
- /**
- * Abstracts Chrome session creation.
- *
- * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
- * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
- * inference.
- *
- * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
- * new session is created before an old session is destroyed.
- */
- private createSession;
- /**
- * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
- */
- private static toResponse;
- /**
- * Formats string stream returned by Chrome as SSE returned by Firebase AI.
- */
- private static toStreamResponse;
- }
-
  /**
  * A single citation.
  * @public
@@ -707,8 +573,6 @@ export declare interface ErrorDetails {
  [key: string]: unknown;
  }

- export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;
-
  /**
  * Data pointing to a file uploaded on Google Cloud Storage.
  * @public
@@ -2001,38 +1865,6 @@ export declare class IntegerSchema extends Schema {
  constructor(schemaParams?: SchemaParams);
  }

- /**
- * @license
- * Copyright 2025 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /**
- * The subset of the Prompt API
- * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl }
- * required for hybrid functionality.
- *
- * @internal
- */
- declare interface LanguageModel extends EventTarget {
- create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
- availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
- prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
- promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
- measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
- destroy(): undefined;
- }
-
  /**
  * <b>(EXPERIMENTAL)</b>
  * Configures the creation of an on-device language model session.
@@ -2104,13 +1936,6 @@ export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
  */
  export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';

- /**
- * <b>(EXPERIMENTAL)</b>
- * An on-device language model prompt.
- * @public
- */
- declare type LanguageModelPrompt = LanguageModelMessage[];
-
  /**
  * <b>(EXPERIMENTAL)</b>
  * Options for an on-device language model prompt.
@@ -4,7 +4,7 @@ import { FirebaseError, Deferred, getModularInstance } from '@firebase/util';
  import { Logger } from '@firebase/logger';

  var name = "@firebase/ai";
- var version = "2.2.0";
+ var version = "2.2.1-canary.4d834deb2";

  /**
  * @license
@@ -3925,5 +3925,5 @@ function registerAI() {
  }
  registerAI();

- export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, factory, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, LiveGenerativeModel, LiveResponseType, LiveSession, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel, getLiveGenerativeModel, startAudioConversation };
  //# sourceMappingURL=index.esm.js.map