@firebase/ai 2.1.0-canary.c5f08a9bc → 2.1.0-canary.cbef6c6e5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -148,11 +148,12 @@ export declare interface AIOptions {
148
148
  declare class AIService implements AI, _FirebaseService {
149
149
  app: FirebaseApp;
150
150
  backend: Backend;
151
+ chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined;
151
152
  auth: FirebaseAuthInternal | null;
152
153
  appCheck: FirebaseAppCheckInternal | null;
153
154
  _options?: Omit<AIOptions, 'backend'>;
154
155
  location: string;
155
- constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>);
156
+ constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined);
156
157
  _delete(): Promise<void>;
157
158
  set options(optionsToSet: AIOptions);
158
159
  get options(): AIOptions | undefined;
@@ -197,6 +198,8 @@ export declare class ArraySchema extends Schema {
197
198
  /* Excluded from this release type: toJSON */
198
199
  }
199
200
 
201
+ /* Excluded from this release type: Availability */
202
+
200
203
  /**
201
204
  * Abstract base class representing the configuration for an AI service backend.
202
205
  * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
@@ -367,6 +370,108 @@ export declare interface ChromeAdapter {
367
370
  /* Excluded from this release type: countTokens */
368
371
  }
369
372
 
373
+ /**
374
+ * Defines an inference "backend" that uses Chrome's on-device model,
375
+ * and encapsulates logic for detecting when on-device inference is
376
+ * possible.
377
+ */
378
+ declare class ChromeAdapterImpl implements ChromeAdapter {
379
+ languageModelProvider: LanguageModel;
380
+ mode: InferenceMode;
381
+ onDeviceParams: OnDeviceParams;
382
+ static SUPPORTED_MIME_TYPES: string[];
383
+ private isDownloading;
384
+ private downloadPromise;
385
+ private oldSession;
386
+ constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
387
+ /**
388
+ * Checks if a given request can be made on-device.
389
+ *
390
+ * Encapsulates a few concerns:
391
+ * the mode
392
+ * API existence
393
+ * prompt formatting
394
+ * model availability, including triggering download if necessary
395
+ *
396
+ *
397
+ * Pros: callers needn't be concerned with details of on-device availability.
398
+ * Cons: this method spans a few concerns and splits request validation from usage.
399
+ * If instance variables weren't already part of the API, we could consider a better
400
+ * separation of concerns.
401
+ */
402
+ isAvailable(request: GenerateContentRequest): Promise<boolean>;
403
+ /**
404
+ * Generates content on device.
405
+ *
406
+ * @remarks
407
+ * This is comparable to {@link GenerativeModel.generateContent} for generating content in
408
+ * Cloud.
409
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
410
+ * @returns {@link Response}, so we can reuse common response formatting.
411
+ */
412
+ generateContent(request: GenerateContentRequest): Promise<Response>;
413
+ /**
414
+ * Generates content stream on device.
415
+ *
416
+ * @remarks
417
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
418
+ * Cloud.
419
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
420
+ * @returns {@link Response}, so we can reuse common response formatting.
421
+ */
422
+ generateContentStream(request: GenerateContentRequest): Promise<Response>;
423
+ countTokens(_request: CountTokensRequest): Promise<Response>;
424
+ /**
425
+ * Asserts inference for the given request can be performed by an on-device model.
426
+ */
427
+ private static isOnDeviceRequest;
428
+ /**
429
+ * Encapsulates logic to get availability and download a model if one is downloadable.
430
+ */
431
+ private downloadIfAvailable;
432
+ /**
433
+ * Triggers out-of-band download of an on-device model.
434
+ *
435
+ * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
436
+ * LanguageModel.create.
437
+ *
438
+ * Since Chrome manages the download, the SDK can only avoid redundant download requests by
439
+ * tracking if a download has previously been requested.
440
+ */
441
+ private download;
442
+ /**
443
+ * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
444
+ */
445
+ private static toLanguageModelMessage;
446
+ /**
447
+ * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
448
+ */
449
+ private static toLanguageModelMessageContent;
450
+ /**
451
+ * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
452
+ */
453
+ private static toLanguageModelMessageRole;
454
+ /**
455
+ * Abstracts Chrome session creation.
456
+ *
457
+ * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
458
+ * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
459
+ * inference.
460
+ *
461
+ * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
462
+ * new session is created before an old session is destroyed.
463
+ */
464
+ private createSession;
465
+ /**
466
+ * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
467
+ */
468
+ private static toResponse;
469
+ /**
470
+ * Formats string stream returned by Chrome as SSE returned by Firebase AI.
471
+ */
472
+ private static toStreamResponse;
473
+ }
474
+
370
475
  /**
371
476
  * A single citation.
372
477
  * @public
@@ -1735,6 +1840,8 @@ export declare class IntegerSchema extends Schema {
1735
1840
  constructor(schemaParams?: SchemaParams);
1736
1841
  }
1737
1842
 
1843
+ /* Excluded from this release type: LanguageModel */
1844
+
1738
1845
  /**
1739
1846
  * <b>(EXPERIMENTAL)</b>
1740
1847
  * Configures the creation of an on-device language model session.
@@ -1806,6 +1913,13 @@ export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
1806
1913
  */
1807
1914
  export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
1808
1915
 
1916
+ /**
1917
+ * <b>(EXPERIMENTAL)</b>
1918
+ * An on-device language model prompt.
1919
+ * @public
1920
+ */
1921
+ declare type LanguageModelPrompt = LanguageModelMessage[];
1922
+
1809
1923
  /**
1810
1924
  * <b>(EXPERIMENTAL)</b>
1811
1925
  * Options for an on-device language model prompt.
package/dist/ai.d.ts CHANGED
@@ -183,11 +183,12 @@ export declare interface AIOptions {
183
183
  declare class AIService implements AI, _FirebaseService {
184
184
  app: FirebaseApp;
185
185
  backend: Backend;
186
+ chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined;
186
187
  auth: FirebaseAuthInternal | null;
187
188
  appCheck: FirebaseAppCheckInternal | null;
188
189
  _options?: Omit<AIOptions, 'backend'>;
189
190
  location: string;
190
- constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>);
191
+ constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>, chromeAdapterFactory?: ((mode: InferenceMode, window?: Window, params?: OnDeviceParams) => ChromeAdapterImpl | undefined) | undefined);
191
192
  _delete(): Promise<void>;
192
193
  set options(optionsToSet: AIOptions);
193
194
  get options(): AIOptions | undefined;
@@ -238,6 +239,16 @@ export declare class ArraySchema extends Schema {
238
239
  toJSON(): SchemaRequest;
239
240
  }
240
241
 
242
+ /**
243
+ * @internal
244
+ */
245
+ declare enum Availability {
246
+ 'UNAVAILABLE' = "unavailable",
247
+ 'DOWNLOADABLE' = "downloadable",
248
+ 'DOWNLOADING' = "downloading",
249
+ 'AVAILABLE' = "available"
250
+ }
251
+
241
252
  /**
242
253
  * Abstract base class representing the configuration for an AI service backend.
243
254
  * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
@@ -411,6 +422,108 @@ export declare interface ChromeAdapter {
411
422
  countTokens(request: CountTokensRequest): Promise<Response>;
412
423
  }
413
424
 
425
+ /**
426
+ * Defines an inference "backend" that uses Chrome's on-device model,
427
+ * and encapsulates logic for detecting when on-device inference is
428
+ * possible.
429
+ */
430
+ declare class ChromeAdapterImpl implements ChromeAdapter {
431
+ languageModelProvider: LanguageModel;
432
+ mode: InferenceMode;
433
+ onDeviceParams: OnDeviceParams;
434
+ static SUPPORTED_MIME_TYPES: string[];
435
+ private isDownloading;
436
+ private downloadPromise;
437
+ private oldSession;
438
+ constructor(languageModelProvider: LanguageModel, mode: InferenceMode, onDeviceParams?: OnDeviceParams);
439
+ /**
440
+ * Checks if a given request can be made on-device.
441
+ *
442
+ * Encapsulates a few concerns:
443
+ * the mode
444
+ * API existence
445
+ * prompt formatting
446
+ * model availability, including triggering download if necessary
447
+ *
448
+ *
449
+ * Pros: callers needn't be concerned with details of on-device availability.
450
+ * Cons: this method spans a few concerns and splits request validation from usage.
451
+ * If instance variables weren't already part of the API, we could consider a better
452
+ * separation of concerns.
453
+ */
454
+ isAvailable(request: GenerateContentRequest): Promise<boolean>;
455
+ /**
456
+ * Generates content on device.
457
+ *
458
+ * @remarks
459
+ * This is comparable to {@link GenerativeModel.generateContent} for generating content in
460
+ * Cloud.
461
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
462
+ * @returns {@link Response}, so we can reuse common response formatting.
463
+ */
464
+ generateContent(request: GenerateContentRequest): Promise<Response>;
465
+ /**
466
+ * Generates content stream on device.
467
+ *
468
+ * @remarks
469
+ * This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
470
+ * Cloud.
471
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
472
+ * @returns {@link Response}, so we can reuse common response formatting.
473
+ */
474
+ generateContentStream(request: GenerateContentRequest): Promise<Response>;
475
+ countTokens(_request: CountTokensRequest): Promise<Response>;
476
+ /**
477
+ * Asserts inference for the given request can be performed by an on-device model.
478
+ */
479
+ private static isOnDeviceRequest;
480
+ /**
481
+ * Encapsulates logic to get availability and download a model if one is downloadable.
482
+ */
483
+ private downloadIfAvailable;
484
+ /**
485
+ * Triggers out-of-band download of an on-device model.
486
+ *
487
+ * Chrome only downloads models as needed. Chrome knows a model is needed when code calls
488
+ * LanguageModel.create.
489
+ *
490
+ * Since Chrome manages the download, the SDK can only avoid redundant download requests by
491
+ * tracking if a download has previously been requested.
492
+ */
493
+ private download;
494
+ /**
495
+ * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
496
+ */
497
+ private static toLanguageModelMessage;
498
+ /**
499
+ * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
500
+ */
501
+ private static toLanguageModelMessageContent;
502
+ /**
503
+ * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
504
+ */
505
+ private static toLanguageModelMessageRole;
506
+ /**
507
+ * Abstracts Chrome session creation.
508
+ *
509
+ * Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
510
+ * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
511
+ * inference.
512
+ *
513
+ * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
514
+ * new session is created before an old session is destroyed.
515
+ */
516
+ private createSession;
517
+ /**
518
+ * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
519
+ */
520
+ private static toResponse;
521
+ /**
522
+ * Formats string stream returned by Chrome as SSE returned by Firebase AI.
523
+ */
524
+ private static toStreamResponse;
525
+ }
526
+
414
527
  /**
415
528
  * A single citation.
416
529
  * @public
@@ -1842,6 +1955,38 @@ export declare class IntegerSchema extends Schema {
1842
1955
  constructor(schemaParams?: SchemaParams);
1843
1956
  }
1844
1957
 
1958
+ /**
1959
+ * @license
1960
+ * Copyright 2025 Google LLC
1961
+ *
1962
+ * Licensed under the Apache License, Version 2.0 (the "License");
1963
+ * you may not use this file except in compliance with the License.
1964
+ * You may obtain a copy of the License at
1965
+ *
1966
+ * http://www.apache.org/licenses/LICENSE-2.0
1967
+ *
1968
+ * Unless required by applicable law or agreed to in writing, software
1969
+ * distributed under the License is distributed on an "AS IS" BASIS,
1970
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1971
+ * See the License for the specific language governing permissions and
1972
+ * limitations under the License.
1973
+ */
1974
+ /**
1975
+ * The subset of the Prompt API
1976
+ * (see {@link https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl })
1977
+ * required for hybrid functionality.
1978
+ *
1979
+ * @internal
1980
+ */
1981
+ declare interface LanguageModel extends EventTarget {
1982
+ create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
1983
+ availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
1984
+ prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
1985
+ promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
1986
+ measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
1987
+ destroy(): undefined;
1988
+ }
1989
+
1845
1990
  /**
1846
1991
  * <b>(EXPERIMENTAL)</b>
1847
1992
  * Configures the creation of an on-device language model session.
@@ -1913,6 +2058,13 @@ export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
1913
2058
  */
1914
2059
  export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
1915
2060
 
2061
+ /**
2062
+ * <b>(EXPERIMENTAL)</b>
2063
+ * An on-device language model prompt.
2064
+ * @public
2065
+ */
2066
+ declare type LanguageModelPrompt = LanguageModelMessage[];
2067
+
1916
2068
  /**
1917
2069
  * <b>(EXPERIMENTAL)</b>
1918
2070
  * Options for an on-device language model prompt.