@firebase/ai 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/dist/ai-public.d.ts +178 -8
  2. package/dist/ai.d.ts +181 -8
  3. package/dist/esm/index.esm.js +359 -19
  4. package/dist/esm/index.esm.js.map +1 -1
  5. package/dist/esm/src/api.d.ts +2 -2
  6. package/dist/esm/src/constants.d.ts +4 -0
  7. package/dist/esm/src/methods/chat-session.d.ts +3 -1
  8. package/dist/esm/src/methods/chrome-adapter.d.ts +118 -0
  9. package/dist/esm/src/methods/count-tokens.d.ts +3 -1
  10. package/dist/esm/src/methods/generate-content.d.ts +3 -2
  11. package/dist/esm/src/models/generative-model.d.ts +3 -1
  12. package/dist/esm/src/types/chrome-adapter.d.ts +54 -0
  13. package/dist/esm/src/types/enums.d.ts +20 -1
  14. package/dist/esm/src/types/imagen/requests.d.ts +2 -2
  15. package/dist/esm/src/types/imagen/responses.d.ts +1 -0
  16. package/dist/esm/src/types/index.d.ts +2 -0
  17. package/dist/esm/src/types/language-model.d.ts +117 -0
  18. package/dist/esm/src/types/requests.d.ts +31 -1
  19. package/dist/esm/src/types/responses.d.ts +1 -1
  20. package/dist/esm/src/types/schema.d.ts +1 -1
  21. package/dist/index.cjs.js +359 -18
  22. package/dist/index.cjs.js.map +1 -1
  23. package/dist/index.node.cjs.js +359 -18
  24. package/dist/index.node.cjs.js.map +1 -1
  25. package/dist/index.node.mjs +359 -19
  26. package/dist/index.node.mjs.map +1 -1
  27. package/dist/src/api.d.ts +2 -2
  28. package/dist/src/constants.d.ts +4 -0
  29. package/dist/src/methods/chat-session.d.ts +3 -1
  30. package/dist/src/methods/chrome-adapter.d.ts +118 -0
  31. package/dist/src/methods/count-tokens.d.ts +3 -1
  32. package/dist/src/methods/generate-content.d.ts +3 -2
  33. package/dist/src/models/generative-model.d.ts +3 -1
  34. package/dist/src/types/chrome-adapter.d.ts +54 -0
  35. package/dist/src/types/enums.d.ts +20 -1
  36. package/dist/src/types/imagen/requests.d.ts +2 -2
  37. package/dist/src/types/imagen/responses.d.ts +1 -0
  38. package/dist/src/types/index.d.ts +2 -0
  39. package/dist/src/types/language-model.d.ts +117 -0
  40. package/dist/src/types/requests.d.ts +31 -1
  41. package/dist/src/types/responses.d.ts +1 -1
  42. package/dist/src/types/schema.d.ts +1 -1
  43. package/package.json +2 -2
@@ -4,7 +4,7 @@ import { FirebaseError, getModularInstance } from '@firebase/util';
4
4
  import { Logger } from '@firebase/logger';
5
5
 
6
6
  var name = "@firebase/ai";
7
- var version = "2.0.0";
7
+ var version = "2.1.0";
8
8
 
9
9
  /**
10
10
  * @license
@@ -29,6 +29,10 @@ const DEFAULT_API_VERSION = 'v1beta';
29
29
  const PACKAGE_VERSION = version;
30
30
  const LANGUAGE_TAG = 'gl-js';
31
31
  const DEFAULT_FETCH_TIMEOUT_MS = 180 * 1000;
32
+ /**
33
+ * Defines the name of the default in-cloud model to use for hybrid inference.
34
+ */
35
+ const DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite';
32
36
 
33
37
  /**
34
38
  * @license
@@ -84,7 +88,7 @@ const HarmBlockThreshold = {
84
88
  BLOCK_NONE: 'BLOCK_NONE',
85
89
  /**
86
90
  * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
87
- * to the {@link HarmCategory} will not be present in the response.
91
+ * to the {@link (HarmCategory:type)} will not be present in the response.
88
92
  */
89
93
  OFF: 'OFF'
90
94
  };
@@ -287,6 +291,16 @@ const ResponseModality = {
287
291
  */
288
292
  IMAGE: 'IMAGE'
289
293
  };
294
+ /**
295
+ * <b>(EXPERIMENTAL)</b>
296
+ * Determines whether inference happens on-device or in-cloud.
297
+ * @public
298
+ */
299
+ const InferenceMode = {
300
+ 'PREFER_ON_DEVICE': 'prefer_on_device',
301
+ 'ONLY_ON_DEVICE': 'only_on_device',
302
+ 'ONLY_IN_CLOUD': 'only_in_cloud'
303
+ };
290
304
 
291
305
  /**
292
306
  * @license
@@ -460,7 +474,7 @@ const ImagenPersonFilterLevel = {
460
474
  * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
461
475
  * {@link ImagenGenerationConfig}.
462
476
  *
463
- * See the the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
477
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
464
478
  * for more details and examples of the supported aspect ratios.
465
479
  *
466
480
  * @beta
@@ -1660,20 +1674,38 @@ function aggregateResponses(responses) {
1660
1674
  * See the License for the specific language governing permissions and
1661
1675
  * limitations under the License.
1662
1676
  */
1663
- async function generateContentStream(apiSettings, model, params, requestOptions) {
1677
+ async function generateContentStreamOnCloud(apiSettings, model, params, requestOptions) {
1664
1678
  if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
1665
1679
  params = mapGenerateContentRequest(params);
1666
1680
  }
1667
- const response = await makeRequest(model, Task.STREAM_GENERATE_CONTENT, apiSettings,
1681
+ return makeRequest(model, Task.STREAM_GENERATE_CONTENT, apiSettings,
1668
1682
  /* stream */ true, JSON.stringify(params), requestOptions);
1683
+ }
1684
+ async function generateContentStream(apiSettings, model, params, chromeAdapter, requestOptions) {
1685
+ let response;
1686
+ if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
1687
+ response = await chromeAdapter.generateContentStream(params);
1688
+ }
1689
+ else {
1690
+ response = await generateContentStreamOnCloud(apiSettings, model, params, requestOptions);
1691
+ }
1669
1692
  return processStream(response, apiSettings); // TODO: Map streaming responses
1670
1693
  }
1671
- async function generateContent(apiSettings, model, params, requestOptions) {
1694
+ async function generateContentOnCloud(apiSettings, model, params, requestOptions) {
1672
1695
  if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
1673
1696
  params = mapGenerateContentRequest(params);
1674
1697
  }
1675
- const response = await makeRequest(model, Task.GENERATE_CONTENT, apiSettings,
1698
+ return makeRequest(model, Task.GENERATE_CONTENT, apiSettings,
1676
1699
  /* stream */ false, JSON.stringify(params), requestOptions);
1700
+ }
1701
+ async function generateContent(apiSettings, model, params, chromeAdapter, requestOptions) {
1702
+ let response;
1703
+ if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
1704
+ response = await chromeAdapter.generateContent(params);
1705
+ }
1706
+ else {
1707
+ response = await generateContentOnCloud(apiSettings, model, params, requestOptions);
1708
+ }
1677
1709
  const generateContentResponse = await processGenerateContentResponse(response, apiSettings);
1678
1710
  const enhancedResponse = createEnhancedContentResponse(generateContentResponse);
1679
1711
  return {
@@ -1930,8 +1962,9 @@ const SILENT_ERROR = 'SILENT_ERROR';
1930
1962
  * @public
1931
1963
  */
1932
1964
  class ChatSession {
1933
- constructor(apiSettings, model, params, requestOptions) {
1965
+ constructor(apiSettings, model, chromeAdapter, params, requestOptions) {
1934
1966
  this.model = model;
1967
+ this.chromeAdapter = chromeAdapter;
1935
1968
  this.params = params;
1936
1969
  this.requestOptions = requestOptions;
1937
1970
  this._history = [];
@@ -1969,7 +2002,7 @@ class ChatSession {
1969
2002
  let finalResult = {};
1970
2003
  // Add onto the chain.
1971
2004
  this._sendPromise = this._sendPromise
1972
- .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.requestOptions))
2005
+ .then(() => generateContent(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions))
1973
2006
  .then(result => {
1974
2007
  if (result.response.candidates &&
1975
2008
  result.response.candidates.length > 0) {
@@ -2008,7 +2041,7 @@ class ChatSession {
2008
2041
  systemInstruction: this.params?.systemInstruction,
2009
2042
  contents: [...this._history, newContent]
2010
2043
  };
2011
- const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.requestOptions);
2044
+ const streamPromise = generateContentStream(this._apiSettings, this.model, generateContentRequest, this.chromeAdapter, this.requestOptions);
2012
2045
  // Add onto the chain.
2013
2046
  this._sendPromise = this._sendPromise
2014
2047
  .then(() => streamPromise)
@@ -2065,7 +2098,7 @@ class ChatSession {
2065
2098
  * See the License for the specific language governing permissions and
2066
2099
  * limitations under the License.
2067
2100
  */
2068
- async function countTokens(apiSettings, model, params, requestOptions) {
2101
+ async function countTokensOnCloud(apiSettings, model, params, requestOptions) {
2069
2102
  let body = '';
2070
2103
  if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) {
2071
2104
  const mappedParams = mapCountTokensRequest(params, model);
@@ -2077,6 +2110,12 @@ async function countTokens(apiSettings, model, params, requestOptions) {
2077
2110
  const response = await makeRequest(model, Task.COUNT_TOKENS, apiSettings, false, body, requestOptions);
2078
2111
  return response.json();
2079
2112
  }
2113
+ async function countTokens(apiSettings, model, params, chromeAdapter, requestOptions) {
2114
+ if (chromeAdapter && (await chromeAdapter.isAvailable(params))) {
2115
+ return (await chromeAdapter.countTokens(params)).json();
2116
+ }
2117
+ return countTokensOnCloud(apiSettings, model, params, requestOptions);
2118
+ }
2080
2119
 
2081
2120
  /**
2082
2121
  * @license
@@ -2099,8 +2138,9 @@ async function countTokens(apiSettings, model, params, requestOptions) {
2099
2138
  * @public
2100
2139
  */
2101
2140
  class GenerativeModel extends AIModel {
2102
- constructor(ai, modelParams, requestOptions) {
2141
+ constructor(ai, modelParams, requestOptions, chromeAdapter) {
2103
2142
  super(ai, modelParams.model);
2143
+ this.chromeAdapter = chromeAdapter;
2104
2144
  this.generationConfig = modelParams.generationConfig || {};
2105
2145
  this.safetySettings = modelParams.safetySettings || [];
2106
2146
  this.tools = modelParams.tools;
@@ -2121,7 +2161,7 @@ class GenerativeModel extends AIModel {
2121
2161
  toolConfig: this.toolConfig,
2122
2162
  systemInstruction: this.systemInstruction,
2123
2163
  ...formattedParams
2124
- }, this.requestOptions);
2164
+ }, this.chromeAdapter, this.requestOptions);
2125
2165
  }
2126
2166
  /**
2127
2167
  * Makes a single streaming call to the model
@@ -2138,14 +2178,14 @@ class GenerativeModel extends AIModel {
2138
2178
  toolConfig: this.toolConfig,
2139
2179
  systemInstruction: this.systemInstruction,
2140
2180
  ...formattedParams
2141
- }, this.requestOptions);
2181
+ }, this.chromeAdapter, this.requestOptions);
2142
2182
  }
2143
2183
  /**
2144
2184
  * Gets a new {@link ChatSession} instance which can be used for
2145
2185
  * multi-turn chats.
2146
2186
  */
2147
2187
  startChat(startChatParams) {
2148
- return new ChatSession(this._apiSettings, this.model, {
2188
+ return new ChatSession(this._apiSettings, this.model, this.chromeAdapter, {
2149
2189
  tools: this.tools,
2150
2190
  toolConfig: this.toolConfig,
2151
2191
  systemInstruction: this.systemInstruction,
@@ -2164,7 +2204,7 @@ class GenerativeModel extends AIModel {
2164
2204
  */
2165
2205
  async countTokens(request) {
2166
2206
  const formattedParams = formatGenerateContentInput(request);
2167
- return countTokens(this._apiSettings, this.model, formattedParams);
2207
+ return countTokens(this._apiSettings, this.model, formattedParams, this.chromeAdapter);
2168
2208
  }
2169
2209
  }
2170
2210
 
@@ -2282,6 +2322,290 @@ class ImagenModel extends AIModel {
2282
2322
  }
2283
2323
  }
2284
2324
 
2325
+ /**
2326
+ * @internal
2327
+ */
2328
+ var Availability;
2329
+ (function (Availability) {
2330
+ Availability["UNAVAILABLE"] = "unavailable";
2331
+ Availability["DOWNLOADABLE"] = "downloadable";
2332
+ Availability["DOWNLOADING"] = "downloading";
2333
+ Availability["AVAILABLE"] = "available";
2334
+ })(Availability || (Availability = {}));
2335
+
2336
+ /**
2337
+ * @license
2338
+ * Copyright 2025 Google LLC
2339
+ *
2340
+ * Licensed under the Apache License, Version 2.0 (the "License");
2341
+ * you may not use this file except in compliance with the License.
2342
+ * You may obtain a copy of the License at
2343
+ *
2344
+ * http://www.apache.org/licenses/LICENSE-2.0
2345
+ *
2346
+ * Unless required by applicable law or agreed to in writing, software
2347
+ * distributed under the License is distributed on an "AS IS" BASIS,
2348
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2349
+ * See the License for the specific language governing permissions and
2350
+ * limitations under the License.
2351
+ */
2352
+ /**
2353
+ * Defines an inference "backend" that uses Chrome's on-device model,
2354
+ * and encapsulates logic for detecting when on-device inference is
2355
+ * possible.
2356
+ */
2357
+ class ChromeAdapterImpl {
2358
+ constructor(languageModelProvider, mode, onDeviceParams = {
2359
+ createOptions: {
2360
+ // Defaults to support image inputs for convenience.
2361
+ expectedInputs: [{ type: 'image' }]
2362
+ }
2363
+ }) {
2364
+ this.languageModelProvider = languageModelProvider;
2365
+ this.mode = mode;
2366
+ this.onDeviceParams = onDeviceParams;
2367
+ this.isDownloading = false;
2368
+ }
2369
+ /**
2370
+ * Checks if a given request can be made on-device.
2371
+ *
2372
+ * <ol>Encapsulates a few concerns:
2373
+ * <li>the mode</li>
2374
+ * <li>API existence</li>
2375
+ * <li>prompt formatting</li>
2376
+ * <li>model availability, including triggering download if necessary</li>
2377
+ * </ol>
2378
+ *
2379
+ * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
2380
+ * <p>Cons: this method spans a few concerns and splits request validation from usage.
2381
+ * If instance variables weren't already part of the API, we could consider a better
2382
+ * separation of concerns.</p>
2383
+ */
2384
+ async isAvailable(request) {
2385
+ if (!this.mode) {
2386
+ logger.debug(`On-device inference unavailable because mode is undefined.`);
2387
+ return false;
2388
+ }
2389
+ if (this.mode === InferenceMode.ONLY_IN_CLOUD) {
2390
+ logger.debug(`On-device inference unavailable because mode is "only_in_cloud".`);
2391
+ return false;
2392
+ }
2393
+ // Triggers out-of-band download so model will eventually become available.
2394
+ const availability = await this.downloadIfAvailable();
2395
+ if (this.mode === InferenceMode.ONLY_ON_DEVICE) {
2396
+ // If it will never be available due to API inavailability, throw.
2397
+ if (availability === Availability.UNAVAILABLE) {
2398
+ throw new AIError(AIErrorCode.API_NOT_ENABLED, 'Local LanguageModel API not available in this environment.');
2399
+ }
2400
+ else if (availability === Availability.DOWNLOADABLE ||
2401
+ availability === Availability.DOWNLOADING) {
2402
+ // TODO(chholland): Better user experience during download - progress?
2403
+ logger.debug(`Waiting for download of LanguageModel to complete.`);
2404
+ await this.downloadPromise;
2405
+ return true;
2406
+ }
2407
+ return true;
2408
+ }
2409
+ // Applies prefer_on_device logic.
2410
+ if (availability !== Availability.AVAILABLE) {
2411
+ logger.debug(`On-device inference unavailable because availability is "${availability}".`);
2412
+ return false;
2413
+ }
2414
+ if (!ChromeAdapterImpl.isOnDeviceRequest(request)) {
2415
+ logger.debug(`On-device inference unavailable because request is incompatible.`);
2416
+ return false;
2417
+ }
2418
+ return true;
2419
+ }
2420
+ /**
2421
+ * Generates content on device.
2422
+ *
2423
+ * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
2424
+ * Cloud.</p>
2425
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
2426
+ * @returns {@link Response}, so we can reuse common response formatting.
2427
+ */
2428
+ async generateContent(request) {
2429
+ const session = await this.createSession();
2430
+ const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
2431
+ const text = await session.prompt(contents, this.onDeviceParams.promptOptions);
2432
+ return ChromeAdapterImpl.toResponse(text);
2433
+ }
2434
+ /**
2435
+ * Generates content stream on device.
2436
+ *
2437
+ * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
2438
+ * Cloud.</p>
2439
+ * @param request - a standard Firebase AI {@link GenerateContentRequest}
2440
+ * @returns {@link Response}, so we can reuse common response formatting.
2441
+ */
2442
+ async generateContentStream(request) {
2443
+ const session = await this.createSession();
2444
+ const contents = await Promise.all(request.contents.map(ChromeAdapterImpl.toLanguageModelMessage));
2445
+ const stream = session.promptStreaming(contents, this.onDeviceParams.promptOptions);
2446
+ return ChromeAdapterImpl.toStreamResponse(stream);
2447
+ }
2448
+ async countTokens(_request) {
2449
+ throw new AIError(AIErrorCode.REQUEST_ERROR, 'Count Tokens is not yet available for on-device model.');
2450
+ }
2451
+ /**
2452
+ * Asserts inference for the given request can be performed by an on-device model.
2453
+ */
2454
+ static isOnDeviceRequest(request) {
2455
+ // Returns false if the prompt is empty.
2456
+ if (request.contents.length === 0) {
2457
+ logger.debug('Empty prompt rejected for on-device inference.');
2458
+ return false;
2459
+ }
2460
+ for (const content of request.contents) {
2461
+ if (content.role === 'function') {
2462
+ logger.debug(`"Function" role rejected for on-device inference.`);
2463
+ return false;
2464
+ }
2465
+ // Returns false if request contains an image with an unsupported mime type.
2466
+ for (const part of content.parts) {
2467
+ if (part.inlineData &&
2468
+ ChromeAdapterImpl.SUPPORTED_MIME_TYPES.indexOf(part.inlineData.mimeType) === -1) {
2469
+ logger.debug(`Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.`);
2470
+ return false;
2471
+ }
2472
+ }
2473
+ }
2474
+ return true;
2475
+ }
2476
+ /**
2477
+ * Encapsulates logic to get availability and download a model if one is downloadable.
2478
+ */
2479
+ async downloadIfAvailable() {
2480
+ const availability = await this.languageModelProvider?.availability(this.onDeviceParams.createOptions);
2481
+ if (availability === Availability.DOWNLOADABLE) {
2482
+ this.download();
2483
+ }
2484
+ return availability;
2485
+ }
2486
+ /**
2487
+ * Triggers out-of-band download of an on-device model.
2488
+ *
2489
+ * <p>Chrome only downloads models as needed. Chrome knows a model is needed when code calls
2490
+ * LanguageModel.create.</p>
2491
+ *
2492
+ * <p>Since Chrome manages the download, the SDK can only avoid redundant download requests by
2493
+ * tracking if a download has previously been requested.</p>
2494
+ */
2495
+ download() {
2496
+ if (this.isDownloading) {
2497
+ return;
2498
+ }
2499
+ this.isDownloading = true;
2500
+ this.downloadPromise = this.languageModelProvider
2501
+ ?.create(this.onDeviceParams.createOptions)
2502
+ .finally(() => {
2503
+ this.isDownloading = false;
2504
+ });
2505
+ }
2506
+ /**
2507
+ * Converts Firebase AI {@link Content} object to a Chrome {@link LanguageModelMessage} object.
2508
+ */
2509
+ static async toLanguageModelMessage(content) {
2510
+ const languageModelMessageContents = await Promise.all(content.parts.map(ChromeAdapterImpl.toLanguageModelMessageContent));
2511
+ return {
2512
+ role: ChromeAdapterImpl.toLanguageModelMessageRole(content.role),
2513
+ content: languageModelMessageContents
2514
+ };
2515
+ }
2516
+ /**
2517
+ * Converts a Firebase AI Part object to a Chrome LanguageModelMessageContent object.
2518
+ */
2519
+ static async toLanguageModelMessageContent(part) {
2520
+ if (part.text) {
2521
+ return {
2522
+ type: 'text',
2523
+ value: part.text
2524
+ };
2525
+ }
2526
+ else if (part.inlineData) {
2527
+ const formattedImageContent = await fetch(`data:${part.inlineData.mimeType};base64,${part.inlineData.data}`);
2528
+ const imageBlob = await formattedImageContent.blob();
2529
+ const imageBitmap = await createImageBitmap(imageBlob);
2530
+ return {
2531
+ type: 'image',
2532
+ value: imageBitmap
2533
+ };
2534
+ }
2535
+ throw new AIError(AIErrorCode.REQUEST_ERROR, `Processing of this Part type is not currently supported.`);
2536
+ }
2537
+ /**
2538
+ * Converts a Firebase AI {@link Role} string to a {@link LanguageModelMessageRole} string.
2539
+ */
2540
+ static toLanguageModelMessageRole(role) {
2541
+ // Assumes 'function' role has been filtered by isOnDeviceRequest
2542
+ return role === 'model' ? 'assistant' : 'user';
2543
+ }
2544
+ /**
2545
+ * Abstracts Chrome session creation.
2546
+ *
2547
+ * <p>Chrome uses a multi-turn session for all inference. Firebase AI uses single-turn for all
2548
+ * inference. To map the Firebase AI API to Chrome's API, the SDK creates a new session for all
2549
+ * inference.</p>
2550
+ *
2551
+ * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
2552
+ * new session is created before an old session is destroyed.</p>
2553
+ */
2554
+ async createSession() {
2555
+ if (!this.languageModelProvider) {
2556
+ throw new AIError(AIErrorCode.UNSUPPORTED, 'Chrome AI requested for unsupported browser version.');
2557
+ }
2558
+ const newSession = await this.languageModelProvider.create(this.onDeviceParams.createOptions);
2559
+ if (this.oldSession) {
2560
+ this.oldSession.destroy();
2561
+ }
2562
+ // Holds session reference, so model isn't unloaded from memory.
2563
+ this.oldSession = newSession;
2564
+ return newSession;
2565
+ }
2566
+ /**
2567
+ * Formats string returned by Chrome as a {@link Response} returned by Firebase AI.
2568
+ */
2569
+ static toResponse(text) {
2570
+ return {
2571
+ json: async () => ({
2572
+ candidates: [
2573
+ {
2574
+ content: {
2575
+ parts: [{ text }]
2576
+ }
2577
+ }
2578
+ ]
2579
+ })
2580
+ };
2581
+ }
2582
+ /**
2583
+ * Formats string stream returned by Chrome as SSE returned by Firebase AI.
2584
+ */
2585
+ static toStreamResponse(stream) {
2586
+ const encoder = new TextEncoder();
2587
+ return {
2588
+ body: stream.pipeThrough(new TransformStream({
2589
+ transform(chunk, controller) {
2590
+ const json = JSON.stringify({
2591
+ candidates: [
2592
+ {
2593
+ content: {
2594
+ role: 'model',
2595
+ parts: [{ text: chunk }]
2596
+ }
2597
+ }
2598
+ ]
2599
+ });
2600
+ controller.enqueue(encoder.encode(`data: ${json}\n\n`));
2601
+ }
2602
+ }))
2603
+ };
2604
+ }
2605
+ }
2606
+ // Visible for testing
2607
+ ChromeAdapterImpl.SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png'];
2608
+
2285
2609
  /**
2286
2610
  * @license
2287
2611
  * Copyright 2024 Google LLC
@@ -2649,10 +2973,26 @@ function getAI(app = getApp(), options = { backend: new GoogleAIBackend() }) {
2649
2973
  * @public
2650
2974
  */
2651
2975
  function getGenerativeModel(ai, modelParams, requestOptions) {
2652
- if (!modelParams.model) {
2976
+ // Uses the existence of HybridParams.mode to clarify the type of the modelParams input.
2977
+ const hybridParams = modelParams;
2978
+ let inCloudParams;
2979
+ if (hybridParams.mode) {
2980
+ inCloudParams = hybridParams.inCloudParams || {
2981
+ model: DEFAULT_HYBRID_IN_CLOUD_MODEL
2982
+ };
2983
+ }
2984
+ else {
2985
+ inCloudParams = modelParams;
2986
+ }
2987
+ if (!inCloudParams.model) {
2653
2988
  throw new AIError(AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })`);
2654
2989
  }
2655
- return new GenerativeModel(ai, modelParams, requestOptions);
2990
+ let chromeAdapter;
2991
+ // Do not initialize a ChromeAdapter if we are not in hybrid mode.
2992
+ if (typeof window !== 'undefined' && hybridParams.mode) {
2993
+ chromeAdapter = new ChromeAdapterImpl(window.LanguageModel, hybridParams.mode, hybridParams.onDeviceParams);
2994
+ }
2995
+ return new GenerativeModel(ai, inCloudParams, requestOptions, chromeAdapter);
2656
2996
  }
2657
2997
  /**
2658
2998
  * Returns an {@link ImagenModel} class with methods for using Imagen.
@@ -2698,5 +3038,5 @@ function registerAI() {
2698
3038
  }
2699
3039
  registerAI();
2700
3040
 
2701
- export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, IntegerSchema, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel };
3041
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel };
2702
3042
  //# sourceMappingURL=index.node.mjs.map