@firebase/ai 1.2.2-canary.f92069a21 → 1.2.2-eap-ai-hybridinference.58d92df33

This diff compares the contents of two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
@@ -150,6 +150,13 @@ export declare class ArraySchema extends Schema {
     /* Excluded from this release type: toJSON */
 }
 
+declare enum Availability {
+    'unavailable' = "unavailable",
+    'downloadable' = "downloadable",
+    'downloading' = "downloading",
+    'available' = "available"
+}
+
 /**
  * Abstract base class representing the configuration for an AI service backend.
  * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
@@ -251,12 +258,13 @@ export declare class BooleanSchema extends Schema {
  */
 export declare class ChatSession {
     model: string;
+    private chromeAdapter;
     params?: StartChatParams | undefined;
     requestOptions?: RequestOptions | undefined;
     private _apiSettings;
     private _history;
     private _sendPromise;
-    constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+    constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
     /**
      * Gets the chat history so far. Blocked prompts are not added to history.
      * Neither blocked candidates nor the prompts that generated them are added
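
The `ChatSession` constructor now threads a `ChromeAdapter` through each session, so multi-turn chat can also run on-device. The constructor itself is SDK-internal; app code keeps obtaining sessions from `GenerativeModel.startChat`. A minimal sketch, assuming the public `firebase/ai` entry point exposes this EAP surface and accepts the hybrid params shown in the widened `getGenerativeModel` signature further down:

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });
const ai = getAI(app);
// Hybrid model: prefers Chrome's on-device model, falls back to the cloud.
const model = getGenerativeModel(ai, { mode: 'prefer_on_device' });

async function chatDemo(): Promise<void> {
  const chat = model.startChat(); // returns a ChatSession; routing is internal
  const first = await chat.sendMessage('Hello! What can you do on-device?');
  console.log(first.response.text());
  const second = await chat.sendMessage('Summarize that in one sentence.');
  console.log(second.response.text());
}
```
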
@@ -276,6 +284,105 @@ export declare class ChatSession {
     sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
 }
 
+/**
+ * Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device is possible.
+ */
+declare class ChromeAdapter {
+    private languageModelProvider?;
+    private mode?;
+    private onDeviceParams;
+    static SUPPORTED_MIME_TYPES: string[];
+    private isDownloading;
+    private downloadPromise;
+    private oldSession;
+    constructor(languageModelProvider?: LanguageModel | undefined, mode?: InferenceMode | undefined, onDeviceParams?: OnDeviceParams);
+    /**
+     * Checks if a given request can be made on-device.
+     *
+     * <ol>Encapsulates a few concerns:
+     * <li>the mode</li>
+     * <li>API existence</li>
+     * <li>prompt formatting</li>
+     * <li>model availability, including triggering download if necessary</li>
+     * </ol>
+     *
+     * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
+     * <p>Cons: this method spans a few concerns and splits request validation from usage.
+     * If instance variables weren't already part of the API, we could consider a better
+     * separation of concerns.</p>
+     */
+    isAvailable(request: GenerateContentRequest): Promise<boolean>;
+    /**
+     * Generates content on device.
+     *
+     * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
+     * Cloud.</p>
+     * @param request - a standard Vertex {@link GenerateContentRequest}
+     * @returns {@link Response}, so we can reuse common response formatting.
+     */
+    generateContent(request: GenerateContentRequest): Promise<Response>;
+    /**
+     * Generates content stream on device.
+     *
+     * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+     * Cloud.</p>
+     * @param request - a standard Vertex {@link GenerateContentRequest}
+     * @returns {@link Response}, so we can reuse common response formatting.
+     */
+    generateContentStream(request: GenerateContentRequest): Promise<Response>;
+    countTokens(_request: CountTokensRequest): Promise<Response>;
+    /**
+     * Asserts inference for the given request can be performed by an on-device model.
+     */
+    private static isOnDeviceRequest;
+    /**
+     * Encapsulates logic to get availability and download a model if one is downloadable.
+     */
+    private downloadIfAvailable;
+    /**
+     * Triggers out-of-band download of an on-device model.
+     *
+     * <p>Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+     * LanguageModel.create.</p>
+     *
+     * <p>Since Chrome manages the download, the SDK can only avoid redundant download requests by
+     * tracking if a download has previously been requested.</p>
+     */
+    private download;
+    /**
+     * Converts Vertex {@link Content} object to a Chrome {@link LanguageModelMessage} object.
+     */
+    private static toLanguageModelMessage;
+    /**
+     * Converts a Vertex Part object to a Chrome LanguageModelMessageContent object.
+     */
+    private static toLanguageModelMessageContent;
+    /**
+     * Converts a Vertex {@link Role} string to a {@link LanguageModelMessageRole} string.
+     */
+    private static toLanguageModelMessageRole;
+    /**
+     * Abstracts Chrome session creation.
+     *
+     * <p>Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
+     * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+     * inference.</p>
+     *
+     * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+     * new session is created before an old session is destroyed.</p>
+     */
+    private createSession;
+    /**
+     * Formats string returned by Chrome as a {@link Response} returned by Vertex.
+     */
+    private static toResponse;
+    /**
+     * Formats string stream returned by Chrome as SSE returned by Vertex.
+     */
+    private static toStreamResponse;
+}
+
 /**
  * A single citation.
  * @public
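
`ChromeAdapter` is not exported; it is the internal seam between the public API and Chrome's on-device model. Conceptually, `prefer_on_device` reduces to a dispatch like the following hypothetical sketch — not the SDK's actual code, just the pattern its declared methods imply:

```ts
// Hypothetical stand-ins for the two backends; `OnDeviceBackend` plays the
// role of ChromeAdapter, `CloudBackend` the existing in-cloud path.
interface OnDeviceBackend {
  isAvailable(request: unknown): Promise<boolean>;
  generateContent(request: unknown): Promise<Response>;
}
interface CloudBackend {
  generateContent(request: unknown): Promise<Response>;
}

async function routeGenerateContent(
  adapter: OnDeviceBackend,
  cloud: CloudBackend,
  request: unknown
): Promise<Response> {
  // isAvailable() folds together the mode, Prompt API existence, prompt
  // shape, and model availability (triggering a download when the model
  // is merely 'downloadable').
  if (await adapter.isAvailable(request)) {
    return adapter.generateContent(request); // on-device
  }
  return cloud.generateContent(request); // fall back to the cloud
}
```
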
@@ -735,13 +842,18 @@ export declare interface GenerativeContentBlob {
  * @public
  */
 export declare class GenerativeModel extends AIModel {
+    private chromeAdapter;
+    /**
+     * Defines the name of the default in-cloud model to use for hybrid inference.
+     */
+    static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
     generationConfig: GenerationConfig;
     safetySettings: SafetySetting[];
     requestOptions?: RequestOptions;
     tools?: Tool[];
     toolConfig?: ToolConfig;
     systemInstruction?: Content;
-    constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+    constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
     /**
      * Makes a single non-streaming call to the model
      * and returns an object containing a single {@link GenerateContentResponse}.
@@ -801,7 +913,7 @@ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
  *
  * @public
  */
-export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
 
 /**
  * Returns an {@link ImagenModel} class with methods for using Imagen.
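
With the widened parameter type, `getGenerativeModel` accepts either classic `ModelParams` or the new `HybridParams` (declared below). A minimal sketch of both call shapes; the model name is illustrative and the `firebase/ai` import path assumes the public entry point re-exports this EAP surface:

```ts
import { getGenerativeModel, type AI } from 'firebase/ai';

declare const ai: AI; // obtained from getAI(app), as in the earlier sketch

async function demo(): Promise<void> {
  // Existing call shape: ModelParams pins an in-cloud model.
  const cloudOnly = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });

  // New call shape: HybridParams selects hybrid inference. When inCloudParams
  // is omitted, GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL presumably
  // supplies the cloud fallback (per its doc comment above).
  const hybrid = getGenerativeModel(ai, { mode: 'prefer_on_device' });

  const result = await hybrid.generateContent('Why is the sky blue?');
  console.log(result.response.text(), cloudOnly.model);
}
```
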
@@ -985,6 +1097,24 @@ export declare enum HarmSeverity {
     HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED"
 }
 
+/**
+ * Toggles hybrid inference.
+ */
+export declare interface HybridParams {
+    /**
+     * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+     */
+    mode: InferenceMode;
+    /**
+     * Optional. Specifies advanced params for on-device inference.
+     */
+    onDeviceParams?: OnDeviceParams;
+    /**
+     * Optional. Specifies advanced params for in-cloud inference.
+     */
+    inCloudParams?: ModelParams;
+}
+
 /**
  * Aspect ratios for Imagen images.
  *
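
All three `HybridParams` fields used together might look like the sketch below (sampling values and model name are illustrative, and the type import assumes `HybridParams` is re-exported from `firebase/ai`):

```ts
import type { HybridParams } from 'firebase/ai'; // assumption: type is re-exported

const hybridParams: HybridParams = {
  // Try Chrome's on-device model first; fall back to the cloud when the
  // device, browser, or request can't be served locally.
  mode: 'prefer_on_device',
  onDeviceParams: {
    createOptions: { temperature: 0.8, topK: 3 }
  },
  inCloudParams: { model: 'gemini-2.0-flash' } // overrides the default hybrid model
};
```
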
@@ -1393,6 +1523,11 @@ export declare interface ImagenSafetySettings {
     personFilterLevel?: ImagenPersonFilterLevel;
 }
 
+/**
+ * Determines whether inference happens on-device or in-cloud.
+ */
+export declare type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+
 /**
  * Content part interface if the part represents an image.
  * @public
@@ -1416,6 +1551,74 @@ export declare class IntegerSchema extends Schema {
     constructor(schemaParams?: SchemaParams);
 }
 
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * The subset of the Prompt API
+ * ({@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl})
+ * required for hybrid functionality.
+ */
+declare interface LanguageModel extends EventTarget {
+    create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
+    availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
+    prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
+    promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
+    measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
+    destroy(): undefined;
+}
+
+export declare interface LanguageModelCreateCoreOptions {
+    topK?: number;
+    temperature?: number;
+    expectedInputs?: LanguageModelExpected[];
+}
+
+export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+    signal?: AbortSignal;
+    initialPrompts?: LanguageModelMessage[];
+}
+
+export declare interface LanguageModelExpected {
+    type: LanguageModelMessageType;
+    languages?: string[];
+}
+
+export declare interface LanguageModelMessage {
+    role: LanguageModelMessageRole;
+    content: LanguageModelMessageContent[];
+}
+
+export declare interface LanguageModelMessageContent {
+    type: LanguageModelMessageType;
+    value: LanguageModelMessageContentValue;
+}
+
+export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
+declare type LanguageModelPrompt = LanguageModelMessage[];
+
+declare interface LanguageModelPromptOptions {
+    responseConstraint?: object;
+}
+
 /**
  * Content part modality.
  * @public
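
Driving this Prompt API subset directly (outside the SDK) follows an availability-then-create flow. A self-contained sketch with local stand-in types mirroring the interfaces above; it assumes Chrome exposes an experimental global `LanguageModel` object, which is gated behind browser flags:

```ts
type Availability = 'unavailable' | 'downloadable' | 'downloading' | 'available';

interface PromptMessage {
  role: 'system' | 'user' | 'assistant';
  content: Array<{ type: 'text'; value: string }>;
}

// Assumption: Chrome's experimental global; shape mirrors the diff above.
declare const LanguageModel: {
  availability(): Promise<Availability>;
  create(options?: { temperature?: number; topK?: number }): Promise<{
    prompt(input: PromptMessage[]): Promise<string>;
    destroy(): void;
  }>;
};

async function promptOnDevice(text: string): Promise<string | null> {
  const availability = await LanguageModel.availability();
  if (availability === 'unavailable') {
    return null; // no usable on-device model on this browser/device
  }
  // For 'downloadable'/'downloading', create() triggers or awaits the download.
  const session = await LanguageModel.create({ temperature: 1.0, topK: 3 });
  try {
    return await session.prompt([
      { role: 'user', content: [{ type: 'text', value: text }] }
    ]);
  } finally {
    session.destroy(); // Chrome may evict idle models; release explicitly
  }
}
```
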
@@ -1503,6 +1706,14 @@ export declare interface ObjectSchemaInterface extends SchemaInterface {
     optionalProperties?: string[];
 }
 
+/**
+ * Encapsulates configuration for on-device inference.
+ */
+export declare interface OnDeviceParams {
+    createOptions?: LanguageModelCreateOptions;
+    promptOptions?: LanguageModelPromptOptions;
+}
+
 /**
  * Content part - includes text, image/video, or function call/response
  * part types.
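
Judging by the types, `createOptions` feeds `LanguageModel.create` and `promptOptions` feeds each `prompt()`/`promptStreaming()` call. A hedged sketch (the type import assumes `OnDeviceParams` is re-exported from `firebase/ai`; the Prompt API proposal treats `responseConstraint` as a JSON-Schema-like object):

```ts
import type { OnDeviceParams } from 'firebase/ai'; // assumption: type is re-exported

const onDeviceParams: OnDeviceParams = {
  // Passed to LanguageModel.create(); declaring expected inputs up front lets
  // the browser fetch any extra model assets (e.g. image support).
  createOptions: {
    temperature: 0.5,
    topK: 3,
    expectedInputs: [{ type: 'image' }]
  },
  // Passed to each prompt call; constrains the model to structured output.
  promptOptions: {
    responseConstraint: {
      type: 'object',
      properties: { answer: { type: 'string' } }
    }
  }
};
```
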
package/dist/ai.d.ts CHANGED
@@ -187,6 +187,13 @@ export declare class ArraySchema extends Schema {
     toJSON(): SchemaRequest;
 }
 
+declare enum Availability {
+    'unavailable' = "unavailable",
+    'downloadable' = "downloadable",
+    'downloading' = "downloading",
+    'available' = "available"
+}
+
 /**
  * Abstract base class representing the configuration for an AI service backend.
  * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
@@ -288,12 +295,13 @@ export declare class BooleanSchema extends Schema {
  */
 export declare class ChatSession {
     model: string;
+    private chromeAdapter;
     params?: StartChatParams | undefined;
     requestOptions?: RequestOptions | undefined;
     private _apiSettings;
     private _history;
     private _sendPromise;
-    constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+    constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
     /**
      * Gets the chat history so far. Blocked prompts are not added to history.
      * Neither blocked candidates nor the prompts that generated them are added
@@ -313,6 +321,105 @@ export declare class ChatSession {
     sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
 }
 
+/**
+ * Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device is possible.
+ */
+declare class ChromeAdapter {
+    private languageModelProvider?;
+    private mode?;
+    private onDeviceParams;
+    static SUPPORTED_MIME_TYPES: string[];
+    private isDownloading;
+    private downloadPromise;
+    private oldSession;
+    constructor(languageModelProvider?: LanguageModel | undefined, mode?: InferenceMode | undefined, onDeviceParams?: OnDeviceParams);
+    /**
+     * Checks if a given request can be made on-device.
+     *
+     * <ol>Encapsulates a few concerns:
+     * <li>the mode</li>
+     * <li>API existence</li>
+     * <li>prompt formatting</li>
+     * <li>model availability, including triggering download if necessary</li>
+     * </ol>
+     *
+     * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
+     * <p>Cons: this method spans a few concerns and splits request validation from usage.
+     * If instance variables weren't already part of the API, we could consider a better
+     * separation of concerns.</p>
+     */
+    isAvailable(request: GenerateContentRequest): Promise<boolean>;
+    /**
+     * Generates content on device.
+     *
+     * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
+     * Cloud.</p>
+     * @param request - a standard Vertex {@link GenerateContentRequest}
+     * @returns {@link Response}, so we can reuse common response formatting.
+     */
+    generateContent(request: GenerateContentRequest): Promise<Response>;
+    /**
+     * Generates content stream on device.
+     *
+     * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+     * Cloud.</p>
+     * @param request - a standard Vertex {@link GenerateContentRequest}
+     * @returns {@link Response}, so we can reuse common response formatting.
+     */
+    generateContentStream(request: GenerateContentRequest): Promise<Response>;
+    countTokens(_request: CountTokensRequest): Promise<Response>;
+    /**
+     * Asserts inference for the given request can be performed by an on-device model.
+     */
+    private static isOnDeviceRequest;
+    /**
+     * Encapsulates logic to get availability and download a model if one is downloadable.
+     */
+    private downloadIfAvailable;
+    /**
+     * Triggers out-of-band download of an on-device model.
+     *
+     * <p>Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+     * LanguageModel.create.</p>
+     *
+     * <p>Since Chrome manages the download, the SDK can only avoid redundant download requests by
+     * tracking if a download has previously been requested.</p>
+     */
+    private download;
+    /**
+     * Converts Vertex {@link Content} object to a Chrome {@link LanguageModelMessage} object.
+     */
+    private static toLanguageModelMessage;
+    /**
+     * Converts a Vertex Part object to a Chrome LanguageModelMessageContent object.
+     */
+    private static toLanguageModelMessageContent;
+    /**
+     * Converts a Vertex {@link Role} string to a {@link LanguageModelMessageRole} string.
+     */
+    private static toLanguageModelMessageRole;
+    /**
+     * Abstracts Chrome session creation.
+     *
+     * <p>Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
+     * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+     * inference.</p>
+     *
+     * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+     * new session is created before an old session is destroyed.</p>
+     */
+    private createSession;
+    /**
+     * Formats string returned by Chrome as a {@link Response} returned by Vertex.
+     */
+    private static toResponse;
+    /**
+     * Formats string stream returned by Chrome as SSE returned by Vertex.
+     */
+    private static toStreamResponse;
+}
+
 /**
  * A single citation.
  * @public
@@ -772,13 +879,18 @@ export declare interface GenerativeContentBlob {
  * @public
  */
 export declare class GenerativeModel extends AIModel {
+    private chromeAdapter;
+    /**
+     * Defines the name of the default in-cloud model to use for hybrid inference.
+     */
+    static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
     generationConfig: GenerationConfig;
     safetySettings: SafetySetting[];
     requestOptions?: RequestOptions;
     tools?: Tool[];
     toolConfig?: ToolConfig;
     systemInstruction?: Content;
-    constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+    constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
     /**
      * Makes a single non-streaming call to the model
      * and returns an object containing a single {@link GenerateContentResponse}.
@@ -838,7 +950,7 @@ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
  *
  * @public
  */
-export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
 
 /**
  * Returns an {@link ImagenModel} class with methods for using Imagen.
@@ -1056,6 +1168,24 @@ export declare enum HarmSeverity {
     HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED"
 }
 
+/**
+ * Toggles hybrid inference.
+ */
+export declare interface HybridParams {
+    /**
+     * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+     */
+    mode: InferenceMode;
+    /**
+     * Optional. Specifies advanced params for on-device inference.
+     */
+    onDeviceParams?: OnDeviceParams;
+    /**
+     * Optional. Specifies advanced params for in-cloud inference.
+     */
+    inCloudParams?: ModelParams;
+}
+
 /**
  * Aspect ratios for Imagen images.
  *
@@ -1483,6 +1613,11 @@ export declare interface ImagenSafetySettings {
     personFilterLevel?: ImagenPersonFilterLevel;
 }
 
+/**
+ * Determines whether inference happens on-device or in-cloud.
+ */
+export declare type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+
 /**
  * Content part interface if the part represents an image.
  * @public
@@ -1506,6 +1641,74 @@ export declare class IntegerSchema extends Schema {
     constructor(schemaParams?: SchemaParams);
 }
 
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * The subset of the Prompt API
+ * ({@see https://github.com/webmachinelearning/prompt-api#full-api-surface-in-web-idl})
+ * required for hybrid functionality.
+ */
+declare interface LanguageModel extends EventTarget {
+    create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
+    availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
+    prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
+    promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
+    measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
+    destroy(): undefined;
+}
+
+export declare interface LanguageModelCreateCoreOptions {
+    topK?: number;
+    temperature?: number;
+    expectedInputs?: LanguageModelExpected[];
+}
+
+export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+    signal?: AbortSignal;
+    initialPrompts?: LanguageModelMessage[];
+}
+
+export declare interface LanguageModelExpected {
+    type: LanguageModelMessageType;
+    languages?: string[];
+}
+
+export declare interface LanguageModelMessage {
+    role: LanguageModelMessageRole;
+    content: LanguageModelMessageContent[];
+}
+
+export declare interface LanguageModelMessageContent {
+    type: LanguageModelMessageType;
+    value: LanguageModelMessageContentValue;
+}
+
+export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
+declare type LanguageModelPrompt = LanguageModelMessage[];
+
+declare interface LanguageModelPromptOptions {
+    responseConstraint?: object;
+}
+
 /**
  * Content part modality.
  * @public
@@ -1596,6 +1799,14 @@ export declare interface ObjectSchemaInterface extends SchemaInterface {
     optionalProperties?: string[];
 }
 
+/**
+ * Encapsulates configuration for on-device inference.
+ */
+export declare interface OnDeviceParams {
+    createOptions?: LanguageModelCreateOptions;
+    promptOptions?: LanguageModelPromptOptions;
+}
+
 /**
  * Content part - includes text, image/video, or function call/response
  * part types.