@firebase/ai 1.2.2-canary.f92069a21 → 1.2.2-eap-ai-hybridinference.c16cbf1a3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -150,6 +150,13 @@ export declare class ArraySchema extends Schema {
      /* Excluded from this release type: toJSON */
  }
 
+ declare enum Availability {
+     'unavailable' = "unavailable",
+     'downloadable' = "downloadable",
+     'downloading' = "downloading",
+     'available' = "available"
+ }
+
  /**
   * Abstract base class representing the configuration for an AI service backend.
   * This class should not be instantiated directly. Use its subclasses; {@link GoogleAIBackend} for
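The new Availability enum mirrors the model-availability states reported by Chrome's built-in Prompt API. Below is a minimal sketch of how a caller might branch on those states; the global LanguageModel object and its create() behavior are assumptions about Chrome's experimental API, not part of this SDK.

    // Sketch only: the string values match the Availability enum above; the
    // global `LanguageModel` shape is an assumption about Chrome's Prompt API.
    type AvailabilityValue = 'unavailable' | 'downloadable' | 'downloading' | 'available';

    declare const LanguageModel: {
      availability(): Promise<AvailabilityValue>;
      create(): Promise<unknown>;
    };

    async function canRunOnDevice(): Promise<boolean> {
      const status = await LanguageModel.availability();
      if (status === 'available') {
        return true;                   // model already downloaded and ready
      }
      if (status === 'downloadable' || status === 'downloading') {
        await LanguageModel.create();  // create() triggers the out-of-band download
        return true;
      }
      return false;                    // 'unavailable': fall back to the cloud
    }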
@@ -251,12 +258,13 @@ export declare class BooleanSchema extends Schema {
   */
  export declare class ChatSession {
      model: string;
+     private chromeAdapter;
      params?: StartChatParams | undefined;
      requestOptions?: RequestOptions | undefined;
      private _apiSettings;
      private _history;
      private _sendPromise;
-     constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+     constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
      /**
       * Gets the chat history so far. Blocked prompts are not added to history.
       * Neither blocked candidates nor the prompts that generated them are added
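The extra chromeAdapter constructor parameter is wired up internally by getGenerativeModel, so chat usage is unchanged from the caller's point of view. A hedged sketch, assuming a model created with the HybridParams overload shown further down; the '@firebase/ai' import path refers to this package, while app code usually imports the same symbols via the firebase umbrella package.

    import type { GenerativeModel } from '@firebase/ai';

    // Assumes `model` was created via getGenerativeModel (see the HybridParams
    // example below); the chat API is the same whether inference runs
    // on-device or in the cloud.
    declare const model: GenerativeModel;

    async function chatDemo(): Promise<void> {
      const chat = model.startChat();
      const first = await chat.sendMessage('Give me three taglines for a bakery.');
      console.log(first.response.text());

      const followUp = await chat.sendMessage('Make the second one shorter.');
      console.log(followUp.response.text());
    }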
@@ -276,6 +284,97 @@ export declare class ChatSession {
      sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
  }
 
+ /**
+  * Defines an inference "backend" that uses Chrome's on-device model,
+  * and encapsulates logic for detecting when on-device is possible.
+  */
+ declare class ChromeAdapter {
+     private languageModelProvider?;
+     private mode?;
+     private onDeviceParams;
+     static SUPPORTED_MIME_TYPES: string[];
+     private isDownloading;
+     private downloadPromise;
+     private oldSession;
+     constructor(languageModelProvider?: LanguageModel | undefined, mode?: InferenceMode | undefined, onDeviceParams?: OnDeviceParams);
+     /**
+      * Checks if a given request can be made on-device.
+      *
+      * <ol>Encapsulates a few concerns:
+      * <li>the mode</li>
+      * <li>API existence</li>
+      * <li>prompt formatting</li>
+      * <li>model availability, including triggering download if necessary</li>
+      * </ol>
+      *
+      * <p>Pros: callers needn't be concerned with details of on-device availability.</p>
+      * <p>Cons: this method spans a few concerns and splits request validation from usage.
+      * If instance variables weren't already part of the API, we could consider a better
+      * separation of concerns.</p>
+      */
+     isAvailable(request: GenerateContentRequest): Promise<boolean>;
+     /**
+      * Generates content on device.
+      *
+      * <p>This is comparable to {@link GenerativeModel.generateContent} for generating content in
+      * Cloud.</p>
+      * @param request a standard Vertex {@link GenerateContentRequest}
+      * @returns {@link Response}, so we can reuse common response formatting.
+      */
+     generateContent(request: GenerateContentRequest): Promise<Response>;
+     /**
+      * Generates content stream on device.
+      *
+      * <p>This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+      * Cloud.</p>
+      * @param request a standard Vertex {@link GenerateContentRequest}
+      * @returns {@link Response}, so we can reuse common response formatting.
+      */
+     generateContentStream(request: GenerateContentRequest): Promise<Response>;
+     countTokens(_request: CountTokensRequest): Promise<Response>;
+     /**
+      * Asserts inference for the given request can be performed by an on-device model.
+      */
+     private static isOnDeviceRequest;
+     /**
+      * Encapsulates logic to get availability and download a model if one is downloadable.
+      */
+     private downloadIfAvailable;
+     /**
+      * Triggers out-of-band download of an on-device model.
+      *
+      * <p>Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+      * LanguageModel.create.</p>
+      *
+      * <p>Since Chrome manages the download, the SDK can only avoid redundant download requests by
+      * tracking if a download has previously been requested.</p>
+      */
+     private download;
+     /**
+      * Converts a Vertex Part object to a Chrome LanguageModelMessageContent object.
+      */
+     private static toLanguageModelMessageContent;
+     /**
+      * Abstracts Chrome session creation.
+      *
+      * <p>Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
+      * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+      * inference.</p>
+      *
+      * <p>Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+      * new session is created before an old session is destroyed.</p>
+      */
+     private createSession;
+     /**
+      * Formats string returned by Chrome as a {@link Response} returned by Vertex.
+      */
+     private static toResponse;
+     /**
+      * Formats string stream returned by Chrome as SSE returned by Vertex.
+      */
+     private static toStreamResponse;
+ }
+
  /**
   * A single citation.
   * @public
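ChromeAdapter is declared but not exported, so it stays internal to the SDK. Its doc comments describe a simple dispatch pattern: validate the request with isAvailable, then route it either on-device or to the cloud. A hedged sketch of that pattern with stand-in names (AdapterLike, callCloudGenerateContent, and the request type are hypothetical, not SDK internals):

    // Stand-in shape for the parts of ChromeAdapter the dispatch relies on.
    interface AdapterLike {
      isAvailable(request: unknown): Promise<boolean>;
      generateContent(request: unknown): Promise<Response>;
    }

    async function generateContentHybrid(
      adapter: AdapterLike,
      request: unknown,
      callCloudGenerateContent: (request: unknown) => Promise<Response>
    ): Promise<Response> {
      // isAvailable folds mode, API existence, prompt format, and model
      // availability (including download) into a single check.
      if (await adapter.isAvailable(request)) {
        return adapter.generateContent(request);  // on-device, via Chrome
      }
      return callCloudGenerateContent(request);   // fall back to the cloud API
    }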
@@ -735,13 +834,18 @@ export declare interface GenerativeContentBlob {
   * @public
   */
  export declare class GenerativeModel extends AIModel {
+     private chromeAdapter;
+     /**
+      * Defines the name of the default in-cloud model to use for hybrid inference.
+      */
+     static DEFAULT_HYBRID_IN_CLOUD_MODEL: string;
      generationConfig: GenerationConfig;
      safetySettings: SafetySetting[];
      requestOptions?: RequestOptions;
      tools?: Tool[];
      toolConfig?: ToolConfig;
      systemInstruction?: Content;
-     constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+     constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions);
      /**
       * Makes a single non-streaming call to the model
       * and returns an object containing a single {@link GenerateContentResponse}.
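GenerativeModel also gains a public static DEFAULT_HYBRID_IN_CLOUD_MODEL. The typings do not name the concrete model; presumably it is the cloud model used when HybridParams.inCloudParams omits one, and it can be inspected at runtime:

    import { GenerativeModel } from '@firebase/ai';

    // The concrete value is not spelled out in the typings; log it to see
    // which cloud model hybrid inference falls back to by default.
    console.log('Default hybrid in-cloud model:', GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL);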
@@ -801,7 +905,7 @@ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
   *
   * @public
   */
- export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;
 
  /**
   * Returns an {@link ImagenModel} class with methods for using Imagen.
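getGenerativeModel now accepts either the existing ModelParams (cloud-only) or the new HybridParams. A minimal sketch of both call shapes, assuming a Firebase app has been initialized; the model name is illustrative, not an SDK default.

    import { initializeApp } from 'firebase/app';
    import { getAI, getGenerativeModel } from '@firebase/ai';

    const app = initializeApp({ /* your Firebase config */ });
    const ai = getAI(app);

    // Cloud-only, exactly as before:
    const cloudModel = getGenerativeModel(ai, { model: 'gemini-2.0-flash' });

    // Hybrid: prefer Chrome's on-device model, falling back to the cloud.
    const hybridModel = getGenerativeModel(ai, { mode: 'prefer_on_device' });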
@@ -985,6 +1089,24 @@ export declare enum HarmSeverity {
      HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED"
  }
 
+ /**
+  * Toggles hybrid inference.
+  */
+ export declare interface HybridParams {
+     /**
+      * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+      */
+     mode: InferenceMode;
+     /**
+      * Optional. Specifies advanced params for on-device inference.
+      */
+     onDeviceParams?: OnDeviceParams;
+     /**
+      * Optional. Specifies advanced params for in-cloud inference.
+      */
+     inCloudParams?: ModelParams;
+ }
+
  /**
   * Aspect ratios for Imagen images.
   *
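All three HybridParams fields together, as a hedged sketch; the model name and option values are illustrative choices, not SDK defaults.

    import { getAI, getGenerativeModel, HybridParams } from '@firebase/ai';

    const params: HybridParams = {
      mode: 'prefer_on_device',
      onDeviceParams: {
        // Passed through to Chrome's LanguageModel.create()
        createOptions: { temperature: 0.8, topK: 3 }
      },
      inCloudParams: {
        // Used when the request cannot be served on-device
        model: 'gemini-2.0-flash'
      }
    };

    const model = getGenerativeModel(getAI(), params);

    async function run(): Promise<void> {
      const result = await model.generateContent('Write a haiku about caching.');
      console.log(result.response.text());
    }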
@@ -1393,6 +1515,11 @@ export declare interface ImagenSafetySettings {
      personFilterLevel?: ImagenPersonFilterLevel;
  }
 
+ /**
+  * Determines whether inference happens on-device or in-cloud.
+  */
+ export declare type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud';
+
  /**
   * Content part interface if the part represents an image.
   * @public
@@ -1416,6 +1543,77 @@ export declare class IntegerSchema extends Schema {
      constructor(schemaParams?: SchemaParams);
  }
 
+ /**
+  * @license
+  * Copyright 2025 Google LLC
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *   http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ declare interface LanguageModel extends EventTarget {
+     create(options?: LanguageModelCreateOptions): Promise<LanguageModel>;
+     availability(options?: LanguageModelCreateCoreOptions): Promise<Availability>;
+     prompt(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<string>;
+     promptStreaming(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): ReadableStream;
+     measureInputUsage(input: LanguageModelPrompt, options?: LanguageModelPromptOptions): Promise<number>;
+     destroy(): undefined;
+ }
+
+ declare interface LanguageModelCreateCoreOptions {
+     topK?: number;
+     temperature?: number;
+     expectedInputs?: LanguageModelExpectedInput[];
+ }
+
+ declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+     signal?: AbortSignal;
+     systemPrompt?: string;
+     initialPrompts?: LanguageModelInitialPrompts;
+ }
+
+ declare interface LanguageModelExpectedInput {
+     type: LanguageModelMessageType;
+     languages?: string[];
+ }
+
+ declare type LanguageModelInitialPrompts = LanguageModelMessage[] | LanguageModelMessageShorthand[];
+
+ declare interface LanguageModelMessage {
+     role: LanguageModelMessageRole;
+     content: LanguageModelMessageContent[];
+ }
+
+ declare interface LanguageModelMessageContent {
+     type: LanguageModelMessageType;
+     content: LanguageModelMessageContentValue;
+ }
+
+ declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+ declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+ declare interface LanguageModelMessageShorthand {
+     role: LanguageModelMessageRole;
+     content: string;
+ }
+
+ declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
+ declare type LanguageModelPrompt = LanguageModelMessageContent[];
+
+ declare interface LanguageModelPromptOptions {
+     responseConstraint?: object;
+ }
+
  /**
   * Content part modality.
   * @public
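These LanguageModel* declarations are not exported; they model the surface of Chrome's experimental Prompt API that ChromeAdapter talks to. A hedged sketch of calling that API directly, outside the SDK; the global entry point, and the assumption that create() returns a promptable session, reflect the interface above rather than a documented Chrome guarantee.

    // Local copies of the shapes above, so the sketch is self-contained.
    type MessageType = 'text' | 'image' | 'audio';
    interface MessageContent { type: MessageType; content: string; }
    interface PromptSession {
      prompt(input: MessageContent[]): Promise<string>;
      promptStreaming(input: MessageContent[]): ReadableStream;
      destroy(): undefined;
    }

    // Assumption: Chrome exposes this as a global in supporting builds.
    declare const LanguageModel: {
      create(options?: { systemPrompt?: string; temperature?: number; topK?: number }): Promise<PromptSession>;
    };

    async function promptOnDevice(text: string): Promise<string> {
      const session = await LanguageModel.create({ systemPrompt: 'Answer in one sentence.' });
      try {
        // LanguageModelPrompt is an array of message-content objects.
        return await session.prompt([{ type: 'text', content: text }]);
      } finally {
        session.destroy();
      }
    }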
@@ -1503,6 +1701,14 @@ export declare interface ObjectSchemaInterface extends SchemaInterface {
      optionalProperties?: string[];
  }
 
+ /**
+  * Encapsulates configuration for on-device inference.
+  */
+ export declare interface OnDeviceParams {
+     createOptions?: LanguageModelCreateOptions;
+     promptOptions?: LanguageModelPromptOptions;
+ }
+
  /**
   * Content part - includes text, image/video, or function call/response
   * part types.
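OnDeviceParams simply forwards options to the two Chrome calls: createOptions to LanguageModel.create() and promptOptions to prompt()/promptStreaming(). The values below are illustrative; in particular, responseConstraint is typed only as object, and treating it as a JSON-Schema-style constraint is an assumption.

    import type { OnDeviceParams } from '@firebase/ai';

    const onDeviceParams: OnDeviceParams = {
      createOptions: {
        temperature: 0.2,
        topK: 1,
        expectedInputs: [{ type: 'text' }]
      },
      promptOptions: {
        // Assumption: a JSON-Schema-like object constraining the response shape.
        responseConstraint: {
          type: 'object',
          properties: { answer: { type: 'string' } },
          required: ['answer']
        }
      }
    };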