@firebase/ai 2.0.0 → 2.1.0-canary.02280d747
- package/dist/ai-public.d.ts +217 -9
- package/dist/ai.d.ts +220 -10
- package/dist/esm/index.esm.js +394 -34
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/api.d.ts +2 -2
- package/dist/esm/src/constants.d.ts +4 -0
- package/dist/esm/src/index.d.ts +3 -0
- package/dist/esm/src/methods/chat-session.d.ts +3 -1
- package/dist/esm/src/methods/chrome-adapter.d.ts +120 -0
- package/dist/esm/src/methods/count-tokens.d.ts +3 -1
- package/dist/esm/src/methods/generate-content.d.ts +3 -2
- package/dist/esm/src/models/ai-model.d.ts +1 -1
- package/dist/esm/src/models/generative-model.d.ts +3 -1
- package/dist/esm/src/public-types.d.ts +10 -1
- package/dist/esm/src/service.d.ts +4 -1
- package/dist/esm/src/types/chrome-adapter.d.ts +56 -0
- package/dist/esm/src/types/enums.d.ts +20 -1
- package/dist/esm/src/types/imagen/requests.d.ts +2 -2
- package/dist/esm/src/types/imagen/responses.d.ts +1 -0
- package/dist/esm/src/types/index.d.ts +2 -0
- package/dist/esm/src/types/language-model.d.ts +117 -0
- package/dist/esm/src/types/requests.d.ts +35 -2
- package/dist/esm/src/types/responses.d.ts +1 -1
- package/dist/esm/src/types/schema.d.ts +1 -1
- package/dist/index.cjs.js +395 -33
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +382 -22
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +382 -23
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/api.d.ts +2 -2
- package/dist/src/constants.d.ts +4 -0
- package/dist/src/index.d.ts +3 -0
- package/dist/src/methods/chat-session.d.ts +3 -1
- package/dist/src/methods/chrome-adapter.d.ts +120 -0
- package/dist/src/methods/count-tokens.d.ts +3 -1
- package/dist/src/methods/generate-content.d.ts +3 -2
- package/dist/src/models/ai-model.d.ts +1 -1
- package/dist/src/models/generative-model.d.ts +3 -1
- package/dist/src/public-types.d.ts +10 -1
- package/dist/src/service.d.ts +4 -1
- package/dist/src/types/chrome-adapter.d.ts +56 -0
- package/dist/src/types/enums.d.ts +20 -1
- package/dist/src/types/imagen/requests.d.ts +2 -2
- package/dist/src/types/imagen/responses.d.ts +1 -0
- package/dist/src/types/index.d.ts +2 -0
- package/dist/src/types/language-model.d.ts +117 -0
- package/dist/src/types/requests.d.ts +35 -2
- package/dist/src/types/responses.d.ts +1 -1
- package/dist/src/types/schema.d.ts +1 -1
- package/package.json +8 -8
package/dist/ai.d.ts
CHANGED
@@ -4,10 +4,18 @@
  * @packageDocumentation
  */

+import { AppCheckInternalComponentName } from '@firebase/app-check-interop-types';
 import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
+import { ComponentContainer } from '@firebase/component';
 import { FirebaseApp } from '@firebase/app';
+import { FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
+import { FirebaseAuthInternal } from '@firebase/auth-interop-types';
+import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
 import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
 import { FirebaseError } from '@firebase/util';
+import { _FirebaseService } from '@firebase/app';
+import { InstanceFactoryOptions } from '@firebase/component';
+import { Provider } from '@firebase/component';

 /**
  * An instance of the Firebase AI SDK.
@@ -27,6 +35,10 @@ export declare interface AI {
      * Vertex AI Gemini API (using {@link VertexAIBackend}).
      */
     backend: Backend;
+    /**
+     * Options applied to this {@link AI} instance.
+     */
+    options?: AIOptions;
     /**
      * @deprecated use `AI.backend.location` instead.
      *
@@ -111,7 +123,7 @@ export declare abstract class AIModel {
     /**
      * @internal
      */
-
+    _apiSettings: ApiSettings;
     /**
      * Constructs a new instance of the {@link AIModel} class.
      *
@@ -159,8 +171,26 @@ export declare abstract class AIModel {
 export declare interface AIOptions {
     /**
      * The backend configuration to use for the AI service instance.
+     * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
+     */
+    backend?: Backend;
+    /**
+     * Whether to use App Check limited use tokens. Defaults to false.
      */
+    useLimitedUseAppCheckTokens?: boolean;
+}
+
+declare class AIService implements AI, _FirebaseService {
+    app: FirebaseApp;
     backend: Backend;
+    auth: FirebaseAuthInternal | null;
+    appCheck: FirebaseAppCheckInternal | null;
+    _options?: Omit<AIOptions, 'backend'>;
+    location: string;
+    constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>);
+    _delete(): Promise<void>;
+    set options(optionsToSet: AIOptions);
+    get options(): AIOptions | undefined;
 }

 /**
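With `backend` now optional, an `AIOptions` object can enable limited-use App Check tokens without restating the default backend. A minimal sketch, assuming `getAI` and `GoogleAIBackend` are imported from the `firebase/ai` entry point; the config values are placeholders:

```ts
import { initializeApp } from 'firebase/app';
import { getAI, GoogleAIBackend } from 'firebase/ai';

// Placeholder config values; substitute your own project's settings.
const app = initializeApp({ apiKey: 'API_KEY', projectId: 'PROJECT_ID', appId: 'APP_ID' });

// `backend` is optional; omitting it falls back to the Gemini Developer API
// backend (GoogleAIBackend), per the doc comment added above.
const ai = getAI(app, {
  backend: new GoogleAIBackend(),
  useLimitedUseAppCheckTokens: true
});
```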
@@ -315,12 +345,13 @@ export declare class BooleanSchema extends Schema {
  */
 export declare class ChatSession {
     model: string;
+    private chromeAdapter?;
     params?: StartChatParams | undefined;
     requestOptions?: RequestOptions | undefined;
     private _apiSettings;
     private _history;
     private _sendPromise;
-    constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
+    constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
     /**
      * Gets the chat history so far. Blocked prompts are not added to history.
      * Neither blocked candidates nor the prompts that generated them are added
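App code does not pass the new `chromeAdapter` argument itself; a `ChatSession` is still obtained through `GenerativeModel.startChat`. A hedged sketch, where the model name and prompt are placeholders and `app` is assumed to be an initialized `FirebaseApp`:

```ts
import type { FirebaseApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

declare const app: FirebaseApp; // assumed to be initialized elsewhere

const ai = getAI(app);
const model = getGenerativeModel(ai, { model: 'gemini-2.5-flash' }); // placeholder model name

async function chatDemo(): Promise<void> {
  // startChat() constructs the ChatSession; the SDK supplies chromeAdapter internally.
  const chat = model.startChat();
  const result = await chat.sendMessage('Summarize this release.');
  console.log(result.response.text());
}
```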
@@ -340,6 +371,46 @@ export declare class ChatSession {
     sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
 }

+/**
+ * <b>(EXPERIMENTAL)</b> Defines an inference "backend" that uses Chrome's on-device model,
+ * and encapsulates logic for detecting when on-device inference is
+ * possible.
+ *
+ * These methods should not be called directly by the user.
+ *
+ * @public
+ */
+export declare interface ChromeAdapter {
+    /**
+     * Checks if the on-device model is capable of handling a given
+     * request.
+     * @param request - A potential request to be passed to the model.
+     */
+    isAvailable(request: GenerateContentRequest): Promise<boolean>;
+    /**
+     * Generates content using on-device inference.
+     *
+     * @remarks
+     * This is comparable to {@link GenerativeModel.generateContent} for generating
+     * content using in-cloud inference.
+     * @param request - a standard Firebase AI {@link GenerateContentRequest}
+     */
+    generateContent(request: GenerateContentRequest): Promise<Response>;
+    /**
+     * Generates a content stream using on-device inference.
+     *
+     * @remarks
+     * This is comparable to {@link GenerativeModel.generateContentStream} for generating
+     * a content stream using in-cloud inference.
+     * @param request - a standard Firebase AI {@link GenerateContentRequest}
+     */
+    generateContentStream(request: GenerateContentRequest): Promise<Response>;
+    /**
+     * @internal
+     */
+    countTokens(request: CountTokensRequest): Promise<Response>;
+}
+
 /**
  * A single citation.
  * @public
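The interface is public but its methods are documented as not for direct use; what it pins down is the contract for an on-device backend: availability is reported per request, and results come back as web `Response` objects. A purely illustrative stub, assuming `ChromeAdapter` and the request types are re-exported by `firebase/ai` as these typings suggest (this is not the SDK's built-in adapter):

```ts
import type {
  ChromeAdapter,
  CountTokensRequest,
  GenerateContentRequest
} from 'firebase/ai';

// Illustrative stub only: a real adapter would talk to Chrome's built-in model.
const noopAdapter: ChromeAdapter = {
  async isAvailable(_request: GenerateContentRequest): Promise<boolean> {
    return false; // report "not available" so callers fall back to the cloud
  },
  async generateContent(_request: GenerateContentRequest): Promise<Response> {
    return new Response(JSON.stringify({ candidates: [] })); // shape is illustrative
  },
  async generateContentStream(_request: GenerateContentRequest): Promise<Response> {
    return new Response(new ReadableStream());
  },
  async countTokens(_request: CountTokensRequest): Promise<Response> {
    return new Response(JSON.stringify({ totalTokens: 0 })); // shape is illustrative
  }
};
```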
@@ -489,6 +560,8 @@ export declare interface ErrorDetails {
     [key: string]: unknown;
 }

+export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;
+
 /**
  * Data pointing to a file uploaded on Google Cloud Storage.
  * @public
@@ -601,6 +674,9 @@ export declare const FunctionCallingMode: {
     readonly NONE: "NONE";
 };

+/**
+ * @public
+ */
 export declare type FunctionCallingMode = (typeof FunctionCallingMode)[keyof typeof FunctionCallingMode];

 /**
@@ -810,13 +886,14 @@ export declare interface GenerativeContentBlob {
  * @public
  */
 export declare class GenerativeModel extends AIModel {
+    private chromeAdapter?;
     generationConfig: GenerationConfig;
     safetySettings: SafetySetting[];
     requestOptions?: RequestOptions;
     tools?: Tool[];
     toolConfig?: ToolConfig;
     systemInstruction?: Content;
-    constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions);
+    constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined);
     /**
      * Makes a single non-streaming call to the model
      * and returns an object containing a single {@link GenerateContentResponse}.
@@ -876,7 +953,7 @@ export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI;
  *
  * @public
  */
-export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel;
+export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel;

 /**
  * Returns an {@link ImagenModel} class with methods for using Imagen.
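Because the second parameter now also accepts `HybridParams` (declared later in this diff), the same factory can return a model that prefers Chrome's on-device inference and falls back to the cloud. A hedged sketch; the in-cloud model name is a placeholder and `InferenceMode` is assumed to be re-exported by `firebase/ai`:

```ts
import type { FirebaseApp } from 'firebase/app';
import { getAI, getGenerativeModel, InferenceMode } from 'firebase/ai';

declare const app: FirebaseApp; // assumed to be initialized elsewhere

const ai = getAI(app);

// Prefer the on-device model when it is available; otherwise use the cloud
// model named in inCloudParams.
const hybridModel = getGenerativeModel(ai, {
  mode: InferenceMode.PREFER_ON_DEVICE,
  inCloudParams: { model: 'gemini-2.5-flash' } // placeholder model name
});

async function run(): Promise<void> {
  const { response } = await hybridModel.generateContent('Hello from hybrid inference');
  console.log(response.text());
}
```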
@@ -1016,7 +1093,7 @@ export declare interface GroundingMetadata {
     /**
      * Google Search entry point for web searches. This contains an HTML/CSS snippet that must be
      * embedded in an app to display a Google Search entry point for follow-up web searches related to
-     * a model's
+     * a model's "Grounded Response".
      */
     searchEntryPoint?: SearchEntrypoint;
     /**
@@ -1107,7 +1184,7 @@ export declare const HarmBlockThreshold: {
     readonly BLOCK_NONE: "BLOCK_NONE";
     /**
      * All content will be allowed. This is the same as `BLOCK_NONE`, but the metadata corresponding
-     * to the {@link HarmCategory} will not be present in the response.
+     * to the {@link (HarmCategory:type)} will not be present in the response.
      */
     readonly OFF: "OFF";
 };
@@ -1200,13 +1277,33 @@ export declare const HarmSeverity: {
  */
 export declare type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity];

+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Configures hybrid inference.
+ * @public
+ */
+export declare interface HybridParams {
+    /**
+     * Specifies on-device or in-cloud inference. Defaults to prefer on-device.
+     */
+    mode: InferenceMode;
+    /**
+     * Optional. Specifies advanced params for on-device inference.
+     */
+    onDeviceParams?: OnDeviceParams;
+    /**
+     * Optional. Specifies advanced params for in-cloud inference.
+     */
+    inCloudParams?: ModelParams;
+}
+
 /**
  * Aspect ratios for Imagen images.
  *
  * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
  * {@link ImagenGenerationConfig}.
  *
- * See the
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
  * for more details and examples of the supported aspect ratios.
  *
  * @beta
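Typed against these declarations, a `HybridParams` value might look like the following; the mode and tuning numbers are illustrative, not SDK defaults:

```ts
import type { HybridParams } from 'firebase/ai';
import { InferenceMode } from 'firebase/ai';

// ONLY_ON_DEVICE presumably disables the cloud fallback entirely, so the
// on-device model must be available for calls to succeed.
const params: HybridParams = {
  mode: InferenceMode.ONLY_ON_DEVICE,
  onDeviceParams: {
    createOptions: { temperature: 0.4, topK: 3 } // illustrative tuning values
  }
};
```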
@@ -1240,7 +1337,7 @@ export declare const ImagenAspectRatio: {
  * To specify an aspect ratio for generated images, set the `aspectRatio` property in your
  * {@link ImagenGenerationConfig}.
  *
- * See the
+ * See the {@link http://firebase.google.com/docs/vertex-ai/generate-images | documentation }
  * for more details and examples of the supported aspect ratios.
  *
  * @beta
@@ -1251,6 +1348,7 @@ export declare type ImagenAspectRatio = (typeof ImagenAspectRatio)[keyof typeof
  * An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.
  *
  * This feature is not available yet.
+ * @beta
  */
 export declare interface ImagenGCSImage {
     /**
@@ -1664,6 +1762,24 @@ export declare interface ImagenSafetySettings {
     personFilterLevel?: ImagenPersonFilterLevel;
 }

+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Determines whether inference happens on-device or in-cloud.
+ * @public
+ */
+export declare const InferenceMode: {
+    readonly PREFER_ON_DEVICE: "prefer_on_device";
+    readonly ONLY_ON_DEVICE: "only_on_device";
+    readonly ONLY_IN_CLOUD: "only_in_cloud";
+};
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Determines whether inference happens on-device or in-cloud.
+ * @public
+ */
+export declare type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode];
+
 /**
  * Content part interface if the part represents an image.
  * @public
@@ -1687,6 +1803,86 @@ export declare class IntegerSchema extends Schema {
     constructor(schemaParams?: SchemaParams);
 }

+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Configures the creation of an on-device language model session.
+ * @public
+ */
+export declare interface LanguageModelCreateCoreOptions {
+    topK?: number;
+    temperature?: number;
+    expectedInputs?: LanguageModelExpected[];
+}
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Configures the creation of an on-device language model session.
+ * @public
+ */
+export declare interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions {
+    signal?: AbortSignal;
+    initialPrompts?: LanguageModelMessage[];
+}
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Options for the expected inputs for an on-device language model.
+ * @public
+ */ export declare interface LanguageModelExpected {
+    type: LanguageModelMessageType;
+    languages?: string[];
+}
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * An on-device language model message.
+ * @public
+ */
+export declare interface LanguageModelMessage {
+    role: LanguageModelMessageRole;
+    content: LanguageModelMessageContent[];
+}
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * An on-device language model content object.
+ * @public
+ */
+export declare interface LanguageModelMessageContent {
+    type: LanguageModelMessageType;
+    value: LanguageModelMessageContentValue;
+}
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Content formats that can be provided as on-device message content.
+ * @public
+ */
+export declare type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string;
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Allowable roles for on-device language model usage.
+ * @public
+ */
+export declare type LanguageModelMessageRole = 'system' | 'user' | 'assistant';
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Allowable types for on-device language model messages.
+ * @public
+ */
+export declare type LanguageModelMessageType = 'text' | 'image' | 'audio';
+
+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Options for an on-device language model prompt.
+ * @public
+ */
+export declare interface LanguageModelPromptOptions {
+    responseConstraint?: object;
+}
+
 /**
  * Content part modality.
  * @public
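These shapes mirror what Chrome's Prompt API expects when the SDK creates an on-device session. A sketch of an options object typed against them; the prompt text and expected input are arbitrary examples:

```ts
import type {
  LanguageModelCreateOptions,
  LanguageModelMessage
} from 'firebase/ai';

// Seed the on-device session with a system instruction and declare that
// image input may be sent later (values are illustrative).
const initialPrompts: LanguageModelMessage[] = [
  {
    role: 'system',
    content: [{ type: 'text', value: 'You are a terse assistant.' }]
  }
];

const createOptions: LanguageModelCreateOptions = {
  temperature: 0.2,
  topK: 3,
  expectedInputs: [{ type: 'image' }],
  initialPrompts
};
```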
@@ -1775,7 +1971,7 @@ export declare class ObjectSchema extends Schema {
 }

 /**
- * Interface for JSON parameters in a schema of {@link SchemaType}
+ * Interface for JSON parameters in a schema of {@link (SchemaType:type)}
  * "object" when not using the `Schema.object()` helper.
  * @public
  */
@@ -1791,6 +1987,17 @@ export declare interface ObjectSchemaRequest extends SchemaRequest {
     optionalProperties?: never;
 }

+/**
+ * <b>(EXPERIMENTAL)</b>
+ * Encapsulates configuration for on-device inference.
+ *
+ * @public
+ */
+export declare interface OnDeviceParams {
+    createOptions?: LanguageModelCreateOptions;
+    promptOptions?: LanguageModelPromptOptions;
+}
+
 /**
  * Content part - includes text, image/video, or function call/response
  * part types.
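`OnDeviceParams` bundles the session-creation options above with per-prompt options such as a JSON `responseConstraint`. A sketch wiring it into `HybridParams.onDeviceParams`; the constraint schema is an arbitrary example:

```ts
import type { OnDeviceParams, HybridParams } from 'firebase/ai';
import { InferenceMode } from 'firebase/ai';

const onDeviceParams: OnDeviceParams = {
  createOptions: { temperature: 0.1 },
  promptOptions: {
    // Example constraint: ask the on-device model for a small JSON object.
    responseConstraint: {
      type: 'object',
      properties: { rating: { type: 'number' } }
    }
  }
};

const hybrid: HybridParams = {
  mode: InferenceMode.PREFER_ON_DEVICE,
  onDeviceParams
};
```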
@@ -1830,7 +2037,10 @@ export declare interface RequestOptions {
      */
     timeout?: number;
     /**
-     * Base url for endpoint. Defaults to
+     * Base url for endpoint. Defaults to
+     * https://firebasevertexai.googleapis.com, which is the
+     * {@link https://console.cloud.google.com/apis/library/firebasevertexai.googleapis.com?project=_ | Firebase AI Logic API}
+     * (used regardless of your chosen Gemini API provider).
      */
     baseUrl?: string;
 }
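The expanded `baseUrl` doc also shows where `RequestOptions` enters the API: as the optional third argument to `getGenerativeModel`. A small sketch with arbitrary values; the URL shown is just the documented default:

```ts
import type { FirebaseApp } from 'firebase/app';
import { getAI, getGenerativeModel } from 'firebase/ai';

declare const app: FirebaseApp; // assumed to be initialized elsewhere

const ai = getAI(app);

const model = getGenerativeModel(
  ai,
  { model: 'gemini-2.5-flash' }, // placeholder model name
  {
    timeout: 30_000, // milliseconds; arbitrary example value
    baseUrl: 'https://firebasevertexai.googleapis.com' // the documented default
  }
);
```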