@firebase/ai 2.6.1-20251215180848 → 2.6.1-canary.5579b387a
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-public.d.ts +104 -11
- package/dist/ai.d.ts +105 -12
- package/dist/esm/index.esm.js +157 -77
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/constants.d.ts +1 -1
- package/dist/esm/src/methods/chat-session.d.ts +7 -3
- package/dist/esm/src/methods/count-tokens.d.ts +2 -2
- package/dist/esm/src/methods/generate-content.d.ts +5 -5
- package/dist/esm/src/models/generative-model.d.ts +4 -4
- package/dist/esm/src/models/imagen-model.d.ts +3 -3
- package/dist/esm/src/models/template-generative-model.d.ts +3 -3
- package/dist/esm/src/models/template-imagen-model.d.ts +2 -2
- package/dist/esm/src/requests/request.d.ts +4 -2
- package/dist/esm/src/requests/stream-reader.d.ts +1 -3
- package/dist/esm/src/types/enums.d.ts +21 -0
- package/dist/esm/src/types/imagen/internal.d.ts +1 -1
- package/dist/esm/src/types/requests.d.ts +68 -3
- package/dist/index.cjs.js +157 -76
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +157 -76
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +157 -77
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/constants.d.ts +1 -1
- package/dist/src/methods/chat-session.d.ts +7 -3
- package/dist/src/methods/count-tokens.d.ts +2 -2
- package/dist/src/methods/generate-content.d.ts +5 -5
- package/dist/src/models/generative-model.d.ts +4 -4
- package/dist/src/models/imagen-model.d.ts +3 -3
- package/dist/src/models/template-generative-model.d.ts +3 -3
- package/dist/src/models/template-imagen-model.d.ts +2 -2
- package/dist/src/requests/request.d.ts +4 -2
- package/dist/src/requests/stream-reader.d.ts +1 -3
- package/dist/src/types/enums.d.ts +21 -0
- package/dist/src/types/imagen/internal.d.ts +1 -1
- package/dist/src/types/requests.d.ts +68 -3
- package/package.json +8 -8
package/dist/ai-public.d.ts
CHANGED

@@ -312,6 +312,10 @@ export declare class ChatSession {
     requestOptions?: RequestOptions | undefined;
     private _apiSettings;
     private _history;
+    /**
+     * Ensures sequential execution of chat messages to maintain history order.
+     * Each call waits for the previous one to settle before proceeding.
+     */
     private _sendPromise;
     constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
     /**
@@ -324,13 +328,13 @@ export declare class ChatSession {
      * Sends a chat message and receives a non-streaming
      * {@link GenerateContentResult}
      */
-    sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>;
+    sendMessage(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
     /**
      * Sends a chat message and receives the response as a
      * {@link GenerateContentStreamResult} containing an iterable stream
      * and a response promise.
      */
-    sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
+    sendMessageStream(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
 }

 /**
@@ -991,14 +995,14 @@ export declare class GenerativeModel extends AIModel {
      * Makes a single non-streaming call to the model
      * and returns an object containing a single {@link GenerateContentResponse}.
      */
-    generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>;
+    generateContent(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
     /**
      * Makes a single streaming call to the model
      * and returns an object containing an iterable stream that iterates
      * over all chunks in the streaming response as well as
      * a promise that returns the final aggregated response.
      */
-    generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>;
+    generateContentStream(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
     /**
      * Gets a new {@link ChatSession} instance which can be used for
      * multi-turn chats.
@@ -1007,7 +1011,7 @@ export declare class GenerativeModel extends AIModel {
     /**
      * Counts the tokens in the provided request.
      */
-    countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>;
+    countTokens(request: CountTokensRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
 }

 /**
@@ -1698,7 +1702,7 @@ export declare class ImagenModel extends AIModel {
      *
      * @public
      */
-    generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
+    generateImages(prompt: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
     /* Excluded from this release type: generateImagesGCS */
 }

@@ -2852,6 +2856,47 @@ export declare interface Segment {
     text: string;
 }

+/**
+ * Options that can be provided per-request.
+ * Extends the base {@link RequestOptions} (like `timeout` and `baseUrl`)
+ * with request-specific controls like cancellation via `AbortSignal`.
+ *
+ * Options specified here will override any default {@link RequestOptions}
+ * configured on a model (for example, {@link GenerativeModel}).
+ *
+ * @public
+ */
+export declare interface SingleRequestOptions extends RequestOptions {
+    /**
+     * An `AbortSignal` instance that allows cancelling ongoing requests (like `generateContent` or
+     * `generateImages`).
+     *
+     * If provided, calling `abort()` on the corresponding `AbortController`
+     * will attempt to cancel the underlying HTTP request. An `AbortError` will be thrown
+     * if cancellation is successful.
+     *
+     * Note that this will not cancel the request in the backend, so any applicable billing charges
+     * will still be applied despite cancellation.
+     *
+     * @example
+     * ```javascript
+     * const controller = new AbortController();
+     * const model = getGenerativeModel({
+     *   // ...
+     * });
+     * model.generateContent(
+     *   "Write a story about a magic backpack.",
+     *   { signal: controller.signal }
+     * );
+     *
+     * // To cancel request:
+     * controller.abort();
+     * ```
+     * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
+     */
+    signal?: AbortSignal;
+}
+
 /**
  * Configures speech synthesis.
  *
@@ -2975,7 +3020,7 @@ export declare class TemplateGenerativeModel {
      *
      * @beta
      */
-    generateContent(templateId: string, templateVariables: object): Promise<GenerateContentResult>;
+    generateContent(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
     /**
      * Makes a single streaming call to the model and returns an object
      * containing an iterable stream that iterates over all chunks in the
@@ -2988,7 +3033,7 @@ export declare class TemplateGenerativeModel {
      *
      * @beta
      */
-    generateContentStream(templateId: string, templateVariables: object): Promise<GenerateContentStreamResult>;
+    generateContentStream(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
 }

 /**
@@ -3018,7 +3063,7 @@ export declare class TemplateImagenModel {
      *
      * @beta
      */
-    generateImages(templateId: string, templateVariables: object): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
+    generateImages(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
 }

 /**
@@ -3048,17 +3093,42 @@ export declare interface ThinkingConfig {
     /**
      * The thinking budget, in tokens.
      *
+     * @remarks
      * This parameter sets an upper limit on the number of tokens the model can use for its internal
      * "thinking" process. A higher budget may result in higher quality responses for complex tasks
      * but can also increase latency and cost.
      *
-     *
-     *
+     * The range of supported thinking budget values depends on the model.
+     *
+     * <ul>
+     * <li>To use the default thinking budget for a model, leave
+     * this value undefined.</li>
+     *
+     * <li>To disable thinking, when supported by the model, set this value
+     * to `0`.</li>
+     *
+     * <li>To use dynamic thinking, which allows the model to decide on the thinking
+     * budget based on the task, set this value to `-1`.</li>
+     * </ul>
      *
      * An error will be thrown if you set a thinking budget for a model that does not support this
      * feature or if the specified budget is not within the model's supported range.
+     *
+     * The model will also error if `thinkingLevel` and `thinkingBudget` are
+     * both set.
      */
     thinkingBudget?: number;
+    /**
+     * If not specified, Gemini will use the model's default dynamic thinking level.
+     *
+     * @remarks
+     * Note: The model will error if `thinkingLevel` and `thinkingBudget` are
+     * both set.
+     *
+     * Important: Gemini 2.5 series models do not support thinking levels; use
+     * `thinkingBudget` to set a thinking budget instead.
+     */
+    thinkingLevel?: ThinkingLevel;
     /**
      * Whether to include "thought summaries" in the model's response.
      *
@@ -3070,6 +3140,29 @@ export declare interface ThinkingConfig {
     includeThoughts?: boolean;
 }

+/**
+ * A preset that controls the model's "thinking" process. Use
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
+ *
+ * @public
+ */
+export declare const ThinkingLevel: {
+    MINIMAL: string;
+    LOW: string;
+    MEDIUM: string;
+    HIGH: string;
+};
+
+/**
+ * A preset that controls the model's "thinking" process. Use
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
+ *
+ * @public
+ */
+export declare type ThinkingLevel = (typeof ThinkingLevel)[keyof typeof ThinkingLevel];
+
 /**
  * Defines a tool that model can call to access external knowledge.
  * @public
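For context, here is a minimal usage sketch of the new optional `singleRequestOptions` parameter and `SingleRequestOptions` interface surfaced above. The Firebase config object and the `gemini-2.5-flash` model name are placeholders, and the imports assume the package is consumed through the public `firebase/ai` entry point rather than `@firebase/ai` directly; only `getAI`, `getGenerativeModel`, `generateContent`, `signal`, and `timeout` are taken from the declarations in this diff.

// Sketch only: the Firebase config and model name below are placeholders.
import { initializeApp } from "firebase/app";
import { getAI, getGenerativeModel } from "firebase/ai";

async function run(): Promise<void> {
  const app = initializeApp({ /* your Firebase config */ });
  const ai = getAI(app);
  const model = getGenerativeModel(ai, { model: "gemini-2.5-flash" });

  const controller = new AbortController();
  // Cancel the in-flight HTTP request after 2 seconds. Per the JSDoc above,
  // the backend request (and any billing) may still complete.
  const timer = setTimeout(() => controller.abort(), 2000);

  try {
    // Per-request options override the model's default RequestOptions.
    const { response } = await model.generateContent(
      "Write a story about a magic backpack.",
      { signal: controller.signal, timeout: 10_000 }
    );
    console.log(response.text());
  } catch (e) {
    // An AbortError is thrown when cancellation succeeds.
    if ((e as Error).name === "AbortError") {
      console.log("Request was cancelled.");
    } else {
      throw e;
    }
  } finally {
    clearTimeout(timer);
  }
}

run();

Because `SingleRequestOptions` extends `RequestOptions`, the same per-call object can also override model-level defaults such as `timeout` or `baseUrl` without reconstructing the model.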
package/dist/ai.d.ts
CHANGED

@@ -358,6 +358,10 @@ export declare class ChatSession {
     requestOptions?: RequestOptions | undefined;
     private _apiSettings;
     private _history;
+    /**
+     * Ensures sequential execution of chat messages to maintain history order.
+     * Each call waits for the previous one to settle before proceeding.
+     */
     private _sendPromise;
     constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined);
     /**
@@ -370,13 +374,13 @@ export declare class ChatSession {
      * Sends a chat message and receives a non-streaming
      * {@link GenerateContentResult}
      */
-    sendMessage(request: string | Array<string | Part>): Promise<GenerateContentResult>;
+    sendMessage(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
     /**
      * Sends a chat message and receives the response as a
      * {@link GenerateContentStreamResult} containing an iterable stream
      * and a response promise.
      */
-    sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>;
+    sendMessageStream(request: string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
 }

 /**
@@ -1058,14 +1062,14 @@ export declare class GenerativeModel extends AIModel {
      * Makes a single non-streaming call to the model
      * and returns an object containing a single {@link GenerateContentResponse}.
      */
-    generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>;
+    generateContent(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
     /**
      * Makes a single streaming call to the model
      * and returns an object containing an iterable stream that iterates
      * over all chunks in the streaming response as well as
      * a promise that returns the final aggregated response.
      */
-    generateContentStream(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentStreamResult>;
+    generateContentStream(request: GenerateContentRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
     /**
      * Gets a new {@link ChatSession} instance which can be used for
      * multi-turn chats.
@@ -1074,7 +1078,7 @@ export declare class GenerativeModel extends AIModel {
     /**
      * Counts the tokens in the provided request.
      */
-    countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>;
+    countTokens(request: CountTokensRequest | string | Array<string | Part>, singleRequestOptions?: SingleRequestOptions): Promise<CountTokensResponse>;
 }

 /**
@@ -1806,7 +1810,7 @@ export declare class ImagenModel extends AIModel {
      *
      * @public
      */
-    generateImages(prompt: string): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
+    generateImages(prompt: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
     /**
      * Generates images to Cloud Storage for Firebase using the Imagen model.
      *
@@ -1826,7 +1830,7 @@ export declare class ImagenModel extends AIModel {
      * returned object will have a `filteredReason` property.
      * If all images are filtered, the `images` array will be empty.
      */
-    generateImagesGCS(prompt: string, gcsURI: string): Promise<ImagenGenerationResponse<ImagenGCSImage>>;
+    generateImagesGCS(prompt: string, gcsURI: string, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenGCSImage>>;
 }

 /**
@@ -3003,6 +3007,47 @@ export declare interface Segment {
     text: string;
 }

+/**
+ * Options that can be provided per-request.
+ * Extends the base {@link RequestOptions} (like `timeout` and `baseUrl`)
+ * with request-specific controls like cancellation via `AbortSignal`.
+ *
+ * Options specified here will override any default {@link RequestOptions}
+ * configured on a model (for example, {@link GenerativeModel}).
+ *
+ * @public
+ */
+export declare interface SingleRequestOptions extends RequestOptions {
+    /**
+     * An `AbortSignal` instance that allows cancelling ongoing requests (like `generateContent` or
+     * `generateImages`).
+     *
+     * If provided, calling `abort()` on the corresponding `AbortController`
+     * will attempt to cancel the underlying HTTP request. An `AbortError` will be thrown
+     * if cancellation is successful.
+     *
+     * Note that this will not cancel the request in the backend, so any applicable billing charges
+     * will still be applied despite cancellation.
+     *
+     * @example
+     * ```javascript
+     * const controller = new AbortController();
+     * const model = getGenerativeModel({
+     *   // ...
+     * });
+     * model.generateContent(
+     *   "Write a story about a magic backpack.",
+     *   { signal: controller.signal }
+     * );
+     *
+     * // To cancel request:
+     * controller.abort();
+     * ```
+     * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
+     */
+    signal?: AbortSignal;
+}
+
 /**
  * Configures speech synthesis.
  *
@@ -3132,7 +3177,7 @@ export declare class TemplateGenerativeModel {
      *
      * @beta
      */
-    generateContent(templateId: string, templateVariables: object): Promise<GenerateContentResult>;
+    generateContent(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentResult>;
     /**
      * Makes a single streaming call to the model and returns an object
      * containing an iterable stream that iterates over all chunks in the
@@ -3145,7 +3190,7 @@ export declare class TemplateGenerativeModel {
      *
      * @beta
      */
-    generateContentStream(templateId: string, templateVariables: object): Promise<GenerateContentStreamResult>;
+    generateContentStream(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<GenerateContentStreamResult>;
 }

 /**
@@ -3178,7 +3223,7 @@ export declare class TemplateImagenModel {
      *
      * @beta
      */
-    generateImages(templateId: string, templateVariables: object): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
+    generateImages(templateId: string, templateVariables: object, singleRequestOptions?: SingleRequestOptions): Promise<ImagenGenerationResponse<ImagenInlineImage>>;
 }

 /**
@@ -3211,17 +3256,42 @@ export declare interface ThinkingConfig {
     /**
      * The thinking budget, in tokens.
      *
+     * @remarks
      * This parameter sets an upper limit on the number of tokens the model can use for its internal
      * "thinking" process. A higher budget may result in higher quality responses for complex tasks
      * but can also increase latency and cost.
      *
-     *
-     *
+     * The range of supported thinking budget values depends on the model.
+     *
+     * <ul>
+     * <li>To use the default thinking budget for a model, leave
+     * this value undefined.</li>
+     *
+     * <li>To disable thinking, when supported by the model, set this value
+     * to `0`.</li>
+     *
+     * <li>To use dynamic thinking, which allows the model to decide on the thinking
+     * budget based on the task, set this value to `-1`.</li>
+     * </ul>
     *
      * An error will be thrown if you set a thinking budget for a model that does not support this
      * feature or if the specified budget is not within the model's supported range.
+     *
+     * The model will also error if `thinkingLevel` and `thinkingBudget` are
+     * both set.
     */
     thinkingBudget?: number;
+    /**
+     * If not specified, Gemini will use the model's default dynamic thinking level.
+     *
+     * @remarks
+     * Note: The model will error if `thinkingLevel` and `thinkingBudget` are
+     * both set.
+     *
+     * Important: Gemini 2.5 series models do not support thinking levels; use
+     * `thinkingBudget` to set a thinking budget instead.
+     */
+    thinkingLevel?: ThinkingLevel;
     /**
      * Whether to include "thought summaries" in the model's response.
      *
@@ -3233,6 +3303,29 @@ export declare interface ThinkingConfig {
     includeThoughts?: boolean;
 }

+/**
+ * A preset that controls the model's "thinking" process. Use
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
+ *
+ * @public
+ */
+export declare const ThinkingLevel: {
+    MINIMAL: string;
+    LOW: string;
+    MEDIUM: string;
+    HIGH: string;
+};
+
+/**
+ * A preset that controls the model's "thinking" process. Use
+ * `ThinkingLevel.LOW` for faster responses on less complex tasks, and
+ * `ThinkingLevel.HIGH` for better reasoning on more complex tasks.
+ *
+ * @public
+ */
+export declare type ThinkingLevel = (typeof ThinkingLevel)[keyof typeof ThinkingLevel];
+
 /**
  * Defines a tool that model can call to access external knowledge.
  * @public
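Likewise, a hedged sketch of the new `thinkingLevel` field and `ThinkingLevel` constant added to `ThinkingConfig` above. The model names are placeholders, the `firebase/ai` entry point is assumed as before, and passing the config via `generationConfig.thinkingConfig` reflects typical Firebase AI SDK usage rather than anything shown in this diff; per the JSDoc, set either `thinkingLevel` or `thinkingBudget`, never both.

// Sketch only: model names are placeholders; set either thinkingLevel or
// thinkingBudget, never both (the model rejects requests that set both).
import { getAI, getGenerativeModel, ThinkingLevel } from "firebase/ai";

const ai = getAI(); // assumes initializeApp() was already called for the default app

// Preset-based control via the new ThinkingLevel constant.
const presetModel = getGenerativeModel(ai, {
  model: "gemini-3-pro-preview", // placeholder
  generationConfig: {
    thinkingConfig: {
      thinkingLevel: ThinkingLevel.HIGH, // deeper reasoning for complex tasks
      includeThoughts: true,             // also return thought summaries
    },
  },
});

// Token-based control for models (such as the Gemini 2.5 series) that do not
// support thinking levels: 0 disables thinking, -1 requests dynamic thinking.
const budgetModel = getGenerativeModel(ai, {
  model: "gemini-2.5-flash", // placeholder
  generationConfig: {
    thinkingConfig: {
      thinkingBudget: 1024,
    },
  },
});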