@firebase/ai 2.1.0-canary.5501791d0 → 2.1.0-canary.9b63cd60e

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,10 +4,17 @@
  * @packageDocumentation
  */
 
+ import { AppCheckInternalComponentName } from '@firebase/app-check-interop-types';
  import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
+ import { ComponentContainer } from '@firebase/component';
  import { FirebaseApp } from '@firebase/app';
+ import { FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
+ import { FirebaseAuthInternal } from '@firebase/auth-interop-types';
+ import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
  import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
  import { FirebaseError } from '@firebase/util';
+ import { InstanceFactoryOptions } from '@firebase/component';
+ import { Provider } from '@firebase/component';
 
  /**
  * An instance of the Firebase AI SDK.
@@ -27,6 +34,10 @@ export declare interface AI {
  * Vertex AI Gemini API (using {@link VertexAIBackend}).
  */
  backend: Backend;
+ /**
+ * Options applied to this {@link AI} instance.
+ */
+ options?: AIOptions;
  /**
  * @deprecated use `AI.backend.location` instead.
  *
@@ -125,8 +136,26 @@ export declare abstract class AIModel {
  export declare interface AIOptions {
  /**
  * The backend configuration to use for the AI service instance.
+ * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
+ */
+ backend?: Backend;
+ /**
+ * Whether to use App Check limited use tokens. Defaults to false.
  */
+ useLimitedUseAppCheckTokens?: boolean;
+ }
+
+ declare class AIService implements AI, _FirebaseService {
+ app: FirebaseApp;
  backend: Backend;
+ auth: FirebaseAuthInternal | null;
+ appCheck: FirebaseAppCheckInternal | null;
+ _options?: Omit<AIOptions, 'backend'>;
+ location: string;
+ constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>);
+ _delete(): Promise<void>;
+ set options(optionsToSet: AIOptions);
+ get options(): AIOptions | undefined;
  }
 
  /**
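Note: `AIOptions.backend` is now optional and defaults to the Gemini Developer API backend, and the new `useLimitedUseAppCheckTokens` flag opts an instance into App Check limited-use tokens. A minimal usage sketch of the updated `getAI` options, assuming a web app; the config values and model name are placeholders:

```ts
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, VertexAIBackend } from 'firebase/ai';

// Placeholder config; substitute your project's values.
const app = initializeApp({ apiKey: '...', projectId: '...', appId: '...' });

// The backend now defaults to the Gemini Developer API (GoogleAIBackend),
// so getAI(app) alone is equivalent to passing { backend: new GoogleAIBackend() }.
const ai = getAI(app, { useLimitedUseAppCheckTokens: true });

// An explicit backend can still be supplied, e.g. Vertex AI in a given location.
const vertexAi = getAI(app, { backend: new VertexAIBackend('us-central1') });

const model = getGenerativeModel(ai, { model: 'gemini-2.5-flash' }); // illustrative model name
```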
@@ -459,15 +488,34 @@ export declare interface EnhancedGenerateContentResponse extends GenerateContent
  */
  text: () => string;
  /**
- * Aggregates and returns all {@link InlineDataPart}s from the {@link GenerateContentResponse}'s
- * first candidate.
- *
- * @returns An array of {@link InlineDataPart}s containing data from the response, if available.
+ * Aggregates and returns every {@link InlineDataPart} from the first candidate of
+ * {@link GenerateContentResponse}.
  *
  * @throws If the prompt or candidate was blocked.
  */
  inlineDataParts: () => InlineDataPart[] | undefined;
+ /**
+ * Aggregates and returns every {@link FunctionCall} from the first candidate of
+ * {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ */
  functionCalls: () => FunctionCall[] | undefined;
+ /**
+ * Aggregates and returns every {@link TextPart} with their `thought` property set
+ * to `true` from the first candidate of {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ *
+ * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is
+ * set to `true`.
+ */
+ thoughtSummary: () => string | undefined;
  }
 
  /**
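Note: the new `thoughtSummary()` helper sits alongside `text()`, `functionCalls()`, and `inlineDataParts()` on the enhanced response. A hedged sketch of reading it; the prompt is illustrative and assumes `model` was created with thinking enabled (see the ThinkingConfig hunk below):

```ts
const result = await model.generateContent('Why is the sky blue?');
const response = result.response;

// Final answer text; thought parts are excluded.
console.log(response.text());

// Thought summary, or undefined if the model returned no thought parts.
const thoughts = response.thoughtSummary();
if (thoughts !== undefined) {
  console.log('Thought summary:', thoughts);
}
```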
@@ -487,6 +535,8 @@ export declare interface ErrorDetails {
  [key: string]: unknown;
  }
 
+ export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;
+
  /**
  * Data pointing to a file uploaded on Google Cloud Storage.
  * @public
@@ -506,6 +556,8 @@ export declare interface FileDataPart {
  functionCall?: never;
  functionResponse?: never;
  fileData: FileData;
+ thought?: boolean;
+ /* Excluded from this release type: thoughtSignature */
  }
 
  /**
@@ -557,6 +609,8 @@ export declare const FinishReason: {
  */
  export declare type FinishReason = (typeof FinishReason)[keyof typeof FinishReason];
 
+ /* Excluded from this release type: _FirebaseService */
+
  /**
  * A predicted {@link FunctionCall} returned from the model
  * that contains a string representing the {@link FunctionDeclaration.name}
@@ -613,6 +667,8 @@ export declare interface FunctionCallPart {
  inlineData?: never;
  functionCall: FunctionCall;
  functionResponse?: never;
+ thought?: boolean;
+ /* Excluded from this release type: thoughtSignature */
  }
 
  /**
@@ -687,6 +743,8 @@ export declare interface FunctionResponsePart {
  inlineData?: never;
  functionCall?: never;
  functionResponse: FunctionResponse;
+ thought?: boolean;
+ /* Excluded from this release type: thoughtSignature */
  }
 
  /**
@@ -1665,6 +1723,8 @@ export declare interface InlineDataPart {
  * Applicable if `inlineData` is a video.
  */
  videoMetadata?: VideoMetadata;
+ thought?: boolean;
+ /* Excluded from this release type: thoughtSignature */
  }
 
  /**
@@ -2279,6 +2339,8 @@ export declare interface TextPart {
  inlineData?: never;
  functionCall?: never;
  functionResponse?: never;
+ thought?: boolean;
+ /* Excluded from this release type: thoughtSignature */
  }
 
  /**
@@ -2304,6 +2366,15 @@ export declare interface ThinkingConfig {
  * feature or if the specified budget is not within the model's supported range.
  */
  thinkingBudget?: number;
+ /**
+ * Whether to include "thought summaries" in the model's response.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ */
+ includeThoughts?: boolean;
  }
 
  /**
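Note: `ThinkingConfig` gains `includeThoughts` next to the existing `thinkingBudget`. A hedged sketch of enabling it when creating a model, assuming `ThinkingConfig` is passed via `generationConfig.thinkingConfig` as in the rest of this SDK; the model name and budget value are illustrative:

```ts
const model = getGenerativeModel(ai, {
  model: 'gemini-2.5-flash', // illustrative model name
  generationConfig: {
    thinkingConfig: {
      thinkingBudget: 1024,  // illustrative token budget
      includeThoughts: true  // required for response.thoughtSummary() to return anything
    }
  }
});
```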
package/dist/ai.d.ts CHANGED
@@ -4,10 +4,18 @@
  * @packageDocumentation
  */
 
+ import { AppCheckInternalComponentName } from '@firebase/app-check-interop-types';
  import { AppCheckTokenResult } from '@firebase/app-check-interop-types';
+ import { ComponentContainer } from '@firebase/component';
  import { FirebaseApp } from '@firebase/app';
+ import { FirebaseAppCheckInternal } from '@firebase/app-check-interop-types';
+ import { FirebaseAuthInternal } from '@firebase/auth-interop-types';
+ import { FirebaseAuthInternalName } from '@firebase/auth-interop-types';
  import { FirebaseAuthTokenData } from '@firebase/auth-interop-types';
  import { FirebaseError } from '@firebase/util';
+ import { _FirebaseService } from '@firebase/app';
+ import { InstanceFactoryOptions } from '@firebase/component';
+ import { Provider } from '@firebase/component';
 
  /**
  * An instance of the Firebase AI SDK.
@@ -27,6 +35,10 @@ export declare interface AI {
  * Vertex AI Gemini API (using {@link VertexAIBackend}).
  */
  backend: Backend;
+ /**
+ * Options applied to this {@link AI} instance.
+ */
+ options?: AIOptions;
  /**
  * @deprecated use `AI.backend.location` instead.
  *
@@ -111,7 +123,7 @@ export declare abstract class AIModel {
  /**
  * @internal
  */
- protected _apiSettings: ApiSettings;
+ _apiSettings: ApiSettings;
  /**
  * Constructs a new instance of the {@link AIModel} class.
  *
@@ -159,8 +171,26 @@ export declare abstract class AIModel {
  export declare interface AIOptions {
  /**
  * The backend configuration to use for the AI service instance.
+ * Defaults to the Gemini Developer API backend ({@link GoogleAIBackend}).
  */
+ backend?: Backend;
+ /**
+ * Whether to use App Check limited use tokens. Defaults to false.
+ */
+ useLimitedUseAppCheckTokens?: boolean;
+ }
+
+ declare class AIService implements AI, _FirebaseService {
+ app: FirebaseApp;
  backend: Backend;
+ auth: FirebaseAuthInternal | null;
+ appCheck: FirebaseAppCheckInternal | null;
+ _options?: Omit<AIOptions, 'backend'>;
+ location: string;
+ constructor(app: FirebaseApp, backend: Backend, authProvider?: Provider<FirebaseAuthInternalName>, appCheckProvider?: Provider<AppCheckInternalComponentName>);
+ _delete(): Promise<void>;
+ set options(optionsToSet: AIOptions);
+ get options(): AIOptions | undefined;
  }
 
  /**
@@ -502,15 +532,34 @@ export declare interface EnhancedGenerateContentResponse extends GenerateContent
  */
  text: () => string;
  /**
- * Aggregates and returns all {@link InlineDataPart}s from the {@link GenerateContentResponse}'s
- * first candidate.
- *
- * @returns An array of {@link InlineDataPart}s containing data from the response, if available.
+ * Aggregates and returns every {@link InlineDataPart} from the first candidate of
+ * {@link GenerateContentResponse}.
  *
  * @throws If the prompt or candidate was blocked.
  */
  inlineDataParts: () => InlineDataPart[] | undefined;
+ /**
+ * Aggregates and returns every {@link FunctionCall} from the first candidate of
+ * {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ */
  functionCalls: () => FunctionCall[] | undefined;
+ /**
+ * Aggregates and returns every {@link TextPart} with their `thought` property set
+ * to `true` from the first candidate of {@link GenerateContentResponse}.
+ *
+ * @throws If the prompt or candidate was blocked.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ *
+ * Thoughts will only be included if {@link ThinkingConfig.includeThoughts} is
+ * set to `true`.
+ */
+ thoughtSummary: () => string | undefined;
  }
 
  /**
@@ -530,6 +579,8 @@ export declare interface ErrorDetails {
  [key: string]: unknown;
  }
 
+ export declare function factory(container: ComponentContainer, { instanceIdentifier }: InstanceFactoryOptions): AIService;
+
  /**
  * Data pointing to a file uploaded on Google Cloud Storage.
  * @public
@@ -549,6 +600,11 @@ export declare interface FileDataPart {
  functionCall?: never;
  functionResponse?: never;
  fileData: FileData;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
  }
 
  /**
@@ -656,6 +712,11 @@ export declare interface FunctionCallPart {
  inlineData?: never;
  functionCall: FunctionCall;
  functionResponse?: never;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
  }
 
  /**
@@ -730,6 +791,11 @@ export declare interface FunctionResponsePart {
  inlineData?: never;
  functionCall?: never;
  functionResponse: FunctionResponse;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
  }
 
  /**
@@ -1761,6 +1827,11 @@ export declare interface InlineDataPart {
  * Applicable if `inlineData` is a video.
  */
  videoMetadata?: VideoMetadata;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: never;
  }
 
  /**
@@ -2386,6 +2457,11 @@ export declare interface TextPart {
  inlineData?: never;
  functionCall?: never;
  functionResponse?: never;
+ thought?: boolean;
+ /**
+ * @internal
+ */
+ thoughtSignature?: string;
  }
 
  /**
@@ -2411,6 +2487,15 @@ export declare interface ThinkingConfig {
  * feature or if the specified budget is not within the model's supported range.
  */
  thinkingBudget?: number;
+ /**
+ * Whether to include "thought summaries" in the model's response.
+ *
+ * @remarks
+ * Thought summaries provide a brief overview of the model's internal thinking process,
+ * offering insight into how it arrived at the final answer. This can be useful for
+ * debugging, understanding the model's reasoning, and verifying its accuracy.
+ */
+ includeThoughts?: boolean;
  }
 
  /**
@@ -4,7 +4,7 @@ import { FirebaseError, getModularInstance } from '@firebase/util';
  import { Logger } from '@firebase/logger';
 
  var name = "@firebase/ai";
- var version = "2.1.0-canary.5501791d0";
+ var version = "2.1.0-canary.9b63cd60e";
 
  /**
  * @license
@@ -653,6 +653,12 @@ class AIService {
  _delete() {
  return Promise.resolve();
  }
+ set options(optionsToSet) {
+ this._options = optionsToSet;
+ }
+ get options() {
+ return this._options;
+ }
  }
 
  /**
@@ -837,7 +843,12 @@ class AIModel {
  };
  }
  else if (ai.appCheck) {
- this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken();
+ if (ai.options?.useLimitedUseAppCheckTokens) {
+ this._apiSettings.getAppCheckToken = () => ai.appCheck.getLimitedUseToken();
+ }
+ else {
+ this._apiSettings.getAppCheckToken = () => ai.appCheck.getToken();
+ }
  }
  if (ai.auth) {
  this._apiSettings.getAuthToken = () => ai.auth.getToken();
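Note: when `useLimitedUseAppCheckTokens` is set, `AIModel` fetches limited-use App Check tokens instead of standard ones. A hedged end-to-end sketch; the Firebase config and the reCAPTCHA site key are placeholders:

```ts
import { initializeApp } from 'firebase/app';
import { initializeAppCheck, ReCaptchaV3Provider } from 'firebase/app-check';
import { getAI, getGenerativeModel } from 'firebase/ai';

const app = initializeApp({ /* your Firebase config */ });

// App Check must be active for any token (standard or limited-use) to be attached.
initializeAppCheck(app, {
  provider: new ReCaptchaV3Provider('your-recaptcha-v3-site-key') // placeholder
});

// Requests from models created on this instance call appCheck.getLimitedUseToken()
// rather than appCheck.getToken().
const ai = getAI(app, { useLimitedUseAppCheckTokens: true });
const model = getGenerativeModel(ai, { model: 'gemini-2.5-flash' }); // illustrative model name
```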
@@ -1100,6 +1111,28 @@ async function makeRequest(model, task, apiSettings, stream, body, requestOption
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+ /**
+ * Check that at least one candidate exists and does not have a bad
+ * finish reason. Warns if multiple candidates exist.
+ */
+ function hasValidCandidates(response) {
+ if (response.candidates && response.candidates.length > 0) {
+ if (response.candidates.length > 1) {
+ logger.warn(`This response had ${response.candidates.length} ` +
+ `candidates. Returning text from the first candidate only. ` +
+ `Access response.candidates directly to use the other candidates.`);
+ }
+ if (hadBadFinishReason(response.candidates[0])) {
+ throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, {
+ response
+ });
+ }
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
  /**
  * Creates an EnhancedGenerateContentResponse object that has helper functions and
  * other modifications that improve usability.
@@ -1123,18 +1156,8 @@ function createEnhancedContentResponse(response) {
  */
  function addHelpers(response) {
  response.text = () => {
- if (response.candidates && response.candidates.length > 0) {
- if (response.candidates.length > 1) {
- logger.warn(`This response had ${response.candidates.length} ` +
- `candidates. Returning text from the first candidate only. ` +
- `Access response.candidates directly to use the other candidates.`);
- }
- if (hadBadFinishReason(response.candidates[0])) {
- throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, {
- response
- });
- }
- return getText(response);
+ if (hasValidCandidates(response)) {
+ return getText(response, part => !part.thought);
  }
  else if (response.promptFeedback) {
  throw new AIError(AIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, {
@@ -1143,18 +1166,20 @@ function addHelpers(response) {
  }
  return '';
  };
+ response.thoughtSummary = () => {
+ if (hasValidCandidates(response)) {
+ const result = getText(response, part => !!part.thought);
+ return result === '' ? undefined : result;
+ }
+ else if (response.promptFeedback) {
+ throw new AIError(AIErrorCode.RESPONSE_ERROR, `Thought summary not available. ${formatBlockErrorMessage(response)}`, {
+ response
+ });
+ }
+ return undefined;
+ };
  response.inlineDataParts = () => {
- if (response.candidates && response.candidates.length > 0) {
- if (response.candidates.length > 1) {
- logger.warn(`This response had ${response.candidates.length} ` +
- `candidates. Returning data from the first candidate only. ` +
- `Access response.candidates directly to use the other candidates.`);
- }
- if (hadBadFinishReason(response.candidates[0])) {
- throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, {
- response
- });
- }
+ if (hasValidCandidates(response)) {
  return getInlineDataParts(response);
  }
  else if (response.promptFeedback) {
@@ -1165,17 +1190,7 @@ function addHelpers(response) {
  return undefined;
  };
  response.functionCalls = () => {
- if (response.candidates && response.candidates.length > 0) {
- if (response.candidates.length > 1) {
- logger.warn(`This response had ${response.candidates.length} ` +
- `candidates. Returning function calls from the first candidate only. ` +
- `Access response.candidates directly to use the other candidates.`);
- }
- if (hadBadFinishReason(response.candidates[0])) {
- throw new AIError(AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage(response)}. Response body stored in error.response`, {
- response
- });
- }
+ if (hasValidCandidates(response)) {
  return getFunctionCalls(response);
  }
  else if (response.promptFeedback) {
@@ -1188,13 +1203,17 @@ function addHelpers(response) {
  return response;
  }
  /**
- * Returns all text found in all parts of first candidate.
+ * Returns all text from the first candidate's parts, filtering by whether
+ * `partFilter()` returns true.
+ *
+ * @param response - The `GenerateContentResponse` from which to extract text.
+ * @param partFilter - Only return `Part`s for which this returns true
  */
- function getText(response) {
+ function getText(response, partFilter) {
  const textStrings = [];
  if (response.candidates?.[0].content?.parts) {
  for (const part of response.candidates?.[0].content?.parts) {
- if (part.text) {
+ if (part.text && partFilter(part)) {
  textStrings.push(part.text);
  }
  }
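Note: the refactor routes `text()` and `thoughtSummary()` through the same `getText(response, partFilter)` helper; they differ only in the filter. A simplified restatement of that logic, for illustration only (the types, helper, and sample parts below are not part of the package):

```ts
type IllustrativePart = { text?: string; thought?: boolean };

// Mirrors the filtering done by getText() above: keep text parts matching the filter.
function collectText(
  parts: IllustrativePart[],
  partFilter: (p: IllustrativePart) => boolean
): string {
  return parts.filter(p => p.text && partFilter(p)).map(p => p.text).join('');
}

const parts: IllustrativePart[] = [
  { text: 'Considering Rayleigh scattering... ', thought: true },
  { text: 'The sky is blue because shorter wavelengths scatter more.' }
];

// What text() returns: non-thought parts only.
collectText(parts, part => !part.thought);
// -> 'The sky is blue because shorter wavelengths scatter more.'

// What thoughtSummary() returns (or undefined when empty): thought parts only.
collectText(parts, part => !!part.thought);
// -> 'Considering Rayleigh scattering... '
```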
@@ -1207,7 +1226,7 @@ function getText(response) {
  }
  }
  /**
- * Returns {@link FunctionCall}s associated with first candidate.
+ * Returns every {@link FunctionCall} associated with first candidate.
  */
  function getFunctionCalls(response) {
  const functionCalls = [];
@@ -1226,7 +1245,7 @@ function getFunctionCalls(response) {
  }
  }
  /**
- * Returns {@link InlineDataPart}s in the first candidate if present.
+ * Returns every {@link InlineDataPart} in the first candidate if present.
  *
  * @internal
  */
@@ -1874,12 +1893,14 @@ const VALID_PART_FIELDS = [
  'text',
  'inlineData',
  'functionCall',
- 'functionResponse'
+ 'functionResponse',
+ 'thought',
+ 'thoughtSignature'
  ];
  const VALID_PARTS_PER_ROLE = {
  user: ['text', 'inlineData'],
  function: ['functionResponse'],
- model: ['text', 'functionCall'],
+ model: ['text', 'functionCall', 'thought', 'thoughtSignature'],
  // System instructions shouldn't be in history anyway.
  system: ['text']
  };
@@ -1901,7 +1922,7 @@ function validateChatHistory(history) {
  throw new AIError(AIErrorCode.INVALID_CONTENT, `Each item should include role field. Got ${role} but valid roles are: ${JSON.stringify(POSSIBLE_ROLES)}`);
  }
  if (!Array.isArray(parts)) {
- throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' but property with an array of Parts`);
+ throw new AIError(AIErrorCode.INVALID_CONTENT, `Content should have 'parts' property with an array of Parts`);
  }
  if (parts.length === 0) {
  throw new AIError(AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part`);
@@ -1910,7 +1931,9 @@ function validateChatHistory(history) {
  text: 0,
  inlineData: 0,
  functionCall: 0,
- functionResponse: 0
+ functionResponse: 0,
+ thought: 0,
+ thoughtSignature: 0
  };
  for (const part of parts) {
  for (const key of VALID_PART_FIELDS) {
@@ -2959,14 +2982,20 @@ class ImagenImageFormat {
  *
  * @public
  */
- function getAI(app = getApp(), options = { backend: new GoogleAIBackend() }) {
+ function getAI(app = getApp(), options) {
  app = getModularInstance(app);
  // Dependencies
  const AIProvider = _getProvider(app, AI_TYPE);
- const identifier = encodeInstanceIdentifier(options.backend);
- return AIProvider.getImmediate({
+ const backend = options?.backend ?? new GoogleAIBackend();
+ const finalOptions = {
+ useLimitedUseAppCheckTokens: options?.useLimitedUseAppCheckTokens ?? false
+ };
+ const identifier = encodeInstanceIdentifier(backend);
+ const aiInstance = AIProvider.getImmediate({
  identifier
  });
+ aiInstance.options = finalOptions;
+ return aiInstance;
  }
  /**
  * Returns a {@link GenerativeModel} class with methods for inference
@@ -3022,23 +3051,24 @@ function getImagenModel(ai, modelParams, requestOptions) {
  *
  * @packageDocumentation
  */
+ function factory(container, { instanceIdentifier }) {
+ if (!instanceIdentifier) {
+ throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.');
+ }
+ const backend = decodeInstanceIdentifier(instanceIdentifier);
+ // getImmediate for FirebaseApp will always succeed
+ const app = container.getProvider('app').getImmediate();
+ const auth = container.getProvider('auth-internal');
+ const appCheckProvider = container.getProvider('app-check-internal');
+ return new AIService(app, backend, auth, appCheckProvider);
+ }
  function registerAI() {
- _registerComponent(new Component(AI_TYPE, (container, { instanceIdentifier }) => {
- if (!instanceIdentifier) {
- throw new AIError(AIErrorCode.ERROR, 'AIService instance identifier is undefined.');
- }
- const backend = decodeInstanceIdentifier(instanceIdentifier);
- // getImmediate for FirebaseApp will always succeed
- const app = container.getProvider('app').getImmediate();
- const auth = container.getProvider('auth-internal');
- const appCheckProvider = container.getProvider('app-check-internal');
- return new AIService(app, backend, auth, appCheckProvider);
- }, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true));
+ _registerComponent(new Component(AI_TYPE, factory, "PUBLIC" /* ComponentType.PUBLIC */).setMultipleInstances(true));
  registerVersion(name, version);
  // BUILD_TARGET will be replaced by values like esm, cjs, etc during the compilation
  registerVersion(name, version, 'esm2020');
  }
  registerAI();
 
- export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, getAI, getGenerativeModel, getImagenModel };
+ export { AIError, AIErrorCode, AIModel, AnyOfSchema, ArraySchema, Backend, BackendType, BlockReason, BooleanSchema, ChatSession, FinishReason, FunctionCallingMode, GenerativeModel, GoogleAIBackend, HarmBlockMethod, HarmBlockThreshold, HarmCategory, HarmProbability, HarmSeverity, ImagenAspectRatio, ImagenImageFormat, ImagenModel, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, InferenceMode, IntegerSchema, Modality, NumberSchema, ObjectSchema, POSSIBLE_ROLES, ResponseModality, Schema, SchemaType, StringSchema, VertexAIBackend, factory, getAI, getGenerativeModel, getImagenModel };
  //# sourceMappingURL=index.esm.js.map