@langchain/google-common 0.2.18 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169) hide show
  1. package/CHANGELOG.md +17 -0
  2. package/LICENSE +6 -6
  3. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  4. package/dist/auth.cjs +82 -116
  5. package/dist/auth.cjs.map +1 -0
  6. package/dist/auth.d.cts +46 -0
  7. package/dist/auth.d.cts.map +1 -0
  8. package/dist/auth.d.ts +41 -36
  9. package/dist/auth.d.ts.map +1 -0
  10. package/dist/auth.js +80 -110
  11. package/dist/auth.js.map +1 -0
  12. package/dist/chat_models.cjs +251 -466
  13. package/dist/chat_models.cjs.map +1 -0
  14. package/dist/chat_models.d.cts +98 -0
  15. package/dist/chat_models.d.cts.map +1 -0
  16. package/dist/chat_models.d.ts +87 -73
  17. package/dist/chat_models.d.ts.map +1 -0
  18. package/dist/chat_models.js +245 -457
  19. package/dist/chat_models.js.map +1 -0
  20. package/dist/connection.cjs +321 -466
  21. package/dist/connection.cjs.map +1 -0
  22. package/dist/connection.d.cts +109 -0
  23. package/dist/connection.d.cts.map +1 -0
  24. package/dist/connection.d.ts +98 -91
  25. package/dist/connection.d.ts.map +1 -0
  26. package/dist/connection.js +317 -459
  27. package/dist/connection.js.map +1 -0
  28. package/dist/embeddings.cjs +135 -186
  29. package/dist/embeddings.cjs.map +1 -0
  30. package/dist/embeddings.d.cts +44 -0
  31. package/dist/embeddings.d.cts.map +1 -0
  32. package/dist/embeddings.d.ts +38 -32
  33. package/dist/embeddings.d.ts.map +1 -0
  34. package/dist/embeddings.js +133 -181
  35. package/dist/embeddings.js.map +1 -0
  36. package/dist/experimental/media.cjs +380 -482
  37. package/dist/experimental/media.cjs.map +1 -0
  38. package/dist/experimental/media.d.cts +198 -0
  39. package/dist/experimental/media.d.cts.map +1 -0
  40. package/dist/experimental/media.d.ts +190 -202
  41. package/dist/experimental/media.d.ts.map +1 -0
  42. package/dist/experimental/media.js +369 -468
  43. package/dist/experimental/media.js.map +1 -0
  44. package/dist/experimental/utils/media_core.cjs +403 -517
  45. package/dist/experimental/utils/media_core.cjs.map +1 -0
  46. package/dist/experimental/utils/media_core.d.cts +215 -0
  47. package/dist/experimental/utils/media_core.d.cts.map +1 -0
  48. package/dist/experimental/utils/media_core.d.ts +171 -165
  49. package/dist/experimental/utils/media_core.d.ts.map +1 -0
  50. package/dist/experimental/utils/media_core.js +395 -506
  51. package/dist/experimental/utils/media_core.js.map +1 -0
  52. package/dist/index.cjs +58 -27
  53. package/dist/index.d.cts +13 -0
  54. package/dist/index.d.ts +13 -11
  55. package/dist/index.js +13 -11
  56. package/dist/llms.cjs +157 -244
  57. package/dist/llms.cjs.map +1 -0
  58. package/dist/llms.d.cts +72 -0
  59. package/dist/llms.d.cts.map +1 -0
  60. package/dist/llms.d.ts +64 -54
  61. package/dist/llms.d.ts.map +1 -0
  62. package/dist/llms.js +154 -238
  63. package/dist/llms.js.map +1 -0
  64. package/dist/output_parsers.cjs +148 -173
  65. package/dist/output_parsers.cjs.map +1 -0
  66. package/dist/output_parsers.d.cts +53 -0
  67. package/dist/output_parsers.d.cts.map +1 -0
  68. package/dist/output_parsers.d.ts +46 -42
  69. package/dist/output_parsers.d.ts.map +1 -0
  70. package/dist/output_parsers.js +146 -168
  71. package/dist/output_parsers.js.map +1 -0
  72. package/dist/types-anthropic.d.cts +229 -0
  73. package/dist/types-anthropic.d.cts.map +1 -0
  74. package/dist/types-anthropic.d.ts +221 -215
  75. package/dist/types-anthropic.d.ts.map +1 -0
  76. package/dist/types.cjs +51 -62
  77. package/dist/types.cjs.map +1 -0
  78. package/dist/types.d.cts +748 -0
  79. package/dist/types.d.cts.map +1 -0
  80. package/dist/types.d.ts +669 -656
  81. package/dist/types.d.ts.map +1 -0
  82. package/dist/types.js +46 -45
  83. package/dist/types.js.map +1 -0
  84. package/dist/utils/anthropic.cjs +598 -821
  85. package/dist/utils/anthropic.cjs.map +1 -0
  86. package/dist/utils/anthropic.js +597 -818
  87. package/dist/utils/anthropic.js.map +1 -0
  88. package/dist/utils/common.cjs +130 -211
  89. package/dist/utils/common.cjs.map +1 -0
  90. package/dist/utils/common.d.cts +13 -0
  91. package/dist/utils/common.d.cts.map +1 -0
  92. package/dist/utils/common.d.ts +12 -7
  93. package/dist/utils/common.d.ts.map +1 -0
  94. package/dist/utils/common.js +128 -207
  95. package/dist/utils/common.js.map +1 -0
  96. package/dist/utils/failed_handler.cjs +28 -30
  97. package/dist/utils/failed_handler.cjs.map +1 -0
  98. package/dist/utils/failed_handler.d.cts +9 -0
  99. package/dist/utils/failed_handler.d.cts.map +1 -0
  100. package/dist/utils/failed_handler.d.ts +8 -2
  101. package/dist/utils/failed_handler.d.ts.map +1 -0
  102. package/dist/utils/failed_handler.js +28 -28
  103. package/dist/utils/failed_handler.js.map +1 -0
  104. package/dist/utils/gemini.cjs +1020 -1488
  105. package/dist/utils/gemini.cjs.map +1 -0
  106. package/dist/utils/gemini.d.cts +51 -0
  107. package/dist/utils/gemini.d.cts.map +1 -0
  108. package/dist/utils/gemini.d.ts +51 -48
  109. package/dist/utils/gemini.d.ts.map +1 -0
  110. package/dist/utils/gemini.js +1015 -1479
  111. package/dist/utils/gemini.js.map +1 -0
  112. package/dist/utils/index.cjs +38 -23
  113. package/dist/utils/index.d.cts +8 -0
  114. package/dist/utils/index.d.ts +8 -7
  115. package/dist/utils/index.js +8 -7
  116. package/dist/utils/palm.d.cts +11 -0
  117. package/dist/utils/palm.d.cts.map +1 -0
  118. package/dist/utils/palm.d.ts +9 -4
  119. package/dist/utils/palm.d.ts.map +1 -0
  120. package/dist/utils/safety.cjs +13 -22
  121. package/dist/utils/safety.cjs.map +1 -0
  122. package/dist/utils/safety.d.cts +12 -0
  123. package/dist/utils/safety.d.cts.map +1 -0
  124. package/dist/utils/safety.d.ts +10 -4
  125. package/dist/utils/safety.d.ts.map +1 -0
  126. package/dist/utils/safety.js +13 -19
  127. package/dist/utils/safety.js.map +1 -0
  128. package/dist/utils/stream.cjs +296 -475
  129. package/dist/utils/stream.cjs.map +1 -0
  130. package/dist/utils/stream.d.cts +165 -0
  131. package/dist/utils/stream.d.cts.map +1 -0
  132. package/dist/utils/stream.d.ts +156 -131
  133. package/dist/utils/stream.d.ts.map +1 -0
  134. package/dist/utils/stream.js +293 -469
  135. package/dist/utils/stream.js.map +1 -0
  136. package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
  137. package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
  138. package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
  139. package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
  140. package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
  141. package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
  142. package/dist/utils/zod_to_gemini_parameters.js +40 -76
  143. package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
  144. package/package.json +69 -85
  145. package/dist/types-anthropic.cjs +0 -2
  146. package/dist/types-anthropic.js +0 -1
  147. package/dist/utils/anthropic.d.ts +0 -4
  148. package/dist/utils/palm.cjs +0 -2
  149. package/dist/utils/palm.js +0 -1
  150. package/experimental/media.cjs +0 -1
  151. package/experimental/media.d.cts +0 -1
  152. package/experimental/media.d.ts +0 -1
  153. package/experimental/media.js +0 -1
  154. package/experimental/utils/media_core.cjs +0 -1
  155. package/experimental/utils/media_core.d.cts +0 -1
  156. package/experimental/utils/media_core.d.ts +0 -1
  157. package/experimental/utils/media_core.js +0 -1
  158. package/index.cjs +0 -1
  159. package/index.d.cts +0 -1
  160. package/index.d.ts +0 -1
  161. package/index.js +0 -1
  162. package/types.cjs +0 -1
  163. package/types.d.cts +0 -1
  164. package/types.d.ts +0 -1
  165. package/types.js +0 -1
  166. package/utils.cjs +0 -1
  167. package/utils.d.cts +0 -1
  168. package/utils.d.ts +0 -1
  169. package/utils.js +0 -1
@@ -0,0 +1,748 @@
1
+ import { MediaManager } from "./experimental/utils/media_core.cjs";
2
+ import { JsonStream } from "./utils/stream.cjs";
3
+ import { AnthropicAPIConfig, AnthropicCacheControl, AnthropicContent, AnthropicContentRedactedThinking, AnthropicContentText, AnthropicContentThinking, AnthropicContentToolUse, AnthropicMessage, AnthropicMessageContent, AnthropicMessageContentDocument, AnthropicMessageContentImage, AnthropicMessageContentRedactedThinking, AnthropicMessageContentText, AnthropicMessageContentThinking, AnthropicMessageContentToolResult, AnthropicMessageContentToolResultContent, AnthropicMessageContentToolUse, AnthropicMessageContentToolUseInput, AnthropicMetadata, AnthropicRequest, AnthropicRequestSettings, AnthropicResponseData, AnthropicResponseMessage, AnthropicStreamBaseDelta, AnthropicStreamBaseEvent, AnthropicStreamContentBlockDeltaEvent, AnthropicStreamContentBlockStartEvent, AnthropicStreamContentBlockStopEvent, AnthropicStreamDelta, AnthropicStreamDeltaType, AnthropicStreamErrorEvent, AnthropicStreamEventType, AnthropicStreamInputJsonDelta, AnthropicStreamMessageDeltaEvent, AnthropicStreamMessageStartEvent, AnthropicStreamMessageStopEvent, AnthropicStreamPingEvent, AnthropicStreamTextDelta, AnthropicThinking, AnthropicThinkingDisabled, AnthropicThinkingEnabled, AnthropicTool, AnthropicToolChoice, AnthropicToolChoiceAny, AnthropicToolChoiceAuto, AnthropicToolChoiceTool, AnthropicToolInputSchema, AnthropicUsage } from "./types-anthropic.cjs";
4
+ import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
5
+ import { BaseLLMParams } from "@langchain/core/language_models/llms";
6
+ import { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
7
+ import { BaseMessage, BaseMessageChunk, MessageContent } from "@langchain/core/messages";
8
+ import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
9
+ import { EmbeddingsParams } from "@langchain/core/embeddings";
10
+
11
+ //#region src/types.d.ts
12
+
13
+ /**
14
+ * Parameters needed to setup the client connection.
15
+ * AuthOptions are something like GoogleAuthOptions (from google-auth-library)
16
+ * or WebGoogleAuthOptions.
17
+ */
18
+ interface GoogleClientParams<AuthOptions> {
19
+ authOptions?: AuthOptions;
20
+ /** Some APIs allow an API key instead */
21
+ apiKey?: string;
22
+ }
23
+ /**
24
+ * What platform is this running on?
25
+ * gai - Google AI Studio / MakerSuite / Generative AI platform
26
+ * gcp - Google Cloud Platform
27
+ */
28
+ type GooglePlatformType = "gai" | "gcp";
29
+ interface GoogleConnectionParams<AuthOptions> extends GoogleClientParams<AuthOptions> {
30
+ /** Hostname for the API call (if this is running on GCP) */
31
+ endpoint?: string;
32
+ /** Region where the LLM is stored (if this is running on GCP) */
33
+ location?: string;
34
+ /** The version of the API functions. Part of the path. */
35
+ apiVersion?: string;
36
+ /**
37
+ * What platform to run the service on.
38
+ * If not specified, the class should determine this from other
39
+ * means. Either way, the platform actually used will be in
40
+ * the "platform" getter.
41
+ */
42
+ platformType?: GooglePlatformType;
43
+ /**
44
+ * For compatibility with Google's libraries, should this use Vertex?
45
+ * The "platformType" parameter takes precedence.
46
+ */
47
+ vertexai?: boolean;
48
+ }
49
+ declare const GoogleAISafetyCategory: {
50
+ readonly Harassment: "HARM_CATEGORY_HARASSMENT";
51
+ readonly HARASSMENT: "HARM_CATEGORY_HARASSMENT";
52
+ readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
53
+ readonly HateSpeech: "HARM_CATEGORY_HATE_SPEECH";
54
+ readonly HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
55
+ readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
56
+ readonly SexuallyExplicit: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
57
+ readonly SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
58
+ readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
59
+ readonly Dangerous: "HARM_CATEGORY_DANGEROUS";
60
+ readonly DANGEROUS: "HARM_CATEGORY_DANGEROUS";
61
+ readonly HARM_CATEGORY_DANGEROUS: "HARM_CATEGORY_DANGEROUS";
62
+ readonly CivicIntegrity: "HARM_CATEGORY_CIVIC_INTEGRITY";
63
+ readonly CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY";
64
+ readonly HARM_CATEGORY_CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY";
65
+ };
66
+ type GoogleAISafetyCategory = (typeof GoogleAISafetyCategory)[keyof typeof GoogleAISafetyCategory];
67
+ declare const GoogleAISafetyThreshold: {
68
+ readonly None: "BLOCK_NONE";
69
+ readonly NONE: "BLOCK_NONE";
70
+ readonly BLOCK_NONE: "BLOCK_NONE";
71
+ readonly Few: "BLOCK_ONLY_HIGH";
72
+ readonly FEW: "BLOCK_ONLY_HIGH";
73
+ readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
74
+ readonly Some: "BLOCK_MEDIUM_AND_ABOVE";
75
+ readonly SOME: "BLOCK_MEDIUM_AND_ABOVE";
76
+ readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
77
+ readonly Most: "BLOCK_LOW_AND_ABOVE";
78
+ readonly MOST: "BLOCK_LOW_AND_ABOVE";
79
+ readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
80
+ readonly Off: "OFF";
81
+ readonly OFF: "OFF";
82
+ readonly BLOCK_OFF: "OFF";
83
+ };
84
+ type GoogleAISafetyThreshold = (typeof GoogleAISafetyThreshold)[keyof typeof GoogleAISafetyThreshold];
85
+ declare const GoogleAISafetyMethod: {
86
+ readonly Severity: "SEVERITY";
87
+ readonly Probability: "PROBABILITY";
88
+ };
89
+ type GoogleAISafetyMethod = (typeof GoogleAISafetyMethod)[keyof typeof GoogleAISafetyMethod];
90
+ interface GoogleAISafetySetting {
91
+ category: GoogleAISafetyCategory | string;
92
+ threshold: GoogleAISafetyThreshold | string;
93
+ method?: GoogleAISafetyMethod | string; // Just for Vertex AI?
94
+ }
95
+ type GoogleAIResponseMimeType = "text/plain" | "application/json";
96
+ type GoogleAIModelModality = "TEXT" | "IMAGE" | "AUDIO" | string;
97
+ interface GoogleThinkingConfig {
98
+ thinkingBudget?: number;
99
+ includeThoughts?: boolean;
100
+ }
101
+ type GooglePrebuiltVoiceName = string;
102
+ interface GooglePrebuiltVoiceConfig {
103
+ voiceName: GooglePrebuiltVoiceName;
104
+ }
105
+ interface GoogleVoiceConfig {
106
+ prebuiltVoiceConfig: GooglePrebuiltVoiceConfig;
107
+ }
108
+ interface GoogleSpeakerVoiceConfig {
109
+ speaker: string;
110
+ voiceConfig: GoogleVoiceConfig;
111
+ }
112
+ interface GoogleMultiSpeakerVoiceConfig {
113
+ speakerVoiceConfigs: GoogleSpeakerVoiceConfig[];
114
+ }
115
+ interface GoogleSpeechConfigSingle {
116
+ voiceConfig: GoogleVoiceConfig;
117
+ languageCode?: string;
118
+ }
119
+ interface GoogleSpeechConfigMulti {
120
+ multiSpeakerVoiceConfig: GoogleMultiSpeakerVoiceConfig;
121
+ languageCode?: string;
122
+ }
123
+ type GoogleSpeechConfig = GoogleSpeechConfigSingle | GoogleSpeechConfigMulti;
124
+ /**
125
+ * A simplified version of the GoogleSpeakerVoiceConfig
126
+ */
127
+ interface GoogleSpeechSpeakerName {
128
+ speaker: string;
129
+ name: GooglePrebuiltVoiceName;
130
+ }
131
+ type GoogleSpeechVoice = GooglePrebuiltVoiceName | GoogleSpeechSpeakerName | GoogleSpeechSpeakerName[];
132
+ interface GoogleSpeechVoiceLanguage {
133
+ voice: GoogleSpeechVoice;
134
+ languageCode: string;
135
+ }
136
+ interface GoogleSpeechVoicesLanguage {
137
+ voices: GoogleSpeechVoice;
138
+ languageCode: string;
139
+ }
140
+ /**
141
+ * A simplified way to represent the voice (or voices) and language code.
142
+ * "voice" and "voices" are semantically the same, we're not enforcing
143
+ * that one is an array and one isn't.
144
+ */
145
+ type GoogleSpeechSimplifiedLanguage = GoogleSpeechVoiceLanguage | GoogleSpeechVoicesLanguage;
146
+ /**
147
+ * A simplified way to represent the voices.
148
+ * It can either be the voice (or voices), or the voice or voices with language configuration
149
+ */
150
+ type GoogleSpeechConfigSimplified = GoogleSpeechVoice | GoogleSpeechSimplifiedLanguage;
151
+ interface GoogleModelParams {
152
+ /** Model to use */
153
+ model?: string;
154
+ /**
155
+ * Model to use
156
+ * Alias for `model`
157
+ */
158
+ modelName?: string;
159
+ }
160
+ interface GoogleAIModelParams extends GoogleModelParams {
161
+ /** Sampling temperature to use */
162
+ temperature?: number;
163
+ /**
164
+ * Maximum number of tokens to generate in the completion.
165
+ * This may include reasoning tokens (for backwards compatibility).
166
+ */
167
+ maxOutputTokens?: number;
168
+ /**
169
+ * The maximum number of the output tokens that will be used
170
+ * for the "thinking" or "reasoning" stages.
171
+ */
172
+ maxReasoningTokens?: number;
173
+ /**
174
+ * An alias for "maxReasoningTokens"
175
+ */
176
+ thinkingBudget?: number;
177
+ /**
178
+ * An OpenAI compatible parameter that will map to "maxReasoningTokens"
179
+ */
180
+ reasoningEffort?: "low" | "medium" | "high";
181
+ /**
182
+ * Top-p changes how the model selects tokens for output.
183
+ *
184
+ * Tokens are selected from most probable to least until the sum
185
+ * of their probabilities equals the top-p value.
186
+ *
187
+ * For example, if tokens A, B, and C have a probability of
188
+ * .3, .2, and .1 and the top-p value is .5, then the model will
189
+ * select either A or B as the next token (using temperature).
190
+ */
191
+ topP?: number;
192
+ /**
193
+ * Top-k changes how the model selects tokens for output.
194
+ *
195
+ * A top-k of 1 means the selected token is the most probable among
196
+ * all tokens in the model’s vocabulary (also called greedy decoding),
197
+ * while a top-k of 3 means that the next token is selected from
198
+ * among the 3 most probable tokens (using temperature).
199
+ */
200
+ topK?: number;
201
+ /**
202
+ * Seed used in decoding. If not set, the request uses a randomly generated seed.
203
+ */
204
+ seed?: number;
205
+ /**
206
+ * Presence penalty applied to the next token's logprobs
207
+ * if the token has already been seen in the response.
208
+ * This penalty is binary on/off and not dependent on the
209
+ * number of times the token is used (after the first).
210
+ * Use frequencyPenalty for a penalty that increases with each use.
211
+ * A positive penalty will discourage the use of tokens that have
212
+ * already been used in the response, increasing the vocabulary.
213
+ * A negative penalty will encourage the use of tokens that have
214
+ * already been used in the response, decreasing the vocabulary.
215
+ */
216
+ presencePenalty?: number;
217
+ /**
218
+ * Frequency penalty applied to the next token's logprobs,
219
+ * multiplied by the number of times each token has been seen
220
+ * in the response so far.
221
+ * A positive penalty will discourage the use of tokens that
222
+ * have already been used, proportional to the number of times
223
+ * the token has been used:
224
+ * The more a token is used, the more difficult it is for the model
225
+ * to use that token again increasing the vocabulary of responses.
226
+ * Caution: A _negative_ penalty will encourage the model to reuse
227
+ * tokens proportional to the number of times the token has been used.
228
+ * Small negative values will reduce the vocabulary of a response.
229
+ * Larger negative values will cause the model to start repeating
230
+ * a common token until it hits the maxOutputTokens limit.
231
+ */
232
+ frequencyPenalty?: number;
233
+ stopSequences?: string[];
234
+ safetySettings?: GoogleAISafetySetting[];
235
+ convertSystemMessageToHumanContent?: boolean;
236
+ /**
237
+ * Available for `gemini-1.5-pro`.
238
+ * The output format of the generated candidate text.
239
+ * Supported MIME types:
240
+ * - `text/plain`: Text output.
241
+ * - `application/json`: JSON response in the candidates.
242
+ *
243
+ * @default "text/plain"
244
+ */
245
+ responseMimeType?: GoogleAIResponseMimeType;
246
+ /**
247
+ * Whether or not to stream.
248
+ * @default false
249
+ */
250
+ streaming?: boolean;
251
+ /**
252
+ * Whether to return log probabilities of the output tokens or not.
253
+ * If true, returns the log probabilities of each output token
254
+ * returned in the content of message.
255
+ */
256
+ logprobs?: boolean;
257
+ /**
258
+ * An integer between 0 and 5 specifying the number of
259
+ * most likely tokens to return at each token position,
260
+ * each with an associated log probability.
261
+ * logprobs must be set to true if this parameter is used.
262
+ */
263
+ topLogprobs?: number;
264
+ /**
265
+ * The modalities of the response.
266
+ */
267
+ responseModalities?: GoogleAIModelModality[];
268
+ /**
269
+ * Custom metadata labels to associate with the request.
270
+ * Only supported on Vertex AI (Google Cloud Platform).
271
+ * Labels are key-value pairs where both keys and values must be strings.
272
+ *
273
+ * Example:
274
+ * ```typescript
275
+ * {
276
+ * labels: {
277
+ * "team": "research",
278
+ * "component": "frontend",
279
+ * "environment": "production"
280
+ * }
281
+ * }
282
+ * ```
283
+ */
284
+ labels?: Record<string, string>;
285
+ /**
286
+ * Speech generation configuration.
287
+ * You can use either Google's definition of the speech configuration,
288
+ * or a simplified version we've defined (which can be as simple
289
+ * as the name of a pre-defined voice).
290
+ */
291
+ speechConfig?: GoogleSpeechConfig | GoogleSpeechConfigSimplified;
292
+ }
293
+ type GoogleAIToolType = BindToolsInput | GeminiTool;
294
+ /**
295
+ * The params which can be passed to the API at request time.
296
+ */
297
+ interface GoogleAIModelRequestParams extends GoogleAIModelParams {
298
+ tools?: GoogleAIToolType[];
299
+ /**
300
+ * Force the model to use tools in a specific way.
301
+ *
302
+ * | Mode | Description |
303
+ * |----------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
304
+ * | "auto" | The default model behavior. The model decides whether to predict a function call or a natural language response. |
305
+ * | "any" | The model must predict only function calls. To limit the model to a subset of functions, define the allowed function names in `allowed_function_names`. |
306
+ * | "none" | The model must not predict function calls. This behavior is equivalent to a model request without any associated function declarations. |
307
+ * | string | The string value must be one of the function names. This will force the model to predict the specified function call. |
308
+ *
309
+ * The tool configuration's "any" mode ("forced function calling") is supported for Gemini 1.5 Pro models only.
310
+ */
311
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
312
+ tool_choice?: string | "auto" | "any" | "none" | Record<string, any>;
313
+ /**
314
+ * Allowed functions to call when the mode is "any".
315
+ * If empty, any one of the provided functions are called.
316
+ */
317
+ allowed_function_names?: string[];
318
+ /**
319
+ * Used to specify a previously created context cache to use with generation.
320
+ * For Vertex, this should be of the form:
321
+ * "projects/PROJECT_NUMBER/locations/LOCATION/cachedContents/CACHE_ID",
322
+ *
323
+ * See these guides for more information on how to use context caching:
324
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-create
325
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-use
326
+ */
327
+ cachedContent?: string;
328
+ }
329
+ interface GoogleAIBaseLLMInput<AuthOptions> extends BaseLLMParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams, GoogleAISafetyParams, GoogleAIAPIParams {}
330
+ interface GoogleAIBaseLanguageModelCallOptions extends BaseChatModelCallOptions, GoogleAIModelRequestParams, GoogleAISafetyParams {
331
+ /**
332
+ * Whether or not to include usage data, like token counts
333
+ * in the streamed response chunks.
334
+ * @default true
335
+ */
336
+ streamUsage?: boolean;
337
+ }
338
+ /**
339
+ * Input to LLM class.
340
+ */
341
+ interface GoogleBaseLLMInput<AuthOptions> extends GoogleAIBaseLLMInput<AuthOptions> {}
342
+ interface GoogleResponse {
343
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
344
+ data: any;
345
+ }
346
+ interface GoogleRawResponse extends GoogleResponse {
347
+ data: Blob;
348
+ }
349
+ interface GeminiPartBase {
350
+ thought?: boolean; // Output only
351
+ thoughtSignature?: string;
352
+ }
353
+ interface GeminiVideoMetadata {
354
+ fps?: number; // Double in range (0.0, 24.0]
355
+ startOffset?: string;
356
+ endOffset?: string;
357
+ }
358
+ interface GeminiPartBaseFile extends GeminiPartBase {
359
+ videoMetadata?: GeminiVideoMetadata;
360
+ }
361
+ interface GeminiPartText extends GeminiPartBase {
362
+ text: string;
363
+ }
364
+ interface GeminiPartInlineData extends GeminiPartBaseFile {
365
+ inlineData: {
366
+ mimeType: string;
367
+ data: string;
368
+ };
369
+ }
370
+ interface GeminiPartFileData extends GeminiPartBaseFile {
371
+ fileData: {
372
+ mimeType: string;
373
+ fileUri: string;
374
+ };
375
+ }
376
+ // AI Studio only?
377
+ interface GeminiPartFunctionCall extends GeminiPartBase {
378
+ functionCall: {
379
+ name: string;
380
+ args?: object;
381
+ };
382
+ }
383
+ // AI Studio Only?
384
+ interface GeminiPartFunctionResponse extends GeminiPartBase {
385
+ functionResponse: {
386
+ name: string;
387
+ response: object;
388
+ };
389
+ }
390
+ type GeminiPart = GeminiPartText | GeminiPartInlineData | GeminiPartFileData | GeminiPartFunctionCall | GeminiPartFunctionResponse;
391
+ interface GeminiSafetySetting {
392
+ category: string;
393
+ threshold: string;
394
+ }
395
+ type GeminiSafetyRating = {
396
+ category: string;
397
+ probability: string;
398
+ } & Record<string, unknown>;
399
+ interface GeminiCitationMetadata {
400
+ citations: GeminiCitation[];
401
+ }
402
+ interface GeminiCitation {
403
+ startIndex: number;
404
+ endIndex: number;
405
+ uri: string;
406
+ title: string;
407
+ license: string;
408
+ publicationDate: GoogleTypeDate;
409
+ }
410
+ interface GoogleTypeDate {
411
+ year: number; // 1-9999 or 0 to specify a date without a year
412
+ month: number; // 1-12 or 0 to specify a year without a month and day
413
+ day: number; // Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant
414
+ }
415
+ interface GeminiGroundingMetadata {
416
+ webSearchQueries?: string[];
417
+ searchEntryPoint?: GeminiSearchEntryPoint;
418
+ groundingChunks: GeminiGroundingChunk[];
419
+ groundingSupports?: GeminiGroundingSupport[];
420
+ retrievalMetadata?: GeminiRetrievalMetadata;
421
+ }
422
+ interface GeminiSearchEntryPoint {
423
+ renderedContent?: string;
424
+ sdkBlob?: string; // Base64 encoded JSON representing array of tuple.
425
+ }
426
+ interface GeminiGroundingChunk {
427
+ web: GeminiGroundingChunkWeb;
428
+ retrievedContext: GeminiGroundingChunkRetrievedContext;
429
+ }
430
+ interface GeminiGroundingChunkWeb {
431
+ uri: string;
432
+ title: string;
433
+ }
434
+ interface GeminiGroundingChunkRetrievedContext {
435
+ uri: string;
436
+ title: string;
437
+ text: string;
438
+ }
439
+ interface GeminiGroundingSupport {
440
+ segment: GeminiSegment;
441
+ groundingChunkIndices: number[];
442
+ confidenceScores: number[];
443
+ }
444
+ interface GeminiSegment {
445
+ partIndex: number;
446
+ startIndex: number;
447
+ endIndex: number;
448
+ text: string;
449
+ }
450
+ interface GeminiRetrievalMetadata {
451
+ googleSearchDynamicRetrievalScore: number;
452
+ }
453
+ type GeminiUrlRetrievalStatus = "URL_RETRIEVAL_STATUS_SUCCESS" | "URL_RETRIEVAL_STATUS_ERROR";
454
+ interface GeminiUrlRetrievalContext {
455
+ retrievedUrl: string;
456
+ urlRetrievalStatus: GeminiUrlRetrievalStatus;
457
+ }
458
+ interface GeminiUrlRetrievalMetadata {
459
+ urlRetrievalContexts: GeminiUrlRetrievalContext[];
460
+ }
461
+ type GeminiUrlMetadata = GeminiUrlRetrievalContext;
462
+ interface GeminiUrlContextMetadata {
463
+ urlMetadata: GeminiUrlMetadata[];
464
+ }
465
+ interface GeminiLogprobsResult {
466
+ topCandidates: GeminiLogprobsTopCandidate[];
467
+ chosenCandidates: GeminiLogprobsResultCandidate[];
468
+ }
469
+ interface GeminiLogprobsTopCandidate {
470
+ candidates: GeminiLogprobsResultCandidate[];
471
+ }
472
+ interface GeminiLogprobsResultCandidate {
473
+ token: string;
474
+ tokenId: number;
475
+ logProbability: number;
476
+ }
477
+ // The "system" content appears to only be valid in the systemInstruction
478
+ type GeminiRole = "system" | "user" | "model" | "function";
479
+ interface GeminiContent {
480
+ parts: GeminiPart[];
481
+ role: GeminiRole; // Vertex AI requires the role
482
+ }
483
+ /*
484
+ * If additional attributes are added here, they should also be
485
+ * added to the attributes below
486
+ */
487
+ interface GeminiTool {
488
+ functionDeclarations?: GeminiFunctionDeclaration[];
489
+ googleSearchRetrieval?: GoogleSearchRetrieval; // Gemini-1.5
490
+ googleSearch?: GoogleSearch; // Gemini-2.0
491
+ urlContext?: UrlContext;
492
+ retrieval?: VertexAIRetrieval;
493
+ }
494
+ /*
495
+ * The known strings in this type should match those in GeminiSearchToolAttributes
496
+ */
497
+ type GoogleSearchToolSetting = boolean | "googleSearchRetrieval" | "googleSearch" | string;
498
+ declare const GeminiSearchToolAttributes: string[];
499
+ declare const GeminiToolAttributes: string[];
500
+ interface GoogleSearchRetrieval {
501
+ dynamicRetrievalConfig?: {
502
+ mode?: string;
503
+ dynamicThreshold?: number;
504
+ };
505
+ }
506
+ interface GoogleSearch {}
507
+ interface UrlContext {}
508
+ interface VertexAIRetrieval {
509
+ vertexAiSearch: {
510
+ datastore: string;
511
+ };
512
+ disableAttribution?: boolean;
513
+ }
514
+ interface GeminiFunctionDeclaration {
515
+ name: string;
516
+ description: string;
517
+ parameters?: GeminiFunctionSchema;
518
+ }
519
+ interface GeminiFunctionSchema {
520
+ type: GeminiFunctionSchemaType;
521
+ format?: string;
522
+ description?: string;
523
+ nullable?: boolean;
524
+ enum?: string[];
525
+ properties?: Record<string, GeminiFunctionSchema>;
526
+ required?: string[];
527
+ items?: GeminiFunctionSchema;
528
+ }
529
+ type GeminiFunctionSchemaType = "string" | "number" | "integer" | "boolean" | "array" | "object";
530
+ interface GeminiGenerationConfig {
531
+ stopSequences?: string[];
532
+ candidateCount?: number;
533
+ maxOutputTokens?: number;
534
+ temperature?: number;
535
+ topP?: number;
536
+ topK?: number;
537
+ seed?: number;
538
+ presencePenalty?: number;
539
+ frequencyPenalty?: number;
540
+ responseMimeType?: GoogleAIResponseMimeType;
541
+ responseLogprobs?: boolean;
542
+ logprobs?: number;
543
+ responseModalities?: GoogleAIModelModality[];
544
+ thinkingConfig?: GoogleThinkingConfig;
545
+ speechConfig?: GoogleSpeechConfig;
546
+ }
547
+ interface GeminiRequest {
548
+ contents?: GeminiContent[];
549
+ systemInstruction?: GeminiContent;
550
+ tools?: GeminiTool[];
551
+ toolConfig?: {
552
+ functionCallingConfig: {
553
+ mode: "auto" | "any" | "none";
554
+ allowedFunctionNames?: string[];
555
+ };
556
+ };
557
+ safetySettings?: GeminiSafetySetting[];
558
+ generationConfig?: GeminiGenerationConfig;
559
+ cachedContent?: string;
560
+ /**
561
+ * Custom metadata labels to associate with the API call.
562
+ */
563
+ labels?: Record<string, string>;
564
+ }
565
/**
 * One generated candidate within a Gemini generate-content response,
 * including its content parts, stop reason, safety ratings, and optional
 * grounding/citation/logprobs metadata.
 */
interface GeminiResponseCandidate {
  // The generated content: ordered parts plus the author role.
  content: {
    parts: GeminiPart[];
    role: string;
  };
  // Why generation stopped (e.g. "STOP", "MAX_TOKENS") — the full set of
  // values is defined by the Gemini API, not visible here.
  finishReason: string;
  // Index of this candidate within the response's candidate list.
  index: number;
  tokenCount?: number;
  safetyRatings: GeminiSafetyRating[];
  citationMetadata?: GeminiCitationMetadata;
  // Present when the response used search/retrieval grounding.
  groundingMetadata?: GeminiGroundingMetadata;
  urlRetrievalMetadata?: GeminiUrlRetrievalMetadata;
  urlContextMetadata?: GeminiUrlContextMetadata;
  avgLogprobs?: number;
  // NOTE(review): declared required while avgLogprobs is optional — confirm
  // the API always returns this; candidates without logprobs may omit it.
  logprobsResult: GeminiLogprobsResult;
  // Human-readable detail accompanying finishReason, when provided.
  finishMessage?: string;
}
/**
 * Feedback about the prompt itself (as opposed to the candidates),
 * including why it may have been blocked.
 */
interface GeminiResponsePromptFeedback {
  blockReason?: string;
  safetyRatings: GeminiSafetyRating[];
}
586
/**
 * Modality of a token-count entry. The trailing `| string` keeps the type
 * open to modalities added by the API after this package was published.
 */
type ModalityEnum = "TEXT" | "IMAGE" | "VIDEO" | "AUDIO" | "DOCUMENT" | string;
/** Token count attributed to a single modality. */
interface ModalityTokenCount {
  modality: ModalityEnum;
  tokenCount: number;
}
/**
 * Token accounting for a generate-content call: prompt, tool-use prompt,
 * cached-content, thinking, and candidate token counts, plus per-modality
 * breakdowns. The index signature keeps the type open to new fields.
 */
interface GenerateContentResponseUsageMetadata {
  promptTokenCount: number;
  toolUsePromptTokenCount: number;
  // Tokens served from cached content rather than the live prompt.
  cachedContentTokenCount: number;
  // Tokens spent on model "thinking" (reasoning), where supported.
  thoughtsTokenCount: number;
  candidatesTokenCount: number;
  totalTokenCount: number;
  promptTokensDetails: ModalityTokenCount[];
  toolUsePromptTokensDetails: ModalityTokenCount[];
  cacheTokensDetails: ModalityTokenCount[];
  candidatesTokensDetails: ModalityTokenCount[];
  // Forward-compatibility escape hatch for fields not modeled above.
  [key: string]: unknown;
}
/** Top-level shape of a Gemini generate-content response payload. */
interface GenerateContentResponseData {
  candidates: GeminiResponseCandidate[];
  promptFeedback: GeminiResponsePromptFeedback;
  usageMetadata: GenerateContentResponseUsageMetadata;
}
609
/** Model families reachable through the Google AI endpoints; null when undetermined. */
type GoogleLLMModelFamily = null | "palm" | "gemini" | "gemma";
/** Vertex AI additionally serves Anthropic Claude models. */
type VertexModelFamily = GoogleLLMModelFamily | "claude";
/**
 * Payload of an LLM response: a JSON stream (streaming), a single
 * response object, or an array of them (chunked/batched).
 */
type GoogleLLMResponseData = JsonStream | GenerateContentResponseData | GenerateContentResponseData[];
/** A GoogleResponse whose data is an LLM payload — Gemini or Anthropic. */
interface GoogleLLMResponse extends GoogleResponse {
  data: GoogleLLMResponseData | AnthropicResponseData;
}
615
/**
 * Strategy object for post-processing a model response according to
 * safety requirements.
 */
interface GoogleAISafetyHandler {
  /**
   * A function that will take a response and return the, possibly modified,
   * response or throw an exception if there are safety issues.
   *
   * @throws GoogleAISafetyError
   */
  handle(response: GoogleLLMResponse): GoogleLLMResponse;
}
/** Mixin params for classes that accept an optional safety handler. */
interface GoogleAISafetyParams {
  safetyHandler?: GoogleAISafetyHandler;
}
627
/**
 * JSON-Schema object as accepted by Gemini: an open record with a
 * required `type` and Gemini's `nullable` extension.
 */
type GeminiJsonSchema = Record<string, unknown> & {
  properties?: Record<string, GeminiJsonSchema>;
  type: GeminiFunctionSchemaType;
  nullable?: boolean;
};
/**
 * A GeminiJsonSchema that may still carry fields Gemini rejects
 * (e.g. `additionalProperties`) — presumably stripped/normalized before
 * being sent; confirm against the schema-conversion code.
 */
interface GeminiJsonSchemaDirty extends GeminiJsonSchema {
  items?: GeminiJsonSchemaDirty;
  properties?: Record<string, GeminiJsonSchemaDirty>;
  additionalProperties?: boolean;
}
637
/**
 * Adapter contract that abstracts over the concrete wire API (Gemini or
 * Anthropic-on-Vertex): converts LangChain messages to request payloads
 * and responses back to LangChain result types.
 */
type GoogleAIAPI = {
  // Converts LangChain message content into Gemini parts (Gemini adapters only).
  messageContentToParts?: (content: MessageContent) => Promise<GeminiPart[]>;
  // Converts one message (with its predecessor for context) into Gemini contents.
  baseMessageToContent?: (message: BaseMessage, prevMessage: BaseMessage | undefined, useSystemInstruction: boolean) => Promise<GeminiContent[]>;
  // Extracts the plain-text completion from a response.
  responseToString: (response: GoogleLLMResponse) => string;
  // Converts a (streaming) response into a chat generation chunk, if any.
  responseToChatGeneration: (response: GoogleLLMResponse) => ChatGenerationChunk | null;
  chunkToString: (chunk: BaseMessageChunk) => string;
  responseToBaseMessage: (response: GoogleLLMResponse) => BaseMessage;
  responseToChatResult: (response: GoogleLLMResponse) => ChatResult;
  // Builds the wire-format request body from input and model parameters.
  formatData: (input: unknown, parameters: GoogleAIModelRequestParams) => Promise<unknown>;
};
647
/** Configuration options specific to the Gemini API adapter. */
interface GeminiAPIConfig {
  // Post-processes responses for safety; see GoogleAISafetyHandler.
  safetyHandler?: GoogleAISafetyHandler;
  // Resolves media references in message content.
  mediaManager?: MediaManager;
  // Whether to send the system prompt via `systemInstruction` rather than
  // as a conversation turn.
  useSystemInstruction?: boolean;
  /**
   * How to handle the Google Search tool, since the name (and format)
   * of the tool changes between Gemini 1.5 and Gemini 2.0.
   * true - Change based on the model version. (Default)
   * false - Do not change the tool name provided
   * string value - Use this as the attribute name for the search
   *     tool, adapting any tool attributes if possible.
   * When the model is created, a "true" or default setting
   * will be changed to a string based on the model.
   */
  googleSearchToolAdjustment?: GoogleSearchToolSetting;
}
/** Adapter configuration: Gemini or Anthropic, depending on the model family. */
type GoogleAIAPIConfig = GeminiAPIConfig | AnthropicAPIConfig;
/** Params selecting which wire API to use and how to configure it. */
interface GoogleAIAPIParams {
  apiName?: string;
  apiConfig?: GoogleAIAPIConfig;
}
668
// Embeddings
/**
 * Defines the parameters required to initialize a
 * GoogleEmbeddings instance. It extends EmbeddingsParams and
 * GoogleConnectionParams.
 */
interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
  // Name of the embeddings model to use (required).
  model: string;
  /**
   * Used to specify output embedding size.
   * If set, output embeddings will be truncated to the size specified.
   */
  dimensions?: number;
  /**
   * An alias for "dimensions"
   */
  outputDimensionality?: number;
}
686
/**
 * Defines additional options specific to the
 * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
 */
interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {}
/**
 * Task type hint for embedding generation (retrieval, similarity,
 * classification, …). The trailing `| string` keeps the type open to
 * task types added after this package was published.
 */
type GoogleEmbeddingsTaskType = "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | string;
692
/**
 * Represents an instance for generating embeddings using the Google
 * Vertex AI API. It contains the content to be embedded.
 */
interface VertexEmbeddingsInstance {
  // The text to embed.
  content: string;
  taskType?: GoogleEmbeddingsTaskType;
  // Optional document title; presumably only meaningful for
  // RETRIEVAL_DOCUMENT task types — confirm against API docs.
  title?: string;
}
/** Request-level parameters for Vertex AI embeddings. */
interface VertexEmbeddingsParameters extends GoogleModelParams {
  // Whether the API may truncate inputs that exceed the model's limit.
  autoTruncate?: boolean;
  // Truncates output embeddings to this size when set.
  outputDimensionality?: number;
}
/** Vertex AI embeddings request: a batch of instances plus shared parameters. */
interface VertexEmbeddingsRequest {
  instances: VertexEmbeddingsInstance[];
  parameters?: VertexEmbeddingsParameters;
}
709
/**
 * Request body for the AI Studio (Generative Language) embed-content
 * endpoint; embeds a single content made of text parts.
 */
interface AIStudioEmbeddingsRequest {
  content: {
    parts: GeminiPartText[];
  };
  model?: string; // Documentation says required, but tests say otherwise
  taskType?: GoogleEmbeddingsTaskType;
  title?: string;
  // Truncates output embeddings to this size when set.
  outputDimensionality?: number;
}
/** Union of the two platform-specific embeddings request shapes. */
type GoogleEmbeddingsRequest = VertexEmbeddingsRequest | AIStudioEmbeddingsRequest;
719
/**
 * One prediction entry in a Vertex AI embeddings response: the embedding
 * values plus per-input token statistics.
 */
interface VertexEmbeddingsResponsePrediction {
  embeddings: {
    statistics: {
      // Tokens consumed by this input.
      token_count: number;
      // Whether the input was truncated to fit the model's limit.
      truncated: boolean;
    };
    // The embedding vector.
    values: number[];
  };
}
/**
 * Defines the structure of the embeddings results returned by the Google
 * Vertex AI API. It extends GoogleResponse and contains the
 * embeddings and their statistics, one prediction per input instance.
 */
interface VertexEmbeddingsResponse extends GoogleResponse {
  data: {
    predictions: VertexEmbeddingsResponsePrediction[];
  };
}
/**
 * Response shape from the AI Studio (Generative Language) embed-content
 * endpoint: a single embedding vector.
 */
interface AIStudioEmbeddingsResponse extends GoogleResponse {
  data: {
    embedding: {
      values: number[];
    };
  };
}
/** Union of the two platform-specific embeddings response shapes. */
type GoogleEmbeddingsResponse = VertexEmbeddingsResponse | AIStudioEmbeddingsResponse;
746
+ //#endregion
747
+ export { AIStudioEmbeddingsRequest, AIStudioEmbeddingsResponse, AnthropicAPIConfig, AnthropicCacheControl, AnthropicContent, AnthropicContentRedactedThinking, AnthropicContentText, AnthropicContentThinking, AnthropicContentToolUse, AnthropicMessage, AnthropicMessageContent, AnthropicMessageContentDocument, AnthropicMessageContentImage, AnthropicMessageContentRedactedThinking, AnthropicMessageContentText, AnthropicMessageContentThinking, AnthropicMessageContentToolResult, AnthropicMessageContentToolResultContent, AnthropicMessageContentToolUse, AnthropicMessageContentToolUseInput, AnthropicMetadata, AnthropicRequest, AnthropicRequestSettings, AnthropicResponseData, AnthropicResponseMessage, AnthropicStreamBaseDelta, AnthropicStreamBaseEvent, AnthropicStreamContentBlockDeltaEvent, AnthropicStreamContentBlockStartEvent, AnthropicStreamContentBlockStopEvent, AnthropicStreamDelta, AnthropicStreamDeltaType, AnthropicStreamErrorEvent, AnthropicStreamEventType, AnthropicStreamInputJsonDelta, AnthropicStreamMessageDeltaEvent, AnthropicStreamMessageStartEvent, AnthropicStreamMessageStopEvent, AnthropicStreamPingEvent, AnthropicStreamTextDelta, AnthropicThinking, AnthropicThinkingDisabled, AnthropicThinkingEnabled, AnthropicTool, AnthropicToolChoice, AnthropicToolChoiceAny, AnthropicToolChoiceAuto, AnthropicToolChoiceTool, AnthropicToolInputSchema, AnthropicUsage, BaseGoogleEmbeddingsOptions, BaseGoogleEmbeddingsParams, GeminiAPIConfig, GeminiCitation, GeminiCitationMetadata, GeminiContent, GeminiFunctionDeclaration, GeminiFunctionSchema, GeminiFunctionSchemaType, GeminiGenerationConfig, GeminiGroundingChunk, GeminiGroundingChunkRetrievedContext, GeminiGroundingChunkWeb, GeminiGroundingMetadata, GeminiGroundingSupport, GeminiJsonSchema, GeminiJsonSchemaDirty, GeminiLogprobsResult, GeminiLogprobsResultCandidate, GeminiLogprobsTopCandidate, GeminiPart, GeminiPartBase, GeminiPartBaseFile, GeminiPartFileData, GeminiPartFunctionCall, GeminiPartFunctionResponse, 
GeminiPartInlineData, GeminiPartText, GeminiRequest, GeminiResponseCandidate, GeminiRetrievalMetadata, GeminiRole, GeminiSafetyRating, GeminiSafetySetting, GeminiSearchEntryPoint, GeminiSearchToolAttributes, GeminiSegment, GeminiTool, GeminiToolAttributes, GeminiUrlContextMetadata, GeminiUrlMetadata, GeminiUrlRetrievalContext, GeminiUrlRetrievalMetadata, GeminiUrlRetrievalStatus, GeminiVideoMetadata, GenerateContentResponseData, GenerateContentResponseUsageMetadata, GoogleAIAPI, GoogleAIAPIConfig, GoogleAIAPIParams, GoogleAIBaseLLMInput, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelModality, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIResponseMimeType, GoogleAISafetyCategory, GoogleAISafetyHandler, GoogleAISafetyMethod, GoogleAISafetyParams, GoogleAISafetySetting, GoogleAISafetyThreshold, GoogleAIToolType, GoogleBaseLLMInput, GoogleClientParams, GoogleConnectionParams, GoogleEmbeddingsRequest, GoogleEmbeddingsResponse, GoogleEmbeddingsTaskType, GoogleLLMModelFamily, GoogleLLMResponse, GoogleLLMResponseData, GoogleModelParams, GoogleMultiSpeakerVoiceConfig, GooglePlatformType, GooglePrebuiltVoiceConfig, GooglePrebuiltVoiceName, GoogleRawResponse, GoogleResponse, GoogleSearch, GoogleSearchRetrieval, GoogleSearchToolSetting, GoogleSpeakerVoiceConfig, GoogleSpeechConfig, GoogleSpeechConfigMulti, GoogleSpeechConfigSimplified, GoogleSpeechConfigSingle, GoogleSpeechSimplifiedLanguage, GoogleSpeechSpeakerName, GoogleSpeechVoice, GoogleSpeechVoiceLanguage, GoogleSpeechVoicesLanguage, GoogleThinkingConfig, GoogleTypeDate, GoogleVoiceConfig, ModalityEnum, ModalityTokenCount, UrlContext, VertexAIRetrieval, VertexEmbeddingsInstance, VertexEmbeddingsParameters, VertexEmbeddingsRequest, VertexEmbeddingsResponse, VertexEmbeddingsResponsePrediction, VertexModelFamily };
748
+ //# sourceMappingURL=types.d.cts.map