@langchain/google-common 0.2.18 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173)
  1. package/CHANGELOG.md +23 -0
  2. package/LICENSE +6 -6
  3. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  4. package/dist/auth.cjs +82 -116
  5. package/dist/auth.cjs.map +1 -0
  6. package/dist/auth.d.cts +46 -0
  7. package/dist/auth.d.cts.map +1 -0
  8. package/dist/auth.d.ts +41 -36
  9. package/dist/auth.d.ts.map +1 -0
  10. package/dist/auth.js +80 -110
  11. package/dist/auth.js.map +1 -0
  12. package/dist/chat_models.cjs +264 -466
  13. package/dist/chat_models.cjs.map +1 -0
  14. package/dist/chat_models.d.cts +109 -0
  15. package/dist/chat_models.d.cts.map +1 -0
  16. package/dist/chat_models.d.ts +98 -73
  17. package/dist/chat_models.d.ts.map +1 -0
  18. package/dist/chat_models.js +258 -457
  19. package/dist/chat_models.js.map +1 -0
  20. package/dist/connection.cjs +321 -466
  21. package/dist/connection.cjs.map +1 -0
  22. package/dist/connection.d.cts +109 -0
  23. package/dist/connection.d.cts.map +1 -0
  24. package/dist/connection.d.ts +98 -91
  25. package/dist/connection.d.ts.map +1 -0
  26. package/dist/connection.js +317 -459
  27. package/dist/connection.js.map +1 -0
  28. package/dist/embeddings.cjs +135 -186
  29. package/dist/embeddings.cjs.map +1 -0
  30. package/dist/embeddings.d.cts +44 -0
  31. package/dist/embeddings.d.cts.map +1 -0
  32. package/dist/embeddings.d.ts +38 -32
  33. package/dist/embeddings.d.ts.map +1 -0
  34. package/dist/embeddings.js +133 -181
  35. package/dist/embeddings.js.map +1 -0
  36. package/dist/experimental/media.cjs +380 -482
  37. package/dist/experimental/media.cjs.map +1 -0
  38. package/dist/experimental/media.d.cts +198 -0
  39. package/dist/experimental/media.d.cts.map +1 -0
  40. package/dist/experimental/media.d.ts +190 -202
  41. package/dist/experimental/media.d.ts.map +1 -0
  42. package/dist/experimental/media.js +369 -468
  43. package/dist/experimental/media.js.map +1 -0
  44. package/dist/experimental/utils/media_core.cjs +403 -517
  45. package/dist/experimental/utils/media_core.cjs.map +1 -0
  46. package/dist/experimental/utils/media_core.d.cts +215 -0
  47. package/dist/experimental/utils/media_core.d.cts.map +1 -0
  48. package/dist/experimental/utils/media_core.d.ts +171 -165
  49. package/dist/experimental/utils/media_core.d.ts.map +1 -0
  50. package/dist/experimental/utils/media_core.js +395 -506
  51. package/dist/experimental/utils/media_core.js.map +1 -0
  52. package/dist/index.cjs +58 -27
  53. package/dist/index.d.cts +13 -0
  54. package/dist/index.d.ts +13 -11
  55. package/dist/index.js +13 -11
  56. package/dist/llms.cjs +157 -244
  57. package/dist/llms.cjs.map +1 -0
  58. package/dist/llms.d.cts +72 -0
  59. package/dist/llms.d.cts.map +1 -0
  60. package/dist/llms.d.ts +64 -54
  61. package/dist/llms.d.ts.map +1 -0
  62. package/dist/llms.js +154 -238
  63. package/dist/llms.js.map +1 -0
  64. package/dist/output_parsers.cjs +148 -173
  65. package/dist/output_parsers.cjs.map +1 -0
  66. package/dist/output_parsers.d.cts +53 -0
  67. package/dist/output_parsers.d.cts.map +1 -0
  68. package/dist/output_parsers.d.ts +46 -42
  69. package/dist/output_parsers.d.ts.map +1 -0
  70. package/dist/output_parsers.js +146 -168
  71. package/dist/output_parsers.js.map +1 -0
  72. package/dist/profiles.cjs +219 -0
  73. package/dist/profiles.cjs.map +1 -0
  74. package/dist/profiles.js +218 -0
  75. package/dist/profiles.js.map +1 -0
  76. package/dist/types-anthropic.d.cts +229 -0
  77. package/dist/types-anthropic.d.cts.map +1 -0
  78. package/dist/types-anthropic.d.ts +221 -215
  79. package/dist/types-anthropic.d.ts.map +1 -0
  80. package/dist/types.cjs +51 -62
  81. package/dist/types.cjs.map +1 -0
  82. package/dist/types.d.cts +748 -0
  83. package/dist/types.d.cts.map +1 -0
  84. package/dist/types.d.ts +669 -656
  85. package/dist/types.d.ts.map +1 -0
  86. package/dist/types.js +46 -45
  87. package/dist/types.js.map +1 -0
  88. package/dist/utils/anthropic.cjs +598 -821
  89. package/dist/utils/anthropic.cjs.map +1 -0
  90. package/dist/utils/anthropic.js +597 -818
  91. package/dist/utils/anthropic.js.map +1 -0
  92. package/dist/utils/common.cjs +130 -211
  93. package/dist/utils/common.cjs.map +1 -0
  94. package/dist/utils/common.d.cts +13 -0
  95. package/dist/utils/common.d.cts.map +1 -0
  96. package/dist/utils/common.d.ts +12 -7
  97. package/dist/utils/common.d.ts.map +1 -0
  98. package/dist/utils/common.js +128 -207
  99. package/dist/utils/common.js.map +1 -0
  100. package/dist/utils/failed_handler.cjs +28 -30
  101. package/dist/utils/failed_handler.cjs.map +1 -0
  102. package/dist/utils/failed_handler.d.cts +9 -0
  103. package/dist/utils/failed_handler.d.cts.map +1 -0
  104. package/dist/utils/failed_handler.d.ts +8 -2
  105. package/dist/utils/failed_handler.d.ts.map +1 -0
  106. package/dist/utils/failed_handler.js +28 -28
  107. package/dist/utils/failed_handler.js.map +1 -0
  108. package/dist/utils/gemini.cjs +1020 -1488
  109. package/dist/utils/gemini.cjs.map +1 -0
  110. package/dist/utils/gemini.d.cts +51 -0
  111. package/dist/utils/gemini.d.cts.map +1 -0
  112. package/dist/utils/gemini.d.ts +51 -48
  113. package/dist/utils/gemini.d.ts.map +1 -0
  114. package/dist/utils/gemini.js +1015 -1479
  115. package/dist/utils/gemini.js.map +1 -0
  116. package/dist/utils/index.cjs +38 -23
  117. package/dist/utils/index.d.cts +8 -0
  118. package/dist/utils/index.d.ts +8 -7
  119. package/dist/utils/index.js +8 -7
  120. package/dist/utils/palm.d.cts +11 -0
  121. package/dist/utils/palm.d.cts.map +1 -0
  122. package/dist/utils/palm.d.ts +9 -4
  123. package/dist/utils/palm.d.ts.map +1 -0
  124. package/dist/utils/safety.cjs +13 -22
  125. package/dist/utils/safety.cjs.map +1 -0
  126. package/dist/utils/safety.d.cts +12 -0
  127. package/dist/utils/safety.d.cts.map +1 -0
  128. package/dist/utils/safety.d.ts +10 -4
  129. package/dist/utils/safety.d.ts.map +1 -0
  130. package/dist/utils/safety.js +13 -19
  131. package/dist/utils/safety.js.map +1 -0
  132. package/dist/utils/stream.cjs +296 -475
  133. package/dist/utils/stream.cjs.map +1 -0
  134. package/dist/utils/stream.d.cts +165 -0
  135. package/dist/utils/stream.d.cts.map +1 -0
  136. package/dist/utils/stream.d.ts +156 -131
  137. package/dist/utils/stream.d.ts.map +1 -0
  138. package/dist/utils/stream.js +293 -469
  139. package/dist/utils/stream.js.map +1 -0
  140. package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
  141. package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
  142. package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
  143. package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
  144. package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
  145. package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
  146. package/dist/utils/zod_to_gemini_parameters.js +40 -76
  147. package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
  148. package/package.json +72 -85
  149. package/dist/types-anthropic.cjs +0 -2
  150. package/dist/types-anthropic.js +0 -1
  151. package/dist/utils/anthropic.d.ts +0 -4
  152. package/dist/utils/palm.cjs +0 -2
  153. package/dist/utils/palm.js +0 -1
  154. package/experimental/media.cjs +0 -1
  155. package/experimental/media.d.cts +0 -1
  156. package/experimental/media.d.ts +0 -1
  157. package/experimental/media.js +0 -1
  158. package/experimental/utils/media_core.cjs +0 -1
  159. package/experimental/utils/media_core.d.cts +0 -1
  160. package/experimental/utils/media_core.d.ts +0 -1
  161. package/experimental/utils/media_core.js +0 -1
  162. package/index.cjs +0 -1
  163. package/index.d.cts +0 -1
  164. package/index.d.ts +0 -1
  165. package/index.js +0 -1
  166. package/types.cjs +0 -1
  167. package/types.d.cts +0 -1
  168. package/types.d.ts +0 -1
  169. package/types.js +0 -1
  170. package/utils.cjs +0 -1
  171. package/utils.d.cts +0 -1
  172. package/utils.d.ts +0 -1
  173. package/utils.js +0 -1
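
Taken together, the listing shows the 1.x build emitting parallel CommonJS artifacts (`.cjs` plus `.d.cts` typings) and source maps beside every ESM module, adding a new `profiles` module and a rolldown runtime shim under `dist/_virtual/`, and dropping the old root-level re-export stubs (`package/index.js`, `package/types.js`, and so on). A minimal consumption sketch, assuming the package's top-level entry point and these exports are unchanged; the constants come from the `types.d.ts` diff below:

```typescript
// ESM consumers resolve to dist/index.js, typed by dist/index.d.ts;
// CJS consumers get dist/index.cjs, typed by the new dist/index.d.cts.
import {
  GoogleAISafetyCategory,
  GoogleAISafetyThreshold,
  type GoogleAISafetySetting,
} from "@langchain/google-common";

// The const maps in types.d.ts expose several aliases for each value.
const setting: GoogleAISafetySetting = {
  category: GoogleAISafetyCategory.Harassment, // "HARM_CATEGORY_HARASSMENT"
  threshold: GoogleAISafetyThreshold.Some, // "BLOCK_MEDIUM_AND_ABOVE"
};
```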
package/dist/types.d.ts CHANGED
@@ -1,735 +1,748 @@
1
- import type { BaseLLMParams } from "@langchain/core/language_models/llms";
2
- import type { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
3
- import { BaseMessage, BaseMessageChunk, MessageContent } from "@langchain/core/messages";
1
+ import { MediaManager } from "./experimental/utils/media_core.js";
2
+ import { JsonStream } from "./utils/stream.js";
3
+ import { AnthropicAPIConfig, AnthropicCacheControl, AnthropicContent, AnthropicContentRedactedThinking, AnthropicContentText, AnthropicContentThinking, AnthropicContentToolUse, AnthropicMessage, AnthropicMessageContent, AnthropicMessageContentDocument, AnthropicMessageContentImage, AnthropicMessageContentRedactedThinking, AnthropicMessageContentText, AnthropicMessageContentThinking, AnthropicMessageContentToolResult, AnthropicMessageContentToolResultContent, AnthropicMessageContentToolUse, AnthropicMessageContentToolUseInput, AnthropicMetadata, AnthropicRequest, AnthropicRequestSettings, AnthropicResponseData, AnthropicResponseMessage, AnthropicStreamBaseDelta, AnthropicStreamBaseEvent, AnthropicStreamContentBlockDeltaEvent, AnthropicStreamContentBlockStartEvent, AnthropicStreamContentBlockStopEvent, AnthropicStreamDelta, AnthropicStreamDeltaType, AnthropicStreamErrorEvent, AnthropicStreamEventType, AnthropicStreamInputJsonDelta, AnthropicStreamMessageDeltaEvent, AnthropicStreamMessageStartEvent, AnthropicStreamMessageStopEvent, AnthropicStreamPingEvent, AnthropicStreamTextDelta, AnthropicThinking, AnthropicThinkingDisabled, AnthropicThinkingEnabled, AnthropicTool, AnthropicToolChoice, AnthropicToolChoiceAny, AnthropicToolChoiceAuto, AnthropicToolChoiceTool, AnthropicToolInputSchema, AnthropicUsage } from "./types-anthropic.js";
4
+ import { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
4
5
  import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
6
+ import { BaseMessage, BaseMessageChunk, MessageContent } from "@langchain/core/messages";
7
+ import { BaseLLMParams } from "@langchain/core/language_models/llms";
5
8
  import { EmbeddingsParams } from "@langchain/core/embeddings";
6
9
  import { AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
7
- import type { JsonStream } from "./utils/stream.js";
8
- import { MediaManager } from "./experimental/utils/media_core.js";
9
- import { AnthropicResponseData, AnthropicAPIConfig } from "./types-anthropic.js";
10
- export * from "./types-anthropic.js";
10
+
11
+ //#region src/types.d.ts
12
+
11
13
  /**
12
14
  * Parameters needed to setup the client connection.
13
15
  * AuthOptions are something like GoogleAuthOptions (from google-auth-library)
14
16
  * or WebGoogleAuthOptions.
15
17
  */
16
- export interface GoogleClientParams<AuthOptions> {
17
- authOptions?: AuthOptions;
18
- /** Some APIs allow an API key instead */
19
- apiKey?: string;
18
+ interface GoogleClientParams<AuthOptions> {
19
+ authOptions?: AuthOptions;
20
+ /** Some APIs allow an API key instead */
21
+ apiKey?: string;
20
22
  }
21
23
  /**
22
24
  * What platform is this running on?
23
25
  * gai - Google AI Studio / MakerSuite / Generative AI platform
24
26
  * gcp - Google Cloud Platform
25
27
  */
26
- export type GooglePlatformType = "gai" | "gcp";
27
- export interface GoogleConnectionParams<AuthOptions> extends GoogleClientParams<AuthOptions> {
28
- /** Hostname for the API call (if this is running on GCP) */
29
- endpoint?: string;
30
- /** Region where the LLM is stored (if this is running on GCP) */
31
- location?: string;
32
- /** The version of the API functions. Part of the path. */
33
- apiVersion?: string;
34
- /**
35
- * What platform to run the service on.
36
- * If not specified, the class should determine this from other
37
- * means. Either way, the platform actually used will be in
38
- * the "platform" getter.
39
- */
40
- platformType?: GooglePlatformType;
41
- /**
42
- * For compatibility with Google's libraries, should this use Vertex?
43
- * The "platformType" parmeter takes precedence.
44
- */
45
- vertexai?: boolean;
46
- }
47
- export declare const GoogleAISafetyCategory: {
48
- readonly Harassment: "HARM_CATEGORY_HARASSMENT";
49
- readonly HARASSMENT: "HARM_CATEGORY_HARASSMENT";
50
- readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
51
- readonly HateSpeech: "HARM_CATEGORY_HATE_SPEECH";
52
- readonly HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
53
- readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
54
- readonly SexuallyExplicit: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
55
- readonly SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
56
- readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
57
- readonly Dangerous: "HARM_CATEGORY_DANGEROUS";
58
- readonly DANGEROUS: "HARM_CATEGORY_DANGEROUS";
59
- readonly HARM_CATEGORY_DANGEROUS: "HARM_CATEGORY_DANGEROUS";
60
- readonly CivicIntegrity: "HARM_CATEGORY_CIVIC_INTEGRITY";
61
- readonly CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY";
62
- readonly HARM_CATEGORY_CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY";
28
+ type GooglePlatformType = "gai" | "gcp";
29
+ interface GoogleConnectionParams<AuthOptions> extends GoogleClientParams<AuthOptions> {
30
+ /** Hostname for the API call (if this is running on GCP) */
31
+ endpoint?: string;
32
+ /** Region where the LLM is stored (if this is running on GCP) */
33
+ location?: string;
34
+ /** The version of the API functions. Part of the path. */
35
+ apiVersion?: string;
36
+ /**
37
+ * What platform to run the service on.
38
+ * If not specified, the class should determine this from other
39
+ * means. Either way, the platform actually used will be in
40
+ * the "platform" getter.
41
+ */
42
+ platformType?: GooglePlatformType;
43
+ /**
44
+ * For compatibility with Google's libraries, should this use Vertex?
45
+ * The "platformType" parmeter takes precedence.
46
+ */
47
+ vertexai?: boolean;
48
+ }
49
+ declare const GoogleAISafetyCategory: {
50
+ readonly Harassment: "HARM_CATEGORY_HARASSMENT";
51
+ readonly HARASSMENT: "HARM_CATEGORY_HARASSMENT";
52
+ readonly HARM_CATEGORY_HARASSMENT: "HARM_CATEGORY_HARASSMENT";
53
+ readonly HateSpeech: "HARM_CATEGORY_HATE_SPEECH";
54
+ readonly HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
55
+ readonly HARM_CATEGORY_HATE_SPEECH: "HARM_CATEGORY_HATE_SPEECH";
56
+ readonly SexuallyExplicit: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
57
+ readonly SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
58
+ readonly HARM_CATEGORY_SEXUALLY_EXPLICIT: "HARM_CATEGORY_SEXUALLY_EXPLICIT";
59
+ readonly Dangerous: "HARM_CATEGORY_DANGEROUS";
60
+ readonly DANGEROUS: "HARM_CATEGORY_DANGEROUS";
61
+ readonly HARM_CATEGORY_DANGEROUS: "HARM_CATEGORY_DANGEROUS";
62
+ readonly CivicIntegrity: "HARM_CATEGORY_CIVIC_INTEGRITY";
63
+ readonly CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY";
64
+ readonly HARM_CATEGORY_CIVIC_INTEGRITY: "HARM_CATEGORY_CIVIC_INTEGRITY";
63
65
  };
64
- export type GoogleAISafetyCategory = (typeof GoogleAISafetyCategory)[keyof typeof GoogleAISafetyCategory];
65
- export declare const GoogleAISafetyThreshold: {
66
- readonly None: "BLOCK_NONE";
67
- readonly NONE: "BLOCK_NONE";
68
- readonly BLOCK_NONE: "BLOCK_NONE";
69
- readonly Few: "BLOCK_ONLY_HIGH";
70
- readonly FEW: "BLOCK_ONLY_HIGH";
71
- readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
72
- readonly Some: "BLOCK_MEDIUM_AND_ABOVE";
73
- readonly SOME: "BLOCK_MEDIUM_AND_ABOVE";
74
- readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
75
- readonly Most: "BLOCK_LOW_AND_ABOVE";
76
- readonly MOST: "BLOCK_LOW_AND_ABOVE";
77
- readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
78
- readonly Off: "OFF";
79
- readonly OFF: "OFF";
80
- readonly BLOCK_OFF: "OFF";
66
+ type GoogleAISafetyCategory = (typeof GoogleAISafetyCategory)[keyof typeof GoogleAISafetyCategory];
67
+ declare const GoogleAISafetyThreshold: {
68
+ readonly None: "BLOCK_NONE";
69
+ readonly NONE: "BLOCK_NONE";
70
+ readonly BLOCK_NONE: "BLOCK_NONE";
71
+ readonly Few: "BLOCK_ONLY_HIGH";
72
+ readonly FEW: "BLOCK_ONLY_HIGH";
73
+ readonly BLOCK_ONLY_HIGH: "BLOCK_ONLY_HIGH";
74
+ readonly Some: "BLOCK_MEDIUM_AND_ABOVE";
75
+ readonly SOME: "BLOCK_MEDIUM_AND_ABOVE";
76
+ readonly BLOCK_MEDIUM_AND_ABOVE: "BLOCK_MEDIUM_AND_ABOVE";
77
+ readonly Most: "BLOCK_LOW_AND_ABOVE";
78
+ readonly MOST: "BLOCK_LOW_AND_ABOVE";
79
+ readonly BLOCK_LOW_AND_ABOVE: "BLOCK_LOW_AND_ABOVE";
80
+ readonly Off: "OFF";
81
+ readonly OFF: "OFF";
82
+ readonly BLOCK_OFF: "OFF";
81
83
  };
82
- export type GoogleAISafetyThreshold = (typeof GoogleAISafetyThreshold)[keyof typeof GoogleAISafetyThreshold];
83
- export declare const GoogleAISafetyMethod: {
84
- readonly Severity: "SEVERITY";
85
- readonly Probability: "PROBABILITY";
84
+ type GoogleAISafetyThreshold = (typeof GoogleAISafetyThreshold)[keyof typeof GoogleAISafetyThreshold];
85
+ declare const GoogleAISafetyMethod: {
86
+ readonly Severity: "SEVERITY";
87
+ readonly Probability: "PROBABILITY";
86
88
  };
87
- export type GoogleAISafetyMethod = (typeof GoogleAISafetyMethod)[keyof typeof GoogleAISafetyMethod];
88
- export interface GoogleAISafetySetting {
89
- category: GoogleAISafetyCategory | string;
90
- threshold: GoogleAISafetyThreshold | string;
91
- method?: GoogleAISafetyMethod | string;
89
+ type GoogleAISafetyMethod = (typeof GoogleAISafetyMethod)[keyof typeof GoogleAISafetyMethod];
90
+ interface GoogleAISafetySetting {
91
+ category: GoogleAISafetyCategory | string;
92
+ threshold: GoogleAISafetyThreshold | string;
93
+ method?: GoogleAISafetyMethod | string; // Just for Vertex AI?
92
94
  }
93
- export type GoogleAIResponseMimeType = "text/plain" | "application/json";
94
- export type GoogleAIModelModality = "TEXT" | "IMAGE" | "AUDIO" | string;
95
- export interface GoogleThinkingConfig {
96
- thinkingBudget?: number;
97
- includeThoughts?: boolean;
95
+ type GoogleAIResponseMimeType = "text/plain" | "application/json";
96
+ type GoogleAIModelModality = "TEXT" | "IMAGE" | "AUDIO" | string;
97
+ interface GoogleThinkingConfig {
98
+ thinkingBudget?: number;
99
+ includeThoughts?: boolean;
98
100
  }
99
- export type GooglePrebuiltVoiceName = string;
100
- export interface GooglePrebuiltVoiceConfig {
101
- voiceName: GooglePrebuiltVoiceName;
101
+ type GooglePrebuiltVoiceName = string;
102
+ interface GooglePrebuiltVoiceConfig {
103
+ voiceName: GooglePrebuiltVoiceName;
102
104
  }
103
- export interface GoogleVoiceConfig {
104
- prebuiltVoiceConfig: GooglePrebuiltVoiceConfig;
105
+ interface GoogleVoiceConfig {
106
+ prebuiltVoiceConfig: GooglePrebuiltVoiceConfig;
105
107
  }
106
- export interface GoogleSpeakerVoiceConfig {
107
- speaker: string;
108
- voiceConfig: GoogleVoiceConfig;
108
+ interface GoogleSpeakerVoiceConfig {
109
+ speaker: string;
110
+ voiceConfig: GoogleVoiceConfig;
109
111
  }
110
- export interface GoogleMultiSpeakerVoiceConfig {
111
- speakerVoiceConfigs: GoogleSpeakerVoiceConfig[];
112
+ interface GoogleMultiSpeakerVoiceConfig {
113
+ speakerVoiceConfigs: GoogleSpeakerVoiceConfig[];
112
114
  }
113
- export interface GoogleSpeechConfigSingle {
114
- voiceConfig: GoogleVoiceConfig;
115
- languageCode?: string;
115
+ interface GoogleSpeechConfigSingle {
116
+ voiceConfig: GoogleVoiceConfig;
117
+ languageCode?: string;
116
118
  }
117
- export interface GoogleSpeechConfigMulti {
118
- multiSpeakerVoiceConfig: GoogleMultiSpeakerVoiceConfig;
119
- languageCode?: string;
119
+ interface GoogleSpeechConfigMulti {
120
+ multiSpeakerVoiceConfig: GoogleMultiSpeakerVoiceConfig;
121
+ languageCode?: string;
120
122
  }
121
- export type GoogleSpeechConfig = GoogleSpeechConfigSingle | GoogleSpeechConfigMulti;
123
+ type GoogleSpeechConfig = GoogleSpeechConfigSingle | GoogleSpeechConfigMulti;
122
124
  /**
123
125
  * A simplified version of the GoogleSpeakerVoiceConfig
124
126
  */
125
- export interface GoogleSpeechSpeakerName {
126
- speaker: string;
127
- name: GooglePrebuiltVoiceName;
127
+ interface GoogleSpeechSpeakerName {
128
+ speaker: string;
129
+ name: GooglePrebuiltVoiceName;
128
130
  }
129
- export type GoogleSpeechVoice = GooglePrebuiltVoiceName | GoogleSpeechSpeakerName | GoogleSpeechSpeakerName[];
130
- export interface GoogleSpeechVoiceLanguage {
131
- voice: GoogleSpeechVoice;
132
- languageCode: string;
131
+ type GoogleSpeechVoice = GooglePrebuiltVoiceName | GoogleSpeechSpeakerName | GoogleSpeechSpeakerName[];
132
+ interface GoogleSpeechVoiceLanguage {
133
+ voice: GoogleSpeechVoice;
134
+ languageCode: string;
133
135
  }
134
- export interface GoogleSpeechVoicesLanguage {
135
- voices: GoogleSpeechVoice;
136
- languageCode: string;
136
+ interface GoogleSpeechVoicesLanguage {
137
+ voices: GoogleSpeechVoice;
138
+ languageCode: string;
137
139
  }
138
140
  /**
139
141
  * A simplified way to represent the voice (or voices) and language code.
140
142
  * "voice" and "voices" are semantically the same, we're not enforcing
141
143
  * that one is an array and one isn't.
142
144
  */
143
- export type GoogleSpeechSimplifiedLanguage = GoogleSpeechVoiceLanguage | GoogleSpeechVoicesLanguage;
145
+ type GoogleSpeechSimplifiedLanguage = GoogleSpeechVoiceLanguage | GoogleSpeechVoicesLanguage;
144
146
  /**
145
147
  * A simplified way to represent the voices.
146
148
  * It can either be the voice (or voices), or the voice or voices with language configuration
147
149
  */
148
- export type GoogleSpeechConfigSimplified = GoogleSpeechVoice | GoogleSpeechSimplifiedLanguage;
149
- export interface GoogleModelParams {
150
- /** Model to use */
151
- model?: string;
152
- /**
153
- * Model to use
154
- * Alias for `model`
155
- */
156
- modelName?: string;
157
- }
158
- export interface GoogleAIModelParams extends GoogleModelParams {
159
- /** Sampling temperature to use */
160
- temperature?: number;
161
- /**
162
- * Maximum number of tokens to generate in the completion.
163
- * This may include reasoning tokens (for backwards compatibility).
164
- */
165
- maxOutputTokens?: number;
166
- /**
167
- * The maximum number of the output tokens that will be used
168
- * for the "thinking" or "reasoning" stages.
169
- */
170
- maxReasoningTokens?: number;
171
- /**
172
- * An alias for "maxReasoningTokens"
173
- */
174
- thinkingBudget?: number;
175
- /**
176
- * An OpenAI compatible parameter that will map to "maxReasoningTokens"
177
- */
178
- reasoningEffort?: "low" | "medium" | "high";
179
- /**
180
- * Top-p changes how the model selects tokens for output.
181
- *
182
- * Tokens are selected from most probable to least until the sum
183
- * of their probabilities equals the top-p value.
184
- *
185
- * For example, if tokens A, B, and C have a probability of
186
- * .3, .2, and .1 and the top-p value is .5, then the model will
187
- * select either A or B as the next token (using temperature).
188
- */
189
- topP?: number;
190
- /**
191
- * Top-k changes how the model selects tokens for output.
192
- *
193
- * A top-k of 1 means the selected token is the most probable among
194
- * all tokens in the model’s vocabulary (also called greedy decoding),
195
- * while a top-k of 3 means that the next token is selected from
196
- * among the 3 most probable tokens (using temperature).
197
- */
198
- topK?: number;
199
- /**
200
- * Seed used in decoding. If not set, the request uses a randomly generated seed.
201
- */
202
- seed?: number;
203
- /**
204
- * Presence penalty applied to the next token's logprobs
205
- * if the token has already been seen in the response.
206
- * This penalty is binary on/off and not dependant on the
207
- * number of times the token is used (after the first).
208
- * Use frequencyPenalty for a penalty that increases with each use.
209
- * A positive penalty will discourage the use of tokens that have
210
- * already been used in the response, increasing the vocabulary.
211
- * A negative penalty will encourage the use of tokens that have
212
- * already been used in the response, decreasing the vocabulary.
213
- */
214
- presencePenalty?: number;
215
- /**
216
- * Frequency penalty applied to the next token's logprobs,
217
- * multiplied by the number of times each token has been seen
218
- * in the respponse so far.
219
- * A positive penalty will discourage the use of tokens that
220
- * have already been used, proportional to the number of times
221
- * the token has been used:
222
- * The more a token is used, the more dificult it is for the model
223
- * to use that token again increasing the vocabulary of responses.
224
- * Caution: A _negative_ penalty will encourage the model to reuse
225
- * tokens proportional to the number of times the token has been used.
226
- * Small negative values will reduce the vocabulary of a response.
227
- * Larger negative values will cause the model to start repeating
228
- * a common token until it hits the maxOutputTokens limit.
229
- */
230
- frequencyPenalty?: number;
231
- stopSequences?: string[];
232
- safetySettings?: GoogleAISafetySetting[];
233
- convertSystemMessageToHumanContent?: boolean;
234
- /**
235
- * Available for `gemini-1.5-pro`.
236
- * The output format of the generated candidate text.
237
- * Supported MIME types:
238
- * - `text/plain`: Text output.
239
- * - `application/json`: JSON response in the candidates.
240
- *
241
- * @default "text/plain"
242
- */
243
- responseMimeType?: GoogleAIResponseMimeType;
244
- /**
245
- * Whether or not to stream.
246
- * @default false
247
- */
248
- streaming?: boolean;
249
- /**
250
- * Whether to return log probabilities of the output tokens or not.
251
- * If true, returns the log probabilities of each output token
252
- * returned in the content of message.
253
- */
254
- logprobs?: boolean;
255
- /**
256
- * An integer between 0 and 5 specifying the number of
257
- * most likely tokens to return at each token position,
258
- * each with an associated log probability.
259
- * logprobs must be set to true if this parameter is used.
260
- */
261
- topLogprobs?: number;
262
- /**
263
- * The modalities of the response.
264
- */
265
- responseModalities?: GoogleAIModelModality[];
266
- /**
267
- * Custom metadata labels to associate with the request.
268
- * Only supported on Vertex AI (Google Cloud Platform).
269
- * Labels are key-value pairs where both keys and values must be strings.
270
- *
271
- * Example:
272
- * ```typescript
273
- * {
274
- * labels: {
275
- * "team": "research",
276
- * "component": "frontend",
277
- * "environment": "production"
278
- * }
279
- * }
280
- * ```
281
- */
282
- labels?: Record<string, string>;
283
- /**
284
- * Speech generation configuration.
285
- * You can use either Google's definition of the speech configuration,
286
- * or a simplified version we've defined (which can be as simple
287
- * as the name of a pre-defined voice).
288
- */
289
- speechConfig?: GoogleSpeechConfig | GoogleSpeechConfigSimplified;
290
- }
291
- export type GoogleAIToolType = BindToolsInput | GeminiTool;
150
+ type GoogleSpeechConfigSimplified = GoogleSpeechVoice | GoogleSpeechSimplifiedLanguage;
151
+ interface GoogleModelParams {
152
+ /** Model to use */
153
+ model?: string;
154
+ /**
155
+ * Model to use
156
+ * Alias for `model`
157
+ */
158
+ modelName?: string;
159
+ }
160
+ interface GoogleAIModelParams extends GoogleModelParams {
161
+ /** Sampling temperature to use */
162
+ temperature?: number;
163
+ /**
164
+ * Maximum number of tokens to generate in the completion.
165
+ * This may include reasoning tokens (for backwards compatibility).
166
+ */
167
+ maxOutputTokens?: number;
168
+ /**
169
+ * The maximum number of the output tokens that will be used
170
+ * for the "thinking" or "reasoning" stages.
171
+ */
172
+ maxReasoningTokens?: number;
173
+ /**
174
+ * An alias for "maxReasoningTokens"
175
+ */
176
+ thinkingBudget?: number;
177
+ /**
178
+ * An OpenAI compatible parameter that will map to "maxReasoningTokens"
179
+ */
180
+ reasoningEffort?: "low" | "medium" | "high";
181
+ /**
182
+ * Top-p changes how the model selects tokens for output.
183
+ *
184
+ * Tokens are selected from most probable to least until the sum
185
+ * of their probabilities equals the top-p value.
186
+ *
187
+ * For example, if tokens A, B, and C have a probability of
188
+ * .3, .2, and .1 and the top-p value is .5, then the model will
189
+ * select either A or B as the next token (using temperature).
190
+ */
191
+ topP?: number;
192
+ /**
193
+ * Top-k changes how the model selects tokens for output.
194
+ *
195
+ * A top-k of 1 means the selected token is the most probable among
196
+ * all tokens in the model’s vocabulary (also called greedy decoding),
197
+ * while a top-k of 3 means that the next token is selected from
198
+ * among the 3 most probable tokens (using temperature).
199
+ */
200
+ topK?: number;
201
+ /**
202
+ * Seed used in decoding. If not set, the request uses a randomly generated seed.
203
+ */
204
+ seed?: number;
205
+ /**
206
+ * Presence penalty applied to the next token's logprobs
207
+ * if the token has already been seen in the response.
208
+ * This penalty is binary on/off and not dependant on the
209
+ * number of times the token is used (after the first).
210
+ * Use frequencyPenalty for a penalty that increases with each use.
211
+ * A positive penalty will discourage the use of tokens that have
212
+ * already been used in the response, increasing the vocabulary.
213
+ * A negative penalty will encourage the use of tokens that have
214
+ * already been used in the response, decreasing the vocabulary.
215
+ */
216
+ presencePenalty?: number;
217
+ /**
218
+ * Frequency penalty applied to the next token's logprobs,
219
+ * multiplied by the number of times each token has been seen
220
+ * in the respponse so far.
221
+ * A positive penalty will discourage the use of tokens that
222
+ * have already been used, proportional to the number of times
223
+ * the token has been used:
224
+ * The more a token is used, the more dificult it is for the model
225
+ * to use that token again increasing the vocabulary of responses.
226
+ * Caution: A _negative_ penalty will encourage the model to reuse
227
+ * tokens proportional to the number of times the token has been used.
228
+ * Small negative values will reduce the vocabulary of a response.
229
+ * Larger negative values will cause the model to start repeating
230
+ * a common token until it hits the maxOutputTokens limit.
231
+ */
232
+ frequencyPenalty?: number;
233
+ stopSequences?: string[];
234
+ safetySettings?: GoogleAISafetySetting[];
235
+ convertSystemMessageToHumanContent?: boolean;
236
+ /**
237
+ * Available for `gemini-1.5-pro`.
238
+ * The output format of the generated candidate text.
239
+ * Supported MIME types:
240
+ * - `text/plain`: Text output.
241
+ * - `application/json`: JSON response in the candidates.
242
+ *
243
+ * @default "text/plain"
244
+ */
245
+ responseMimeType?: GoogleAIResponseMimeType;
246
+ /**
247
+ * Whether or not to stream.
248
+ * @default false
249
+ */
250
+ streaming?: boolean;
251
+ /**
252
+ * Whether to return log probabilities of the output tokens or not.
253
+ * If true, returns the log probabilities of each output token
254
+ * returned in the content of message.
255
+ */
256
+ logprobs?: boolean;
257
+ /**
258
+ * An integer between 0 and 5 specifying the number of
259
+ * most likely tokens to return at each token position,
260
+ * each with an associated log probability.
261
+ * logprobs must be set to true if this parameter is used.
262
+ */
263
+ topLogprobs?: number;
264
+ /**
265
+ * The modalities of the response.
266
+ */
267
+ responseModalities?: GoogleAIModelModality[];
268
+ /**
269
+ * Custom metadata labels to associate with the request.
270
+ * Only supported on Vertex AI (Google Cloud Platform).
271
+ * Labels are key-value pairs where both keys and values must be strings.
272
+ *
273
+ * Example:
274
+ * ```typescript
275
+ * {
276
+ * labels: {
277
+ * "team": "research",
278
+ * "component": "frontend",
279
+ * "environment": "production"
280
+ * }
281
+ * }
282
+ * ```
283
+ */
284
+ labels?: Record<string, string>;
285
+ /**
286
+ * Speech generation configuration.
287
+ * You can use either Google's definition of the speech configuration,
288
+ * or a simplified version we've defined (which can be as simple
289
+ * as the name of a pre-defined voice).
290
+ */
291
+ speechConfig?: GoogleSpeechConfig | GoogleSpeechConfigSimplified;
292
+ }
293
+ type GoogleAIToolType = BindToolsInput | GeminiTool;
292
294
  /**
293
295
  * The params which can be passed to the API at request time.
294
296
  */
295
- export interface GoogleAIModelRequestParams extends GoogleAIModelParams {
296
- tools?: GoogleAIToolType[];
297
- /**
298
- * Force the model to use tools in a specific way.
299
- *
300
- * | Mode | Description |
301
- * |----------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
302
- * | "auto" | The default model behavior. The model decides whether to predict a function call or a natural language response. |
303
- * | "any" | The model must predict only function calls. To limit the model to a subset of functions, define the allowed function names in `allowed_function_names`. |
304
- * | "none" | The model must not predict function calls. This behavior is equivalent to a model request without any associated function declarations. |
305
- * | string | The string value must be one of the function names. This will force the model to predict the specified function call. |
306
- *
307
- * The tool configuration's "any" mode ("forced function calling") is supported for Gemini 1.5 Pro models only.
308
- */
309
- tool_choice?: string | "auto" | "any" | "none" | Record<string, any>;
310
- /**
311
- * Allowed functions to call when the mode is "any".
312
- * If empty, any one of the provided functions are called.
313
- */
314
- allowed_function_names?: string[];
315
- /**
316
- * Used to specify a previously created context cache to use with generation.
317
- * For Vertex, this should be of the form:
318
- * "projects/PROJECT_NUMBER/locations/LOCATION/cachedContents/CACHE_ID",
319
- *
320
- * See these guides for more information on how to use context caching:
321
- * https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-create
322
- * https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-use
323
- */
324
- cachedContent?: string;
325
- }
326
- export interface GoogleAIBaseLLMInput<AuthOptions> extends BaseLLMParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams, GoogleAISafetyParams, GoogleAIAPIParams {
327
- }
328
- export interface GoogleAIBaseLanguageModelCallOptions extends BaseChatModelCallOptions, GoogleAIModelRequestParams, GoogleAISafetyParams {
329
- /**
330
- * Whether or not to include usage data, like token counts
331
- * in the streamed response chunks.
332
- * @default true
333
- */
334
- streamUsage?: boolean;
297
+ interface GoogleAIModelRequestParams extends GoogleAIModelParams {
298
+ tools?: GoogleAIToolType[];
299
+ /**
300
+ * Force the model to use tools in a specific way.
301
+ *
302
+ * | Mode | Description |
303
+ * |----------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
304
+ * | "auto" | The default model behavior. The model decides whether to predict a function call or a natural language response. |
305
+ * | "any" | The model must predict only function calls. To limit the model to a subset of functions, define the allowed function names in `allowed_function_names`. |
306
+ * | "none" | The model must not predict function calls. This behavior is equivalent to a model request without any associated function declarations. |
307
+ * | string | The string value must be one of the function names. This will force the model to predict the specified function call. |
308
+ *
309
+ * The tool configuration's "any" mode ("forced function calling") is supported for Gemini 1.5 Pro models only.
310
+ */
311
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
312
+ tool_choice?: string | "auto" | "any" | "none" | Record<string, any>;
313
+ /**
314
+ * Allowed functions to call when the mode is "any".
315
+ * If empty, any one of the provided functions are called.
316
+ */
317
+ allowed_function_names?: string[];
318
+ /**
319
+ * Used to specify a previously created context cache to use with generation.
320
+ * For Vertex, this should be of the form:
321
+ * "projects/PROJECT_NUMBER/locations/LOCATION/cachedContents/CACHE_ID",
322
+ *
323
+ * See these guides for more information on how to use context caching:
324
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-create
325
+ * https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-use
326
+ */
327
+ cachedContent?: string;
328
+ }
329
+ interface GoogleAIBaseLLMInput<AuthOptions> extends BaseLLMParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams, GoogleAISafetyParams, GoogleAIAPIParams {}
330
+ interface GoogleAIBaseLanguageModelCallOptions extends BaseChatModelCallOptions, GoogleAIModelRequestParams, GoogleAISafetyParams {
331
+ /**
332
+ * Whether or not to include usage data, like token counts
333
+ * in the streamed response chunks.
334
+ * @default true
335
+ */
336
+ streamUsage?: boolean;
335
337
  }
336
338
  /**
337
339
  * Input to LLM class.
338
340
  */
339
- export interface GoogleBaseLLMInput<AuthOptions> extends GoogleAIBaseLLMInput<AuthOptions> {
340
- }
341
- export interface GoogleResponse {
342
- data: any;
343
- }
344
- export interface GoogleRawResponse extends GoogleResponse {
345
- data: Blob;
346
- }
347
- export interface GeminiPartBase {
348
- thought?: boolean;
349
- thoughtSignature?: string;
350
- }
351
- export interface GeminiVideoMetadata {
352
- fps?: number;
353
- startOffset?: string;
354
- endOffset?: string;
355
- }
356
- export interface GeminiPartBaseFile extends GeminiPartBase {
357
- videoMetadata?: GeminiVideoMetadata;
358
- }
359
- export interface GeminiPartText extends GeminiPartBase {
360
- text: string;
361
- }
362
- export interface GeminiPartInlineData extends GeminiPartBaseFile {
363
- inlineData: {
364
- mimeType: string;
365
- data: string;
366
- };
367
- }
368
- export interface GeminiPartFileData extends GeminiPartBaseFile {
369
- fileData: {
370
- mimeType: string;
371
- fileUri: string;
372
- };
373
- }
374
- export interface GeminiPartFunctionCall extends GeminiPartBase {
375
- functionCall: {
376
- name: string;
377
- args?: object;
378
- };
341
+ interface GoogleBaseLLMInput<AuthOptions> extends GoogleAIBaseLLMInput<AuthOptions> {}
342
+ interface GoogleResponse {
343
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
344
+ data: any;
345
+ }
346
+ interface GoogleRawResponse extends GoogleResponse {
347
+ data: Blob;
348
+ }
349
+ interface GeminiPartBase {
350
+ thought?: boolean; // Output only
351
+ thoughtSignature?: string;
352
+ }
353
+ interface GeminiVideoMetadata {
354
+ fps?: number; // Double in range (0.0, 24.0]
355
+ startOffset?: string;
356
+ endOffset?: string;
357
+ }
358
+ interface GeminiPartBaseFile extends GeminiPartBase {
359
+ videoMetadata?: GeminiVideoMetadata;
360
+ }
361
+ interface GeminiPartText extends GeminiPartBase {
362
+ text: string;
363
+ }
364
+ interface GeminiPartInlineData extends GeminiPartBaseFile {
365
+ inlineData: {
366
+ mimeType: string;
367
+ data: string;
368
+ };
369
+ }
370
+ interface GeminiPartFileData extends GeminiPartBaseFile {
371
+ fileData: {
372
+ mimeType: string;
373
+ fileUri: string;
374
+ };
375
+ }
376
+ // AI Studio only?
377
+ interface GeminiPartFunctionCall extends GeminiPartBase {
378
+ functionCall: {
379
+ name: string;
380
+ args?: object;
381
+ };
379
382
  }
380
- export interface GeminiPartFunctionResponse extends GeminiPartBase {
381
- functionResponse: {
382
- name: string;
383
- response: object;
384
- };
383
+ // AI Studio Only?
384
+ interface GeminiPartFunctionResponse extends GeminiPartBase {
385
+ functionResponse: {
386
+ name: string;
387
+ response: object;
388
+ };
385
389
  }
386
- export type GeminiPart = GeminiPartText | GeminiPartInlineData | GeminiPartFileData | GeminiPartFunctionCall | GeminiPartFunctionResponse;
387
- export interface GeminiSafetySetting {
388
- category: string;
389
- threshold: string;
390
+ type GeminiPart = GeminiPartText | GeminiPartInlineData | GeminiPartFileData | GeminiPartFunctionCall | GeminiPartFunctionResponse;
391
+ interface GeminiSafetySetting {
392
+ category: string;
393
+ threshold: string;
390
394
  }
391
- export type GeminiSafetyRating = {
392
- category: string;
393
- probability: string;
395
+ type GeminiSafetyRating = {
396
+ category: string;
397
+ probability: string;
394
398
  } & Record<string, unknown>;
395
- export interface GeminiCitationMetadata {
396
- citations: GeminiCitation[];
397
- }
398
- export interface GeminiCitation {
399
- startIndex: number;
400
- endIndex: number;
401
- uri: string;
402
- title: string;
403
- license: string;
404
- publicationDate: GoogleTypeDate;
405
- }
406
- export interface GoogleTypeDate {
407
- year: number;
408
- month: number;
409
- day: number;
410
- }
411
- export interface GeminiGroundingMetadata {
412
- webSearchQueries?: string[];
413
- searchEntryPoint?: GeminiSearchEntryPoint;
414
- groundingChunks: GeminiGroundingChunk[];
415
- groundingSupports?: GeminiGroundingSupport[];
416
- retrievalMetadata?: GeminiRetrievalMetadata;
417
- }
418
- export interface GeminiSearchEntryPoint {
419
- renderedContent?: string;
420
- sdkBlob?: string;
421
- }
422
- export interface GeminiGroundingChunk {
423
- web: GeminiGroundingChunkWeb;
424
- retrievedContext: GeminiGroundingChunkRetrievedContext;
425
- }
426
- export interface GeminiGroundingChunkWeb {
427
- uri: string;
428
- title: string;
429
- }
430
- export interface GeminiGroundingChunkRetrievedContext {
431
- uri: string;
432
- title: string;
433
- text: string;
434
- }
435
- export interface GeminiGroundingSupport {
436
- segment: GeminiSegment;
437
- groundingChunkIndices: number[];
438
- confidenceScores: number[];
439
- }
440
- export interface GeminiSegment {
441
- partIndex: number;
442
- startIndex: number;
443
- endIndex: number;
444
- text: string;
445
- }
446
- export interface GeminiRetrievalMetadata {
447
- googleSearchDynamicRetrievalScore: number;
448
- }
449
- export type GeminiUrlRetrievalStatus = "URL_RETRIEVAL_STATUS_SUCCESS" | "URL_RETRIEVAL_STATUS_ERROR";
450
- export interface GeminiUrlRetrievalContext {
451
- retrievedUrl: string;
452
- urlRetrievalStatus: GeminiUrlRetrievalStatus;
453
- }
454
- export interface GeminiUrlRetrievalMetadata {
455
- urlRetrievalContexts: GeminiUrlRetrievalContext[];
456
- }
457
- export type GeminiUrlMetadata = GeminiUrlRetrievalContext;
458
- export interface GeminiUrlContextMetadata {
459
- urlMetadata: GeminiUrlMetadata[];
460
- }
461
- export interface GeminiLogprobsResult {
462
- topCandidates: GeminiLogprobsTopCandidate[];
463
- chosenCandidates: GeminiLogprobsResultCandidate[];
464
- }
465
- export interface GeminiLogprobsTopCandidate {
466
- candidates: GeminiLogprobsResultCandidate[];
467
- }
468
- export interface GeminiLogprobsResultCandidate {
469
- token: string;
470
- tokenId: number;
471
- logProbability: number;
472
- }
473
- export type GeminiRole = "system" | "user" | "model" | "function";
474
- export interface GeminiContent {
475
- parts: GeminiPart[];
476
- role: GeminiRole;
477
- }
478
- export interface GeminiTool {
479
- functionDeclarations?: GeminiFunctionDeclaration[];
480
- googleSearchRetrieval?: GoogleSearchRetrieval;
481
- googleSearch?: GoogleSearch;
482
- urlContext?: UrlContext;
483
- retrieval?: VertexAIRetrieval;
484
- }
485
- export type GoogleSearchToolSetting = boolean | "googleSearchRetrieval" | "googleSearch" | string;
486
- export declare const GeminiSearchToolAttributes: string[];
487
- export declare const GeminiToolAttributes: string[];
488
- export interface GoogleSearchRetrieval {
489
- dynamicRetrievalConfig?: {
490
- mode?: string;
491
- dynamicThreshold?: number;
492
- };
493
- }
494
- export interface GoogleSearch {
495
- }
496
- export interface UrlContext {
497
- }
498
- export interface VertexAIRetrieval {
499
- vertexAiSearch: {
500
- datastore: string;
501
- };
502
- disableAttribution?: boolean;
503
- }
504
- export interface GeminiFunctionDeclaration {
505
- name: string;
506
- description: string;
507
- parameters?: GeminiFunctionSchema;
508
- }
509
- export interface GeminiFunctionSchema {
510
- type: GeminiFunctionSchemaType;
511
- format?: string;
512
- description?: string;
513
- nullable?: boolean;
514
- enum?: string[];
515
- properties?: Record<string, GeminiFunctionSchema>;
516
- required?: string[];
517
- items?: GeminiFunctionSchema;
518
- }
519
- export type GeminiFunctionSchemaType = "string" | "number" | "integer" | "boolean" | "array" | "object";
520
- export interface GeminiGenerationConfig {
521
- stopSequences?: string[];
522
- candidateCount?: number;
523
- maxOutputTokens?: number;
524
- temperature?: number;
525
- topP?: number;
526
- topK?: number;
527
- seed?: number;
528
- presencePenalty?: number;
529
- frequencyPenalty?: number;
530
- responseMimeType?: GoogleAIResponseMimeType;
531
- responseLogprobs?: boolean;
532
- logprobs?: number;
533
- responseModalities?: GoogleAIModelModality[];
534
- thinkingConfig?: GoogleThinkingConfig;
535
- speechConfig?: GoogleSpeechConfig;
536
- }
537
- export interface GeminiRequest {
538
- contents?: GeminiContent[];
539
- systemInstruction?: GeminiContent;
540
- tools?: GeminiTool[];
541
- toolConfig?: {
542
- functionCallingConfig: {
543
- mode: "auto" | "any" | "none";
544
- allowedFunctionNames?: string[];
545
- };
546
- };
547
- safetySettings?: GeminiSafetySetting[];
548
- generationConfig?: GeminiGenerationConfig;
549
- cachedContent?: string;
550
- /**
551
- * Custom metadata labels to associate with the API call.
552
- */
553
- labels?: Record<string, string>;
554
- }
555
- export interface GeminiResponseCandidate {
556
- content: {
557
- parts: GeminiPart[];
558
- role: string;
399
+ interface GeminiCitationMetadata {
400
+ citations: GeminiCitation[];
401
+ }
402
+ interface GeminiCitation {
403
+ startIndex: number;
404
+ endIndex: number;
405
+ uri: string;
406
+ title: string;
407
+ license: string;
408
+ publicationDate: GoogleTypeDate;
409
+ }
410
+ interface GoogleTypeDate {
411
+ year: number; // 1-9999 or 0 to specify a date without a year
412
+ month: number; // 1-12 or 0 to specify a year without a month and day
413
+ day: number; // Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant
414
+ }
415
+ interface GeminiGroundingMetadata {
416
+ webSearchQueries?: string[];
417
+ searchEntryPoint?: GeminiSearchEntryPoint;
418
+ groundingChunks: GeminiGroundingChunk[];
419
+ groundingSupports?: GeminiGroundingSupport[];
420
+ retrievalMetadata?: GeminiRetrievalMetadata;
421
+ }
422
+ interface GeminiSearchEntryPoint {
423
+ renderedContent?: string;
424
+ sdkBlob?: string; // Base64 encoded JSON representing array of tuple.
425
+ }
426
+ interface GeminiGroundingChunk {
427
+ web: GeminiGroundingChunkWeb;
428
+ retrievedContext: GeminiGroundingChunkRetrievedContext;
429
+ }
430
+ interface GeminiGroundingChunkWeb {
431
+ uri: string;
432
+ title: string;
433
+ }
434
+ interface GeminiGroundingChunkRetrievedContext {
435
+ uri: string;
436
+ title: string;
437
+ text: string;
438
+ }
439
+ interface GeminiGroundingSupport {
440
+ segment: GeminiSegment;
441
+ groundingChunkIndices: number[];
442
+ confidenceScores: number[];
443
+ }
444
+ interface GeminiSegment {
445
+ partIndex: number;
446
+ startIndex: number;
447
+ endIndex: number;
448
+ text: string;
449
+ }
450
+ interface GeminiRetrievalMetadata {
451
+ googleSearchDynamicRetrievalScore: number;
452
+ }
453
+ type GeminiUrlRetrievalStatus = "URL_RETRIEVAL_STATUS_SUCCESS" | "URL_RETRIEVAL_STATUS_ERROR";
454
+ interface GeminiUrlRetrievalContext {
455
+ retrievedUrl: string;
456
+ urlRetrievalStatus: GeminiUrlRetrievalStatus;
457
+ }
458
+ interface GeminiUrlRetrievalMetadata {
459
+ urlRetrievalContexts: GeminiUrlRetrievalContext[];
460
+ }
461
+ type GeminiUrlMetadata = GeminiUrlRetrievalContext;
462
+ interface GeminiUrlContextMetadata {
463
+ urlMetadata: GeminiUrlMetadata[];
464
+ }
465
+ interface GeminiLogprobsResult {
466
+ topCandidates: GeminiLogprobsTopCandidate[];
467
+ chosenCandidates: GeminiLogprobsResultCandidate[];
468
+ }
469
+ interface GeminiLogprobsTopCandidate {
470
+ candidates: GeminiLogprobsResultCandidate[];
471
+ }
472
+ interface GeminiLogprobsResultCandidate {
473
+ token: string;
474
+ tokenId: number;
475
+ logProbability: number;
476
+ }
477
+ // The "system" content appears to only be valid in the systemInstruction
478
+ type GeminiRole = "system" | "user" | "model" | "function";
479
+ interface GeminiContent {
480
+ parts: GeminiPart[];
481
+ role: GeminiRole; // Vertex AI requires the role
482
+ }
483
+ /*
484
+ * If additional attributes are added here, they should also be
485
+ * added to the attributes below
486
+ */
487
+ interface GeminiTool {
488
+ functionDeclarations?: GeminiFunctionDeclaration[];
489
+ googleSearchRetrieval?: GoogleSearchRetrieval; // Gemini-1.5
490
+ googleSearch?: GoogleSearch; // Gemini-2.0
491
+ urlContext?: UrlContext;
492
+ retrieval?: VertexAIRetrieval;
493
+ }
494
+ /*
495
+ * The known strings in this type should match those in GeminiSearchToolAttribuets
496
+ */
497
+ type GoogleSearchToolSetting = boolean | "googleSearchRetrieval" | "googleSearch" | string;
498
+ declare const GeminiSearchToolAttributes: string[];
499
+ declare const GeminiToolAttributes: string[];
500
+ interface GoogleSearchRetrieval {
501
+ dynamicRetrievalConfig?: {
502
+ mode?: string;
503
+ dynamicThreshold?: number;
504
+ };
505
+ }
506
+ interface GoogleSearch {}
507
+ interface UrlContext {}
508
+ interface VertexAIRetrieval {
509
+ vertexAiSearch: {
510
+ datastore: string;
511
+ };
512
+ disableAttribution?: boolean;
513
+ }
514
+ interface GeminiFunctionDeclaration {
515
+ name: string;
516
+ description: string;
517
+ parameters?: GeminiFunctionSchema;
518
+ }
519
+ interface GeminiFunctionSchema {
520
+ type: GeminiFunctionSchemaType;
521
+ format?: string;
522
+ description?: string;
523
+ nullable?: boolean;
524
+ enum?: string[];
525
+ properties?: Record<string, GeminiFunctionSchema>;
526
+ required?: string[];
527
+ items?: GeminiFunctionSchema;
528
+ }
529
+ type GeminiFunctionSchemaType = "string" | "number" | "integer" | "boolean" | "array" | "object";
530
+ interface GeminiGenerationConfig {
531
+ stopSequences?: string[];
532
+ candidateCount?: number;
533
+ maxOutputTokens?: number;
534
+ temperature?: number;
535
+ topP?: number;
536
+ topK?: number;
537
+ seed?: number;
538
+ presencePenalty?: number;
539
+ frequencyPenalty?: number;
540
+ responseMimeType?: GoogleAIResponseMimeType;
541
+ responseLogprobs?: boolean;
542
+ logprobs?: number;
543
+ responseModalities?: GoogleAIModelModality[];
544
+ thinkingConfig?: GoogleThinkingConfig;
545
+ speechConfig?: GoogleSpeechConfig;
546
+ }
547
+ interface GeminiRequest {
548
+ contents?: GeminiContent[];
549
+ systemInstruction?: GeminiContent;
550
+ tools?: GeminiTool[];
551
+ toolConfig?: {
552
+ functionCallingConfig: {
553
+ mode: "auto" | "any" | "none";
554
+ allowedFunctionNames?: string[];
559
555
  };
560
- finishReason: string;
561
- index: number;
562
- tokenCount?: number;
563
- safetyRatings: GeminiSafetyRating[];
564
- citationMetadata?: GeminiCitationMetadata;
565
- groundingMetadata?: GeminiGroundingMetadata;
566
- urlRetrievalMetadata?: GeminiUrlRetrievalMetadata;
567
- urlContextMetadata?: GeminiUrlContextMetadata;
568
- avgLogprobs?: number;
569
- logprobsResult: GeminiLogprobsResult;
570
- finishMessage?: string;
556
+ };
557
+ safetySettings?: GeminiSafetySetting[];
558
+ generationConfig?: GeminiGenerationConfig;
559
+ cachedContent?: string;
560
+ /**
561
+ * Custom metadata labels to associate with the API call.
562
+ */
563
+ labels?: Record<string, string>;
564
+ }
565
+ interface GeminiResponseCandidate {
566
+ content: {
567
+ parts: GeminiPart[];
568
+ role: string;
569
+ };
570
+ finishReason: string;
571
+ index: number;
572
+ tokenCount?: number;
573
+ safetyRatings: GeminiSafetyRating[];
574
+ citationMetadata?: GeminiCitationMetadata;
575
+ groundingMetadata?: GeminiGroundingMetadata;
576
+ urlRetrievalMetadata?: GeminiUrlRetrievalMetadata;
577
+ urlContextMetadata?: GeminiUrlContextMetadata;
578
+ avgLogprobs?: number;
579
+ logprobsResult: GeminiLogprobsResult;
580
+ finishMessage?: string;
571
581
  }
  interface GeminiResponsePromptFeedback {
- blockReason?: string;
- safetyRatings: GeminiSafetyRating[];
- }
- export type ModalityEnum = "TEXT" | "IMAGE" | "VIDEO" | "AUDIO" | "DOCUMENT" | string;
- export interface ModalityTokenCount {
- modality: ModalityEnum;
- tokenCount: number;
- }
- export interface GenerateContentResponseUsageMetadata {
- promptTokenCount: number;
- toolUsePromptTokenCount: number;
- cachedContentTokenCount: number;
- thoughtsTokenCount: number;
- candidatesTokenCount: number;
- totalTokenCount: number;
- promptTokensDetails: ModalityTokenCount[];
- toolUsePromptTokensDetails: ModalityTokenCount[];
- cacheTokensDetails: ModalityTokenCount[];
- candidatesTokensDetails: ModalityTokenCount[];
- [key: string]: unknown;
- }
- export interface GenerateContentResponseData {
- candidates: GeminiResponseCandidate[];
- promptFeedback: GeminiResponsePromptFeedback;
- usageMetadata: GenerateContentResponseUsageMetadata;
- }
- export type GoogleLLMModelFamily = null | "palm" | "gemini" | "gemma";
- export type VertexModelFamily = GoogleLLMModelFamily | "claude";
- export type GoogleLLMResponseData = JsonStream | GenerateContentResponseData | GenerateContentResponseData[];
- export interface GoogleLLMResponse extends GoogleResponse {
- data: GoogleLLMResponseData | AnthropicResponseData;
- }
- export interface GoogleAISafetyHandler {
- /**
- * A function that will take a response and return the, possibly modified,
- * response or throw an exception if there are safety issues.
- *
- * @throws GoogleAISafetyError
- */
- handle(response: GoogleLLMResponse): GoogleLLMResponse;
- }
- export interface GoogleAISafetyParams {
- safetyHandler?: GoogleAISafetyHandler;
- }
- export type GeminiJsonSchema = Record<string, unknown> & {
- properties?: Record<string, GeminiJsonSchema>;
- type: GeminiFunctionSchemaType;
- nullable?: boolean;
+ blockReason?: string;
+ safetyRatings: GeminiSafetyRating[];
+ }
+ type ModalityEnum = "TEXT" | "IMAGE" | "VIDEO" | "AUDIO" | "DOCUMENT" | string;
+ interface ModalityTokenCount {
+ modality: ModalityEnum;
+ tokenCount: number;
+ }
+ interface GenerateContentResponseUsageMetadata {
+ promptTokenCount: number;
+ toolUsePromptTokenCount: number;
+ cachedContentTokenCount: number;
+ thoughtsTokenCount: number;
+ candidatesTokenCount: number;
+ totalTokenCount: number;
+ promptTokensDetails: ModalityTokenCount[];
+ toolUsePromptTokensDetails: ModalityTokenCount[];
+ cacheTokensDetails: ModalityTokenCount[];
+ candidatesTokensDetails: ModalityTokenCount[];
+ [key: string]: unknown;
+ }
+ interface GenerateContentResponseData {
+ candidates: GeminiResponseCandidate[];
+ promptFeedback: GeminiResponsePromptFeedback;
+ usageMetadata: GenerateContentResponseUsageMetadata;
+ }
+ type GoogleLLMModelFamily = null | "palm" | "gemini" | "gemma";
+ type VertexModelFamily = GoogleLLMModelFamily | "claude";
+ type GoogleLLMResponseData = JsonStream | GenerateContentResponseData | GenerateContentResponseData[];
+ interface GoogleLLMResponse extends GoogleResponse {
+ data: GoogleLLMResponseData | AnthropicResponseData;
+ }
+ interface GoogleAISafetyHandler {
+ /**
+ * A function that will take a response and return the, possibly modified,
+ * response or throw an exception if there are safety issues.
+ *
+ * @throws GoogleAISafetyError
+ */
+ handle(response: GoogleLLMResponse): GoogleLLMResponse;
+ }
+ interface GoogleAISafetyParams {
+ safetyHandler?: GoogleAISafetyHandler;
+ }
+ type GeminiJsonSchema = Record<string, unknown> & {
+ properties?: Record<string, GeminiJsonSchema>;
+ type: GeminiFunctionSchemaType;
+ nullable?: boolean;
  };
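GenerateContentResponseUsageMetadata splits token counts per modality through the ModalityTokenCount arrays. A sketch of folding those detail arrays into a single tally, assuming the types above are importable from the package root:

```ts
import type {
  GenerateContentResponseUsageMetadata,
  ModalityTokenCount,
} from "@langchain/google-common";

// Collapse the four per-modality detail arrays into one
// modality -> tokenCount map (prompt, tool use, cache, candidates).
function tokensByModality(
  usage: GenerateContentResponseUsageMetadata
): Record<string, number> {
  const details: ModalityTokenCount[] = [
    ...usage.promptTokensDetails,
    ...usage.toolUsePromptTokensDetails,
    ...usage.cacheTokensDetails,
    ...usage.candidatesTokensDetails,
  ];
  const totals: Record<string, number> = {};
  for (const { modality, tokenCount } of details) {
    totals[modality] = (totals[modality] ?? 0) + tokenCount;
  }
  return totals;
}
```

Since ModalityEnum is an open string union, the map is keyed by plain strings rather than a closed enum.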
- export interface GeminiJsonSchemaDirty extends GeminiJsonSchema {
- items?: GeminiJsonSchemaDirty;
- properties?: Record<string, GeminiJsonSchemaDirty>;
- additionalProperties?: boolean;
- }
- export type GoogleAIAPI = {
- messageContentToParts?: (content: MessageContent) => Promise<GeminiPart[]>;
- baseMessageToContent?: (message: BaseMessage, prevMessage: BaseMessage | undefined, useSystemInstruction: boolean) => Promise<GeminiContent[]>;
- responseToString: (response: GoogleLLMResponse) => string;
- responseToChatGeneration: (response: GoogleLLMResponse) => ChatGenerationChunk | null;
- chunkToString: (chunk: BaseMessageChunk) => string;
- responseToBaseMessage: (response: GoogleLLMResponse) => BaseMessage;
- responseToChatResult: (response: GoogleLLMResponse) => ChatResult;
- formatData: (input: unknown, parameters: GoogleAIModelRequestParams) => Promise<unknown>;
+ interface GeminiJsonSchemaDirty extends GeminiJsonSchema {
+ items?: GeminiJsonSchemaDirty;
+ properties?: Record<string, GeminiJsonSchemaDirty>;
+ additionalProperties?: boolean;
+ }
+ type GoogleAIAPI = {
+ messageContentToParts?: (content: MessageContent) => Promise<GeminiPart[]>;
+ baseMessageToContent?: (message: BaseMessage, prevMessage: BaseMessage | undefined, useSystemInstruction: boolean) => Promise<GeminiContent[]>;
+ responseToString: (response: GoogleLLMResponse) => string;
+ responseToChatGeneration: (response: GoogleLLMResponse) => ChatGenerationChunk | null;
+ chunkToString: (chunk: BaseMessageChunk) => string;
+ responseToBaseMessage: (response: GoogleLLMResponse) => BaseMessage;
+ responseToChatResult: (response: GoogleLLMResponse) => ChatResult;
+ formatData: (input: unknown, parameters: GoogleAIModelRequestParams) => Promise<unknown>;
  };
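GeminiJsonSchemaDirty admits fields (items, additionalProperties) beyond what GeminiJsonSchema declares, which suggests a cleanup pass before a schema is handed to the API. This is an illustrative recursive strip, not the package's own conversion routine:

```ts
import type {
  GeminiJsonSchema,
  GeminiJsonSchemaDirty,
} from "@langchain/google-common";

// Illustrative only: drop `additionalProperties` recursively while
// preserving the rest of the schema tree.
function cleanSchema(dirty: GeminiJsonSchemaDirty): GeminiJsonSchema {
  const { additionalProperties, items, properties, ...rest } = dirty;
  const clean: GeminiJsonSchema = { ...rest };
  if (items) {
    clean.items = cleanSchema(items);
  }
  if (properties) {
    clean.properties = Object.fromEntries(
      Object.entries(properties).map(([key, value]) => [key, cleanSchema(value)])
    );
  }
  return clean;
}
```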
- export interface GeminiAPIConfig {
- safetyHandler?: GoogleAISafetyHandler;
- mediaManager?: MediaManager;
- useSystemInstruction?: boolean;
- /**
- * How to handle the Google Search tool, since the name (and format)
- * of the tool changes between Gemini 1.5 and Gemini 2.0.
- * true - Change based on the model version. (Default)
- * false - Do not change the tool name provided
- * string value - Use this as the attribute name for the search
- * tool, adapting any tool attributes if possible.
- * When the model is created, a "true" or default setting
- * will be changed to a string based on the model.
- */
- googleSearchToolAdjustment?: GoogleSearchToolSetting;
- }
- export type GoogleAIAPIConfig = GeminiAPIConfig | AnthropicAPIConfig;
- export interface GoogleAIAPIParams {
- apiName?: string;
- apiConfig?: GoogleAIAPIConfig;
- }
+ interface GeminiAPIConfig {
+ safetyHandler?: GoogleAISafetyHandler;
+ mediaManager?: MediaManager;
+ useSystemInstruction?: boolean;
+ /**
+ * How to handle the Google Search tool, since the name (and format)
+ * of the tool changes between Gemini 1.5 and Gemini 2.0.
+ * true - Change based on the model version. (Default)
+ * false - Do not change the tool name provided
+ * string value - Use this as the attribute name for the search
+ * tool, adapting any tool attributes if possible.
+ * When the model is created, a "true" or default setting
+ * will be changed to a string based on the model.
+ */
+ googleSearchToolAdjustment?: GoogleSearchToolSetting;
+ }
+ type GoogleAIAPIConfig = GeminiAPIConfig | AnthropicAPIConfig;
+ interface GoogleAIAPIParams {
+ apiName?: string;
+ apiConfig?: GoogleAIAPIConfig;
+ }
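GeminiAPIConfig is where the safety handler and search-tool behavior plug in. A hedged sketch of a config literal — the pass-through handler is hypothetical, and the "googleSearch" string is one plausible value for the attribute-name form described in the comment above:

```ts
import type {
  GeminiAPIConfig,
  GoogleAISafetyHandler,
  GoogleLLMResponse,
} from "@langchain/google-common";

// Hypothetical pass-through handler; a real one would inspect the
// response and throw on blocked content instead of returning it.
const permissiveHandler: GoogleAISafetyHandler = {
  handle(response: GoogleLLMResponse): GoogleLLMResponse {
    return response;
  },
};

const apiConfig: GeminiAPIConfig = {
  safetyHandler: permissiveHandler,
  useSystemInstruction: true,
  // Pin the search tool attribute name rather than letting it be
  // rewritten per model version ("googleSearch" is an assumed value).
  googleSearchToolAdjustment: "googleSearch",
};
```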
+ // Embeddings
  /**
  * Defines the parameters required to initialize a
  * GoogleEmbeddings instance. It extends EmbeddingsParams and
  * GoogleConnectionParams.
  */
- export interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
- model: string;
- /**
- * Used to specify output embedding size.
- * If set, output embeddings will be truncated to the size specified.
- */
- dimensions?: number;
- /**
- * An alias for "dimensions"
- */
- outputDimensionality?: number;
+ interface BaseGoogleEmbeddingsParams<AuthOptions> extends EmbeddingsParams, GoogleConnectionParams<AuthOptions> {
+ model: string;
+ /**
+ * Used to specify output embedding size.
+ * If set, output embeddings will be truncated to the size specified.
+ */
+ dimensions?: number;
+ /**
+ * An alias for "dimensions"
+ */
+ outputDimensionality?: number;
  }
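A minimal params literal for the interface above; the model name is a placeholder and the AuthOptions parameter is left as unknown:

```ts
import type { BaseGoogleEmbeddingsParams } from "@langchain/google-common";

// `model` is the only required field added here; auth and caller
// options come from the extended parameter interfaces.
const params: BaseGoogleEmbeddingsParams<unknown> = {
  model: "text-embedding-005", // placeholder model name
  // Truncate output vectors to 256 entries; `outputDimensionality`
  // would express the same thing.
  dimensions: 256,
};
```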
  /**
  * Defines additional options specific to the
  * GoogleEmbeddingsInstance. It extends AsyncCallerCallOptions.
  */
- export interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {
- }
- export type GoogleEmbeddingsTaskType = "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | string;
+ interface BaseGoogleEmbeddingsOptions extends AsyncCallerCallOptions {}
+ type GoogleEmbeddingsTaskType = "RETRIEVAL_QUERY" | "RETRIEVAL_DOCUMENT" | "SEMANTIC_SIMILARITY" | "CLASSIFICATION" | "CLUSTERING" | "QUESTION_ANSWERING" | "FACT_VERIFICATION" | "CODE_RETRIEVAL_QUERY" | string;
  /**
  * Represents an instance for generating embeddings using the Google
  * Vertex AI API. It contains the content to be embedded.
  */
- export interface VertexEmbeddingsInstance {
- content: string;
- taskType?: GoogleEmbeddingsTaskType;
- title?: string;
- }
- export interface VertexEmbeddingsParameters extends GoogleModelParams {
- autoTruncate?: boolean;
- outputDimensionality?: number;
- }
- export interface VertexEmbeddingsRequest {
- instances: VertexEmbeddingsInstance[];
- parameters?: VertexEmbeddingsParameters;
- }
- export interface AIStudioEmbeddingsRequest {
- content: {
- parts: GeminiPartText[];
- };
- model?: string;
- taskType?: GoogleEmbeddingsTaskType;
- title?: string;
- outputDimensionality?: number;
- }
- export type GoogleEmbeddingsRequest = VertexEmbeddingsRequest | AIStudioEmbeddingsRequest;
- export interface VertexEmbeddingsResponsePrediction {
- embeddings: {
- statistics: {
- token_count: number;
- truncated: boolean;
- };
- values: number[];
+ interface VertexEmbeddingsInstance {
+ content: string;
+ taskType?: GoogleEmbeddingsTaskType;
+ title?: string;
+ }
+ interface VertexEmbeddingsParameters extends GoogleModelParams {
+ autoTruncate?: boolean;
+ outputDimensionality?: number;
+ }
+ interface VertexEmbeddingsRequest {
+ instances: VertexEmbeddingsInstance[];
+ parameters?: VertexEmbeddingsParameters;
+ }
+ interface AIStudioEmbeddingsRequest {
+ content: {
+ parts: GeminiPartText[];
+ };
+ model?: string; // Documentation says required, but tests say otherwise
+ taskType?: GoogleEmbeddingsTaskType;
+ title?: string;
+ outputDimensionality?: number;
+ }
+ type GoogleEmbeddingsRequest = VertexEmbeddingsRequest | AIStudioEmbeddingsRequest;
+ interface VertexEmbeddingsResponsePrediction {
+ embeddings: {
+ statistics: {
+ token_count: number;
+ truncated: boolean;
  };
+ values: number[];
+ };
  }
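The two request shapes differ by platform: Vertex batches instances, while AI Studio sends a single content of text parts. A sketch with placeholder strings and model names:

```ts
import type {
  VertexEmbeddingsRequest,
  AIStudioEmbeddingsRequest,
} from "@langchain/google-common";

const text = "How do I rotate service credentials?";

// Vertex AI: one instance per input, plus optional tuning parameters.
const vertexRequest: VertexEmbeddingsRequest = {
  instances: [{ content: text, taskType: "RETRIEVAL_QUERY" }],
  parameters: { autoTruncate: true, outputDimensionality: 256 },
};

// AI Studio: a single content object built from text parts.
const aiStudioRequest: AIStudioEmbeddingsRequest = {
  content: { parts: [{ text }] },
  model: "models/text-embedding-004", // placeholder model name
  taskType: "RETRIEVAL_QUERY",
};
```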
  /**
  * Defines the structure of the embeddings results returned by the Google
  * Vertex AI API. It extends GoogleBasePrediction and contains the
  * embeddings and their statistics.
  */
- export interface VertexEmbeddingsResponse extends GoogleResponse {
- data: {
- predictions: VertexEmbeddingsResponsePrediction[];
- };
- }
- export interface AIStudioEmbeddingsResponse extends GoogleResponse {
- data: {
- embedding: {
- values: number[];
- };
+ interface VertexEmbeddingsResponse extends GoogleResponse {
+ data: {
+ predictions: VertexEmbeddingsResponsePrediction[];
+ };
+ }
+ interface AIStudioEmbeddingsResponse extends GoogleResponse {
+ data: {
+ embedding: {
+ values: number[];
  };
+ };
  }
- export type GoogleEmbeddingsResponse = VertexEmbeddingsResponse | AIStudioEmbeddingsResponse;
+ type GoogleEmbeddingsResponse = VertexEmbeddingsResponse | AIStudioEmbeddingsResponse;
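Because GoogleEmbeddingsResponse is a union, callers narrow on the data shape before extracting vectors. A sketch assuming the presence of `predictions` is enough to discriminate:

```ts
import type { GoogleEmbeddingsResponse } from "@langchain/google-common";

// Vertex responses carry data.predictions; AI Studio responses carry
// data.embedding. Narrow with an `in` check and normalize to number[][].
function extractVectors(response: GoogleEmbeddingsResponse): number[][] {
  const data = response.data;
  if ("predictions" in data) {
    return data.predictions.map((p) => p.embeddings.values);
  }
  return [data.embedding.values];
}
```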
+ //#endregion
+ export { AIStudioEmbeddingsRequest, AIStudioEmbeddingsResponse, AnthropicAPIConfig, AnthropicCacheControl, AnthropicContent, AnthropicContentRedactedThinking, AnthropicContentText, AnthropicContentThinking, AnthropicContentToolUse, AnthropicMessage, AnthropicMessageContent, AnthropicMessageContentDocument, AnthropicMessageContentImage, AnthropicMessageContentRedactedThinking, AnthropicMessageContentText, AnthropicMessageContentThinking, AnthropicMessageContentToolResult, AnthropicMessageContentToolResultContent, AnthropicMessageContentToolUse, AnthropicMessageContentToolUseInput, AnthropicMetadata, AnthropicRequest, AnthropicRequestSettings, AnthropicResponseData, AnthropicResponseMessage, AnthropicStreamBaseDelta, AnthropicStreamBaseEvent, AnthropicStreamContentBlockDeltaEvent, AnthropicStreamContentBlockStartEvent, AnthropicStreamContentBlockStopEvent, AnthropicStreamDelta, AnthropicStreamDeltaType, AnthropicStreamErrorEvent, AnthropicStreamEventType, AnthropicStreamInputJsonDelta, AnthropicStreamMessageDeltaEvent, AnthropicStreamMessageStartEvent, AnthropicStreamMessageStopEvent, AnthropicStreamPingEvent, AnthropicStreamTextDelta, AnthropicThinking, AnthropicThinkingDisabled, AnthropicThinkingEnabled, AnthropicTool, AnthropicToolChoice, AnthropicToolChoiceAny, AnthropicToolChoiceAuto, AnthropicToolChoiceTool, AnthropicToolInputSchema, AnthropicUsage, BaseGoogleEmbeddingsOptions, BaseGoogleEmbeddingsParams, GeminiAPIConfig, GeminiCitation, GeminiCitationMetadata, GeminiContent, GeminiFunctionDeclaration, GeminiFunctionSchema, GeminiFunctionSchemaType, GeminiGenerationConfig, GeminiGroundingChunk, GeminiGroundingChunkRetrievedContext, GeminiGroundingChunkWeb, GeminiGroundingMetadata, GeminiGroundingSupport, GeminiJsonSchema, GeminiJsonSchemaDirty, GeminiLogprobsResult, GeminiLogprobsResultCandidate, GeminiLogprobsTopCandidate, GeminiPart, GeminiPartBase, GeminiPartBaseFile, GeminiPartFileData, GeminiPartFunctionCall, GeminiPartFunctionResponse, GeminiPartInlineData, GeminiPartText, GeminiRequest, GeminiResponseCandidate, GeminiRetrievalMetadata, GeminiRole, GeminiSafetyRating, GeminiSafetySetting, GeminiSearchEntryPoint, GeminiSearchToolAttributes, GeminiSegment, GeminiTool, GeminiToolAttributes, GeminiUrlContextMetadata, GeminiUrlMetadata, GeminiUrlRetrievalContext, GeminiUrlRetrievalMetadata, GeminiUrlRetrievalStatus, GeminiVideoMetadata, GenerateContentResponseData, GenerateContentResponseUsageMetadata, GoogleAIAPI, GoogleAIAPIConfig, GoogleAIAPIParams, GoogleAIBaseLLMInput, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelModality, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIResponseMimeType, GoogleAISafetyCategory, GoogleAISafetyHandler, GoogleAISafetyMethod, GoogleAISafetyParams, GoogleAISafetySetting, GoogleAISafetyThreshold, GoogleAIToolType, GoogleBaseLLMInput, GoogleClientParams, GoogleConnectionParams, GoogleEmbeddingsRequest, GoogleEmbeddingsResponse, GoogleEmbeddingsTaskType, GoogleLLMModelFamily, GoogleLLMResponse, GoogleLLMResponseData, GoogleModelParams, GoogleMultiSpeakerVoiceConfig, GooglePlatformType, GooglePrebuiltVoiceConfig, GooglePrebuiltVoiceName, GoogleRawResponse, GoogleResponse, GoogleSearch, GoogleSearchRetrieval, GoogleSearchToolSetting, GoogleSpeakerVoiceConfig, GoogleSpeechConfig, GoogleSpeechConfigMulti, GoogleSpeechConfigSimplified, GoogleSpeechConfigSingle, GoogleSpeechSimplifiedLanguage, GoogleSpeechSpeakerName, GoogleSpeechVoice, GoogleSpeechVoiceLanguage, GoogleSpeechVoicesLanguage, GoogleThinkingConfig, GoogleTypeDate, GoogleVoiceConfig, ModalityEnum, ModalityTokenCount, UrlContext, VertexAIRetrieval, VertexEmbeddingsInstance, VertexEmbeddingsParameters, VertexEmbeddingsRequest, VertexEmbeddingsResponse, VertexEmbeddingsResponsePrediction, VertexModelFamily };
+ //# sourceMappingURL=types.d.ts.map