@langchain/google-common 0.2.17 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169) hide show
  1. package/CHANGELOG.md +17 -0
  2. package/LICENSE +6 -6
  3. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  4. package/dist/auth.cjs +82 -116
  5. package/dist/auth.cjs.map +1 -0
  6. package/dist/auth.d.cts +46 -0
  7. package/dist/auth.d.cts.map +1 -0
  8. package/dist/auth.d.ts +41 -36
  9. package/dist/auth.d.ts.map +1 -0
  10. package/dist/auth.js +80 -110
  11. package/dist/auth.js.map +1 -0
  12. package/dist/chat_models.cjs +251 -466
  13. package/dist/chat_models.cjs.map +1 -0
  14. package/dist/chat_models.d.cts +98 -0
  15. package/dist/chat_models.d.cts.map +1 -0
  16. package/dist/chat_models.d.ts +87 -73
  17. package/dist/chat_models.d.ts.map +1 -0
  18. package/dist/chat_models.js +245 -457
  19. package/dist/chat_models.js.map +1 -0
  20. package/dist/connection.cjs +321 -466
  21. package/dist/connection.cjs.map +1 -0
  22. package/dist/connection.d.cts +109 -0
  23. package/dist/connection.d.cts.map +1 -0
  24. package/dist/connection.d.ts +98 -91
  25. package/dist/connection.d.ts.map +1 -0
  26. package/dist/connection.js +317 -459
  27. package/dist/connection.js.map +1 -0
  28. package/dist/embeddings.cjs +135 -186
  29. package/dist/embeddings.cjs.map +1 -0
  30. package/dist/embeddings.d.cts +44 -0
  31. package/dist/embeddings.d.cts.map +1 -0
  32. package/dist/embeddings.d.ts +38 -32
  33. package/dist/embeddings.d.ts.map +1 -0
  34. package/dist/embeddings.js +133 -181
  35. package/dist/embeddings.js.map +1 -0
  36. package/dist/experimental/media.cjs +380 -482
  37. package/dist/experimental/media.cjs.map +1 -0
  38. package/dist/experimental/media.d.cts +198 -0
  39. package/dist/experimental/media.d.cts.map +1 -0
  40. package/dist/experimental/media.d.ts +190 -202
  41. package/dist/experimental/media.d.ts.map +1 -0
  42. package/dist/experimental/media.js +369 -468
  43. package/dist/experimental/media.js.map +1 -0
  44. package/dist/experimental/utils/media_core.cjs +403 -517
  45. package/dist/experimental/utils/media_core.cjs.map +1 -0
  46. package/dist/experimental/utils/media_core.d.cts +215 -0
  47. package/dist/experimental/utils/media_core.d.cts.map +1 -0
  48. package/dist/experimental/utils/media_core.d.ts +171 -165
  49. package/dist/experimental/utils/media_core.d.ts.map +1 -0
  50. package/dist/experimental/utils/media_core.js +395 -506
  51. package/dist/experimental/utils/media_core.js.map +1 -0
  52. package/dist/index.cjs +58 -27
  53. package/dist/index.d.cts +13 -0
  54. package/dist/index.d.ts +13 -11
  55. package/dist/index.js +13 -11
  56. package/dist/llms.cjs +157 -244
  57. package/dist/llms.cjs.map +1 -0
  58. package/dist/llms.d.cts +72 -0
  59. package/dist/llms.d.cts.map +1 -0
  60. package/dist/llms.d.ts +64 -54
  61. package/dist/llms.d.ts.map +1 -0
  62. package/dist/llms.js +154 -238
  63. package/dist/llms.js.map +1 -0
  64. package/dist/output_parsers.cjs +148 -173
  65. package/dist/output_parsers.cjs.map +1 -0
  66. package/dist/output_parsers.d.cts +53 -0
  67. package/dist/output_parsers.d.cts.map +1 -0
  68. package/dist/output_parsers.d.ts +46 -42
  69. package/dist/output_parsers.d.ts.map +1 -0
  70. package/dist/output_parsers.js +146 -168
  71. package/dist/output_parsers.js.map +1 -0
  72. package/dist/types-anthropic.d.cts +229 -0
  73. package/dist/types-anthropic.d.cts.map +1 -0
  74. package/dist/types-anthropic.d.ts +221 -215
  75. package/dist/types-anthropic.d.ts.map +1 -0
  76. package/dist/types.cjs +51 -62
  77. package/dist/types.cjs.map +1 -0
  78. package/dist/types.d.cts +748 -0
  79. package/dist/types.d.cts.map +1 -0
  80. package/dist/types.d.ts +669 -656
  81. package/dist/types.d.ts.map +1 -0
  82. package/dist/types.js +46 -45
  83. package/dist/types.js.map +1 -0
  84. package/dist/utils/anthropic.cjs +598 -821
  85. package/dist/utils/anthropic.cjs.map +1 -0
  86. package/dist/utils/anthropic.js +597 -818
  87. package/dist/utils/anthropic.js.map +1 -0
  88. package/dist/utils/common.cjs +130 -211
  89. package/dist/utils/common.cjs.map +1 -0
  90. package/dist/utils/common.d.cts +13 -0
  91. package/dist/utils/common.d.cts.map +1 -0
  92. package/dist/utils/common.d.ts +12 -7
  93. package/dist/utils/common.d.ts.map +1 -0
  94. package/dist/utils/common.js +128 -207
  95. package/dist/utils/common.js.map +1 -0
  96. package/dist/utils/failed_handler.cjs +28 -30
  97. package/dist/utils/failed_handler.cjs.map +1 -0
  98. package/dist/utils/failed_handler.d.cts +9 -0
  99. package/dist/utils/failed_handler.d.cts.map +1 -0
  100. package/dist/utils/failed_handler.d.ts +8 -2
  101. package/dist/utils/failed_handler.d.ts.map +1 -0
  102. package/dist/utils/failed_handler.js +28 -28
  103. package/dist/utils/failed_handler.js.map +1 -0
  104. package/dist/utils/gemini.cjs +1020 -1488
  105. package/dist/utils/gemini.cjs.map +1 -0
  106. package/dist/utils/gemini.d.cts +51 -0
  107. package/dist/utils/gemini.d.cts.map +1 -0
  108. package/dist/utils/gemini.d.ts +51 -48
  109. package/dist/utils/gemini.d.ts.map +1 -0
  110. package/dist/utils/gemini.js +1015 -1479
  111. package/dist/utils/gemini.js.map +1 -0
  112. package/dist/utils/index.cjs +38 -23
  113. package/dist/utils/index.d.cts +8 -0
  114. package/dist/utils/index.d.ts +8 -7
  115. package/dist/utils/index.js +8 -7
  116. package/dist/utils/palm.d.cts +11 -0
  117. package/dist/utils/palm.d.cts.map +1 -0
  118. package/dist/utils/palm.d.ts +9 -4
  119. package/dist/utils/palm.d.ts.map +1 -0
  120. package/dist/utils/safety.cjs +13 -22
  121. package/dist/utils/safety.cjs.map +1 -0
  122. package/dist/utils/safety.d.cts +12 -0
  123. package/dist/utils/safety.d.cts.map +1 -0
  124. package/dist/utils/safety.d.ts +10 -4
  125. package/dist/utils/safety.d.ts.map +1 -0
  126. package/dist/utils/safety.js +13 -19
  127. package/dist/utils/safety.js.map +1 -0
  128. package/dist/utils/stream.cjs +296 -475
  129. package/dist/utils/stream.cjs.map +1 -0
  130. package/dist/utils/stream.d.cts +165 -0
  131. package/dist/utils/stream.d.cts.map +1 -0
  132. package/dist/utils/stream.d.ts +156 -131
  133. package/dist/utils/stream.d.ts.map +1 -0
  134. package/dist/utils/stream.js +293 -469
  135. package/dist/utils/stream.js.map +1 -0
  136. package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
  137. package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
  138. package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
  139. package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
  140. package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
  141. package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
  142. package/dist/utils/zod_to_gemini_parameters.js +40 -76
  143. package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
  144. package/package.json +69 -85
  145. package/dist/types-anthropic.cjs +0 -2
  146. package/dist/types-anthropic.js +0 -1
  147. package/dist/utils/anthropic.d.ts +0 -4
  148. package/dist/utils/palm.cjs +0 -2
  149. package/dist/utils/palm.js +0 -1
  150. package/experimental/media.cjs +0 -1
  151. package/experimental/media.d.cts +0 -1
  152. package/experimental/media.d.ts +0 -1
  153. package/experimental/media.js +0 -1
  154. package/experimental/utils/media_core.cjs +0 -1
  155. package/experimental/utils/media_core.d.cts +0 -1
  156. package/experimental/utils/media_core.d.ts +0 -1
  157. package/experimental/utils/media_core.js +0 -1
  158. package/index.cjs +0 -1
  159. package/index.d.cts +0 -1
  160. package/index.d.ts +0 -1
  161. package/index.js +0 -1
  162. package/types.cjs +0 -1
  163. package/types.d.cts +0 -1
  164. package/types.d.ts +0 -1
  165. package/types.js +0 -1
  166. package/utils.cjs +0 -1
  167. package/utils.d.cts +0 -1
  168. package/utils.d.ts +0 -1
  169. package/utils.js +0 -1
@@ -1,1489 +1,1025 @@
1
- import { v4 as uuidv4 } from "uuid";
2
- import { AIMessage, AIMessageChunk, isAIMessage, parseBase64DataUrl, isDataContentBlock, convertToProviderContentBlock, } from "@langchain/core/messages";
3
- import { ChatGenerationChunk, } from "@langchain/core/outputs";
4
- import { isLangChainTool } from "@langchain/core/utils/function_calling";
5
- import { concat } from "@langchain/core/utils/stream";
6
- import { GeminiSearchToolAttributes, } from "../types.js";
1
+ import { GeminiSearchToolAttributes } from "../types.js";
7
2
  import { GoogleAISafetyError } from "./safety.js";
8
3
  import { schemaToGeminiParameters } from "./zod_to_gemini_parameters.js";
9
- export class DefaultGeminiSafetyHandler {
10
- constructor(settings) {
11
- Object.defineProperty(this, "errorFinish", {
12
- enumerable: true,
13
- configurable: true,
14
- writable: true,
15
- value: ["SAFETY", "RECITATION", "OTHER"]
16
- });
17
- this.errorFinish = settings?.errorFinish ?? this.errorFinish;
18
- }
19
- handleDataPromptFeedback(response, data) {
20
- // Check to see if our prompt was blocked in the first place
21
- const promptFeedback = data?.promptFeedback;
22
- const blockReason = promptFeedback?.blockReason;
23
- if (blockReason) {
24
- throw new GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`);
25
- }
26
- return data;
27
- }
28
- handleDataFinishReason(response, data) {
29
- const firstCandidate = data?.candidates?.[0];
30
- const finishReason = firstCandidate?.finishReason;
31
- if (this.errorFinish.includes(finishReason)) {
32
- throw new GoogleAISafetyError(response, `Finish reason: ${finishReason}`);
33
- }
34
- return data;
35
- }
36
- handleData(response, data) {
37
- let ret = data;
38
- ret = this.handleDataPromptFeedback(response, ret);
39
- ret = this.handleDataFinishReason(response, ret);
40
- return ret;
41
- }
42
- handle(response) {
43
- let newdata;
44
- if ("nextChunk" in response.data) {
45
- // TODO: This is a stream. How to handle?
46
- newdata = response.data;
47
- }
48
- else if (Array.isArray(response.data)) {
49
- // If it is an array, try to handle every item in the array
50
- try {
51
- newdata = response.data.map((item) => this.handleData(response, item));
52
- }
53
- catch (xx) {
54
- // eslint-disable-next-line no-instanceof/no-instanceof
55
- if (xx instanceof GoogleAISafetyError) {
56
- throw new GoogleAISafetyError(response, xx.message);
57
- }
58
- else {
59
- throw xx;
60
- }
61
- }
62
- }
63
- else {
64
- const data = response.data;
65
- newdata = this.handleData(response, data);
66
- }
67
- return {
68
- ...response,
69
- data: newdata,
70
- };
71
- }
72
- }
73
- export class MessageGeminiSafetyHandler extends DefaultGeminiSafetyHandler {
74
- constructor(settings) {
75
- super(settings);
76
- Object.defineProperty(this, "msg", {
77
- enumerable: true,
78
- configurable: true,
79
- writable: true,
80
- value: ""
81
- });
82
- Object.defineProperty(this, "forceNewMessage", {
83
- enumerable: true,
84
- configurable: true,
85
- writable: true,
86
- value: false
87
- });
88
- this.msg = settings?.msg ?? this.msg;
89
- this.forceNewMessage = settings?.forceNewMessage ?? this.forceNewMessage;
90
- }
91
- setMessage(data) {
92
- const ret = data;
93
- if (this.forceNewMessage ||
94
- !data?.candidates?.[0]?.content?.parts?.length) {
95
- ret.candidates = data.candidates ?? [];
96
- ret.candidates[0] = data.candidates[0] ?? {};
97
- ret.candidates[0].content = data.candidates[0].content ?? {};
98
- ret.candidates[0].content = {
99
- role: "model",
100
- parts: [{ text: this.msg }],
101
- };
102
- }
103
- return ret;
104
- }
105
- handleData(response, data) {
106
- try {
107
- return super.handleData(response, data);
108
- }
109
- catch (xx) {
110
- return this.setMessage(data);
111
- }
112
- }
113
- }
4
+ import { ChatGenerationChunk } from "@langchain/core/outputs";
5
+ import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, ToolMessage, convertToProviderContentBlock, isAIMessage, isDataContentBlock, parseBase64DataUrl } from "@langchain/core/messages";
6
+ import { concat } from "@langchain/core/utils/stream";
7
+ import { isLangChainTool } from "@langchain/core/utils/function_calling";
8
+ import { v4 } from "uuid";
9
+
10
+ //#region src/utils/gemini.ts
11
+ var DefaultGeminiSafetyHandler = class {
12
+ errorFinish = [
13
+ "SAFETY",
14
+ "RECITATION",
15
+ "OTHER"
16
+ ];
17
+ constructor(settings) {
18
+ this.errorFinish = settings?.errorFinish ?? this.errorFinish;
19
+ }
20
+ handleDataPromptFeedback(response, data) {
21
+ const promptFeedback = data?.promptFeedback;
22
+ const blockReason = promptFeedback?.blockReason;
23
+ if (blockReason) throw new GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`);
24
+ return data;
25
+ }
26
+ handleDataFinishReason(response, data) {
27
+ const firstCandidate = data?.candidates?.[0];
28
+ const finishReason = firstCandidate?.finishReason;
29
+ if (this.errorFinish.includes(finishReason)) throw new GoogleAISafetyError(response, `Finish reason: ${finishReason}`);
30
+ return data;
31
+ }
32
+ handleData(response, data) {
33
+ let ret = data;
34
+ ret = this.handleDataPromptFeedback(response, ret);
35
+ ret = this.handleDataFinishReason(response, ret);
36
+ return ret;
37
+ }
38
+ handle(response) {
39
+ let newdata;
40
+ if ("nextChunk" in response.data) newdata = response.data;
41
+ else if (Array.isArray(response.data)) try {
42
+ newdata = response.data.map((item) => this.handleData(response, item));
43
+ } catch (xx) {
44
+ if (xx instanceof GoogleAISafetyError) throw new GoogleAISafetyError(response, xx.message);
45
+ else throw xx;
46
+ }
47
+ else {
48
+ const data = response.data;
49
+ newdata = this.handleData(response, data);
50
+ }
51
+ return {
52
+ ...response,
53
+ data: newdata
54
+ };
55
+ }
56
+ };
57
+ var MessageGeminiSafetyHandler = class extends DefaultGeminiSafetyHandler {
58
+ msg = "";
59
+ forceNewMessage = false;
60
+ constructor(settings) {
61
+ super(settings);
62
+ this.msg = settings?.msg ?? this.msg;
63
+ this.forceNewMessage = settings?.forceNewMessage ?? this.forceNewMessage;
64
+ }
65
+ setMessage(data) {
66
+ const ret = data;
67
+ if (this.forceNewMessage || !data?.candidates?.[0]?.content?.parts?.length) {
68
+ ret.candidates = data.candidates ?? [];
69
+ ret.candidates[0] = data.candidates[0] ?? {};
70
+ ret.candidates[0].content = data.candidates[0].content ?? {};
71
+ ret.candidates[0].content = {
72
+ role: "model",
73
+ parts: [{ text: this.msg }]
74
+ };
75
+ }
76
+ return ret;
77
+ }
78
+ handleData(response, data) {
79
+ try {
80
+ return super.handleData(response, data);
81
+ } catch {
82
+ return this.setMessage(data);
83
+ }
84
+ }
85
+ };
114
86
  const extractMimeType = (str) => {
115
- if (str.startsWith("data:")) {
116
- return {
117
- mimeType: str.split(":")[1].split(";")[0],
118
- data: str.split(",")[1],
119
- };
120
- }
121
- return null;
87
+ if (str.startsWith("data:")) return {
88
+ mimeType: str.split(":")[1].split(";")[0],
89
+ data: str.split(",")[1]
90
+ };
91
+ return null;
122
92
  };
123
- export function normalizeSpeechConfig(config) {
124
- function isSpeechConfig(config) {
125
- return (typeof config === "object" &&
126
- (Object.hasOwn(config, "voiceConfig") ||
127
- Object.hasOwn(config, "multiSpeakerVoiceConfig")));
128
- }
129
- function hasLanguage(config) {
130
- return typeof config === "object" && Object.hasOwn(config, "languageCode");
131
- }
132
- function hasVoice(config) {
133
- return Object.hasOwn(config, "voice");
134
- }
135
- if (typeof config === "undefined") {
136
- return undefined;
137
- }
138
- // If this is already a GoogleSpeechConfig, just return it
139
- if (isSpeechConfig(config)) {
140
- return config;
141
- }
142
- let languageCode;
143
- let voice;
144
- if (hasLanguage(config)) {
145
- languageCode = config.languageCode;
146
- voice = hasVoice(config) ? config.voice : config.voices;
147
- }
148
- else {
149
- languageCode = undefined;
150
- voice = config;
151
- }
152
- let ret;
153
- if (typeof voice === "string") {
154
- // They just provided the prebuilt voice configuration name. Use it.
155
- ret = {
156
- voiceConfig: {
157
- prebuiltVoiceConfig: {
158
- voiceName: voice,
159
- },
160
- },
161
- };
162
- }
163
- else {
164
- // This is multi-speaker, so we have speaker/name pairs
165
- // If we have just one (why?), turn it into an array for the moment
166
- const voices = Array.isArray(voice)
167
- ? voice
168
- : [voice];
169
- // Go through all the speaker/name pairs and turn this into the voice config array
170
- const speakerVoiceConfigs = voices.map((v) => ({
171
- speaker: v.speaker,
172
- voiceConfig: {
173
- prebuiltVoiceConfig: {
174
- voiceName: v.name,
175
- },
176
- },
177
- }));
178
- // Create the multi-speaker voice configuration
179
- ret = {
180
- multiSpeakerVoiceConfig: {
181
- speakerVoiceConfigs,
182
- },
183
- };
184
- }
185
- if (languageCode) {
186
- ret.languageCode = languageCode;
187
- }
188
- return ret;
93
+ /**
94
+ * Infers the MIME type from a URL based on its file extension.
95
+ * This is used as a fallback when the MIME type is not provided.
96
+ *
97
+ * @param url - The URL to infer the MIME type from
98
+ * @returns The inferred MIME type or undefined if it cannot be determined
99
+ */
100
+ function inferMimeTypeFromUrl(url) {
101
+ const mimeTypeMap = {
102
+ jpg: "image/jpeg",
103
+ jpeg: "image/jpeg",
104
+ png: "image/png",
105
+ gif: "image/gif",
106
+ webp: "image/webp",
107
+ bmp: "image/bmp",
108
+ svg: "image/svg+xml",
109
+ ico: "image/x-icon",
110
+ tiff: "image/tiff",
111
+ tif: "image/tiff"
112
+ };
113
+ try {
114
+ const pathname = new URL(url).pathname;
115
+ const extension = pathname.split(".").pop()?.toLowerCase().split(/[?#]/)[0];
116
+ return extension ? mimeTypeMap[extension] : void 0;
117
+ } catch {
118
+ const match = url.match(/\.([a-zA-Z0-9]+)(?:[?#]|$)/);
119
+ if (match) {
120
+ const extension = match[1].toLowerCase();
121
+ return mimeTypeMap[extension];
122
+ }
123
+ return void 0;
124
+ }
189
125
  }
190
- // Compatibility layer for other well known content block types
191
- export function normalizeMessageContentComplex(content) {
192
- return content.map((c) => {
193
- // OpenAI completions `input_audio`
194
- if (c.type === "input_audio" &&
195
- "input_audio" in c &&
196
- typeof c.input_audio === "object") {
197
- const { format, data } = c.input_audio;
198
- if (format === "wav") {
199
- return {
200
- type: "audio",
201
- source_type: "base64",
202
- mime_type: "audio/wav",
203
- data,
204
- };
205
- }
206
- }
207
- // OpenAI completions `image_url`
208
- if (c.type === "image_url" &&
209
- "image_url" in c &&
210
- typeof c.image_url === "object") {
211
- const { url } = c.image_url;
212
- return {
213
- type: "image",
214
- source_type: "url",
215
- url,
216
- };
217
- }
218
- // OpenAI completions `file`
219
- if (c.type === "file" &&
220
- "file" in c &&
221
- typeof c.file === "object" &&
222
- "file_data" in c.file) {
223
- const { file_data } = c.file;
224
- return {
225
- type: "file",
226
- source_type: "base64",
227
- data: file_data,
228
- };
229
- }
230
- return c;
231
- });
126
+ function normalizeSpeechConfig(config) {
127
+ function isSpeechConfig(config$1) {
128
+ return typeof config$1 === "object" && (Object.hasOwn(config$1, "voiceConfig") || Object.hasOwn(config$1, "multiSpeakerVoiceConfig"));
129
+ }
130
+ function hasLanguage(config$1) {
131
+ return typeof config$1 === "object" && Object.hasOwn(config$1, "languageCode");
132
+ }
133
+ function hasVoice(config$1) {
134
+ return Object.hasOwn(config$1, "voice");
135
+ }
136
+ if (typeof config === "undefined") return void 0;
137
+ if (isSpeechConfig(config)) return config;
138
+ let languageCode;
139
+ let voice;
140
+ if (hasLanguage(config)) {
141
+ languageCode = config.languageCode;
142
+ voice = hasVoice(config) ? config.voice : config.voices;
143
+ } else {
144
+ languageCode = void 0;
145
+ voice = config;
146
+ }
147
+ let ret;
148
+ if (typeof voice === "string") ret = { voiceConfig: { prebuiltVoiceConfig: { voiceName: voice } } };
149
+ else {
150
+ const voices = Array.isArray(voice) ? voice : [voice];
151
+ const speakerVoiceConfigs = voices.map((v) => ({
152
+ speaker: v.speaker,
153
+ voiceConfig: { prebuiltVoiceConfig: { voiceName: v.name } }
154
+ }));
155
+ ret = { multiSpeakerVoiceConfig: { speakerVoiceConfigs } };
156
+ }
157
+ if (languageCode) ret.languageCode = languageCode;
158
+ return ret;
232
159
  }
233
- export function getGeminiAPI(config) {
234
- function messageContentText(content) {
235
- if (content?.text && content?.text.length > 0) {
236
- return {
237
- text: content.text,
238
- };
239
- }
240
- else {
241
- return null;
242
- }
243
- }
244
- function messageContentImageUrlData(content) {
245
- const url = typeof content.image_url === "string"
246
- ? content.image_url
247
- : content.image_url.url;
248
- if (!url) {
249
- throw new Error("Missing Image URL");
250
- }
251
- const mimeTypeAndData = extractMimeType(url);
252
- if (mimeTypeAndData) {
253
- return {
254
- inlineData: mimeTypeAndData,
255
- };
256
- }
257
- else {
258
- // FIXME - need some way to get mime type
259
- return {
260
- fileData: {
261
- mimeType: "image/png",
262
- fileUri: url,
263
- },
264
- };
265
- }
266
- }
267
- function messageContentImageUrl(content) {
268
- const ret = messageContentImageUrlData(content);
269
- supplementVideoMetadata(content, ret);
270
- return ret;
271
- }
272
- async function blobToFileData(blob) {
273
- return {
274
- fileData: {
275
- fileUri: blob.path,
276
- mimeType: blob.mimetype,
277
- },
278
- };
279
- }
280
- async function fileUriContentToBlob(uri) {
281
- return config?.mediaManager?.getMediaBlob(uri);
282
- }
283
- async function messageContentMediaData(
284
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
285
- content) {
286
- if ("mimeType" in content && "data" in content) {
287
- return {
288
- inlineData: {
289
- mimeType: content.mimeType,
290
- data: content.data,
291
- },
292
- };
293
- }
294
- else if ("mimeType" in content && "fileUri" in content) {
295
- return {
296
- fileData: {
297
- mimeType: content.mimeType,
298
- fileUri: content.fileUri,
299
- },
300
- };
301
- }
302
- else {
303
- const uri = content.fileUri;
304
- const blob = await fileUriContentToBlob(uri);
305
- if (blob) {
306
- return await blobToFileData(blob);
307
- }
308
- }
309
- throw new Error(`Invalid media content: ${JSON.stringify(content, null, 1)}`);
310
- }
311
- function supplementVideoMetadata(
312
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
313
- content, ret) {
314
- // Add videoMetadata if defined
315
- if ("videoMetadata" in content && typeof ret === "object") {
316
- // eslint-disable-next-line no-param-reassign
317
- ret.videoMetadata = content.videoMetadata;
318
- }
319
- return ret;
320
- }
321
- async function messageContentMedia(
322
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
323
- content) {
324
- const ret = await messageContentMediaData(content);
325
- supplementVideoMetadata(content, ret);
326
- return ret;
327
- }
328
- function messageContentReasoning(content) {
329
- if (content?.reasoning && content?.reasoning.length > 0) {
330
- return {
331
- text: content.reasoning,
332
- thought: true,
333
- };
334
- }
335
- else {
336
- return null;
337
- }
338
- }
339
- const standardContentBlockConverter = {
340
- providerName: "Google Gemini",
341
- fromStandardTextBlock(block) {
342
- return {
343
- text: block.text,
344
- };
345
- },
346
- fromStandardImageBlock(block) {
347
- if (block.source_type === "url") {
348
- const data = parseBase64DataUrl({ dataUrl: block.url });
349
- if (data) {
350
- return {
351
- inlineData: {
352
- mimeType: data.mime_type,
353
- data: data.data,
354
- },
355
- };
356
- }
357
- else {
358
- return {
359
- fileData: {
360
- mimeType: block.mime_type ?? "",
361
- fileUri: block.url,
362
- },
363
- };
364
- }
365
- }
366
- if (block.source_type === "base64") {
367
- return {
368
- inlineData: {
369
- mimeType: block.mime_type ?? "",
370
- data: block.data,
371
- },
372
- };
373
- }
374
- throw new Error(`Unsupported source type: ${block.source_type}`);
375
- },
376
- fromStandardAudioBlock(block) {
377
- if (block.source_type === "url") {
378
- const data = parseBase64DataUrl({ dataUrl: block.url });
379
- if (data) {
380
- return {
381
- inlineData: {
382
- mimeType: data.mime_type,
383
- data: data.data,
384
- },
385
- };
386
- }
387
- else {
388
- return {
389
- fileData: {
390
- mimeType: block.mime_type ?? "",
391
- fileUri: block.url,
392
- },
393
- };
394
- }
395
- }
396
- if (block.source_type === "base64") {
397
- return {
398
- inlineData: {
399
- mimeType: block.mime_type ?? "",
400
- data: block.data,
401
- },
402
- };
403
- }
404
- throw new Error(`Unsupported source type: ${block.source_type}`);
405
- },
406
- fromStandardFileBlock(block) {
407
- if (block.source_type === "text") {
408
- return {
409
- text: block.text,
410
- };
411
- }
412
- if (block.source_type === "url") {
413
- const data = parseBase64DataUrl({ dataUrl: block.url });
414
- if (data) {
415
- return {
416
- inlineData: {
417
- mimeType: data.mime_type,
418
- data: data.data,
419
- },
420
- };
421
- }
422
- else {
423
- return {
424
- fileData: {
425
- mimeType: block.mime_type ?? "",
426
- fileUri: block.url,
427
- },
428
- };
429
- }
430
- }
431
- if (block.source_type === "base64") {
432
- return {
433
- inlineData: {
434
- mimeType: block.mime_type ?? "",
435
- data: block.data,
436
- },
437
- };
438
- }
439
- throw new Error(`Unsupported source type: ${block.source_type}`);
440
- },
441
- };
442
- async function messageContentComplexToPart(content) {
443
- switch (content.type) {
444
- case "text":
445
- if ("text" in content) {
446
- return messageContentText(content);
447
- }
448
- break;
449
- case "image_url":
450
- if ("image_url" in content) {
451
- // Type guard for MessageContentImageUrl
452
- return messageContentImageUrl(content);
453
- }
454
- break;
455
- case "media":
456
- return await messageContentMedia(content);
457
- case "reasoning":
458
- return messageContentReasoning(content);
459
- default:
460
- throw new Error(`Unsupported type "${content.type}" received while converting message to message parts: ${JSON.stringify(content)}`);
461
- }
462
- throw new Error(`Cannot coerce "${content.type}" message part into a string.`);
463
- }
464
- async function messageContentComplexToParts(content) {
465
- const contents = content.map((m) => isDataContentBlock(m)
466
- ? convertToProviderContentBlock(m, standardContentBlockConverter)
467
- : messageContentComplexToPart(m));
468
- return Promise.all(contents);
469
- }
470
- async function messageContentToParts(content) {
471
- // Convert a string to a text type MessageContent if needed
472
- const messageContent = typeof content === "string"
473
- ? [
474
- {
475
- type: "text",
476
- text: content,
477
- },
478
- ]
479
- : content;
480
- // Normalize the content to use standard format
481
- const normalizedContent = normalizeMessageContentComplex(messageContent);
482
- // Get all of the parts, even those that don't correctly resolve
483
- const allParts = await messageContentComplexToParts(normalizedContent);
484
- // Remove any invalid parts
485
- const parts = allParts.reduce((acc, val) => {
486
- if (val) {
487
- return [...acc, val];
488
- }
489
- else {
490
- return acc;
491
- }
492
- }, []);
493
- return parts;
494
- }
495
- function messageToolCallsToParts(toolCalls) {
496
- if (!toolCalls || toolCalls.length === 0) {
497
- return [];
498
- }
499
- return toolCalls.map((tool) => {
500
- let args = {};
501
- if (tool?.function?.arguments) {
502
- const argStr = tool.function.arguments;
503
- args = JSON.parse(argStr);
504
- }
505
- return {
506
- functionCall: {
507
- name: tool.function.name,
508
- args,
509
- },
510
- };
511
- });
512
- }
513
- function messageKwargsToParts(kwargs) {
514
- const ret = [];
515
- if (kwargs?.tool_calls) {
516
- ret.push(...messageToolCallsToParts(kwargs.tool_calls));
517
- }
518
- return ret;
519
- }
520
- async function roleMessageToContent(role, message) {
521
- const contentParts = await messageContentToParts(message.content);
522
- let toolParts;
523
- if (isAIMessage(message) && !!message.tool_calls?.length) {
524
- toolParts = message.tool_calls.map((toolCall) => ({
525
- functionCall: {
526
- name: toolCall.name,
527
- args: toolCall.args,
528
- },
529
- }));
530
- }
531
- else {
532
- toolParts = messageKwargsToParts(message.additional_kwargs);
533
- }
534
- const parts = [...contentParts, ...toolParts];
535
- const signatures = message?.additional_kwargs?.signatures ?? [];
536
- if (signatures.length === parts.length) {
537
- for (let co = 0; co < signatures.length; co += 1) {
538
- const signature = signatures[co];
539
- if (signature && signature.length > 0) {
540
- parts[co].thoughtSignature = signature;
541
- }
542
- }
543
- }
544
- return [
545
- {
546
- role,
547
- parts,
548
- },
549
- ];
550
- }
551
- async function systemMessageToContent(message) {
552
- return config?.useSystemInstruction
553
- ? roleMessageToContent("system", message)
554
- : [
555
- ...(await roleMessageToContent("user", message)),
556
- ...(await roleMessageToContent("model", new AIMessage("Ok"))),
557
- ];
558
- }
559
- function toolMessageToContent(message, prevMessage) {
560
- const contentStr = typeof message.content === "string"
561
- ? message.content
562
- : message.content.reduce((acc, content) => {
563
- if (content.type === "text") {
564
- return acc + content.text;
565
- }
566
- else {
567
- return acc;
568
- }
569
- }, "");
570
- // Hacky :(
571
- const responseName = (isAIMessage(prevMessage) && !!prevMessage.tool_calls?.length
572
- ? prevMessage.tool_calls[0].name
573
- : prevMessage.name) ?? message.tool_call_id;
574
- try {
575
- const content = JSON.parse(contentStr);
576
- return [
577
- {
578
- role: "function",
579
- parts: [
580
- {
581
- functionResponse: {
582
- name: responseName,
583
- response: { content },
584
- },
585
- },
586
- ],
587
- },
588
- ];
589
- }
590
- catch (_) {
591
- return [
592
- {
593
- role: "function",
594
- parts: [
595
- {
596
- functionResponse: {
597
- name: responseName,
598
- response: { content: contentStr },
599
- },
600
- },
601
- ],
602
- },
603
- ];
604
- }
605
- }
606
- async function baseMessageToContent(message, prevMessage) {
607
- const type = message._getType();
608
- switch (type) {
609
- case "system":
610
- return systemMessageToContent(message);
611
- case "human":
612
- return roleMessageToContent("user", message);
613
- case "ai":
614
- return roleMessageToContent("model", message);
615
- case "tool":
616
- if (!prevMessage) {
617
- throw new Error("Tool messages cannot be the first message passed to the model.");
618
- }
619
- return toolMessageToContent(message, prevMessage);
620
- default:
621
- console.log(`Unsupported message type: ${type}`);
622
- return [];
623
- }
624
- }
625
- function thoughtPartToMessageContent(part) {
626
- return {
627
- type: "reasoning",
628
- reasoning: part.text,
629
- };
630
- }
631
- function textPartToMessageContent(part) {
632
- return {
633
- type: "text",
634
- text: part.text,
635
- };
636
- }
637
- function inlineDataPartToMessageContentImage(part) {
638
- return {
639
- type: "image_url",
640
- image_url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`,
641
- };
642
- }
643
- function inlineDataPartToMessageContentMedia(part
644
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
645
- ) {
646
- return {
647
- type: "media",
648
- mimeType: part.inlineData.mimeType,
649
- data: part.inlineData.data,
650
- };
651
- }
652
- function inlineDataPartToMessageContent(part
653
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
654
- ) {
655
- const mimeType = part?.inlineData?.mimeType ?? "";
656
- if (mimeType.startsWith("image")) {
657
- return inlineDataPartToMessageContentImage(part);
658
- }
659
- else {
660
- return inlineDataPartToMessageContentMedia(part);
661
- }
662
- }
663
- function fileDataPartToMessageContent(part) {
664
- return {
665
- type: "image_url",
666
- image_url: part.fileData.fileUri,
667
- };
668
- }
669
- function partsToMessageContent(parts) {
670
- return parts
671
- .map((part) => {
672
- if (part === undefined || part === null) {
673
- return null;
674
- }
675
- else if (part.thought) {
676
- return thoughtPartToMessageContent(part);
677
- }
678
- else if ("text" in part) {
679
- return textPartToMessageContent(part);
680
- }
681
- else if ("inlineData" in part) {
682
- return inlineDataPartToMessageContent(part);
683
- }
684
- else if ("fileData" in part) {
685
- return fileDataPartToMessageContent(part);
686
- }
687
- else {
688
- return null;
689
- }
690
- })
691
- .reduce((acc, content) => {
692
- if (content) {
693
- acc.push(content);
694
- }
695
- return acc;
696
- }, []);
697
- }
698
- function toolRawToTool(raw) {
699
- return {
700
- id: raw.id,
701
- type: raw.type,
702
- function: {
703
- name: raw.function.name,
704
- arguments: JSON.stringify(raw.function.arguments),
705
- },
706
- };
707
- }
708
- function functionCallPartToToolRaw(part) {
709
- return {
710
- id: uuidv4().replace(/-/g, ""),
711
- type: "function",
712
- function: {
713
- name: part.functionCall.name,
714
- arguments: part.functionCall.args ?? {},
715
- },
716
- };
717
- }
718
- function partsToToolsRaw(parts) {
719
- return parts
720
- .map((part) => {
721
- if (part === undefined || part === null) {
722
- return null;
723
- }
724
- else if ("functionCall" in part) {
725
- return functionCallPartToToolRaw(part);
726
- }
727
- else {
728
- return null;
729
- }
730
- })
731
- .reduce((acc, content) => {
732
- if (content) {
733
- acc.push(content);
734
- }
735
- return acc;
736
- }, []);
737
- }
738
- function toolsRawToTools(raws) {
739
- return raws.map((raw) => toolRawToTool(raw));
740
- }
741
- function responseToGenerateContentResponseData(response) {
742
- if ("nextChunk" in response.data) {
743
- throw new Error("Cannot convert Stream to GenerateContentResponseData");
744
- }
745
- else if (Array.isArray(response.data)) {
746
- // Collapse the array of response data as if it was a single one
747
- return response.data.reduce((acc, val) => {
748
- // Add all the parts
749
- // FIXME: Handle other candidates?
750
- const valParts = val?.candidates?.[0]?.content?.parts ?? [];
751
- acc.candidates[0].content.parts.push(...valParts);
752
- // FIXME: Merge promptFeedback and safety settings
753
- acc.promptFeedback = val.promptFeedback;
754
- return acc;
755
- });
756
- }
757
- else {
758
- return response.data;
759
- }
760
- }
761
- function responseToParts(response) {
762
- const responseData = responseToGenerateContentResponseData(response);
763
- const parts = responseData?.candidates?.[0]?.content?.parts ?? [];
764
- return parts;
765
- }
766
- function partToText(part) {
767
- return "text" in part ? part.text : "";
768
- }
769
- function responseToString(response) {
770
- const parts = responseToParts(response);
771
- const ret = parts.reduce((acc, part) => {
772
- const val = partToText(part);
773
- return acc + val;
774
- }, "");
775
- return ret;
776
- }
777
- function safeResponseTo(response, responseTo) {
778
- const safetyHandler = config?.safetyHandler ?? new DefaultGeminiSafetyHandler();
779
- try {
780
- const safeResponse = safetyHandler.handle(response);
781
- return responseTo(safeResponse);
782
- }
783
- catch (xx) {
784
- // eslint-disable-next-line no-instanceof/no-instanceof
785
- if (xx instanceof GoogleAISafetyError) {
786
- const ret = responseTo(xx.response);
787
- xx.reply = ret;
788
- }
789
- throw xx;
790
- }
791
- }
792
- function safeResponseToString(response) {
793
- return safeResponseTo(response, responseToString);
794
- }
795
- function logprobResultToLogprob(result) {
796
- const token = result?.token;
797
- const logprob = result?.logProbability;
798
- const encoder = new TextEncoder();
799
- const bytes = Array.from(encoder.encode(token));
800
- return {
801
- token,
802
- logprob,
803
- bytes,
804
- };
805
- }
806
- function candidateToLogprobs(candidate) {
807
- const logprobs = candidate?.logprobsResult;
808
- const chosenTokens = logprobs?.chosenCandidates ?? [];
809
- const topTokens = logprobs?.topCandidates ?? [];
810
- const content = [];
811
- for (let co = 0; co < chosenTokens.length; co += 1) {
812
- const chosen = chosenTokens[co];
813
- const top = topTokens[co]?.candidates ?? [];
814
- const logprob = logprobResultToLogprob(chosen);
815
- logprob.top_logprobs = top.map((l) => logprobResultToLogprob(l));
816
- content.push(logprob);
817
- }
818
- return {
819
- content,
820
- };
821
- }
822
- function candidateToUrlContextMetadata(candidate) {
823
- const retrieval = candidate?.urlRetrievalMetadata?.urlRetrievalContexts ?? [];
824
- const context = candidate?.urlContextMetadata?.urlMetadata ?? [];
825
- const all = [...retrieval, ...context];
826
- if (all.length === 0) {
827
- return undefined;
828
- }
829
- else {
830
- return {
831
- urlMetadata: all,
832
- };
833
- }
834
- }
835
- function addModalityCounts(modalityTokenCounts, details) {
836
- modalityTokenCounts?.forEach((modalityTokenCount) => {
837
- const { modality, tokenCount } = modalityTokenCount;
838
- const modalityLc = modality.toLowerCase();
839
- const currentCount = details[modalityLc] ?? 0;
840
- // eslint-disable-next-line no-param-reassign
841
- details[modalityLc] = currentCount + tokenCount;
842
- });
843
- }
844
- function responseToUsageMetadata(response) {
845
- if ("usageMetadata" in response.data) {
846
- const data = response?.data;
847
- const usageMetadata = data?.usageMetadata;
848
- const input_tokens = usageMetadata.promptTokenCount ?? 0;
849
- const candidatesTokenCount = usageMetadata.candidatesTokenCount ?? 0;
850
- const thoughtsTokenCount = usageMetadata.thoughtsTokenCount ?? 0;
851
- const output_tokens = candidatesTokenCount + thoughtsTokenCount;
852
- const total_tokens = usageMetadata.totalTokenCount ?? input_tokens + output_tokens;
853
- const input_token_details = {};
854
- addModalityCounts(usageMetadata.promptTokensDetails, input_token_details);
855
- if (typeof usageMetadata?.cachedContentTokenCount === "number") {
856
- input_token_details.cache_read = usageMetadata.cachedContentTokenCount;
857
- }
858
- const output_token_details = {};
859
- addModalityCounts(usageMetadata?.candidatesTokensDetails, output_token_details);
860
- if (typeof usageMetadata?.thoughtsTokenCount === "number") {
861
- output_token_details.reasoning = usageMetadata.thoughtsTokenCount;
862
- }
863
- const ret = {
864
- input_tokens,
865
- output_tokens,
866
- total_tokens,
867
- input_token_details,
868
- output_token_details,
869
- };
870
- return ret;
871
- }
872
- return undefined;
873
- }
874
- function responseToGenerationInfo(response) {
875
- const data =
876
- // eslint-disable-next-line no-nested-ternary
877
- Array.isArray(response.data) && response.data[0]
878
- ? response.data[0]
879
- : response.data &&
880
- response.data.candidates
881
- ? response.data
882
- : undefined;
883
- if (!data) {
884
- return {};
885
- }
886
- const finish_reason = data.candidates[0]?.finishReason;
887
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
888
- const ret = {
889
- safety_ratings: data.candidates[0]?.safetyRatings?.map((rating) => ({
890
- category: rating.category,
891
- probability: rating.probability,
892
- probability_score: rating.probabilityScore,
893
- severity: rating.severity,
894
- severity_score: rating.severityScore,
895
- })),
896
- citation_metadata: data.candidates[0]?.citationMetadata,
897
- grounding_metadata: data.candidates[0]?.groundingMetadata,
898
- finish_reason,
899
- finish_message: data.candidates[0]?.finishMessage,
900
- url_context_metadata: candidateToUrlContextMetadata(data.candidates[0]),
901
- avgLogprobs: data.candidates[0]?.avgLogprobs,
902
- logprobs: candidateToLogprobs(data.candidates[0]),
903
- };
904
- // Only add the usage_metadata on the last chunk
905
- // sent while streaming (see issue 8102).
906
- if (typeof finish_reason === "string") {
907
- ret.usage_metadata = responseToUsageMetadata(response);
908
- }
909
- return ret;
910
- }
911
- function responseToChatGeneration(response) {
912
- return new ChatGenerationChunk({
913
- text: responseToString(response),
914
- message: partToMessageChunk(responseToParts(response)[0]),
915
- generationInfo: responseToGenerationInfo(response),
916
- });
917
- }
918
- function safeResponseToChatGeneration(response) {
919
- return safeResponseTo(response, responseToChatGeneration);
920
- }
921
- function chunkToString(chunk) {
922
- if (chunk === null) {
923
- return "";
924
- }
925
- else if (typeof chunk.content === "string") {
926
- return chunk.content;
927
- }
928
- else if (chunk.content.length === 0) {
929
- return "";
930
- }
931
- else if (chunk.content[0].type === "text") {
932
- return chunk.content[0].text;
933
- }
934
- else {
935
- throw new Error(`Unexpected chunk: ${chunk}`);
936
- }
937
- }
938
- function partToMessageChunk(part) {
939
- const fields = partsToBaseMessageChunkFields([part]);
940
- if (typeof fields.content === "string") {
941
- return new AIMessageChunk(fields);
942
- }
943
- else if (fields.content.every((item) => item.type === "text")) {
944
- const newContent = fields.content
945
- .map((item) => ("text" in item ? item.text : ""))
946
- .join("");
947
- return new AIMessageChunk({
948
- ...fields,
949
- content: newContent,
950
- });
951
- }
952
- return new AIMessageChunk(fields);
953
- }
954
- function partToChatGeneration(part) {
955
- const message = partToMessageChunk(part);
956
- const text = partToText(part);
957
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
958
- const generationInfo = {};
959
- return new ChatGenerationChunk({
960
- text,
961
- message,
962
- generationInfo,
963
- });
964
- }
965
- function groundingSupportByPart(groundingSupports) {
966
- const ret = [];
967
- if (!groundingSupports || groundingSupports.length === 0) {
968
- return [];
969
- }
970
- groundingSupports?.forEach((groundingSupport) => {
971
- const segment = groundingSupport?.segment;
972
- const partIndex = segment?.partIndex ?? 0;
973
- if (ret[partIndex]) {
974
- ret[partIndex].push(groundingSupport);
975
- }
976
- else {
977
- ret[partIndex] = [groundingSupport];
978
- }
979
- });
980
- return ret;
981
- }
982
- function responseToGroundedChatGenerations(response) {
983
- const parts = responseToParts(response);
984
- if (parts.length === 0) {
985
- return [];
986
- }
987
- // Citation and grounding information connected to each part / ChatGeneration
988
- // to make sure they are available in downstream filters.
989
- const candidate = response?.data
990
- ?.candidates?.[0];
991
- const groundingMetadata = candidate?.groundingMetadata;
992
- const citationMetadata = candidate?.citationMetadata;
993
- const groundingParts = groundingSupportByPart(groundingMetadata?.groundingSupports);
994
- const ret = parts.map((part, index) => {
995
- const gen = partToChatGeneration(part);
996
- if (!gen.generationInfo) {
997
- gen.generationInfo = {};
998
- }
999
- if (groundingMetadata) {
1000
- gen.generationInfo.groundingMetadata = groundingMetadata;
1001
- const groundingPart = groundingParts[index];
1002
- if (groundingPart) {
1003
- gen.generationInfo.groundingSupport = groundingPart;
1004
- }
1005
- }
1006
- if (citationMetadata) {
1007
- gen.generationInfo.citationMetadata = citationMetadata;
1008
- }
1009
- return gen;
1010
- });
1011
- return ret;
1012
- }
1013
- function combineContent(gen, forceComplex = false) {
1014
- const allString = gen.every((item) => typeof item.message.content === "string");
1015
- if (allString && !forceComplex) {
1016
- // Everything is a string, and we don't want to force it to return
1017
- // MessageContentComplex[], so concatenate the content into one string
1018
- return gen.map((item) => item.message.content).join("");
1019
- }
1020
- else {
1021
- // We either have complex types, or we want to force them, so turn
1022
- // it into an array of complex types.
1023
- const ret = [];
1024
- gen.forEach((item) => {
1025
- if (typeof item.message.content === "string") {
1026
- // If this is a string, turn it into a text type
1027
- ret.push({
1028
- type: "text",
1029
- text: item.message.content,
1030
- });
1031
- }
1032
- else {
1033
- // Otherwise, add all the complex types to what we're returning
1034
- item.message.content.forEach((c) => {
1035
- ret.push(c);
1036
- });
1037
- }
1038
- });
1039
- return ret;
1040
- }
1041
- }
1042
- function combineText(gen) {
1043
- return gen.map((item) => item.text ?? "").join("");
1044
- }
1045
- /*
1046
- * We don't really need the entire AIMessageChunk here, but it is
1047
- * a conventient way to combine all the Tool Calling information.
1048
- */
1049
- function combineToolCalls(gen) {
1050
- let ret = new AIMessageChunk("");
1051
- gen.forEach((item) => {
1052
- const message = item?.message;
1053
- ret = concat(ret, message);
1054
- });
1055
- return ret;
1056
- }
1057
- function combineAdditionalKwargs(gen) {
1058
- const ret = {};
1059
- gen.forEach((item) => {
1060
- const message = item?.message;
1061
- const kwargs = message?.additional_kwargs ?? {};
1062
- const keys = Object.keys(kwargs);
1063
- keys.forEach((key) => {
1064
- const value = kwargs[key];
1065
- if (Object.hasOwn(ret, key) &&
1066
- Array.isArray(ret[key]) &&
1067
- Array.isArray(value)) {
1068
- ret[key].push(...value);
1069
- }
1070
- else {
1071
- ret[key] = value;
1072
- }
1073
- });
1074
- });
1075
- return ret;
1076
- }
1077
- function combineGenerations(generations, response) {
1078
- const gen = splitGenerationTypes(generations, response);
1079
- const combinedContent = combineContent(gen.content);
1080
- const combinedText = combineText(gen.content);
1081
- const combinedToolCalls = combineToolCalls(gen.content);
1082
- const kwargs = combineAdditionalKwargs(gen.content);
1083
- const lastContent = gen.content[gen.content.length - 1];
1084
- // Add usage metadata
1085
- const usage_metadata = responseToUsageMetadata(response);
1086
- // Add thinking / reasoning
1087
- // if (gen.reasoning && gen.reasoning.length > 0) {
1088
- // kwargs.reasoning_content = combineContent(gen.reasoning, true);
1089
- // }
1090
- // Build the message and the generation chunk to return
1091
- const message = new AIMessageChunk({
1092
- content: combinedContent,
1093
- additional_kwargs: kwargs,
1094
- usage_metadata,
1095
- tool_calls: combinedToolCalls.tool_calls,
1096
- invalid_tool_calls: combinedToolCalls.invalid_tool_calls,
1097
- });
1098
- return [
1099
- new ChatGenerationChunk({
1100
- message,
1101
- text: combinedText,
1102
- generationInfo: lastContent.generationInfo,
1103
- }),
1104
- ];
1105
- }
1106
- function splitGenerationTypes(generations, _response) {
1107
- const content = [];
1108
- const reasoning = [];
1109
- generations.forEach((gen) => {
1110
- if (gen?.generationInfo?.thought) {
1111
- reasoning.push(gen);
1112
- }
1113
- else {
1114
- content.push(gen);
1115
- }
1116
- });
1117
- return {
1118
- content,
1119
- reasoning,
1120
- };
1121
- }
1122
- /**
1123
- * Although this returns an array, only the first (or maybe last)
1124
- * element in the array is used. So we need to combine them into
1125
- * just one element that contains everything we need.
1126
- * @param response
1127
- */
1128
- function responseToChatGenerations(response) {
1129
- const generations = responseToGroundedChatGenerations(response);
1130
- if (generations.length === 0) {
1131
- return [];
1132
- }
1133
- const ret = combineGenerations(generations, response);
1134
- // Add logprobs information to the message
1135
- const candidate = response?.data
1136
- ?.candidates?.[0];
1137
- const avgLogprobs = candidate?.avgLogprobs;
1138
- const logprobs = candidateToLogprobs(candidate);
1139
- if (logprobs) {
1140
- ret[0].message.response_metadata = {
1141
- ...ret[0].message.response_metadata,
1142
- logprobs,
1143
- avgLogprobs,
1144
- };
1145
- }
1146
- return ret;
1147
- }
1148
- function responseToBaseMessageFields(response) {
1149
- const parts = responseToParts(response);
1150
- return partsToBaseMessageChunkFields(parts);
1151
- }
1152
- function partsToSignatures(parts) {
1153
- return parts.map((part) => part?.thoughtSignature ?? "");
1154
- }
1155
- function partsToBaseMessageChunkFields(parts) {
1156
- const fields = {
1157
- content: partsToMessageContent(parts),
1158
- tool_call_chunks: [],
1159
- tool_calls: [],
1160
- invalid_tool_calls: [],
1161
- };
1162
- fields.additional_kwargs = {};
1163
- const rawTools = partsToToolsRaw(parts);
1164
- if (rawTools.length > 0) {
1165
- const tools = toolsRawToTools(rawTools);
1166
- for (const tool of tools) {
1167
- fields.tool_call_chunks?.push({
1168
- name: tool.function.name,
1169
- args: tool.function.arguments,
1170
- id: tool.id,
1171
- type: "tool_call_chunk",
1172
- });
1173
- try {
1174
- fields.tool_calls?.push({
1175
- name: tool.function.name,
1176
- args: JSON.parse(tool.function.arguments),
1177
- id: tool.id,
1178
- });
1179
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
1180
- }
1181
- catch (e) {
1182
- fields.invalid_tool_calls?.push({
1183
- name: tool.function.name,
1184
- args: tool.function.arguments,
1185
- id: tool.id,
1186
- error: e.message,
1187
- type: "invalid_tool_call",
1188
- });
1189
- }
1190
- }
1191
- fields.additional_kwargs.tool_calls = tools;
1192
- }
1193
- fields.additional_kwargs.signatures = partsToSignatures(parts);
1194
- return fields;
1195
- }
1196
- function responseToBaseMessage(response) {
1197
- const fields = responseToBaseMessageFields(response);
1198
- return new AIMessage(fields);
1199
- }
1200
- function safeResponseToBaseMessage(response) {
1201
- return safeResponseTo(response, responseToBaseMessage);
1202
- }
1203
- function responseToChatResult(response) {
1204
- const generations = responseToChatGenerations(response);
1205
- return {
1206
- generations,
1207
- llmOutput: responseToGenerationInfo(response),
1208
- };
1209
- }
1210
- function safeResponseToChatResult(response) {
1211
- return safeResponseTo(response, responseToChatResult);
1212
- }
1213
- function inputType(input) {
1214
- if (typeof input === "string") {
1215
- return "MessageContent";
1216
- }
1217
- else {
1218
- const firstItem = input[0];
1219
- if (Object.hasOwn(firstItem, "content")) {
1220
- return "BaseMessageArray";
1221
- }
1222
- else {
1223
- return "MessageContent";
1224
- }
1225
- }
1226
- }
1227
- async function formatMessageContents(input, _parameters) {
1228
- const parts = await messageContentToParts(input);
1229
- const contents = [
1230
- {
1231
- role: "user", // Required by Vertex AI
1232
- parts,
1233
- },
1234
- ];
1235
- return contents;
1236
- }
1237
- async function formatBaseMessageContents(input, _parameters) {
1238
- const inputPromises = input.map((msg, i) => baseMessageToContent(msg, input[i - 1]));
1239
- const inputs = await Promise.all(inputPromises);
1240
- return inputs.reduce((acc, cur) => {
1241
- // Filter out the system content
1242
- if (cur.every((content) => content.role === "system")) {
1243
- return acc;
1244
- }
1245
- // Combine adjacent function messages
1246
- if (cur[0]?.role === "function" &&
1247
- acc.length > 0 &&
1248
- acc[acc.length - 1].role === "function") {
1249
- acc[acc.length - 1].parts = [
1250
- ...acc[acc.length - 1].parts,
1251
- ...cur[0].parts,
1252
- ];
1253
- }
1254
- else {
1255
- acc.push(...cur);
1256
- }
1257
- return acc;
1258
- }, []);
1259
- }
1260
- async function formatContents(input, parameters) {
1261
- const it = inputType(input);
1262
- switch (it) {
1263
- case "MessageContent":
1264
- return formatMessageContents(input, parameters);
1265
- case "BaseMessageArray":
1266
- return formatBaseMessageContents(input, parameters);
1267
- default:
1268
- throw new Error(`Unknown input type "${it}": ${input}`);
1269
- }
1270
- }
1271
- function formatGenerationConfig(parameters) {
1272
- const ret = {
1273
- temperature: parameters.temperature,
1274
- topK: parameters.topK,
1275
- topP: parameters.topP,
1276
- seed: parameters.seed,
1277
- presencePenalty: parameters.presencePenalty,
1278
- frequencyPenalty: parameters.frequencyPenalty,
1279
- maxOutputTokens: parameters.maxOutputTokens,
1280
- stopSequences: parameters.stopSequences,
1281
- responseMimeType: parameters.responseMimeType,
1282
- responseModalities: parameters.responseModalities,
1283
- speechConfig: normalizeSpeechConfig(parameters.speechConfig),
1284
- };
1285
- // Add the logprobs if explicitly set
1286
- if (typeof parameters.logprobs !== "undefined") {
1287
- ret.responseLogprobs = parameters.logprobs;
1288
- if (parameters.logprobs &&
1289
- typeof parameters.topLogprobs !== "undefined") {
1290
- ret.logprobs = parameters.topLogprobs;
1291
- }
1292
- }
1293
- // Add thinking configuration if explicitly set
1294
- // Note that you cannot have thinkingBudget set to 0 and includeThoughts true
1295
- if (typeof parameters.maxReasoningTokens !== "undefined") {
1296
- const includeThoughts = parameters.maxReasoningTokens !== 0;
1297
- ret.thinkingConfig = {
1298
- thinkingBudget: parameters.maxReasoningTokens,
1299
- includeThoughts,
1300
- };
1301
- }
1302
- // Remove any undefined properties, so we don't send them
1303
- let attribute;
1304
- for (attribute in ret) {
1305
- if (ret[attribute] === undefined) {
1306
- delete ret[attribute];
1307
- }
1308
- }
1309
- return ret;
1310
- }
1311
- function formatSafetySettings(parameters) {
1312
- return parameters.safetySettings ?? [];
1313
- }
1314
- async function formatBaseMessageSystemInstruction(input) {
1315
- let ret = {};
1316
- for (let index = 0; index < input.length; index += 1) {
1317
- const message = input[index];
1318
- if (message._getType() === "system") {
1319
- // For system types, we only want it if it is the first message,
1320
- // if it appears anywhere else, it should be an error.
1321
- if (index === 0) {
1322
- // eslint-disable-next-line prefer-destructuring
1323
- ret = (await baseMessageToContent(message, undefined))[0];
1324
- }
1325
- else {
1326
- throw new Error("System messages are only permitted as the first passed message.");
1327
- }
1328
- }
1329
- }
1330
- return ret;
1331
- }
1332
- async function formatSystemInstruction(input) {
1333
- if (!config?.useSystemInstruction) {
1334
- return {};
1335
- }
1336
- const it = inputType(input);
1337
- switch (it) {
1338
- case "BaseMessageArray":
1339
- return formatBaseMessageSystemInstruction(input);
1340
- default:
1341
- return {};
1342
- }
1343
- }
1344
- function structuredToolToFunctionDeclaration(tool) {
1345
- const jsonSchema = schemaToGeminiParameters(tool.schema);
1346
- return {
1347
- name: tool.name,
1348
- description: tool.description ?? `A function available to call.`,
1349
- parameters: jsonSchema,
1350
- };
1351
- }
1352
- function searchToolName(tool) {
1353
- for (const name of GeminiSearchToolAttributes) {
1354
- if (name in tool) {
1355
- return name;
1356
- }
1357
- }
1358
- return undefined;
1359
- }
1360
- function cleanGeminiTool(tool) {
1361
- const orig = searchToolName(tool);
1362
- const adj = config?.googleSearchToolAdjustment;
1363
- if (orig && adj && adj !== orig) {
1364
- return {
1365
- [adj]: {},
1366
- };
1367
- }
1368
- else {
1369
- return tool;
1370
- }
1371
- }
1372
- function formatTools(parameters) {
1373
- const tools = parameters?.tools;
1374
- if (!tools || tools.length === 0) {
1375
- return [];
1376
- }
1377
- // Group all LangChain tools into a single functionDeclarations array.
1378
- // Gemini Tools may be normalized to different tool names
1379
- const langChainTools = [];
1380
- const otherTools = [];
1381
- tools.forEach((tool) => {
1382
- if (isLangChainTool(tool)) {
1383
- langChainTools.push(tool);
1384
- }
1385
- else {
1386
- otherTools.push(cleanGeminiTool(tool));
1387
- }
1388
- });
1389
- const result = [...otherTools];
1390
- if (langChainTools.length > 0) {
1391
- result.push({
1392
- functionDeclarations: langChainTools.map(structuredToolToFunctionDeclaration),
1393
- });
1394
- }
1395
- return result;
1396
- }
1397
- function formatToolConfig(parameters) {
1398
- if (!parameters.tool_choice || typeof parameters.tool_choice !== "string") {
1399
- return undefined;
1400
- }
1401
- if (["auto", "any", "none"].includes(parameters.tool_choice)) {
1402
- return {
1403
- functionCallingConfig: {
1404
- mode: parameters.tool_choice,
1405
- allowedFunctionNames: parameters.allowed_function_names,
1406
- },
1407
- };
1408
- }
1409
- // force tool choice to be a single function name in case of structured output
1410
- return {
1411
- functionCallingConfig: {
1412
- mode: "any",
1413
- allowedFunctionNames: [parameters.tool_choice],
1414
- },
1415
- };
1416
- }
1417
- async function formatData(input, parameters) {
1418
- const typedInput = input;
1419
- const contents = await formatContents(typedInput, parameters);
1420
- const generationConfig = formatGenerationConfig(parameters);
1421
- const tools = formatTools(parameters);
1422
- const toolConfig = formatToolConfig(parameters);
1423
- const safetySettings = formatSafetySettings(parameters);
1424
- const systemInstruction = await formatSystemInstruction(typedInput);
1425
- const ret = {
1426
- contents,
1427
- generationConfig,
1428
- };
1429
- if (tools && tools.length) {
1430
- ret.tools = tools;
1431
- }
1432
- if (toolConfig) {
1433
- ret.toolConfig = toolConfig;
1434
- }
1435
- if (safetySettings && safetySettings.length) {
1436
- ret.safetySettings = safetySettings;
1437
- }
1438
- if (systemInstruction?.role &&
1439
- systemInstruction?.parts &&
1440
- systemInstruction?.parts?.length) {
1441
- ret.systemInstruction = systemInstruction;
1442
- }
1443
- if (parameters.cachedContent) {
1444
- ret.cachedContent = parameters.cachedContent;
1445
- }
1446
- if (parameters.labels && Object.keys(parameters.labels).length > 0) {
1447
- ret.labels = parameters.labels;
1448
- }
1449
- return ret;
1450
- }
1451
- return {
1452
- messageContentToParts,
1453
- baseMessageToContent,
1454
- responseToString: safeResponseToString,
1455
- responseToChatGeneration: safeResponseToChatGeneration,
1456
- chunkToString,
1457
- responseToBaseMessage: safeResponseToBaseMessage,
1458
- responseToChatResult: safeResponseToChatResult,
1459
- formatData,
1460
- };
160
+ function getGeminiAPI(config) {
161
+ function messageContentText(content) {
162
+ if (content?.text && content?.text.length > 0) return { text: content.text };
163
+ else return null;
164
+ }
165
+ function messageContentImageUrlData(content) {
166
+ const url = typeof content.image_url === "string" ? content.image_url : content.image_url.url;
167
+ if (!url) throw new Error("Missing Image URL");
168
+ const mimeTypeAndData = extractMimeType(url);
169
+ if (mimeTypeAndData) return { inlineData: mimeTypeAndData };
170
+ else {
171
+ const mimeType = inferMimeTypeFromUrl(url) || "image/png";
172
+ return { fileData: {
173
+ mimeType,
174
+ fileUri: url
175
+ } };
176
+ }
177
+ }
178
+ function messageContentImageUrl(content) {
179
+ const ret = messageContentImageUrlData(content);
180
+ supplementVideoMetadata(content, ret);
181
+ return ret;
182
+ }
183
+ async function blobToFileData(blob) {
184
+ return { fileData: {
185
+ fileUri: blob.path,
186
+ mimeType: blob.mimetype
187
+ } };
188
+ }
189
+ async function fileUriContentToBlob(uri) {
190
+ return config?.mediaManager?.getMediaBlob(uri);
191
+ }
192
+ async function messageContentMediaData(content) {
193
+ if ("mimeType" in content && "data" in content) return { inlineData: {
194
+ mimeType: content.mimeType,
195
+ data: content.data
196
+ } };
197
+ else if ("mimeType" in content && "fileUri" in content) return { fileData: {
198
+ mimeType: content.mimeType,
199
+ fileUri: content.fileUri
200
+ } };
201
+ else {
202
+ const uri = content.fileUri;
203
+ const blob = await fileUriContentToBlob(uri);
204
+ if (blob) return await blobToFileData(blob);
205
+ }
206
+ throw new Error(`Invalid media content: ${JSON.stringify(content, null, 1)}`);
207
+ }
208
+ function supplementVideoMetadata(content, ret) {
209
+ if ("videoMetadata" in content && typeof ret === "object") ret.videoMetadata = content.videoMetadata;
210
+ return ret;
211
+ }
212
+ async function messageContentMedia(content) {
213
+ const ret = await messageContentMediaData(content);
214
+ supplementVideoMetadata(content, ret);
215
+ return ret;
216
+ }
217
+ function messageContentReasoning(content) {
218
+ if (content?.reasoning && content?.reasoning.length > 0) return {
219
+ text: content.reasoning,
220
+ thought: true
221
+ };
222
+ else return null;
223
+ }
224
+ const standardContentBlockConverter = {
225
+ providerName: "Google Gemini",
226
+ fromStandardTextBlock(block) {
227
+ return { text: block.text };
228
+ },
229
+ fromStandardImageBlock(block) {
230
+ if (block.source_type === "url") {
231
+ const data = parseBase64DataUrl({ dataUrl: block.url });
232
+ if (data) return { inlineData: {
233
+ mimeType: data.mime_type,
234
+ data: data.data
235
+ } };
236
+ else {
237
+ let mimeType = block.mime_type;
238
+ if (!mimeType || mimeType === "") mimeType = inferMimeTypeFromUrl(block.url) || "image/png";
239
+ return { fileData: {
240
+ mimeType,
241
+ fileUri: block.url
242
+ } };
243
+ }
244
+ }
245
+ if (block.source_type === "base64") return { inlineData: {
246
+ mimeType: block.mime_type || "image/png",
247
+ data: block.data
248
+ } };
249
+ throw new Error(`Unsupported source type: ${block.source_type}`);
250
+ },
251
+ fromStandardAudioBlock(block) {
252
+ if (block.source_type === "url") {
253
+ const data = parseBase64DataUrl({ dataUrl: block.url });
254
+ if (data) return { inlineData: {
255
+ mimeType: data.mime_type,
256
+ data: data.data
257
+ } };
258
+ else return { fileData: {
259
+ mimeType: block.mime_type || "audio/mpeg",
260
+ fileUri: block.url
261
+ } };
262
+ }
263
+ if (block.source_type === "base64") return { inlineData: {
264
+ mimeType: block.mime_type || "audio/mpeg",
265
+ data: block.data
266
+ } };
267
+ throw new Error(`Unsupported source type: ${block.source_type}`);
268
+ },
269
+ fromStandardFileBlock(block) {
270
+ if (block.source_type === "text") return { text: block.text };
271
+ if (block.source_type === "url") {
272
+ const data = parseBase64DataUrl({ dataUrl: block.url });
273
+ if (data) return { inlineData: {
274
+ mimeType: data.mime_type,
275
+ data: data.data
276
+ } };
277
+ else return { fileData: {
278
+ mimeType: block.mime_type || "application/octet-stream",
279
+ fileUri: block.url
280
+ } };
281
+ }
282
+ if (block.source_type === "base64") return { inlineData: {
283
+ mimeType: block.mime_type || "application/octet-stream",
284
+ data: block.data
285
+ } };
286
+ throw new Error(`Unsupported source type: ${block.source_type}`);
287
+ }
288
+ };
289
// Convert one complex MessageContent entry into a Gemini Part.
// Dispatches on `content.type`; "media" may resolve asynchronously.
// Throws when the type is unknown, or when a known type is missing its
// payload field (falls through to the trailing error).
async function messageContentComplexToPart(content) {
  switch (content.type) {
    case "text":
      if ("text" in content) return messageContentText(content);
      break;
    case "image_url":
      if ("image_url" in content) return messageContentImageUrl(content);
      break;
    case "media": return await messageContentMedia(content);
    case "reasoning": return messageContentReasoning(content);
    default: throw new Error(`Unsupported type "${content.type}" received while converting message to message parts: ${JSON.stringify(content)}`);
  }
  throw new Error(`Cannot coerce "${content.type}" message part into a string.`);
}
// Convert an array of complex content entries to Parts in parallel.
// Standard (provider-agnostic) data blocks are routed through
// standardContentBlockConverter; everything else goes through
// messageContentComplexToPart.
async function messageContentComplexToParts(content) {
  const contents = content.map((m) => isDataContentBlock(m) ? convertToProviderContentBlock(m, standardContentBlockConverter) : messageContentComplexToPart(m));
  return Promise.all(contents);
}
// Convert a MessageContent (plain string or complex array) into Gemini
// Parts, dropping entries that produced no part.
async function messageContentToParts(content) {
  // Normalize a bare string to a single text block first.
  const messageContent = typeof content === "string" ? [{
    type: "text",
    text: content
  }] : content;
  const allParts = await messageContentComplexToParts(messageContent);
  // Filter out null/undefined results while preserving order.
  const parts = allParts.reduce((acc, val) => {
    if (val) return [...acc, val];
    else return acc;
  }, []);
  return parts;
}
319
/**
 * Convert OpenAI-style tool-call records (whose arguments are a JSON
 * string) into Gemini functionCall parts. Missing/empty arguments
 * become an empty args object. Returns [] for a missing or empty list.
 */
function messageToolCallsToParts(toolCalls) {
  if (!toolCalls?.length) return [];
  return toolCalls.map((tool) => {
    const argStr = tool?.function?.arguments;
    const args = argStr ? JSON.parse(argStr) : {};
    return {
      functionCall: {
        name: tool.function.name,
        args,
      },
    };
  });
}
/**
 * Extract Gemini parts from a message's additional_kwargs. Only
 * OpenAI-style `tool_calls` are recognized here.
 */
function messageKwargsToParts(kwargs) {
  const toolCalls = kwargs?.tool_calls;
  return toolCalls ? [...messageToolCallsToParts(toolCalls)] : [];
}
338
// Build Gemini Content entries for a message under the given role.
// Tool calls (first-class AIMessage.tool_calls, or legacy
// additional_kwargs.tool_calls) become functionCall parts appended
// after the content parts. Thought signatures saved on the message are
// re-attached positionally, but only when their count matches the part
// count exactly (otherwise we cannot tell which signature belongs to
// which part).
async function roleMessageToContent(role, message) {
  const contentParts = await messageContentToParts(message.content);
  let toolParts;
  if (isAIMessage(message) && !!message.tool_calls?.length) toolParts = message.tool_calls.map((toolCall) => ({ functionCall: {
    name: toolCall.name,
    args: toolCall.args
  } }));
  else toolParts = messageKwargsToParts(message.additional_kwargs);
  const parts = [...contentParts, ...toolParts];
  const signatures = message?.additional_kwargs?.signatures ?? [];
  if (signatures.length === parts.length) for (let co = 0; co < signatures.length; co += 1) {
    const signature = signatures[co];
    if (signature && signature.length > 0) parts[co].thoughtSignature = signature;
  }
  return [{
    role,
    parts
  }];
}
// Convert a system message. When configured to use systemInstruction we
// keep the "system" role; otherwise emulate it as a user message
// followed by a canned "Ok" model acknowledgement.
async function systemMessageToContent(message) {
  return config?.useSystemInstruction ? roleMessageToContent("system", message) : [...await roleMessageToContent("user", message), ...await roleMessageToContent("model", new AIMessage("Ok"))];
}
// Convert a ToolMessage into a "function"-role Content carrying a
// functionResponse part. The response name comes from the previous AI
// message's first tool call (or the previous message's name), falling
// back to tool_call_id. String content is parsed as JSON when
// possible; otherwise it is sent verbatim.
function toolMessageToContent(message, prevMessage) {
  // Flatten complex content down to the concatenation of its text blocks.
  const contentStr = typeof message.content === "string" ? message.content : message.content.reduce((acc, content) => {
    if (content.type === "text") return acc + content.text;
    else return acc;
  }, "");
  const responseName = (isAIMessage(prevMessage) && !!prevMessage.tool_calls?.length ? prevMessage.tool_calls[0].name : prevMessage.name) ?? message.tool_call_id;
  try {
    const content = JSON.parse(contentStr);
    return [{
      role: "function",
      parts: [{ functionResponse: {
        name: responseName,
        response: { content }
      } }]
    }];
  } catch (_) {
    // Not JSON — pass the raw string through as the response content.
    return [{
      role: "function",
      parts: [{ functionResponse: {
        name: responseName,
        response: { content: contentStr }
      } }]
    }];
  }
}
// Dispatch a BaseMessage to the proper converter by message class.
// `prevMessage` is only needed for tool messages (to recover the
// function name). Unsupported message types are logged and skipped.
async function baseMessageToContent(message, prevMessage) {
  if (SystemMessage.isInstance(message)) return systemMessageToContent(message);
  else if (HumanMessage.isInstance(message)) return roleMessageToContent("user", message);
  else if (AIMessage.isInstance(message)) return roleMessageToContent("model", message);
  else if (ToolMessage.isInstance(message)) {
    if (!prevMessage) throw new Error("Tool messages cannot be the first message passed to the model.");
    return toolMessageToContent(message, prevMessage);
  } else {
    console.log(`Unsupported message type: ${message.type}`);
    return [];
  }
}
397
// Map a thought ("reasoning") part to a LangChain reasoning block.
function thoughtPartToMessageContent(part) {
  return {
    type: "reasoning",
    reasoning: part.text
  };
}
// Map a plain text part to a LangChain text block.
function textPartToMessageContent(part) {
  return {
    type: "text",
    text: part.text
  };
}
// Inline image data becomes an image_url block holding a data: URL.
function inlineDataPartToMessageContentImage(part) {
  return {
    type: "image_url",
    image_url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`
  };
}
// Non-image inline data becomes a generic "media" block.
function inlineDataPartToMessageContentMedia(part) {
  return {
    type: "media",
    mimeType: part.inlineData.mimeType,
    data: part.inlineData.data
  };
}
// Route inline data by mime type: image/* -> image_url, else media.
function inlineDataPartToMessageContent(part) {
  const mimeType = part?.inlineData?.mimeType ?? "";
  if (mimeType.startsWith("image")) return inlineDataPartToMessageContentImage(part);
  else return inlineDataPartToMessageContentMedia(part);
}
// File references are surfaced as image_url blocks pointing at the URI.
function fileDataPartToMessageContent(part) {
  return {
    type: "image_url",
    image_url: part.fileData.fileUri
  };
}
// Convert Gemini response parts to LangChain content blocks, dropping
// null/unrecognized parts. Thought parts are checked before text parts
// because they also carry a `text` field.
function partsToMessageContent(parts) {
  return parts.map((part) => {
    if (part === void 0 || part === null) return null;
    else if (part.thought) return thoughtPartToMessageContent(part);
    else if ("text" in part) return textPartToMessageContent(part);
    else if ("inlineData" in part) return inlineDataPartToMessageContent(part);
    else if ("fileData" in part) return fileDataPartToMessageContent(part);
    else return null;
  }).reduce((acc, content) => {
    if (content) acc.push(content);
    return acc;
  }, []);
}
446
/**
 * Serialize a raw tool record into the OpenAI wire shape: arguments
 * become a JSON string.
 */
function toolRawToTool(raw) {
  const { id, type } = raw;
  return {
    id,
    type,
    function: {
      name: raw.function.name,
      arguments: JSON.stringify(raw.function.arguments),
    },
  };
}
/**
 * Wrap a Gemini functionCall part as a raw tool record with a freshly
 * generated id (v4 comes from module scope; dashes are stripped).
 */
function functionCallPartToToolRaw(part) {
  return {
    id: v4().replace(/-/g, ""),
    type: "function",
    function: {
      name: part.functionCall.name,
      arguments: part.functionCall.args ?? {},
    },
  };
}
/** Collect raw tool records from functionCall parts, skipping the rest. */
function partsToToolsRaw(parts) {
  const raws = [];
  for (const part of parts) {
    if (part != null && "functionCall" in part) {
      raws.push(functionCallPartToToolRaw(part));
    }
  }
  return raws;
}
/** Serialize every raw tool record into the wire shape. */
function toolsRawToTools(raws) {
  return raws.map(toolRawToTool);
}
479
/**
 * Collapse a connection response into one GenerateContentResponseData.
 * Streams cannot be converted and throw. Arrays of chunks are folded
 * into the first chunk (seedless reduce): each later chunk's first-
 * candidate parts are appended, and its promptFeedback overwrites the
 * accumulated one. Non-array data is returned as-is.
 */
function responseToGenerateContentResponseData(response) {
  const { data } = response;
  if ("nextChunk" in data) {
    throw new Error("Cannot convert Stream to GenerateContentResponseData");
  }
  if (!Array.isArray(data)) {
    return data;
  }
  return data.reduce((acc, chunk) => {
    const chunkParts = chunk?.candidates?.[0]?.content?.parts ?? [];
    acc.candidates[0].content.parts.push(...chunkParts);
    acc.promptFeedback = chunk.promptFeedback;
    return acc;
  });
}
/** All parts of the first candidate, or [] when there is none. */
function responseToParts(response) {
  const responseData = responseToGenerateContentResponseData(response);
  return responseData?.candidates?.[0]?.content?.parts ?? [];
}
/** The text of a part, or "" for non-text parts. */
function partToText(part) {
  if ("text" in part) {
    return part.text;
  }
  return "";
}
/** Concatenate the text of every part of the first candidate. */
function responseToString(response) {
  let text = "";
  for (const part of responseToParts(response)) {
    text += partToText(part);
  }
  return text;
}
505
// Run a response through the configured safety handler (default:
// DefaultGeminiSafetyHandler) before converting it with `responseTo`.
// If the handler raises a GoogleAISafetyError, the converted reply is
// attached to the error as `reply` before rethrowing so callers can
// still inspect the partial result.
function safeResponseTo(response, responseTo) {
  const safetyHandler = config?.safetyHandler ?? new DefaultGeminiSafetyHandler();
  try {
    const safeResponse = safetyHandler.handle(response);
    return responseTo(safeResponse);
  } catch (xx) {
    if (xx instanceof GoogleAISafetyError) {
      const ret = responseTo(xx.response);
      xx.reply = ret;
    }
    throw xx;
  }
}
// Safety-checked variant of responseToString.
function safeResponseToString(response) {
  return safeResponseTo(response, responseToString);
}
521
/**
 * Convert one Gemini logprob candidate into the OpenAI-style shape
 * { token, logprob, bytes }, where bytes is the token's UTF-8 encoding.
 */
function logprobResultToLogprob(result) {
  const token = result?.token;
  const logprob = result?.logProbability;
  const bytes = Array.from(new TextEncoder().encode(token));
  return { token, logprob, bytes };
}
/**
 * Convert a candidate's logprobsResult into { content: [...] }, pairing
 * each chosen token with the top alternatives at the same position.
 */
function candidateToLogprobs(candidate) {
  const result = candidate?.logprobsResult;
  const chosen = result?.chosenCandidates ?? [];
  const top = result?.topCandidates ?? [];
  const content = chosen.map((token, i) => {
    const entry = logprobResultToLogprob(token);
    entry.top_logprobs = (top[i]?.candidates ?? []).map((alt) => logprobResultToLogprob(alt));
    return entry;
  });
  return { content };
}
546
/**
 * Merge a candidate's urlRetrievalMetadata contexts and urlContextMetadata
 * entries into one { urlMetadata } object, or undefined when both are empty.
 */
function candidateToUrlContextMetadata(candidate) {
  const urls = [
    ...(candidate?.urlRetrievalMetadata?.urlRetrievalContexts ?? []),
    ...(candidate?.urlContextMetadata?.urlMetadata ?? []),
  ];
  return urls.length === 0 ? void 0 : { urlMetadata: urls };
}
/**
 * Accumulate per-modality token counts into `details`, keyed by the
 * lower-cased modality name. Mutates `details`; tolerates a missing list.
 */
function addModalityCounts(modalityTokenCounts, details) {
  for (const { modality, tokenCount } of modalityTokenCounts ?? []) {
    const key = modality.toLowerCase();
    details[key] = (details[key] ?? 0) + tokenCount;
  }
}
561
// Translate Gemini usageMetadata into LangChain UsageMetadata.
// Output tokens include both candidate and "thoughts" (reasoning)
// tokens; per-modality counts plus cache-read and reasoning details are
// folded into the token-detail maps. Returns undefined when the
// response carries no usageMetadata.
function responseToUsageMetadata(response) {
  if ("usageMetadata" in response.data) {
    const data = response?.data;
    const usageMetadata = data?.usageMetadata;
    const input_tokens = usageMetadata.promptTokenCount ?? 0;
    const candidatesTokenCount = usageMetadata.candidatesTokenCount ?? 0;
    const thoughtsTokenCount = usageMetadata.thoughtsTokenCount ?? 0;
    const output_tokens = candidatesTokenCount + thoughtsTokenCount;
    const total_tokens = usageMetadata.totalTokenCount ?? input_tokens + output_tokens;
    const input_token_details = {};
    addModalityCounts(usageMetadata.promptTokensDetails, input_token_details);
    if (typeof usageMetadata?.cachedContentTokenCount === "number") input_token_details.cache_read = usageMetadata.cachedContentTokenCount;
    const output_token_details = {};
    addModalityCounts(usageMetadata?.candidatesTokensDetails, output_token_details);
    if (typeof usageMetadata?.thoughtsTokenCount === "number") output_token_details.reasoning = usageMetadata.thoughtsTokenCount;
    const ret = {
      input_tokens,
      output_tokens,
      total_tokens,
      input_token_details,
      output_token_details
    };
    return ret;
  }
  return void 0;
}
// Build generationInfo (safety ratings, citations, grounding, finish
// info, logprobs) from the first candidate. Usage metadata is attached
// only when a finishReason is present, so it is reported once per
// generation rather than on every streamed chunk.
function responseToGenerationInfo(response) {
  // Accept either a single response object or an array of chunks (first wins).
  const data = Array.isArray(response.data) && response.data[0] ? response.data[0] : response.data && response.data.candidates ? response.data : void 0;
  if (!data) return {};
  const finish_reason = data.candidates[0]?.finishReason;
  const ret = {
    safety_ratings: data.candidates[0]?.safetyRatings?.map((rating) => ({
      category: rating.category,
      probability: rating.probability,
      probability_score: rating.probabilityScore,
      severity: rating.severity,
      severity_score: rating.severityScore
    })),
    citation_metadata: data.candidates[0]?.citationMetadata,
    grounding_metadata: data.candidates[0]?.groundingMetadata,
    finish_reason,
    finish_message: data.candidates[0]?.finishMessage,
    url_context_metadata: candidateToUrlContextMetadata(data.candidates[0]),
    avgLogprobs: data.candidates[0]?.avgLogprobs,
    logprobs: candidateToLogprobs(data.candidates[0])
  };
  if (typeof finish_reason === "string") ret.usage_metadata = responseToUsageMetadata(response);
  return ret;
}
// Convert a full response into a single ChatGenerationChunk.
// NOTE(review): only parts[0] feeds the message here; multi-part
// handling lives in responseToChatGenerations.
function responseToChatGeneration(response) {
  return new ChatGenerationChunk({
    text: responseToString(response),
    message: partToMessageChunk(responseToParts(response)[0]),
    generationInfo: responseToGenerationInfo(response)
  });
}
// Safety-checked variant of responseToChatGeneration.
function safeResponseToChatGeneration(response) {
  return safeResponseTo(response, responseToChatGeneration);
}
620
/**
 * Extract the plain text from a message chunk: "" for null chunks or
 * empty complex content, the string itself for string content, the
 * first block's text when it is a text block. Anything else throws.
 */
function chunkToString(chunk) {
  if (chunk === null) return "";
  const { content } = chunk;
  if (typeof content === "string") return content;
  if (content.length === 0) return "";
  const [first] = content;
  if (first.type === "text") return first.text;
  throw new Error(`Unexpected chunk: ${chunk}`);
}
627
// Build an AIMessageChunk from a single response part. When every
// resulting content block is text, the blocks are collapsed into one
// plain string and the provider tag is stamped on response_metadata.
function partToMessageChunk(part) {
  const fields = partsToBaseMessageChunkFields([part]);
  if (typeof fields.content === "string") return new AIMessageChunk(fields);
  else if (fields.content?.every((item) => item.type === "text")) {
    const newContent = fields.content.map((item) => "text" in item ? item.text : "").join("");
    return new AIMessageChunk({
      ...fields,
      content: newContent,
      response_metadata: {
        ...fields.response_metadata,
        model_provider: "google-vertexai"
      }
    });
  }
  return new AIMessageChunk(fields);
}
// Wrap a single part as a ChatGenerationChunk with empty generationInfo
// (grounding/citation info is attached later by the caller).
function partToChatGeneration(part) {
  const message = partToMessageChunk(part);
  const text = partToText(part);
  const generationInfo = {};
  return new ChatGenerationChunk({
    text,
    message,
    generationInfo
  });
}
653
/**
 * Group grounding supports by the part they refer to, using each
 * support's segment.partIndex (defaulting to 0). Returns a (possibly
 * sparse) array indexed by part, or [] when there are no supports.
 */
function groundingSupportByPart(groundingSupports) {
  if (!groundingSupports?.length) return [];
  const byPart = [];
  for (const support of groundingSupports) {
    const index = support?.segment?.partIndex ?? 0;
    (byPart[index] ??= []).push(support);
  }
  return byPart;
}
664
// Convert each response part into a ChatGenerationChunk and attach the
// first candidate's grounding/citation metadata. Grounding supports are
// distributed to individual parts via their segment.partIndex.
function responseToGroundedChatGenerations(response) {
  const parts = responseToParts(response);
  if (parts.length === 0) return [];
  // NOTE(review): reads candidates directly off response.data — for
  // array (streamed) data this yields undefined and grounding is skipped.
  const candidate = (response?.data)?.candidates?.[0];
  const groundingMetadata = candidate?.groundingMetadata;
  const citationMetadata = candidate?.citationMetadata;
  const groundingParts = groundingSupportByPart(groundingMetadata?.groundingSupports);
  const ret = parts.map((part, index) => {
    const gen = partToChatGeneration(part);
    if (!gen.generationInfo) gen.generationInfo = {};
    if (groundingMetadata) {
      gen.generationInfo.groundingMetadata = groundingMetadata;
      const groundingPart = groundingParts[index];
      if (groundingPart) gen.generationInfo.groundingSupport = groundingPart;
    }
    if (citationMetadata) gen.generationInfo.citationMetadata = citationMetadata;
    return gen;
  });
  return ret;
}
684
/**
 * Merge the message content of several generations. When every message
 * holds plain-string content (and forceComplex is false) the strings
 * are concatenated; otherwise a flat array of content blocks is built,
 * wrapping string content as text blocks.
 */
function combineContent(gen, forceComplex = false) {
  const allString = gen.every((item) => typeof item.message.content === "string");
  if (allString && !forceComplex) {
    return gen.map((item) => item.message.content).join("");
  }
  const blocks = [];
  for (const item of gen) {
    const { content } = item.message;
    if (typeof content === "string") {
      blocks.push({ type: "text", text: content });
    } else {
      blocks.push(...content);
    }
  }
  return blocks;
}
/** Concatenate the `text` of every generation ("" for missing text). */
function combineText(gen) {
  let text = "";
  for (const item of gen) {
    text += item.text ?? "";
  }
  return text;
}
704
// Merge the per-part message chunks via AIMessageChunk concatenation,
// primarily to accumulate tool_calls / invalid_tool_calls across chunks.
function combineToolCalls(gen) {
  let ret = new AIMessageChunk("");
  gen.forEach((item) => {
    const message = item?.message;
    ret = concat(ret, message);
  });
  return ret;
}
712
/**
 * Merge the additional_kwargs of every generation's message into one
 * object. When both the accumulated value and the incoming value for a
 * key are arrays, the incoming items are appended (note: this appends
 * into the first array encountered); otherwise the later value wins.
 */
function combineAdditionalKwargs(gen) {
  const merged = {};
  for (const item of gen) {
    const kwargs = item?.message?.additional_kwargs ?? {};
    for (const [key, value] of Object.entries(kwargs)) {
      const canAppend = Object.hasOwn(merged, key) && Array.isArray(merged[key]) && Array.isArray(value);
      if (canAppend) {
        merged[key].push(...value);
      } else {
        merged[key] = value;
      }
    }
  }
  return merged;
}
726
// Collapse per-part generations into a single ChatGenerationChunk:
// content, text, tool calls and additional_kwargs are each merged, and
// usage metadata is recomputed from the raw response. The last content
// generation's generationInfo is carried over.
// NOTE(review): assumes at least one non-reasoning generation —
// `lastContent` would be undefined otherwise.
function combineGenerations(generations, response) {
  const gen = splitGenerationTypes(generations, response);
  const combinedContent = combineContent(gen.content);
  const combinedText = combineText(gen.content);
  const combinedToolCalls = combineToolCalls(gen.content);
  const kwargs = combineAdditionalKwargs(gen.content);
  const lastContent = gen.content[gen.content.length - 1];
  const usage_metadata = responseToUsageMetadata(response);
  const message = new AIMessageChunk({
    content: combinedContent,
    additional_kwargs: kwargs,
    response_metadata: { model_provider: "google-vertexai" },
    usage_metadata,
    tool_calls: combinedToolCalls.tool_calls,
    invalid_tool_calls: combinedToolCalls.invalid_tool_calls
  });
  return [new ChatGenerationChunk({
    message,
    text: combinedText,
    generationInfo: lastContent.generationInfo
  })];
}
748
/**
 * Partition generations into regular content and reasoning ("thought")
 * generations, preserving order within each bucket. `_response` is
 * accepted for signature compatibility but unused.
 */
function splitGenerationTypes(generations, _response) {
  const content = [];
  const reasoning = [];
  for (const gen of generations) {
    const bucket = gen?.generationInfo?.thought ? reasoning : content;
    bucket.push(gen);
  }
  return { content, reasoning };
}
760
/**
 * Convert a response into ChatGenerationChunks. Although the API can
 * yield several parts (and so several generations), callers only use
 * the first element, so everything is combined into a single chunk.
 * Logprob data from the first candidate is copied onto the message's
 * response_metadata.
 * @param response
 */
function responseToChatGenerations(response) {
  const generations = responseToGroundedChatGenerations(response);
  if (generations.length === 0) return [];
  const ret = combineGenerations(generations, response);
  const candidate = (response?.data)?.candidates?.[0];
  const avgLogprobs = candidate?.avgLogprobs;
  // NOTE(review): candidateToLogprobs always returns an object (possibly
  // with empty content), so this branch always runs.
  const logprobs = candidateToLogprobs(candidate);
  if (logprobs) ret[0].message.response_metadata = {
    model_provider: "google-vertexai",
    ...ret[0].message.response_metadata,
    logprobs,
    avgLogprobs
  };
  return ret;
}
781
// Convert the response's first-candidate parts into AIMessageChunk fields.
function responseToBaseMessageFields(response) {
  const parts = responseToParts(response);
  return partsToBaseMessageChunkFields(parts);
}
785
/** Thought signatures per part, "" where a part carries none. */
function partsToSignatures(parts) {
  const signatures = [];
  for (const part of parts) {
    signatures.push(part?.thoughtSignature ?? "");
  }
  return signatures;
}
788
// Assemble AIMessageChunk constructor fields from response parts:
// message content, tool_call_chunks (raw string args), parsed
// tool_calls (or invalid_tool_calls when argument JSON fails to parse),
// legacy additional_kwargs.tool_calls, and per-part thought signatures.
function partsToBaseMessageChunkFields(parts) {
  const fields = {
    content: partsToMessageContent(parts),
    tool_call_chunks: [],
    tool_calls: [],
    invalid_tool_calls: [],
    response_metadata: { model_provider: "google-vertexai" }
  };
  fields.additional_kwargs = {};
  const rawTools = partsToToolsRaw(parts);
  if (rawTools.length > 0) {
    const tools = toolsRawToTools(rawTools);
    for (const tool of tools) {
      fields.tool_call_chunks?.push({
        name: tool.function.name,
        args: tool.function.arguments,
        id: tool.id,
        type: "tool_call_chunk"
      });
      try {
        fields.tool_calls?.push({
          name: tool.function.name,
          args: JSON.parse(tool.function.arguments),
          id: tool.id
        });
      } catch (e) {
        // Arguments that are not valid JSON are preserved as an
        // invalid tool call rather than dropped.
        fields.invalid_tool_calls?.push({
          name: tool.function.name,
          args: tool.function.arguments,
          id: tool.id,
          error: e.message,
          type: "invalid_tool_call"
        });
      }
    }
    fields.additional_kwargs.tool_calls = tools;
  }
  fields.additional_kwargs.signatures = partsToSignatures(parts);
  return fields;
}
// Convert a response into a complete AIMessage.
function responseToBaseMessage(response) {
  const fields = responseToBaseMessageFields(response);
  return new AIMessage(fields);
}
// Safety-checked variant of responseToBaseMessage.
function safeResponseToBaseMessage(response) {
  return safeResponseTo(response, responseToBaseMessage);
}
// Convert a response into a ChatResult (generations + llmOutput).
function responseToChatResult(response) {
  const generations = responseToChatGenerations(response);
  return {
    generations,
    llmOutput: responseToGenerationInfo(response)
  };
}
// Safety-checked variant of responseToChatResult.
function safeResponseToChatResult(response) {
  return safeResponseTo(response, responseToChatResult);
}
845
/**
 * Classify the model input: a plain string is "MessageContent"; an
 * array whose first element has an own `content` property is a
 * "BaseMessageArray", otherwise it is treated as "MessageContent".
 */
function inputType(input) {
  if (typeof input === "string") {
    return "MessageContent";
  }
  const [first] = input;
  return Object.hasOwn(first, "content") ? "BaseMessageArray" : "MessageContent";
}
853
// Format a bare MessageContent as a single user Content entry.
async function formatMessageContents(input, _parameters) {
  const parts = await messageContentToParts(input);
  const contents = [{
    role: "user",
    parts
  }];
  return contents;
}
// Convert a BaseMessage array to Gemini Contents. Entries that are all
// system-role are dropped here (system handling lives in
// formatSystemInstruction / systemMessageToContent), and consecutive
// "function" responses are merged into a single Content entry.
async function formatBaseMessageContents(input, _parameters) {
  // Each message is converted with its predecessor available (tool
  // messages need it to recover the function name).
  const inputPromises = input.map((msg, i) => baseMessageToContent(msg, input[i - 1]));
  const inputs = await Promise.all(inputPromises);
  return inputs.reduce((acc, cur) => {
    if (cur.every((content) => content.role === "system")) return acc;
    if (cur[0]?.role === "function" && acc.length > 0 && acc[acc.length - 1].role === "function") acc[acc.length - 1].parts = [...acc[acc.length - 1].parts, ...cur[0].parts];
    else acc.push(...cur);
    return acc;
  }, []);
}
// Dispatch input formatting based on the detected input type.
async function formatContents(input, parameters) {
  const it = inputType(input);
  switch (it) {
    case "MessageContent": return formatMessageContents(input, parameters);
    case "BaseMessageArray": return formatBaseMessageContents(input, parameters);
    default: throw new Error(`Unknown input type "${it}": ${input}`);
  }
}
879
// Map call parameters onto the Gemini generationConfig payload.
// Logprob settings are emitted only when `logprobs` is set, and a
// thinkingConfig is derived from maxReasoningTokens (a budget of 0
// disables thought output). Undefined entries are stripped so the wire
// payload only carries explicit settings.
function formatGenerationConfig(parameters) {
  const ret = {
    temperature: parameters.temperature,
    topK: parameters.topK,
    topP: parameters.topP,
    seed: parameters.seed,
    presencePenalty: parameters.presencePenalty,
    frequencyPenalty: parameters.frequencyPenalty,
    maxOutputTokens: parameters.maxOutputTokens,
    stopSequences: parameters.stopSequences,
    responseMimeType: parameters.responseMimeType,
    responseModalities: parameters.responseModalities,
    speechConfig: normalizeSpeechConfig(parameters.speechConfig)
  };
  if (typeof parameters.logprobs !== "undefined") {
    ret.responseLogprobs = parameters.logprobs;
    if (parameters.logprobs && typeof parameters.topLogprobs !== "undefined") ret.logprobs = parameters.topLogprobs;
  }
  if (typeof parameters.maxReasoningTokens !== "undefined") {
    const includeThoughts = parameters.maxReasoningTokens !== 0;
    ret.thinkingConfig = {
      thinkingBudget: parameters.maxReasoningTokens,
      includeThoughts
    };
  }
  // Strip undefined values so they are not serialized onto the wire.
  let attribute;
  for (attribute in ret) if (ret[attribute] === void 0) delete ret[attribute];
  return ret;
}
908
/** The configured safety settings, or [] when none are provided. */
function formatSafetySettings(parameters) {
  const { safetySettings } = parameters;
  return safetySettings ?? [];
}
911
// Pull a leading system message (if any) out of a BaseMessage array and
// convert it into the Content used as systemInstruction. A system
// message anywhere but position 0 is rejected (the else binds to the
// inner `if (index === 0)`).
async function formatBaseMessageSystemInstruction(input) {
  let ret = {};
  for (let index = 0; index < input.length; index += 1) {
    const message = input[index];
    if (message.getType() === "system")
      if (index === 0) ret = (await baseMessageToContent(message, void 0))[0];
      else throw new Error("System messages are only permitted as the first passed message.");
  }
  return ret;
}
// Build the systemInstruction payload — but only when configuration
// opts in to native system-instruction support and the input is a
// BaseMessage array. Returns an empty object otherwise.
async function formatSystemInstruction(input) {
  if (!config?.useSystemInstruction) return {};
  const it = inputType(input);
  switch (it) {
    case "BaseMessageArray": return formatBaseMessageSystemInstruction(input);
    default: return {};
  }
}
928
// Turn a LangChain structured tool into a Gemini FunctionDeclaration,
// converting its schema to Gemini-style parameters.
function structuredToolToFunctionDeclaration(tool) {
  const jsonSchema = schemaToGeminiParameters(tool.schema);
  return {
    name: tool.name,
    description: tool.description ?? `A function available to call.`,
    parameters: jsonSchema
  };
}
// Return which Google-search tool attribute (if any) this tool carries.
function searchToolName(tool) {
  for (const name of GeminiSearchToolAttributes) if (name in tool) return name;
  return void 0;
}
// Rewrite a search tool to the attribute name from configuration
// (presumably because API variants name the search tool differently —
// confirm against connection config); other tools pass through as-is.
function cleanGeminiTool(tool) {
  const orig = searchToolName(tool);
  const adj = config?.googleSearchToolAdjustment;
  if (orig && adj && adj !== orig) return { [adj]: {} };
  else return tool;
}
// Split tools into LangChain structured tools (grouped into a single
// functionDeclarations entry) and provider-native tools (normalized via
// cleanGeminiTool). Returns [] when no tools are configured.
function formatTools(parameters) {
  const tools = parameters?.tools;
  if (!tools || tools.length === 0) return [];
  const langChainTools = [];
  const otherTools = [];
  tools.forEach((tool) => {
    if (isLangChainTool(tool)) langChainTools.push(tool);
    else otherTools.push(cleanGeminiTool(tool));
  });
  const result = [...otherTools];
  if (langChainTools.length > 0) result.push({ functionDeclarations: langChainTools.map(structuredToolToFunctionDeclaration) });
  return result;
}
959
/**
 * Translate tool_choice into a Gemini functionCallingConfig.
 * - undefined / non-string: no config (undefined)
 * - "auto" | "any" | "none": used directly as the calling mode
 * - any other string: treated as a function name the model must call
 *   (mode "any" restricted to that single name)
 */
function formatToolConfig(parameters) {
  const choice = parameters.tool_choice;
  if (!choice || typeof choice !== "string") {
    return void 0;
  }
  if (["auto", "any", "none"].includes(choice)) {
    return {
      functionCallingConfig: {
        mode: choice,
        allowedFunctionNames: parameters.allowed_function_names,
      },
    };
  }
  return {
    functionCallingConfig: {
      mode: "any",
      allowedFunctionNames: [choice],
    },
  };
}
974
// Assemble the full request payload for a generate call: contents and
// generationConfig always; tools, toolConfig, safetySettings,
// systemInstruction, cachedContent and labels only when non-empty.
async function formatData(input, parameters) {
  const typedInput = input;
  const contents = await formatContents(typedInput, parameters);
  const generationConfig = formatGenerationConfig(parameters);
  const tools = formatTools(parameters);
  const toolConfig = formatToolConfig(parameters);
  const safetySettings = formatSafetySettings(parameters);
  const systemInstruction = await formatSystemInstruction(typedInput);
  const ret = {
    contents,
    generationConfig
  };
  if (tools && tools.length) ret.tools = tools;
  if (toolConfig) ret.toolConfig = toolConfig;
  if (safetySettings && safetySettings.length) ret.safetySettings = safetySettings;
  // Only attach a systemInstruction that actually carries parts.
  if (systemInstruction?.role && systemInstruction?.parts && systemInstruction?.parts?.length) ret.systemInstruction = systemInstruction;
  if (parameters.cachedContent) ret.cachedContent = parameters.cachedContent;
  if (parameters.labels && Object.keys(parameters.labels).length > 0) ret.labels = parameters.labels;
  return ret;
}
994
+ return {
995
+ messageContentToParts,
996
+ baseMessageToContent,
997
+ responseToString: safeResponseToString,
998
+ responseToChatGeneration: safeResponseToChatGeneration,
999
+ chunkToString,
1000
+ responseToBaseMessage: safeResponseToBaseMessage,
1001
+ responseToChatResult: safeResponseToChatResult,
1002
+ formatData
1003
+ };
1461
1004
  }
1462
/**
 * Validate Gemini generation parameters, throwing on out-of-range
 * values. Falsy values (undefined, 0) skip the respective range checks;
 * maxReasoningTokens must be strictly less than maxOutputTokens when
 * both are set.
 */
function validateGeminiParams(params) {
  const { maxOutputTokens, maxReasoningTokens, temperature, topP, topK } = params;
  if (maxOutputTokens && maxOutputTokens < 0) {
    throw new Error("`maxOutputTokens` must be a positive integer");
  }
  if (
    typeof maxReasoningTokens !== "undefined" &&
    typeof maxOutputTokens !== "undefined" &&
    maxReasoningTokens >= maxOutputTokens
  ) {
    throw new Error("`maxOutputTokens` must be greater than `maxReasoningTokens`");
  }
  if (temperature && (temperature < 0 || temperature > 2)) {
    throw new Error("`temperature` must be in the range of [0.0,2.0]");
  }
  if (topP && (topP < 0 || topP > 1)) {
    throw new Error("`topP` must be in the range of [0.0,1.0]");
  }
  if (topK && topK < 0) {
    throw new Error("`topK` must be a positive integer");
  }
}
1484
/** Whether the (case-insensitive) model name denotes a Gemini model. */
function isModelGemini(modelName) {
  const normalized = modelName.toLowerCase();
  return normalized.startsWith("gemini");
}
1487
/** Whether the (case-insensitive) model name denotes a Gemma model. */
function isModelGemma(modelName) {
  const normalized = modelName.toLowerCase();
  return normalized.startsWith("gemma");
}
1022
+
1023
+ //#endregion
1024
+ export { DefaultGeminiSafetyHandler, MessageGeminiSafetyHandler, getGeminiAPI, isModelGemini, isModelGemma, normalizeSpeechConfig, validateGeminiParams };
1025
+ //# sourceMappingURL=gemini.js.map