@langchain/google-common 0.2.18 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173) hide show
  1. package/CHANGELOG.md +23 -0
  2. package/LICENSE +6 -6
  3. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  4. package/dist/auth.cjs +82 -116
  5. package/dist/auth.cjs.map +1 -0
  6. package/dist/auth.d.cts +46 -0
  7. package/dist/auth.d.cts.map +1 -0
  8. package/dist/auth.d.ts +41 -36
  9. package/dist/auth.d.ts.map +1 -0
  10. package/dist/auth.js +80 -110
  11. package/dist/auth.js.map +1 -0
  12. package/dist/chat_models.cjs +264 -466
  13. package/dist/chat_models.cjs.map +1 -0
  14. package/dist/chat_models.d.cts +109 -0
  15. package/dist/chat_models.d.cts.map +1 -0
  16. package/dist/chat_models.d.ts +98 -73
  17. package/dist/chat_models.d.ts.map +1 -0
  18. package/dist/chat_models.js +258 -457
  19. package/dist/chat_models.js.map +1 -0
  20. package/dist/connection.cjs +321 -466
  21. package/dist/connection.cjs.map +1 -0
  22. package/dist/connection.d.cts +109 -0
  23. package/dist/connection.d.cts.map +1 -0
  24. package/dist/connection.d.ts +98 -91
  25. package/dist/connection.d.ts.map +1 -0
  26. package/dist/connection.js +317 -459
  27. package/dist/connection.js.map +1 -0
  28. package/dist/embeddings.cjs +135 -186
  29. package/dist/embeddings.cjs.map +1 -0
  30. package/dist/embeddings.d.cts +44 -0
  31. package/dist/embeddings.d.cts.map +1 -0
  32. package/dist/embeddings.d.ts +38 -32
  33. package/dist/embeddings.d.ts.map +1 -0
  34. package/dist/embeddings.js +133 -181
  35. package/dist/embeddings.js.map +1 -0
  36. package/dist/experimental/media.cjs +380 -482
  37. package/dist/experimental/media.cjs.map +1 -0
  38. package/dist/experimental/media.d.cts +198 -0
  39. package/dist/experimental/media.d.cts.map +1 -0
  40. package/dist/experimental/media.d.ts +190 -202
  41. package/dist/experimental/media.d.ts.map +1 -0
  42. package/dist/experimental/media.js +369 -468
  43. package/dist/experimental/media.js.map +1 -0
  44. package/dist/experimental/utils/media_core.cjs +403 -517
  45. package/dist/experimental/utils/media_core.cjs.map +1 -0
  46. package/dist/experimental/utils/media_core.d.cts +215 -0
  47. package/dist/experimental/utils/media_core.d.cts.map +1 -0
  48. package/dist/experimental/utils/media_core.d.ts +171 -165
  49. package/dist/experimental/utils/media_core.d.ts.map +1 -0
  50. package/dist/experimental/utils/media_core.js +395 -506
  51. package/dist/experimental/utils/media_core.js.map +1 -0
  52. package/dist/index.cjs +58 -27
  53. package/dist/index.d.cts +13 -0
  54. package/dist/index.d.ts +13 -11
  55. package/dist/index.js +13 -11
  56. package/dist/llms.cjs +157 -244
  57. package/dist/llms.cjs.map +1 -0
  58. package/dist/llms.d.cts +72 -0
  59. package/dist/llms.d.cts.map +1 -0
  60. package/dist/llms.d.ts +64 -54
  61. package/dist/llms.d.ts.map +1 -0
  62. package/dist/llms.js +154 -238
  63. package/dist/llms.js.map +1 -0
  64. package/dist/output_parsers.cjs +148 -173
  65. package/dist/output_parsers.cjs.map +1 -0
  66. package/dist/output_parsers.d.cts +53 -0
  67. package/dist/output_parsers.d.cts.map +1 -0
  68. package/dist/output_parsers.d.ts +46 -42
  69. package/dist/output_parsers.d.ts.map +1 -0
  70. package/dist/output_parsers.js +146 -168
  71. package/dist/output_parsers.js.map +1 -0
  72. package/dist/profiles.cjs +219 -0
  73. package/dist/profiles.cjs.map +1 -0
  74. package/dist/profiles.js +218 -0
  75. package/dist/profiles.js.map +1 -0
  76. package/dist/types-anthropic.d.cts +229 -0
  77. package/dist/types-anthropic.d.cts.map +1 -0
  78. package/dist/types-anthropic.d.ts +221 -215
  79. package/dist/types-anthropic.d.ts.map +1 -0
  80. package/dist/types.cjs +51 -62
  81. package/dist/types.cjs.map +1 -0
  82. package/dist/types.d.cts +748 -0
  83. package/dist/types.d.cts.map +1 -0
  84. package/dist/types.d.ts +669 -656
  85. package/dist/types.d.ts.map +1 -0
  86. package/dist/types.js +46 -45
  87. package/dist/types.js.map +1 -0
  88. package/dist/utils/anthropic.cjs +598 -821
  89. package/dist/utils/anthropic.cjs.map +1 -0
  90. package/dist/utils/anthropic.js +597 -818
  91. package/dist/utils/anthropic.js.map +1 -0
  92. package/dist/utils/common.cjs +130 -211
  93. package/dist/utils/common.cjs.map +1 -0
  94. package/dist/utils/common.d.cts +13 -0
  95. package/dist/utils/common.d.cts.map +1 -0
  96. package/dist/utils/common.d.ts +12 -7
  97. package/dist/utils/common.d.ts.map +1 -0
  98. package/dist/utils/common.js +128 -207
  99. package/dist/utils/common.js.map +1 -0
  100. package/dist/utils/failed_handler.cjs +28 -30
  101. package/dist/utils/failed_handler.cjs.map +1 -0
  102. package/dist/utils/failed_handler.d.cts +9 -0
  103. package/dist/utils/failed_handler.d.cts.map +1 -0
  104. package/dist/utils/failed_handler.d.ts +8 -2
  105. package/dist/utils/failed_handler.d.ts.map +1 -0
  106. package/dist/utils/failed_handler.js +28 -28
  107. package/dist/utils/failed_handler.js.map +1 -0
  108. package/dist/utils/gemini.cjs +1020 -1488
  109. package/dist/utils/gemini.cjs.map +1 -0
  110. package/dist/utils/gemini.d.cts +51 -0
  111. package/dist/utils/gemini.d.cts.map +1 -0
  112. package/dist/utils/gemini.d.ts +51 -48
  113. package/dist/utils/gemini.d.ts.map +1 -0
  114. package/dist/utils/gemini.js +1015 -1479
  115. package/dist/utils/gemini.js.map +1 -0
  116. package/dist/utils/index.cjs +38 -23
  117. package/dist/utils/index.d.cts +8 -0
  118. package/dist/utils/index.d.ts +8 -7
  119. package/dist/utils/index.js +8 -7
  120. package/dist/utils/palm.d.cts +11 -0
  121. package/dist/utils/palm.d.cts.map +1 -0
  122. package/dist/utils/palm.d.ts +9 -4
  123. package/dist/utils/palm.d.ts.map +1 -0
  124. package/dist/utils/safety.cjs +13 -22
  125. package/dist/utils/safety.cjs.map +1 -0
  126. package/dist/utils/safety.d.cts +12 -0
  127. package/dist/utils/safety.d.cts.map +1 -0
  128. package/dist/utils/safety.d.ts +10 -4
  129. package/dist/utils/safety.d.ts.map +1 -0
  130. package/dist/utils/safety.js +13 -19
  131. package/dist/utils/safety.js.map +1 -0
  132. package/dist/utils/stream.cjs +296 -475
  133. package/dist/utils/stream.cjs.map +1 -0
  134. package/dist/utils/stream.d.cts +165 -0
  135. package/dist/utils/stream.d.cts.map +1 -0
  136. package/dist/utils/stream.d.ts +156 -131
  137. package/dist/utils/stream.d.ts.map +1 -0
  138. package/dist/utils/stream.js +293 -469
  139. package/dist/utils/stream.js.map +1 -0
  140. package/dist/utils/zod_to_gemini_parameters.cjs +43 -81
  141. package/dist/utils/zod_to_gemini_parameters.cjs.map +1 -0
  142. package/dist/utils/zod_to_gemini_parameters.d.cts +22 -0
  143. package/dist/utils/zod_to_gemini_parameters.d.cts.map +1 -0
  144. package/dist/utils/zod_to_gemini_parameters.d.ts +21 -6
  145. package/dist/utils/zod_to_gemini_parameters.d.ts.map +1 -0
  146. package/dist/utils/zod_to_gemini_parameters.js +40 -76
  147. package/dist/utils/zod_to_gemini_parameters.js.map +1 -0
  148. package/package.json +72 -85
  149. package/dist/types-anthropic.cjs +0 -2
  150. package/dist/types-anthropic.js +0 -1
  151. package/dist/utils/anthropic.d.ts +0 -4
  152. package/dist/utils/palm.cjs +0 -2
  153. package/dist/utils/palm.js +0 -1
  154. package/experimental/media.cjs +0 -1
  155. package/experimental/media.d.cts +0 -1
  156. package/experimental/media.d.ts +0 -1
  157. package/experimental/media.js +0 -1
  158. package/experimental/utils/media_core.cjs +0 -1
  159. package/experimental/utils/media_core.d.cts +0 -1
  160. package/experimental/utils/media_core.d.ts +0 -1
  161. package/experimental/utils/media_core.js +0 -1
  162. package/index.cjs +0 -1
  163. package/index.d.cts +0 -1
  164. package/index.d.ts +0 -1
  165. package/index.js +0 -1
  166. package/types.cjs +0 -1
  167. package/types.d.cts +0 -1
  168. package/types.d.ts +0 -1
  169. package/types.js +0 -1
  170. package/utils.cjs +0 -1
  171. package/utils.d.cts +0 -1
  172. package/utils.d.ts +0 -1
  173. package/utils.js +0 -1
@@ -1,1500 +1,1032 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.MessageGeminiSafetyHandler = exports.DefaultGeminiSafetyHandler = void 0;
4
- exports.normalizeSpeechConfig = normalizeSpeechConfig;
5
- exports.normalizeMessageContentComplex = normalizeMessageContentComplex;
6
- exports.getGeminiAPI = getGeminiAPI;
7
- exports.validateGeminiParams = validateGeminiParams;
8
- exports.isModelGemini = isModelGemini;
9
- exports.isModelGemma = isModelGemma;
10
- const uuid_1 = require("uuid");
11
- const messages_1 = require("@langchain/core/messages");
12
- const outputs_1 = require("@langchain/core/outputs");
13
- const function_calling_1 = require("@langchain/core/utils/function_calling");
14
- const stream_1 = require("@langchain/core/utils/stream");
15
- const types_js_1 = require("../types.cjs");
16
- const safety_js_1 = require("./safety.cjs");
17
- const zod_to_gemini_parameters_js_1 = require("./zod_to_gemini_parameters.cjs");
18
- class DefaultGeminiSafetyHandler {
19
- constructor(settings) {
20
- Object.defineProperty(this, "errorFinish", {
21
- enumerable: true,
22
- configurable: true,
23
- writable: true,
24
- value: ["SAFETY", "RECITATION", "OTHER"]
25
- });
26
- this.errorFinish = settings?.errorFinish ?? this.errorFinish;
27
- }
28
- handleDataPromptFeedback(response, data) {
29
- // Check to see if our prompt was blocked in the first place
30
- const promptFeedback = data?.promptFeedback;
31
- const blockReason = promptFeedback?.blockReason;
32
- if (blockReason) {
33
- throw new safety_js_1.GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`);
34
- }
35
- return data;
36
- }
37
- handleDataFinishReason(response, data) {
38
- const firstCandidate = data?.candidates?.[0];
39
- const finishReason = firstCandidate?.finishReason;
40
- if (this.errorFinish.includes(finishReason)) {
41
- throw new safety_js_1.GoogleAISafetyError(response, `Finish reason: ${finishReason}`);
42
- }
43
- return data;
44
- }
45
- handleData(response, data) {
46
- let ret = data;
47
- ret = this.handleDataPromptFeedback(response, ret);
48
- ret = this.handleDataFinishReason(response, ret);
49
- return ret;
50
- }
51
- handle(response) {
52
- let newdata;
53
- if ("nextChunk" in response.data) {
54
- // TODO: This is a stream. How to handle?
55
- newdata = response.data;
56
- }
57
- else if (Array.isArray(response.data)) {
58
- // If it is an array, try to handle every item in the array
59
- try {
60
- newdata = response.data.map((item) => this.handleData(response, item));
61
- }
62
- catch (xx) {
63
- // eslint-disable-next-line no-instanceof/no-instanceof
64
- if (xx instanceof safety_js_1.GoogleAISafetyError) {
65
- throw new safety_js_1.GoogleAISafetyError(response, xx.message);
66
- }
67
- else {
68
- throw xx;
69
- }
70
- }
71
- }
72
- else {
73
- const data = response.data;
74
- newdata = this.handleData(response, data);
75
- }
76
- return {
77
- ...response,
78
- data: newdata,
79
- };
80
- }
81
- }
82
- exports.DefaultGeminiSafetyHandler = DefaultGeminiSafetyHandler;
83
- class MessageGeminiSafetyHandler extends DefaultGeminiSafetyHandler {
84
- constructor(settings) {
85
- super(settings);
86
- Object.defineProperty(this, "msg", {
87
- enumerable: true,
88
- configurable: true,
89
- writable: true,
90
- value: ""
91
- });
92
- Object.defineProperty(this, "forceNewMessage", {
93
- enumerable: true,
94
- configurable: true,
95
- writable: true,
96
- value: false
97
- });
98
- this.msg = settings?.msg ?? this.msg;
99
- this.forceNewMessage = settings?.forceNewMessage ?? this.forceNewMessage;
100
- }
101
- setMessage(data) {
102
- const ret = data;
103
- if (this.forceNewMessage ||
104
- !data?.candidates?.[0]?.content?.parts?.length) {
105
- ret.candidates = data.candidates ?? [];
106
- ret.candidates[0] = data.candidates[0] ?? {};
107
- ret.candidates[0].content = data.candidates[0].content ?? {};
108
- ret.candidates[0].content = {
109
- role: "model",
110
- parts: [{ text: this.msg }],
111
- };
112
- }
113
- return ret;
114
- }
115
- handleData(response, data) {
116
- try {
117
- return super.handleData(response, data);
118
- }
119
- catch (xx) {
120
- return this.setMessage(data);
121
- }
122
- }
123
- }
124
- exports.MessageGeminiSafetyHandler = MessageGeminiSafetyHandler;
1
+ const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
2
+ const require_types = require('../types.cjs');
3
+ const require_safety = require('./safety.cjs');
4
+ const require_zod_to_gemini_parameters = require('./zod_to_gemini_parameters.cjs');
5
+ const __langchain_core_outputs = require_rolldown_runtime.__toESM(require("@langchain/core/outputs"));
6
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
7
+ const __langchain_core_utils_stream = require_rolldown_runtime.__toESM(require("@langchain/core/utils/stream"));
8
+ const __langchain_core_utils_function_calling = require_rolldown_runtime.__toESM(require("@langchain/core/utils/function_calling"));
9
+ const uuid = require_rolldown_runtime.__toESM(require("uuid"));
10
+
11
+ //#region src/utils/gemini.ts
12
+ var DefaultGeminiSafetyHandler = class {
13
+ errorFinish = [
14
+ "SAFETY",
15
+ "RECITATION",
16
+ "OTHER"
17
+ ];
18
+ constructor(settings) {
19
+ this.errorFinish = settings?.errorFinish ?? this.errorFinish;
20
+ }
21
+ handleDataPromptFeedback(response, data) {
22
+ const promptFeedback = data?.promptFeedback;
23
+ const blockReason = promptFeedback?.blockReason;
24
+ if (blockReason) throw new require_safety.GoogleAISafetyError(response, `Prompt blocked: ${blockReason}`);
25
+ return data;
26
+ }
27
+ handleDataFinishReason(response, data) {
28
+ const firstCandidate = data?.candidates?.[0];
29
+ const finishReason = firstCandidate?.finishReason;
30
+ if (this.errorFinish.includes(finishReason)) throw new require_safety.GoogleAISafetyError(response, `Finish reason: ${finishReason}`);
31
+ return data;
32
+ }
33
+ handleData(response, data) {
34
+ let ret = data;
35
+ ret = this.handleDataPromptFeedback(response, ret);
36
+ ret = this.handleDataFinishReason(response, ret);
37
+ return ret;
38
+ }
39
+ handle(response) {
40
+ let newdata;
41
+ if ("nextChunk" in response.data) newdata = response.data;
42
+ else if (Array.isArray(response.data)) try {
43
+ newdata = response.data.map((item) => this.handleData(response, item));
44
+ } catch (xx) {
45
+ if (xx instanceof require_safety.GoogleAISafetyError) throw new require_safety.GoogleAISafetyError(response, xx.message);
46
+ else throw xx;
47
+ }
48
+ else {
49
+ const data = response.data;
50
+ newdata = this.handleData(response, data);
51
+ }
52
+ return {
53
+ ...response,
54
+ data: newdata
55
+ };
56
+ }
57
+ };
58
+ var MessageGeminiSafetyHandler = class extends DefaultGeminiSafetyHandler {
59
+ msg = "";
60
+ forceNewMessage = false;
61
+ constructor(settings) {
62
+ super(settings);
63
+ this.msg = settings?.msg ?? this.msg;
64
+ this.forceNewMessage = settings?.forceNewMessage ?? this.forceNewMessage;
65
+ }
66
+ setMessage(data) {
67
+ const ret = data;
68
+ if (this.forceNewMessage || !data?.candidates?.[0]?.content?.parts?.length) {
69
+ ret.candidates = data.candidates ?? [];
70
+ ret.candidates[0] = data.candidates[0] ?? {};
71
+ ret.candidates[0].content = data.candidates[0].content ?? {};
72
+ ret.candidates[0].content = {
73
+ role: "model",
74
+ parts: [{ text: this.msg }]
75
+ };
76
+ }
77
+ return ret;
78
+ }
79
+ handleData(response, data) {
80
+ try {
81
+ return super.handleData(response, data);
82
+ } catch {
83
+ return this.setMessage(data);
84
+ }
85
+ }
86
+ };
125
87
  const extractMimeType = (str) => {
126
- if (str.startsWith("data:")) {
127
- return {
128
- mimeType: str.split(":")[1].split(";")[0],
129
- data: str.split(",")[1],
130
- };
131
- }
132
- return null;
88
+ if (str.startsWith("data:")) return {
89
+ mimeType: str.split(":")[1].split(";")[0],
90
+ data: str.split(",")[1]
91
+ };
92
+ return null;
133
93
  };
134
- function normalizeSpeechConfig(config) {
135
- function isSpeechConfig(config) {
136
- return (typeof config === "object" &&
137
- (Object.hasOwn(config, "voiceConfig") ||
138
- Object.hasOwn(config, "multiSpeakerVoiceConfig")));
139
- }
140
- function hasLanguage(config) {
141
- return typeof config === "object" && Object.hasOwn(config, "languageCode");
142
- }
143
- function hasVoice(config) {
144
- return Object.hasOwn(config, "voice");
145
- }
146
- if (typeof config === "undefined") {
147
- return undefined;
148
- }
149
- // If this is already a GoogleSpeechConfig, just return it
150
- if (isSpeechConfig(config)) {
151
- return config;
152
- }
153
- let languageCode;
154
- let voice;
155
- if (hasLanguage(config)) {
156
- languageCode = config.languageCode;
157
- voice = hasVoice(config) ? config.voice : config.voices;
158
- }
159
- else {
160
- languageCode = undefined;
161
- voice = config;
162
- }
163
- let ret;
164
- if (typeof voice === "string") {
165
- // They just provided the prebuilt voice configuration name. Use it.
166
- ret = {
167
- voiceConfig: {
168
- prebuiltVoiceConfig: {
169
- voiceName: voice,
170
- },
171
- },
172
- };
173
- }
174
- else {
175
- // This is multi-speaker, so we have speaker/name pairs
176
- // If we have just one (why?), turn it into an array for the moment
177
- const voices = Array.isArray(voice)
178
- ? voice
179
- : [voice];
180
- // Go through all the speaker/name pairs and turn this into the voice config array
181
- const speakerVoiceConfigs = voices.map((v) => ({
182
- speaker: v.speaker,
183
- voiceConfig: {
184
- prebuiltVoiceConfig: {
185
- voiceName: v.name,
186
- },
187
- },
188
- }));
189
- // Create the multi-speaker voice configuration
190
- ret = {
191
- multiSpeakerVoiceConfig: {
192
- speakerVoiceConfigs,
193
- },
194
- };
195
- }
196
- if (languageCode) {
197
- ret.languageCode = languageCode;
198
- }
199
- return ret;
94
+ /**
95
+ * Infers the MIME type from a URL based on its file extension.
96
+ * This is used as a fallback when the MIME type is not provided.
97
+ *
98
+ * @param url - The URL to infer the MIME type from
99
+ * @returns The inferred MIME type or undefined if it cannot be determined
100
+ */
101
+ function inferMimeTypeFromUrl(url) {
102
+ const mimeTypeMap = {
103
+ jpg: "image/jpeg",
104
+ jpeg: "image/jpeg",
105
+ png: "image/png",
106
+ gif: "image/gif",
107
+ webp: "image/webp",
108
+ bmp: "image/bmp",
109
+ svg: "image/svg+xml",
110
+ ico: "image/x-icon",
111
+ tiff: "image/tiff",
112
+ tif: "image/tiff"
113
+ };
114
+ try {
115
+ const pathname = new URL(url).pathname;
116
+ const extension = pathname.split(".").pop()?.toLowerCase().split(/[?#]/)[0];
117
+ return extension ? mimeTypeMap[extension] : void 0;
118
+ } catch {
119
+ const match = url.match(/\.([a-zA-Z0-9]+)(?:[?#]|$)/);
120
+ if (match) {
121
+ const extension = match[1].toLowerCase();
122
+ return mimeTypeMap[extension];
123
+ }
124
+ return void 0;
125
+ }
200
126
  }
201
- // Compatibility layer for other well known content block types
202
- function normalizeMessageContentComplex(content) {
203
- return content.map((c) => {
204
- // OpenAI completions `input_audio`
205
- if (c.type === "input_audio" &&
206
- "input_audio" in c &&
207
- typeof c.input_audio === "object") {
208
- const { format, data } = c.input_audio;
209
- if (format === "wav") {
210
- return {
211
- type: "audio",
212
- source_type: "base64",
213
- mime_type: "audio/wav",
214
- data,
215
- };
216
- }
217
- }
218
- // OpenAI completions `image_url`
219
- if (c.type === "image_url" &&
220
- "image_url" in c &&
221
- typeof c.image_url === "object") {
222
- const { url } = c.image_url;
223
- return {
224
- type: "image",
225
- source_type: "url",
226
- url,
227
- };
228
- }
229
- // OpenAI completions `file`
230
- if (c.type === "file" &&
231
- "file" in c &&
232
- typeof c.file === "object" &&
233
- "file_data" in c.file) {
234
- const { file_data } = c.file;
235
- return {
236
- type: "file",
237
- source_type: "base64",
238
- data: file_data,
239
- };
240
- }
241
- return c;
242
- });
127
+ function normalizeSpeechConfig(config) {
128
+ function isSpeechConfig(config$1) {
129
+ return typeof config$1 === "object" && (Object.hasOwn(config$1, "voiceConfig") || Object.hasOwn(config$1, "multiSpeakerVoiceConfig"));
130
+ }
131
+ function hasLanguage(config$1) {
132
+ return typeof config$1 === "object" && Object.hasOwn(config$1, "languageCode");
133
+ }
134
+ function hasVoice(config$1) {
135
+ return Object.hasOwn(config$1, "voice");
136
+ }
137
+ if (typeof config === "undefined") return void 0;
138
+ if (isSpeechConfig(config)) return config;
139
+ let languageCode;
140
+ let voice;
141
+ if (hasLanguage(config)) {
142
+ languageCode = config.languageCode;
143
+ voice = hasVoice(config) ? config.voice : config.voices;
144
+ } else {
145
+ languageCode = void 0;
146
+ voice = config;
147
+ }
148
+ let ret;
149
+ if (typeof voice === "string") ret = { voiceConfig: { prebuiltVoiceConfig: { voiceName: voice } } };
150
+ else {
151
+ const voices = Array.isArray(voice) ? voice : [voice];
152
+ const speakerVoiceConfigs = voices.map((v) => ({
153
+ speaker: v.speaker,
154
+ voiceConfig: { prebuiltVoiceConfig: { voiceName: v.name } }
155
+ }));
156
+ ret = { multiSpeakerVoiceConfig: { speakerVoiceConfigs } };
157
+ }
158
+ if (languageCode) ret.languageCode = languageCode;
159
+ return ret;
243
160
  }
244
161
  function getGeminiAPI(config) {
245
- function messageContentText(content) {
246
- if (content?.text && content?.text.length > 0) {
247
- return {
248
- text: content.text,
249
- };
250
- }
251
- else {
252
- return null;
253
- }
254
- }
255
- function messageContentImageUrlData(content) {
256
- const url = typeof content.image_url === "string"
257
- ? content.image_url
258
- : content.image_url.url;
259
- if (!url) {
260
- throw new Error("Missing Image URL");
261
- }
262
- const mimeTypeAndData = extractMimeType(url);
263
- if (mimeTypeAndData) {
264
- return {
265
- inlineData: mimeTypeAndData,
266
- };
267
- }
268
- else {
269
- // FIXME - need some way to get mime type
270
- return {
271
- fileData: {
272
- mimeType: "image/png",
273
- fileUri: url,
274
- },
275
- };
276
- }
277
- }
278
- function messageContentImageUrl(content) {
279
- const ret = messageContentImageUrlData(content);
280
- supplementVideoMetadata(content, ret);
281
- return ret;
282
- }
283
- async function blobToFileData(blob) {
284
- return {
285
- fileData: {
286
- fileUri: blob.path,
287
- mimeType: blob.mimetype,
288
- },
289
- };
290
- }
291
- async function fileUriContentToBlob(uri) {
292
- return config?.mediaManager?.getMediaBlob(uri);
293
- }
294
- async function messageContentMediaData(
295
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
296
- content) {
297
- if ("mimeType" in content && "data" in content) {
298
- return {
299
- inlineData: {
300
- mimeType: content.mimeType,
301
- data: content.data,
302
- },
303
- };
304
- }
305
- else if ("mimeType" in content && "fileUri" in content) {
306
- return {
307
- fileData: {
308
- mimeType: content.mimeType,
309
- fileUri: content.fileUri,
310
- },
311
- };
312
- }
313
- else {
314
- const uri = content.fileUri;
315
- const blob = await fileUriContentToBlob(uri);
316
- if (blob) {
317
- return await blobToFileData(blob);
318
- }
319
- }
320
- throw new Error(`Invalid media content: ${JSON.stringify(content, null, 1)}`);
321
- }
322
- function supplementVideoMetadata(
323
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
324
- content, ret) {
325
- // Add videoMetadata if defined
326
- if ("videoMetadata" in content && typeof ret === "object") {
327
- // eslint-disable-next-line no-param-reassign
328
- ret.videoMetadata = content.videoMetadata;
329
- }
330
- return ret;
331
- }
332
- async function messageContentMedia(
333
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
334
- content) {
335
- const ret = await messageContentMediaData(content);
336
- supplementVideoMetadata(content, ret);
337
- return ret;
338
- }
339
- function messageContentReasoning(content) {
340
- if (content?.reasoning && content?.reasoning.length > 0) {
341
- return {
342
- text: content.reasoning,
343
- thought: true,
344
- };
345
- }
346
- else {
347
- return null;
348
- }
349
- }
350
- const standardContentBlockConverter = {
351
- providerName: "Google Gemini",
352
- fromStandardTextBlock(block) {
353
- return {
354
- text: block.text,
355
- };
356
- },
357
- fromStandardImageBlock(block) {
358
- if (block.source_type === "url") {
359
- const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
360
- if (data) {
361
- return {
362
- inlineData: {
363
- mimeType: data.mime_type,
364
- data: data.data,
365
- },
366
- };
367
- }
368
- else {
369
- return {
370
- fileData: {
371
- mimeType: block.mime_type ?? "",
372
- fileUri: block.url,
373
- },
374
- };
375
- }
376
- }
377
- if (block.source_type === "base64") {
378
- return {
379
- inlineData: {
380
- mimeType: block.mime_type ?? "",
381
- data: block.data,
382
- },
383
- };
384
- }
385
- throw new Error(`Unsupported source type: ${block.source_type}`);
386
- },
387
- fromStandardAudioBlock(block) {
388
- if (block.source_type === "url") {
389
- const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
390
- if (data) {
391
- return {
392
- inlineData: {
393
- mimeType: data.mime_type,
394
- data: data.data,
395
- },
396
- };
397
- }
398
- else {
399
- return {
400
- fileData: {
401
- mimeType: block.mime_type ?? "",
402
- fileUri: block.url,
403
- },
404
- };
405
- }
406
- }
407
- if (block.source_type === "base64") {
408
- return {
409
- inlineData: {
410
- mimeType: block.mime_type ?? "",
411
- data: block.data,
412
- },
413
- };
414
- }
415
- throw new Error(`Unsupported source type: ${block.source_type}`);
416
- },
417
- fromStandardFileBlock(block) {
418
- if (block.source_type === "text") {
419
- return {
420
- text: block.text,
421
- };
422
- }
423
- if (block.source_type === "url") {
424
- const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
425
- if (data) {
426
- return {
427
- inlineData: {
428
- mimeType: data.mime_type,
429
- data: data.data,
430
- },
431
- };
432
- }
433
- else {
434
- return {
435
- fileData: {
436
- mimeType: block.mime_type ?? "",
437
- fileUri: block.url,
438
- },
439
- };
440
- }
441
- }
442
- if (block.source_type === "base64") {
443
- return {
444
- inlineData: {
445
- mimeType: block.mime_type ?? "",
446
- data: block.data,
447
- },
448
- };
449
- }
450
- throw new Error(`Unsupported source type: ${block.source_type}`);
451
- },
452
- };
453
- async function messageContentComplexToPart(content) {
454
- switch (content.type) {
455
- case "text":
456
- if ("text" in content) {
457
- return messageContentText(content);
458
- }
459
- break;
460
- case "image_url":
461
- if ("image_url" in content) {
462
- // Type guard for MessageContentImageUrl
463
- return messageContentImageUrl(content);
464
- }
465
- break;
466
- case "media":
467
- return await messageContentMedia(content);
468
- case "reasoning":
469
- return messageContentReasoning(content);
470
- default:
471
- throw new Error(`Unsupported type "${content.type}" received while converting message to message parts: ${JSON.stringify(content)}`);
472
- }
473
- throw new Error(`Cannot coerce "${content.type}" message part into a string.`);
474
- }
475
- async function messageContentComplexToParts(content) {
476
- const contents = content.map((m) => (0, messages_1.isDataContentBlock)(m)
477
- ? (0, messages_1.convertToProviderContentBlock)(m, standardContentBlockConverter)
478
- : messageContentComplexToPart(m));
479
- return Promise.all(contents);
480
- }
481
- async function messageContentToParts(content) {
482
- // Convert a string to a text type MessageContent if needed
483
- const messageContent = typeof content === "string"
484
- ? [
485
- {
486
- type: "text",
487
- text: content,
488
- },
489
- ]
490
- : content;
491
- // Normalize the content to use standard format
492
- const normalizedContent = normalizeMessageContentComplex(messageContent);
493
- // Get all of the parts, even those that don't correctly resolve
494
- const allParts = await messageContentComplexToParts(normalizedContent);
495
- // Remove any invalid parts
496
- const parts = allParts.reduce((acc, val) => {
497
- if (val) {
498
- return [...acc, val];
499
- }
500
- else {
501
- return acc;
502
- }
503
- }, []);
504
- return parts;
505
- }
506
- function messageToolCallsToParts(toolCalls) {
507
- if (!toolCalls || toolCalls.length === 0) {
508
- return [];
509
- }
510
- return toolCalls.map((tool) => {
511
- let args = {};
512
- if (tool?.function?.arguments) {
513
- const argStr = tool.function.arguments;
514
- args = JSON.parse(argStr);
515
- }
516
- return {
517
- functionCall: {
518
- name: tool.function.name,
519
- args,
520
- },
521
- };
522
- });
523
- }
524
- function messageKwargsToParts(kwargs) {
525
- const ret = [];
526
- if (kwargs?.tool_calls) {
527
- ret.push(...messageToolCallsToParts(kwargs.tool_calls));
528
- }
529
- return ret;
530
- }
531
- async function roleMessageToContent(role, message) {
532
- const contentParts = await messageContentToParts(message.content);
533
- let toolParts;
534
- if ((0, messages_1.isAIMessage)(message) && !!message.tool_calls?.length) {
535
- toolParts = message.tool_calls.map((toolCall) => ({
536
- functionCall: {
537
- name: toolCall.name,
538
- args: toolCall.args,
539
- },
540
- }));
541
- }
542
- else {
543
- toolParts = messageKwargsToParts(message.additional_kwargs);
544
- }
545
- const parts = [...contentParts, ...toolParts];
546
- const signatures = message?.additional_kwargs?.signatures ?? [];
547
- if (signatures.length === parts.length) {
548
- for (let co = 0; co < signatures.length; co += 1) {
549
- const signature = signatures[co];
550
- if (signature && signature.length > 0) {
551
- parts[co].thoughtSignature = signature;
552
- }
553
- }
554
- }
555
- return [
556
- {
557
- role,
558
- parts,
559
- },
560
- ];
561
- }
562
- async function systemMessageToContent(message) {
563
- return config?.useSystemInstruction
564
- ? roleMessageToContent("system", message)
565
- : [
566
- ...(await roleMessageToContent("user", message)),
567
- ...(await roleMessageToContent("model", new messages_1.AIMessage("Ok"))),
568
- ];
569
- }
570
- function toolMessageToContent(message, prevMessage) {
571
- const contentStr = typeof message.content === "string"
572
- ? message.content
573
- : message.content.reduce((acc, content) => {
574
- if (content.type === "text") {
575
- return acc + content.text;
576
- }
577
- else {
578
- return acc;
579
- }
580
- }, "");
581
- // Hacky :(
582
- const responseName = ((0, messages_1.isAIMessage)(prevMessage) && !!prevMessage.tool_calls?.length
583
- ? prevMessage.tool_calls[0].name
584
- : prevMessage.name) ?? message.tool_call_id;
585
- try {
586
- const content = JSON.parse(contentStr);
587
- return [
588
- {
589
- role: "function",
590
- parts: [
591
- {
592
- functionResponse: {
593
- name: responseName,
594
- response: { content },
595
- },
596
- },
597
- ],
598
- },
599
- ];
600
- }
601
- catch (_) {
602
- return [
603
- {
604
- role: "function",
605
- parts: [
606
- {
607
- functionResponse: {
608
- name: responseName,
609
- response: { content: contentStr },
610
- },
611
- },
612
- ],
613
- },
614
- ];
615
- }
616
- }
617
- async function baseMessageToContent(message, prevMessage) {
618
- const type = message._getType();
619
- switch (type) {
620
- case "system":
621
- return systemMessageToContent(message);
622
- case "human":
623
- return roleMessageToContent("user", message);
624
- case "ai":
625
- return roleMessageToContent("model", message);
626
- case "tool":
627
- if (!prevMessage) {
628
- throw new Error("Tool messages cannot be the first message passed to the model.");
629
- }
630
- return toolMessageToContent(message, prevMessage);
631
- default:
632
- console.log(`Unsupported message type: ${type}`);
633
- return [];
634
- }
635
- }
636
- function thoughtPartToMessageContent(part) {
637
- return {
638
- type: "reasoning",
639
- reasoning: part.text,
640
- };
641
- }
642
- function textPartToMessageContent(part) {
643
- return {
644
- type: "text",
645
- text: part.text,
646
- };
647
- }
648
- function inlineDataPartToMessageContentImage(part) {
649
- return {
650
- type: "image_url",
651
- image_url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}`,
652
- };
653
- }
654
- function inlineDataPartToMessageContentMedia(part
655
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
656
- ) {
657
- return {
658
- type: "media",
659
- mimeType: part.inlineData.mimeType,
660
- data: part.inlineData.data,
661
- };
662
- }
663
- function inlineDataPartToMessageContent(part
664
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
665
- ) {
666
- const mimeType = part?.inlineData?.mimeType ?? "";
667
- if (mimeType.startsWith("image")) {
668
- return inlineDataPartToMessageContentImage(part);
669
- }
670
- else {
671
- return inlineDataPartToMessageContentMedia(part);
672
- }
673
- }
674
- function fileDataPartToMessageContent(part) {
675
- return {
676
- type: "image_url",
677
- image_url: part.fileData.fileUri,
678
- };
679
- }
680
- function partsToMessageContent(parts) {
681
- return parts
682
- .map((part) => {
683
- if (part === undefined || part === null) {
684
- return null;
685
- }
686
- else if (part.thought) {
687
- return thoughtPartToMessageContent(part);
688
- }
689
- else if ("text" in part) {
690
- return textPartToMessageContent(part);
691
- }
692
- else if ("inlineData" in part) {
693
- return inlineDataPartToMessageContent(part);
694
- }
695
- else if ("fileData" in part) {
696
- return fileDataPartToMessageContent(part);
697
- }
698
- else {
699
- return null;
700
- }
701
- })
702
- .reduce((acc, content) => {
703
- if (content) {
704
- acc.push(content);
705
- }
706
- return acc;
707
- }, []);
708
- }
709
- function toolRawToTool(raw) {
710
- return {
711
- id: raw.id,
712
- type: raw.type,
713
- function: {
714
- name: raw.function.name,
715
- arguments: JSON.stringify(raw.function.arguments),
716
- },
717
- };
718
- }
719
- function functionCallPartToToolRaw(part) {
720
- return {
721
- id: (0, uuid_1.v4)().replace(/-/g, ""),
722
- type: "function",
723
- function: {
724
- name: part.functionCall.name,
725
- arguments: part.functionCall.args ?? {},
726
- },
727
- };
728
- }
729
- function partsToToolsRaw(parts) {
730
- return parts
731
- .map((part) => {
732
- if (part === undefined || part === null) {
733
- return null;
734
- }
735
- else if ("functionCall" in part) {
736
- return functionCallPartToToolRaw(part);
737
- }
738
- else {
739
- return null;
740
- }
741
- })
742
- .reduce((acc, content) => {
743
- if (content) {
744
- acc.push(content);
745
- }
746
- return acc;
747
- }, []);
748
- }
749
- function toolsRawToTools(raws) {
750
- return raws.map((raw) => toolRawToTool(raw));
751
- }
752
- function responseToGenerateContentResponseData(response) {
753
- if ("nextChunk" in response.data) {
754
- throw new Error("Cannot convert Stream to GenerateContentResponseData");
755
- }
756
- else if (Array.isArray(response.data)) {
757
- // Collapse the array of response data as if it was a single one
758
- return response.data.reduce((acc, val) => {
759
- // Add all the parts
760
- // FIXME: Handle other candidates?
761
- const valParts = val?.candidates?.[0]?.content?.parts ?? [];
762
- acc.candidates[0].content.parts.push(...valParts);
763
- // FIXME: Merge promptFeedback and safety settings
764
- acc.promptFeedback = val.promptFeedback;
765
- return acc;
766
- });
767
- }
768
- else {
769
- return response.data;
770
- }
771
- }
772
- function responseToParts(response) {
773
- const responseData = responseToGenerateContentResponseData(response);
774
- const parts = responseData?.candidates?.[0]?.content?.parts ?? [];
775
- return parts;
776
- }
777
- function partToText(part) {
778
- return "text" in part ? part.text : "";
779
- }
780
- function responseToString(response) {
781
- const parts = responseToParts(response);
782
- const ret = parts.reduce((acc, part) => {
783
- const val = partToText(part);
784
- return acc + val;
785
- }, "");
786
- return ret;
787
- }
788
- function safeResponseTo(response, responseTo) {
789
- const safetyHandler = config?.safetyHandler ?? new DefaultGeminiSafetyHandler();
790
- try {
791
- const safeResponse = safetyHandler.handle(response);
792
- return responseTo(safeResponse);
793
- }
794
- catch (xx) {
795
- // eslint-disable-next-line no-instanceof/no-instanceof
796
- if (xx instanceof safety_js_1.GoogleAISafetyError) {
797
- const ret = responseTo(xx.response);
798
- xx.reply = ret;
799
- }
800
- throw xx;
801
- }
802
- }
803
- function safeResponseToString(response) {
804
- return safeResponseTo(response, responseToString);
805
- }
806
- function logprobResultToLogprob(result) {
807
- const token = result?.token;
808
- const logprob = result?.logProbability;
809
- const encoder = new TextEncoder();
810
- const bytes = Array.from(encoder.encode(token));
811
- return {
812
- token,
813
- logprob,
814
- bytes,
815
- };
816
- }
817
- function candidateToLogprobs(candidate) {
818
- const logprobs = candidate?.logprobsResult;
819
- const chosenTokens = logprobs?.chosenCandidates ?? [];
820
- const topTokens = logprobs?.topCandidates ?? [];
821
- const content = [];
822
- for (let co = 0; co < chosenTokens.length; co += 1) {
823
- const chosen = chosenTokens[co];
824
- const top = topTokens[co]?.candidates ?? [];
825
- const logprob = logprobResultToLogprob(chosen);
826
- logprob.top_logprobs = top.map((l) => logprobResultToLogprob(l));
827
- content.push(logprob);
828
- }
829
- return {
830
- content,
831
- };
832
- }
833
- function candidateToUrlContextMetadata(candidate) {
834
- const retrieval = candidate?.urlRetrievalMetadata?.urlRetrievalContexts ?? [];
835
- const context = candidate?.urlContextMetadata?.urlMetadata ?? [];
836
- const all = [...retrieval, ...context];
837
- if (all.length === 0) {
838
- return undefined;
839
- }
840
- else {
841
- return {
842
- urlMetadata: all,
843
- };
844
- }
845
- }
846
- function addModalityCounts(modalityTokenCounts, details) {
847
- modalityTokenCounts?.forEach((modalityTokenCount) => {
848
- const { modality, tokenCount } = modalityTokenCount;
849
- const modalityLc = modality.toLowerCase();
850
- const currentCount = details[modalityLc] ?? 0;
851
- // eslint-disable-next-line no-param-reassign
852
- details[modalityLc] = currentCount + tokenCount;
853
- });
854
- }
855
- function responseToUsageMetadata(response) {
856
- if ("usageMetadata" in response.data) {
857
- const data = response?.data;
858
- const usageMetadata = data?.usageMetadata;
859
- const input_tokens = usageMetadata.promptTokenCount ?? 0;
860
- const candidatesTokenCount = usageMetadata.candidatesTokenCount ?? 0;
861
- const thoughtsTokenCount = usageMetadata.thoughtsTokenCount ?? 0;
862
- const output_tokens = candidatesTokenCount + thoughtsTokenCount;
863
- const total_tokens = usageMetadata.totalTokenCount ?? input_tokens + output_tokens;
864
- const input_token_details = {};
865
- addModalityCounts(usageMetadata.promptTokensDetails, input_token_details);
866
- if (typeof usageMetadata?.cachedContentTokenCount === "number") {
867
- input_token_details.cache_read = usageMetadata.cachedContentTokenCount;
868
- }
869
- const output_token_details = {};
870
- addModalityCounts(usageMetadata?.candidatesTokensDetails, output_token_details);
871
- if (typeof usageMetadata?.thoughtsTokenCount === "number") {
872
- output_token_details.reasoning = usageMetadata.thoughtsTokenCount;
873
- }
874
- const ret = {
875
- input_tokens,
876
- output_tokens,
877
- total_tokens,
878
- input_token_details,
879
- output_token_details,
880
- };
881
- return ret;
882
- }
883
- return undefined;
884
- }
885
- function responseToGenerationInfo(response) {
886
- const data =
887
- // eslint-disable-next-line no-nested-ternary
888
- Array.isArray(response.data) && response.data[0]
889
- ? response.data[0]
890
- : response.data &&
891
- response.data.candidates
892
- ? response.data
893
- : undefined;
894
- if (!data) {
895
- return {};
896
- }
897
- const finish_reason = data.candidates[0]?.finishReason;
898
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
899
- const ret = {
900
- safety_ratings: data.candidates[0]?.safetyRatings?.map((rating) => ({
901
- category: rating.category,
902
- probability: rating.probability,
903
- probability_score: rating.probabilityScore,
904
- severity: rating.severity,
905
- severity_score: rating.severityScore,
906
- })),
907
- citation_metadata: data.candidates[0]?.citationMetadata,
908
- grounding_metadata: data.candidates[0]?.groundingMetadata,
909
- finish_reason,
910
- finish_message: data.candidates[0]?.finishMessage,
911
- url_context_metadata: candidateToUrlContextMetadata(data.candidates[0]),
912
- avgLogprobs: data.candidates[0]?.avgLogprobs,
913
- logprobs: candidateToLogprobs(data.candidates[0]),
914
- };
915
- // Only add the usage_metadata on the last chunk
916
- // sent while streaming (see issue 8102).
917
- if (typeof finish_reason === "string") {
918
- ret.usage_metadata = responseToUsageMetadata(response);
919
- }
920
- return ret;
921
- }
922
- function responseToChatGeneration(response) {
923
- return new outputs_1.ChatGenerationChunk({
924
- text: responseToString(response),
925
- message: partToMessageChunk(responseToParts(response)[0]),
926
- generationInfo: responseToGenerationInfo(response),
927
- });
928
- }
929
- function safeResponseToChatGeneration(response) {
930
- return safeResponseTo(response, responseToChatGeneration);
931
- }
932
- function chunkToString(chunk) {
933
- if (chunk === null) {
934
- return "";
935
- }
936
- else if (typeof chunk.content === "string") {
937
- return chunk.content;
938
- }
939
- else if (chunk.content.length === 0) {
940
- return "";
941
- }
942
- else if (chunk.content[0].type === "text") {
943
- return chunk.content[0].text;
944
- }
945
- else {
946
- throw new Error(`Unexpected chunk: ${chunk}`);
947
- }
948
- }
949
- function partToMessageChunk(part) {
950
- const fields = partsToBaseMessageChunkFields([part]);
951
- if (typeof fields.content === "string") {
952
- return new messages_1.AIMessageChunk(fields);
953
- }
954
- else if (fields.content.every((item) => item.type === "text")) {
955
- const newContent = fields.content
956
- .map((item) => ("text" in item ? item.text : ""))
957
- .join("");
958
- return new messages_1.AIMessageChunk({
959
- ...fields,
960
- content: newContent,
961
- });
962
- }
963
- return new messages_1.AIMessageChunk(fields);
964
- }
965
- function partToChatGeneration(part) {
966
- const message = partToMessageChunk(part);
967
- const text = partToText(part);
968
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
969
- const generationInfo = {};
970
- return new outputs_1.ChatGenerationChunk({
971
- text,
972
- message,
973
- generationInfo,
974
- });
975
- }
976
- function groundingSupportByPart(groundingSupports) {
977
- const ret = [];
978
- if (!groundingSupports || groundingSupports.length === 0) {
979
- return [];
980
- }
981
- groundingSupports?.forEach((groundingSupport) => {
982
- const segment = groundingSupport?.segment;
983
- const partIndex = segment?.partIndex ?? 0;
984
- if (ret[partIndex]) {
985
- ret[partIndex].push(groundingSupport);
986
- }
987
- else {
988
- ret[partIndex] = [groundingSupport];
989
- }
990
- });
991
- return ret;
992
- }
993
- function responseToGroundedChatGenerations(response) {
994
- const parts = responseToParts(response);
995
- if (parts.length === 0) {
996
- return [];
997
- }
998
- // Citation and grounding information connected to each part / ChatGeneration
999
- // to make sure they are available in downstream filters.
1000
- const candidate = response?.data
1001
- ?.candidates?.[0];
1002
- const groundingMetadata = candidate?.groundingMetadata;
1003
- const citationMetadata = candidate?.citationMetadata;
1004
- const groundingParts = groundingSupportByPart(groundingMetadata?.groundingSupports);
1005
- const ret = parts.map((part, index) => {
1006
- const gen = partToChatGeneration(part);
1007
- if (!gen.generationInfo) {
1008
- gen.generationInfo = {};
1009
- }
1010
- if (groundingMetadata) {
1011
- gen.generationInfo.groundingMetadata = groundingMetadata;
1012
- const groundingPart = groundingParts[index];
1013
- if (groundingPart) {
1014
- gen.generationInfo.groundingSupport = groundingPart;
1015
- }
1016
- }
1017
- if (citationMetadata) {
1018
- gen.generationInfo.citationMetadata = citationMetadata;
1019
- }
1020
- return gen;
1021
- });
1022
- return ret;
1023
- }
1024
- function combineContent(gen, forceComplex = false) {
1025
- const allString = gen.every((item) => typeof item.message.content === "string");
1026
- if (allString && !forceComplex) {
1027
- // Everything is a string, and we don't want to force it to return
1028
- // MessageContentComplex[], so concatenate the content into one string
1029
- return gen.map((item) => item.message.content).join("");
1030
- }
1031
- else {
1032
- // We either have complex types, or we want to force them, so turn
1033
- // it into an array of complex types.
1034
- const ret = [];
1035
- gen.forEach((item) => {
1036
- if (typeof item.message.content === "string") {
1037
- // If this is a string, turn it into a text type
1038
- ret.push({
1039
- type: "text",
1040
- text: item.message.content,
1041
- });
1042
- }
1043
- else {
1044
- // Otherwise, add all the complex types to what we're returning
1045
- item.message.content.forEach((c) => {
1046
- ret.push(c);
1047
- });
1048
- }
1049
- });
1050
- return ret;
1051
- }
1052
- }
1053
- function combineText(gen) {
1054
- return gen.map((item) => item.text ?? "").join("");
1055
- }
1056
- /*
1057
- * We don't really need the entire AIMessageChunk here, but it is
1058
- * a conventient way to combine all the Tool Calling information.
1059
- */
1060
- function combineToolCalls(gen) {
1061
- let ret = new messages_1.AIMessageChunk("");
1062
- gen.forEach((item) => {
1063
- const message = item?.message;
1064
- ret = (0, stream_1.concat)(ret, message);
1065
- });
1066
- return ret;
1067
- }
1068
- function combineAdditionalKwargs(gen) {
1069
- const ret = {};
1070
- gen.forEach((item) => {
1071
- const message = item?.message;
1072
- const kwargs = message?.additional_kwargs ?? {};
1073
- const keys = Object.keys(kwargs);
1074
- keys.forEach((key) => {
1075
- const value = kwargs[key];
1076
- if (Object.hasOwn(ret, key) &&
1077
- Array.isArray(ret[key]) &&
1078
- Array.isArray(value)) {
1079
- ret[key].push(...value);
1080
- }
1081
- else {
1082
- ret[key] = value;
1083
- }
1084
- });
1085
- });
1086
- return ret;
1087
- }
1088
- function combineGenerations(generations, response) {
1089
- const gen = splitGenerationTypes(generations, response);
1090
- const combinedContent = combineContent(gen.content);
1091
- const combinedText = combineText(gen.content);
1092
- const combinedToolCalls = combineToolCalls(gen.content);
1093
- const kwargs = combineAdditionalKwargs(gen.content);
1094
- const lastContent = gen.content[gen.content.length - 1];
1095
- // Add usage metadata
1096
- const usage_metadata = responseToUsageMetadata(response);
1097
- // Add thinking / reasoning
1098
- // if (gen.reasoning && gen.reasoning.length > 0) {
1099
- // kwargs.reasoning_content = combineContent(gen.reasoning, true);
1100
- // }
1101
- // Build the message and the generation chunk to return
1102
- const message = new messages_1.AIMessageChunk({
1103
- content: combinedContent,
1104
- additional_kwargs: kwargs,
1105
- usage_metadata,
1106
- tool_calls: combinedToolCalls.tool_calls,
1107
- invalid_tool_calls: combinedToolCalls.invalid_tool_calls,
1108
- });
1109
- return [
1110
- new outputs_1.ChatGenerationChunk({
1111
- message,
1112
- text: combinedText,
1113
- generationInfo: lastContent.generationInfo,
1114
- }),
1115
- ];
1116
- }
1117
- function splitGenerationTypes(generations, _response) {
1118
- const content = [];
1119
- const reasoning = [];
1120
- generations.forEach((gen) => {
1121
- if (gen?.generationInfo?.thought) {
1122
- reasoning.push(gen);
1123
- }
1124
- else {
1125
- content.push(gen);
1126
- }
1127
- });
1128
- return {
1129
- content,
1130
- reasoning,
1131
- };
1132
- }
1133
- /**
1134
- * Although this returns an array, only the first (or maybe last)
1135
- * element in the array is used. So we need to combine them into
1136
- * just one element that contains everything we need.
1137
- * @param response
1138
- */
1139
- function responseToChatGenerations(response) {
1140
- const generations = responseToGroundedChatGenerations(response);
1141
- if (generations.length === 0) {
1142
- return [];
1143
- }
1144
- const ret = combineGenerations(generations, response);
1145
- // Add logprobs information to the message
1146
- const candidate = response?.data
1147
- ?.candidates?.[0];
1148
- const avgLogprobs = candidate?.avgLogprobs;
1149
- const logprobs = candidateToLogprobs(candidate);
1150
- if (logprobs) {
1151
- ret[0].message.response_metadata = {
1152
- ...ret[0].message.response_metadata,
1153
- logprobs,
1154
- avgLogprobs,
1155
- };
1156
- }
1157
- return ret;
1158
- }
1159
- function responseToBaseMessageFields(response) {
1160
- const parts = responseToParts(response);
1161
- return partsToBaseMessageChunkFields(parts);
1162
- }
1163
- function partsToSignatures(parts) {
1164
- return parts.map((part) => part?.thoughtSignature ?? "");
1165
- }
1166
- function partsToBaseMessageChunkFields(parts) {
1167
- const fields = {
1168
- content: partsToMessageContent(parts),
1169
- tool_call_chunks: [],
1170
- tool_calls: [],
1171
- invalid_tool_calls: [],
1172
- };
1173
- fields.additional_kwargs = {};
1174
- const rawTools = partsToToolsRaw(parts);
1175
- if (rawTools.length > 0) {
1176
- const tools = toolsRawToTools(rawTools);
1177
- for (const tool of tools) {
1178
- fields.tool_call_chunks?.push({
1179
- name: tool.function.name,
1180
- args: tool.function.arguments,
1181
- id: tool.id,
1182
- type: "tool_call_chunk",
1183
- });
1184
- try {
1185
- fields.tool_calls?.push({
1186
- name: tool.function.name,
1187
- args: JSON.parse(tool.function.arguments),
1188
- id: tool.id,
1189
- });
1190
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
1191
- }
1192
- catch (e) {
1193
- fields.invalid_tool_calls?.push({
1194
- name: tool.function.name,
1195
- args: tool.function.arguments,
1196
- id: tool.id,
1197
- error: e.message,
1198
- type: "invalid_tool_call",
1199
- });
1200
- }
1201
- }
1202
- fields.additional_kwargs.tool_calls = tools;
1203
- }
1204
- fields.additional_kwargs.signatures = partsToSignatures(parts);
1205
- return fields;
1206
- }
1207
- function responseToBaseMessage(response) {
1208
- const fields = responseToBaseMessageFields(response);
1209
- return new messages_1.AIMessage(fields);
1210
- }
1211
- function safeResponseToBaseMessage(response) {
1212
- return safeResponseTo(response, responseToBaseMessage);
1213
- }
1214
- function responseToChatResult(response) {
1215
- const generations = responseToChatGenerations(response);
1216
- return {
1217
- generations,
1218
- llmOutput: responseToGenerationInfo(response),
1219
- };
1220
- }
1221
- function safeResponseToChatResult(response) {
1222
- return safeResponseTo(response, responseToChatResult);
1223
- }
1224
- function inputType(input) {
1225
- if (typeof input === "string") {
1226
- return "MessageContent";
1227
- }
1228
- else {
1229
- const firstItem = input[0];
1230
- if (Object.hasOwn(firstItem, "content")) {
1231
- return "BaseMessageArray";
1232
- }
1233
- else {
1234
- return "MessageContent";
1235
- }
1236
- }
1237
- }
1238
- async function formatMessageContents(input, _parameters) {
1239
- const parts = await messageContentToParts(input);
1240
- const contents = [
1241
- {
1242
- role: "user", // Required by Vertex AI
1243
- parts,
1244
- },
1245
- ];
1246
- return contents;
1247
- }
1248
- async function formatBaseMessageContents(input, _parameters) {
1249
- const inputPromises = input.map((msg, i) => baseMessageToContent(msg, input[i - 1]));
1250
- const inputs = await Promise.all(inputPromises);
1251
- return inputs.reduce((acc, cur) => {
1252
- // Filter out the system content
1253
- if (cur.every((content) => content.role === "system")) {
1254
- return acc;
1255
- }
1256
- // Combine adjacent function messages
1257
- if (cur[0]?.role === "function" &&
1258
- acc.length > 0 &&
1259
- acc[acc.length - 1].role === "function") {
1260
- acc[acc.length - 1].parts = [
1261
- ...acc[acc.length - 1].parts,
1262
- ...cur[0].parts,
1263
- ];
1264
- }
1265
- else {
1266
- acc.push(...cur);
1267
- }
1268
- return acc;
1269
- }, []);
1270
- }
1271
- async function formatContents(input, parameters) {
1272
- const it = inputType(input);
1273
- switch (it) {
1274
- case "MessageContent":
1275
- return formatMessageContents(input, parameters);
1276
- case "BaseMessageArray":
1277
- return formatBaseMessageContents(input, parameters);
1278
- default:
1279
- throw new Error(`Unknown input type "${it}": ${input}`);
1280
- }
1281
- }
1282
- function formatGenerationConfig(parameters) {
1283
- const ret = {
1284
- temperature: parameters.temperature,
1285
- topK: parameters.topK,
1286
- topP: parameters.topP,
1287
- seed: parameters.seed,
1288
- presencePenalty: parameters.presencePenalty,
1289
- frequencyPenalty: parameters.frequencyPenalty,
1290
- maxOutputTokens: parameters.maxOutputTokens,
1291
- stopSequences: parameters.stopSequences,
1292
- responseMimeType: parameters.responseMimeType,
1293
- responseModalities: parameters.responseModalities,
1294
- speechConfig: normalizeSpeechConfig(parameters.speechConfig),
1295
- };
1296
- // Add the logprobs if explicitly set
1297
- if (typeof parameters.logprobs !== "undefined") {
1298
- ret.responseLogprobs = parameters.logprobs;
1299
- if (parameters.logprobs &&
1300
- typeof parameters.topLogprobs !== "undefined") {
1301
- ret.logprobs = parameters.topLogprobs;
1302
- }
1303
- }
1304
- // Add thinking configuration if explicitly set
1305
- // Note that you cannot have thinkingBudget set to 0 and includeThoughts true
1306
- if (typeof parameters.maxReasoningTokens !== "undefined") {
1307
- const includeThoughts = parameters.maxReasoningTokens !== 0;
1308
- ret.thinkingConfig = {
1309
- thinkingBudget: parameters.maxReasoningTokens,
1310
- includeThoughts,
1311
- };
1312
- }
1313
- // Remove any undefined properties, so we don't send them
1314
- let attribute;
1315
- for (attribute in ret) {
1316
- if (ret[attribute] === undefined) {
1317
- delete ret[attribute];
1318
- }
1319
- }
1320
- return ret;
1321
- }
1322
- function formatSafetySettings(parameters) {
1323
- return parameters.safetySettings ?? [];
1324
- }
1325
- async function formatBaseMessageSystemInstruction(input) {
1326
- let ret = {};
1327
- for (let index = 0; index < input.length; index += 1) {
1328
- const message = input[index];
1329
- if (message._getType() === "system") {
1330
- // For system types, we only want it if it is the first message,
1331
- // if it appears anywhere else, it should be an error.
1332
- if (index === 0) {
1333
- // eslint-disable-next-line prefer-destructuring
1334
- ret = (await baseMessageToContent(message, undefined))[0];
1335
- }
1336
- else {
1337
- throw new Error("System messages are only permitted as the first passed message.");
1338
- }
1339
- }
1340
- }
1341
- return ret;
1342
- }
1343
- async function formatSystemInstruction(input) {
1344
- if (!config?.useSystemInstruction) {
1345
- return {};
1346
- }
1347
- const it = inputType(input);
1348
- switch (it) {
1349
- case "BaseMessageArray":
1350
- return formatBaseMessageSystemInstruction(input);
1351
- default:
1352
- return {};
1353
- }
1354
- }
1355
- function structuredToolToFunctionDeclaration(tool) {
1356
- const jsonSchema = (0, zod_to_gemini_parameters_js_1.schemaToGeminiParameters)(tool.schema);
1357
- return {
1358
- name: tool.name,
1359
- description: tool.description ?? `A function available to call.`,
1360
- parameters: jsonSchema,
1361
- };
1362
- }
1363
- function searchToolName(tool) {
1364
- for (const name of types_js_1.GeminiSearchToolAttributes) {
1365
- if (name in tool) {
1366
- return name;
1367
- }
1368
- }
1369
- return undefined;
1370
- }
1371
- function cleanGeminiTool(tool) {
1372
- const orig = searchToolName(tool);
1373
- const adj = config?.googleSearchToolAdjustment;
1374
- if (orig && adj && adj !== orig) {
1375
- return {
1376
- [adj]: {},
1377
- };
1378
- }
1379
- else {
1380
- return tool;
1381
- }
1382
- }
1383
- function formatTools(parameters) {
1384
- const tools = parameters?.tools;
1385
- if (!tools || tools.length === 0) {
1386
- return [];
1387
- }
1388
- // Group all LangChain tools into a single functionDeclarations array.
1389
- // Gemini Tools may be normalized to different tool names
1390
- const langChainTools = [];
1391
- const otherTools = [];
1392
- tools.forEach((tool) => {
1393
- if ((0, function_calling_1.isLangChainTool)(tool)) {
1394
- langChainTools.push(tool);
1395
- }
1396
- else {
1397
- otherTools.push(cleanGeminiTool(tool));
1398
- }
1399
- });
1400
- const result = [...otherTools];
1401
- if (langChainTools.length > 0) {
1402
- result.push({
1403
- functionDeclarations: langChainTools.map(structuredToolToFunctionDeclaration),
1404
- });
1405
- }
1406
- return result;
1407
- }
1408
- function formatToolConfig(parameters) {
1409
- if (!parameters.tool_choice || typeof parameters.tool_choice !== "string") {
1410
- return undefined;
1411
- }
1412
- if (["auto", "any", "none"].includes(parameters.tool_choice)) {
1413
- return {
1414
- functionCallingConfig: {
1415
- mode: parameters.tool_choice,
1416
- allowedFunctionNames: parameters.allowed_function_names,
1417
- },
1418
- };
1419
- }
1420
- // force tool choice to be a single function name in case of structured output
1421
- return {
1422
- functionCallingConfig: {
1423
- mode: "any",
1424
- allowedFunctionNames: [parameters.tool_choice],
1425
- },
1426
- };
1427
- }
1428
- async function formatData(input, parameters) {
1429
- const typedInput = input;
1430
- const contents = await formatContents(typedInput, parameters);
1431
- const generationConfig = formatGenerationConfig(parameters);
1432
- const tools = formatTools(parameters);
1433
- const toolConfig = formatToolConfig(parameters);
1434
- const safetySettings = formatSafetySettings(parameters);
1435
- const systemInstruction = await formatSystemInstruction(typedInput);
1436
- const ret = {
1437
- contents,
1438
- generationConfig,
1439
- };
1440
- if (tools && tools.length) {
1441
- ret.tools = tools;
1442
- }
1443
- if (toolConfig) {
1444
- ret.toolConfig = toolConfig;
1445
- }
1446
- if (safetySettings && safetySettings.length) {
1447
- ret.safetySettings = safetySettings;
1448
- }
1449
- if (systemInstruction?.role &&
1450
- systemInstruction?.parts &&
1451
- systemInstruction?.parts?.length) {
1452
- ret.systemInstruction = systemInstruction;
1453
- }
1454
- if (parameters.cachedContent) {
1455
- ret.cachedContent = parameters.cachedContent;
1456
- }
1457
- if (parameters.labels && Object.keys(parameters.labels).length > 0) {
1458
- ret.labels = parameters.labels;
1459
- }
1460
- return ret;
1461
- }
1462
- return {
1463
- messageContentToParts,
1464
- baseMessageToContent,
1465
- responseToString: safeResponseToString,
1466
- responseToChatGeneration: safeResponseToChatGeneration,
1467
- chunkToString,
1468
- responseToBaseMessage: safeResponseToBaseMessage,
1469
- responseToChatResult: safeResponseToChatResult,
1470
- formatData,
1471
- };
162
+ function messageContentText(content) {
163
+ if (content?.text && content?.text.length > 0) return { text: content.text };
164
+ else return null;
165
+ }
166
+ function messageContentImageUrlData(content) {
167
+ const url = typeof content.image_url === "string" ? content.image_url : content.image_url.url;
168
+ if (!url) throw new Error("Missing Image URL");
169
+ const mimeTypeAndData = extractMimeType(url);
170
+ if (mimeTypeAndData) return { inlineData: mimeTypeAndData };
171
+ else {
172
+ const mimeType = inferMimeTypeFromUrl(url) || "image/png";
173
+ return { fileData: {
174
+ mimeType,
175
+ fileUri: url
176
+ } };
177
+ }
178
+ }
179
+ function messageContentImageUrl(content) {
180
+ const ret = messageContentImageUrlData(content);
181
+ supplementVideoMetadata(content, ret);
182
+ return ret;
183
+ }
184
+ async function blobToFileData(blob) {
185
+ return { fileData: {
186
+ fileUri: blob.path,
187
+ mimeType: blob.mimetype
188
+ } };
189
+ }
190
+ async function fileUriContentToBlob(uri) {
191
+ return config?.mediaManager?.getMediaBlob(uri);
192
+ }
193
+ async function messageContentMediaData(content) {
194
+ if ("mimeType" in content && "data" in content) return { inlineData: {
195
+ mimeType: content.mimeType,
196
+ data: content.data
197
+ } };
198
+ else if ("mimeType" in content && "fileUri" in content) return { fileData: {
199
+ mimeType: content.mimeType,
200
+ fileUri: content.fileUri
201
+ } };
202
+ else {
203
+ const uri = content.fileUri;
204
+ const blob = await fileUriContentToBlob(uri);
205
+ if (blob) return await blobToFileData(blob);
206
+ }
207
+ throw new Error(`Invalid media content: ${JSON.stringify(content, null, 1)}`);
208
+ }
209
+ function supplementVideoMetadata(content, ret) {
210
+ if ("videoMetadata" in content && typeof ret === "object") ret.videoMetadata = content.videoMetadata;
211
+ return ret;
212
+ }
213
+ async function messageContentMedia(content) {
214
+ const ret = await messageContentMediaData(content);
215
+ supplementVideoMetadata(content, ret);
216
+ return ret;
217
+ }
218
+ function messageContentReasoning(content) {
219
+ if (content?.reasoning && content?.reasoning.length > 0) return {
220
+ text: content.reasoning,
221
+ thought: true
222
+ };
223
+ else return null;
224
+ }
225
// Converter used with convertToProviderContentBlock to translate LangChain's
// standard content blocks into Gemini Parts. URL sources that are actually
// data: URLs are decoded into inlineData; real URLs become fileData references.
const standardContentBlockConverter = {
  providerName: "Google Gemini",
  fromStandardTextBlock(block) {
    return { text: block.text };
  },
  fromStandardImageBlock(block) {
    if (block.source_type === "url") {
      // A data: URL carries the bytes inline, so unpack it instead of passing the URL.
      const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
      if (data) return { inlineData: {
        mimeType: data.mime_type,
        data: data.data
      } };
      else {
        // Plain URL: fall back to inferring the mime type, defaulting to PNG.
        let mimeType = block.mime_type;
        if (!mimeType || mimeType === "") mimeType = inferMimeTypeFromUrl(block.url) || "image/png";
        return { fileData: {
          mimeType,
          fileUri: block.url
        } };
      }
    }
    if (block.source_type === "base64") return { inlineData: {
      mimeType: block.mime_type || "image/png",
      data: block.data
    } };
    throw new Error(`Unsupported source type: ${block.source_type}`);
  },
  fromStandardAudioBlock(block) {
    if (block.source_type === "url") {
      // data: URLs are unpacked inline, same as for images.
      const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
      if (data) return { inlineData: {
        mimeType: data.mime_type,
        data: data.data
      } };
      else return { fileData: {
        mimeType: block.mime_type || "audio/mpeg",
        fileUri: block.url
      } };
    }
    if (block.source_type === "base64") return { inlineData: {
      mimeType: block.mime_type || "audio/mpeg",
      data: block.data
    } };
    throw new Error(`Unsupported source type: ${block.source_type}`);
  },
  fromStandardFileBlock(block) {
    // Text-sourced "files" degrade to plain text parts.
    if (block.source_type === "text") return { text: block.text };
    if (block.source_type === "url") {
      const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
      if (data) return { inlineData: {
        mimeType: data.mime_type,
        data: data.data
      } };
      else return { fileData: {
        mimeType: block.mime_type || "application/octet-stream",
        fileUri: block.url
      } };
    }
    if (block.source_type === "base64") return { inlineData: {
      mimeType: block.mime_type || "application/octet-stream",
      data: block.data
    } };
    throw new Error(`Unsupported source type: ${block.source_type}`);
  }
};
290
// Convert one complex (object-form) content block into a Gemini Part.
// May return null (e.g. an empty reasoning block); throws on unknown types.
async function messageContentComplexToPart(content) {
  switch (content.type) {
    case "text":
      if ("text" in content) return messageContentText(content);
      break;
    case "image_url":
      if ("image_url" in content) return messageContentImageUrl(content);
      break;
    case "media": return await messageContentMedia(content);
    case "reasoning": return messageContentReasoning(content);
    default: throw new Error(`Unsupported type "${content.type}" received while converting message to message parts: ${JSON.stringify(content)}`);
  }
  // Reached when a "text"/"image_url" block lacks its expected payload field.
  throw new Error(`Cannot coerce "${content.type}" message part into a string.`);
}
// Convert an array of content blocks to Parts. Standard data blocks go through
// the shared converter; everything else through messageContentComplexToPart.
async function messageContentComplexToParts(content) {
  const contents = content.map((m) => (0, __langchain_core_messages.isDataContentBlock)(m) ? (0, __langchain_core_messages.convertToProviderContentBlock)(m, standardContentBlockConverter) : messageContentComplexToPart(m));
  return Promise.all(contents);
}
308
// Normalize message content to Gemini Parts.
// A plain string becomes a single text block; each block is then converted to
// a Part, and blocks that produced no Part (e.g. empty reasoning) are dropped.
async function messageContentToParts(content) {
  const messageContent = typeof content === "string" ? [{
    type: "text",
    text: content
  }] : content;
  const allParts = await messageContentComplexToParts(messageContent);
  // Fix: the previous reduce re-spread the accumulator for every element
  // (accidentally O(n^2)); a single filter pass keeps the same
  // truthy-only semantics in O(n).
  return allParts.filter((part) => !!part);
}
320
// Convert OpenAI-style tool_calls (JSON-string arguments) into Gemini
// functionCall parts. Missing/empty argument strings become empty args.
function messageToolCallsToParts(toolCalls) {
  if (!toolCalls?.length) {
    return [];
  }
  return toolCalls.map((tool) => {
    const argStr = tool?.function?.arguments;
    // Gemini expects structured args, so decode the JSON-encoded string.
    const args = argStr ? JSON.parse(argStr) : {};
    return { functionCall: { name: tool.function.name, args } };
  });
}
334
// Extract Gemini parts from a message's additional_kwargs.
// Currently only OpenAI-style tool_calls are recognized.
function messageKwargsToParts(kwargs) {
  const ret = [];
  if (kwargs?.tool_calls) ret.push(...messageToolCallsToParts(kwargs.tool_calls));
  return ret;
}
339
// Build the Gemini Content entry for a message under the given role.
// Tool calls on AIMessages become functionCall parts; previously captured
// thought signatures are re-attached positionally, but only when the
// signature count matches the part count exactly.
async function roleMessageToContent(role, message) {
  const contentParts = await messageContentToParts(message.content);
  let toolParts;
  if ((0, __langchain_core_messages.isAIMessage)(message) && !!message.tool_calls?.length) toolParts = message.tool_calls.map((toolCall) => ({ functionCall: {
    name: toolCall.name,
    args: toolCall.args
  } }));
  else toolParts = messageKwargsToParts(message.additional_kwargs);
  const parts = [...contentParts, ...toolParts];
  const signatures = message?.additional_kwargs?.signatures ?? [];
  if (signatures.length === parts.length) for (let co = 0; co < signatures.length; co += 1) {
    const signature = signatures[co];
    // Empty-string signatures are placeholders and are not re-attached.
    if (signature && signature.length > 0) parts[co].thoughtSignature = signature;
  }
  return [{
    role,
    parts
  }];
}
// System messages either map to a native "system" role (when
// config.useSystemInstruction is set) or are emulated as a user turn
// followed by a canned "Ok" model turn.
async function systemMessageToContent(message) {
  return config?.useSystemInstruction ? roleMessageToContent("system", message) : [...await roleMessageToContent("user", message), ...await roleMessageToContent("model", new __langchain_core_messages.AIMessage("Ok"))];
}
361
// Convert a ToolMessage into a Gemini "function"-role content entry.
// The function name is resolved from the previous AI message's first tool
// call (or that message's name), falling back to the tool_call_id.
function toolMessageToContent(message, prevMessage) {
  // Flatten complex content down to the concatenated text blocks.
  const contentStr = typeof message.content === "string" ? message.content : message.content.reduce((acc, content) => {
    if (content.type === "text") return acc + content.text;
    else return acc;
  }, "");
  const responseName = ((0, __langchain_core_messages.isAIMessage)(prevMessage) && !!prevMessage.tool_calls?.length ? prevMessage.tool_calls[0].name : prevMessage.name) ?? message.tool_call_id;
  // Prefer structured JSON content; fall back to the raw string when it is
  // not valid JSON. (Previously both branches duplicated the whole return
  // literal; they now share a single shape.)
  let responseContent = contentStr;
  try {
    responseContent = JSON.parse(contentStr);
  } catch (_) {
    // Not JSON — keep the raw string.
  }
  return [{
    role: "function",
    parts: [{ functionResponse: {
      name: responseName,
      response: { content: responseContent }
    } }]
  }];
}
386
// Dispatch a BaseMessage to the converter matching its type. Tool messages
// need the previous message to resolve the function name. Unsupported types
// are skipped with a console.log and contribute no content.
// NOTE(review): console.warn would be more appropriate for this case.
async function baseMessageToContent(message, prevMessage) {
  if (__langchain_core_messages.SystemMessage.isInstance(message)) return systemMessageToContent(message);
  else if (__langchain_core_messages.HumanMessage.isInstance(message)) return roleMessageToContent("user", message);
  else if (__langchain_core_messages.AIMessage.isInstance(message)) return roleMessageToContent("model", message);
  else if (__langchain_core_messages.ToolMessage.isInstance(message)) {
    if (!prevMessage) throw new Error("Tool messages cannot be the first message passed to the model.");
    return toolMessageToContent(message, prevMessage);
  } else {
    console.log(`Unsupported message type: ${message.type}`);
    return [];
  }
}
398
// Gemini "thought" parts surface as LangChain reasoning content blocks.
function thoughtPartToMessageContent(part) {
  const { text } = part;
  return { type: "reasoning", reasoning: text };
}
// Plain text parts map 1:1 onto LangChain text content blocks.
function textPartToMessageContent(part) {
  const { text } = part;
  return { type: "text", text };
}
410
// Re-encode inline image bytes as a data URL for LangChain consumers.
function inlineDataPartToMessageContentImage(part) {
  const { mimeType, data } = part.inlineData;
  return { type: "image_url", image_url: `data:${mimeType};base64,${data}` };
}
// Non-image payloads keep their raw base64 data plus the mime type.
function inlineDataPartToMessageContentMedia(part) {
  const { mimeType, data } = part.inlineData;
  return { type: "media", mimeType, data };
}
// Dispatch on mime type: images become data URLs, everything else "media".
function inlineDataPartToMessageContent(part) {
  const mimeType = part?.inlineData?.mimeType ?? "";
  return mimeType.startsWith("image")
    ? inlineDataPartToMessageContentImage(part)
    : inlineDataPartToMessageContentMedia(part);
}
428
// File references are surfaced by URI; LangChain models them as image_url blocks.
function fileDataPartToMessageContent(part) {
  const uri = part.fileData.fileUri;
  return { type: "image_url", image_url: uri };
}
434
// Convert Gemini parts to LangChain content blocks, dropping parts with no
// content mapping (e.g. functionCall parts, which are handled separately).
// Order matters: thought parts also carry `text`, so `thought` is checked first.
function partsToMessageContent(parts) {
  return parts.map((part) => {
    if (part === void 0 || part === null) return null;
    else if (part.thought) return thoughtPartToMessageContent(part);
    else if ("text" in part) return textPartToMessageContent(part);
    else if ("inlineData" in part) return inlineDataPartToMessageContent(part);
    else if ("fileData" in part) return fileDataPartToMessageContent(part);
    else return null;
  }).reduce((acc, content) => {
    if (content) acc.push(content);
    return acc;
  }, []);
}
447
// Serialize a raw tool call's structured args back into the JSON string form
// LangChain tool calls carry.
function toolRawToTool(raw) {
  const { id, type } = raw;
  return {
    id,
    type,
    function: {
      name: raw.function.name,
      arguments: JSON.stringify(raw.function.arguments),
    },
  };
}
457
// Convert a Gemini functionCall part into a raw tool-call record.
// Gemini does not supply call ids, so a random UUID (dashes stripped)
// is generated for each call.
function functionCallPartToToolRaw(part) {
  return {
    id: (0, uuid.v4)().replace(/-/g, ""),
    type: "function",
    function: {
      name: part.functionCall.name,
      arguments: part.functionCall.args ?? {}
    }
  };
}
467
// Collect raw tool calls from a parts list (only functionCall parts qualify).
function partsToToolsRaw(parts) {
  return parts.map((part) => {
    if (part === void 0 || part === null) return null;
    else if ("functionCall" in part) return functionCallPartToToolRaw(part);
    else return null;
  }).reduce((acc, content) => {
    if (content) acc.push(content);
    return acc;
  }, []);
}
// Serialize each raw tool call's args into the string form LangChain expects.
function toolsRawToTools(raws) {
  return raws.map((raw) => toolRawToTool(raw));
}
480
// Collapse a (possibly chunked) response into one GenerateContentResponseData.
// Array responses are merged: the parts of each chunk's first candidate are
// concatenated and the last chunk's promptFeedback wins.
function responseToGenerateContentResponseData(response) {
  if ("nextChunk" in response.data) {
    throw new Error("Cannot convert Stream to GenerateContentResponseData");
  }
  if (!Array.isArray(response.data)) {
    return response.data;
  }
  const chunks = response.data;
  if (chunks.length === 0) {
    // Fix: reduce() without an initial value threw an opaque TypeError on [].
    throw new Error("Cannot convert an empty response array to GenerateContentResponseData");
  }
  if (chunks.length === 1) {
    return chunks[0];
  }
  const mergedParts = [];
  for (const chunk of chunks) {
    // Fix: a first chunk without candidates/content/parts used to crash the merge.
    mergedParts.push(...(chunk?.candidates?.[0]?.content?.parts ?? []));
  }
  // Build a fresh result instead of mutating the caller's first chunk.
  const first = chunks[0];
  const candidates = [...(first.candidates ?? [{}])];
  const firstCandidate = candidates[0] ?? {};
  candidates[0] = {
    ...firstCandidate,
    content: { ...(firstCandidate.content ?? {}), parts: mergedParts }
  };
  return {
    ...first,
    candidates,
    promptFeedback: chunks[chunks.length - 1]?.promptFeedback
  };
}
490
// Return the parts of the first candidate of a (merged) response, or [].
function responseToParts(response) {
  const responseData = responseToGenerateContentResponseData(response);
  const parts = responseData?.candidates?.[0]?.content?.parts ?? [];
  return parts;
}
495
// Only text parts contribute to the flattened string; all others yield "".
function partToText(part) {
  if ("text" in part) {
    return part.text;
  }
  return "";
}
498
// Concatenate the text of every part of the response's first candidate.
function responseToString(response) {
  let out = "";
  for (const part of responseToParts(response)) {
    out += partToText(part);
  }
  return out;
}
506
// Run a response converter behind the configured safety handler. If the
// handler throws a GoogleAISafetyError, the converted reply is still attached
// to the error (so callers can inspect it) before rethrowing.
function safeResponseTo(response, responseTo) {
  const safetyHandler = config?.safetyHandler ?? new DefaultGeminiSafetyHandler();
  try {
    const safeResponse = safetyHandler.handle(response);
    return responseTo(safeResponse);
  } catch (xx) {
    if (xx instanceof require_safety.GoogleAISafetyError) {
      const ret = responseTo(xx.response);
      xx.reply = ret;
    }
    throw xx;
  }
}
// Safety-checked variant of responseToString.
function safeResponseToString(response) {
  return safeResponseTo(response, responseToString);
}
522
// Translate one Gemini logprob candidate into LangChain's logprob entry,
// which also carries the token's UTF-8 byte representation.
function logprobResultToLogprob(result) {
  const token = result?.token;
  const utf8 = new TextEncoder().encode(token);
  return {
    token,
    logprob: result?.logProbability,
    bytes: [...utf8],
  };
}
533
// Build an OpenAI-style logprobs object from a candidate's logprobsResult.
// chosenCandidates[i] is the emitted token; topCandidates[i] (when present)
// lists the alternatives considered at that position.
// Note: always returns an object — `content` is simply [] when no data exists.
function candidateToLogprobs(candidate) {
  const logprobs = candidate?.logprobsResult;
  const chosenTokens = logprobs?.chosenCandidates ?? [];
  const topTokens = logprobs?.topCandidates ?? [];
  const content = [];
  for (let co = 0; co < chosenTokens.length; co += 1) {
    const chosen = chosenTokens[co];
    const top = topTokens[co]?.candidates ?? [];
    const logprob = logprobResultToLogprob(chosen);
    logprob.top_logprobs = top.map((l) => logprobResultToLogprob(l));
    content.push(logprob);
  }
  return { content };
}
547
// Merge URL retrieval contexts and URL context metadata into one list.
// Returns undefined when the candidate carries neither.
function candidateToUrlContextMetadata(candidate) {
  const combined = [
    ...(candidate?.urlRetrievalMetadata?.urlRetrievalContexts ?? []),
    ...(candidate?.urlContextMetadata?.urlMetadata ?? []),
  ];
  return combined.length === 0 ? void 0 : { urlMetadata: combined };
}
554
// Accumulate per-modality token counts into `details`, keyed by the
// lower-cased modality name. Mutates `details` in place; a missing
// counts list is a no-op.
function addModalityCounts(modalityTokenCounts, details) {
  for (const { modality, tokenCount } of modalityTokenCounts ?? []) {
    const key = modality.toLowerCase();
    details[key] = (details[key] ?? 0) + tokenCount;
  }
}
562
// Map Gemini usageMetadata onto LangChain's UsageMetadata shape.
// Output tokens cover both candidate and reasoning ("thoughts") tokens;
// per-modality counts plus cache/reasoning details are folded into the
// input/output detail maps. Returns undefined when no usage data is present.
function responseToUsageMetadata(response) {
  if ("usageMetadata" in response.data) {
    const data = response?.data;
    const usageMetadata = data?.usageMetadata;
    const input_tokens = usageMetadata.promptTokenCount ?? 0;
    const candidatesTokenCount = usageMetadata.candidatesTokenCount ?? 0;
    const thoughtsTokenCount = usageMetadata.thoughtsTokenCount ?? 0;
    const output_tokens = candidatesTokenCount + thoughtsTokenCount;
    // Prefer the reported total; otherwise derive it.
    const total_tokens = usageMetadata.totalTokenCount ?? input_tokens + output_tokens;
    const input_token_details = {};
    addModalityCounts(usageMetadata.promptTokensDetails, input_token_details);
    if (typeof usageMetadata?.cachedContentTokenCount === "number") input_token_details.cache_read = usageMetadata.cachedContentTokenCount;
    const output_token_details = {};
    addModalityCounts(usageMetadata?.candidatesTokensDetails, output_token_details);
    if (typeof usageMetadata?.thoughtsTokenCount === "number") output_token_details.reasoning = usageMetadata.thoughtsTokenCount;
    const ret = {
      input_tokens,
      output_tokens,
      total_tokens,
      input_token_details,
      output_token_details
    };
    return ret;
  }
  return void 0;
}
588
// Extract generation metadata (safety ratings, citations, grounding, finish
// info, logprobs) from the response's first candidate. For array (streamed)
// data only the first chunk is inspected. Usage metadata is attached only
// when a finishReason is present (i.e. on the final chunk).
function responseToGenerationInfo(response) {
  const data = Array.isArray(response.data) && response.data[0] ? response.data[0] : response.data && response.data.candidates ? response.data : void 0;
  if (!data) return {};
  const finish_reason = data.candidates[0]?.finishReason;
  const ret = {
    safety_ratings: data.candidates[0]?.safetyRatings?.map((rating) => ({
      category: rating.category,
      probability: rating.probability,
      probability_score: rating.probabilityScore,
      severity: rating.severity,
      severity_score: rating.severityScore
    })),
    citation_metadata: data.candidates[0]?.citationMetadata,
    grounding_metadata: data.candidates[0]?.groundingMetadata,
    finish_reason,
    finish_message: data.candidates[0]?.finishMessage,
    url_context_metadata: candidateToUrlContextMetadata(data.candidates[0]),
    avgLogprobs: data.candidates[0]?.avgLogprobs,
    logprobs: candidateToLogprobs(data.candidates[0])
  };
  if (typeof finish_reason === "string") ret.usage_metadata = responseToUsageMetadata(response);
  return ret;
}
611
// Wrap the response in a single ChatGenerationChunk built from its first part.
// NOTE(review): only parts[0] feeds the message chunk here; multi-part
// handling is done in responseToChatGenerations instead.
function responseToChatGeneration(response) {
  return new __langchain_core_outputs.ChatGenerationChunk({
    text: responseToString(response),
    message: partToMessageChunk(responseToParts(response)[0]),
    generationInfo: responseToGenerationInfo(response)
  });
}
// Safety-checked variant of responseToChatGeneration.
function safeResponseToChatGeneration(response) {
  return safeResponseTo(response, responseToChatGeneration);
}
621
// Extract the plain-text payload from a message chunk.
// Returns "" for null chunks and empty content arrays; throws for content
// arrays whose first block is not text.
function chunkToString(chunk) {
  if (chunk === null) return "";
  else if (typeof chunk.content === "string") return chunk.content;
  else if (chunk.content.length === 0) return "";
  else if (chunk.content[0].type === "text") return chunk.content[0].text;
  // Fix: template-stringifying the chunk produced "[object Object]";
  // serialize it so the error is actually diagnosable.
  else throw new Error(`Unexpected chunk: ${JSON.stringify(chunk)}`);
}
628
// Convert a single Gemini part into an AIMessageChunk. When the derived
// content is purely text blocks it is collapsed into one plain string
// (with model_provider stamped into response_metadata).
function partToMessageChunk(part) {
  const fields = partsToBaseMessageChunkFields([part]);
  if (typeof fields.content === "string") return new __langchain_core_messages.AIMessageChunk(fields);
  else if (fields.content?.every((item) => item.type === "text")) {
    const newContent = fields.content.map((item) => "text" in item ? item.text : "").join("");
    return new __langchain_core_messages.AIMessageChunk({
      ...fields,
      content: newContent,
      response_metadata: {
        ...fields.response_metadata,
        model_provider: "google-vertexai"
      }
    });
  }
  return new __langchain_core_messages.AIMessageChunk(fields);
}
644
// Wrap one Gemini part in a ChatGenerationChunk. generationInfo starts empty
// and is filled in later (e.g. with grounding data) by the caller.
function partToChatGeneration(part) {
  const message = partToMessageChunk(part);
  const text = partToText(part);
  const generationInfo = {};
  return new __langchain_core_outputs.ChatGenerationChunk({
    text,
    message,
    generationInfo
  });
}
654
// Group grounding supports by the part index they reference. The result is a
// (possibly sparse) array: slot i holds the supports for part i; a support
// without a segment/partIndex is attributed to part 0.
function groundingSupportByPart(groundingSupports) {
  const byPart = [];
  for (const support of groundingSupports ?? []) {
    const index = support?.segment?.partIndex ?? 0;
    if (byPart[index]) {
      byPart[index].push(support);
    } else {
      byPart[index] = [support];
    }
  }
  return byPart;
}
665
// Produce one ChatGenerationChunk per response part, each annotated with the
// first candidate's grounding/citation metadata. Grounding supports are
// distributed to the specific part they reference via groundingSupportByPart.
function responseToGroundedChatGenerations(response) {
  const parts = responseToParts(response);
  if (parts.length === 0) return [];
  const candidate = (response?.data)?.candidates?.[0];
  const groundingMetadata = candidate?.groundingMetadata;
  const citationMetadata = candidate?.citationMetadata;
  const groundingParts = groundingSupportByPart(groundingMetadata?.groundingSupports);
  const ret = parts.map((part, index) => {
    const gen = partToChatGeneration(part);
    if (!gen.generationInfo) gen.generationInfo = {};
    if (groundingMetadata) {
      gen.generationInfo.groundingMetadata = groundingMetadata;
      const groundingPart = groundingParts[index];
      if (groundingPart) gen.generationInfo.groundingSupport = groundingPart;
    }
    if (citationMetadata) gen.generationInfo.citationMetadata = citationMetadata;
    return gen;
  });
  return ret;
}
685
// Merge the message content of several generations. If every message holds a
// plain string (and the caller did not force complex output) the result is a
// single concatenated string; otherwise a flat array of content blocks, with
// string contents wrapped as text blocks.
function combineContent(gen, forceComplex = false) {
  const contents = gen.map((item) => item.message.content);
  if (!forceComplex && contents.every((c) => typeof c === "string")) {
    return contents.join("");
  }
  const blocks = [];
  for (const c of contents) {
    if (typeof c === "string") {
      blocks.push({ type: "text", text: c });
    } else {
      blocks.push(...c);
    }
  }
  return blocks;
}
702
// Concatenate each generation's text, treating missing text as "".
function combineText(gen) {
  return gen.reduce((acc, item) => acc + (item.text ?? ""), "");
}
705
// Merge the tool-call information of all generations by concat-folding their
// message chunks into one AIMessageChunk (concat handles chunk merging,
// including partial tool_call_chunks).
function combineToolCalls(gen) {
  let ret = new __langchain_core_messages.AIMessageChunk("");
  gen.forEach((item) => {
    const message = item?.message;
    ret = (0, __langchain_core_utils_stream.concat)(ret, message);
  });
  return ret;
}
713
// Merge the additional_kwargs of all generations into one object.
// Array-valued keys are concatenated across generations; any other value is
// overwritten by later generations (last one wins).
function combineAdditionalKwargs(gen) {
  const ret = {};
  gen.forEach((item) => {
    const message = item?.message;
    const kwargs = message?.additional_kwargs ?? {};
    const keys = Object.keys(kwargs);
    keys.forEach((key) => {
      const value = kwargs[key];
      if (Object.hasOwn(ret, key) && Array.isArray(ret[key]) && Array.isArray(value)) {
        ret[key] = [...ret[key], ...value];
      } else {
        // Fix: assigning the array reference and later push()ing into it
        // mutated the source message's additional_kwargs; copy arrays instead.
        ret[key] = Array.isArray(value) ? [...value] : value;
      }
    });
  });
  return ret;
}
727
// Collapse per-part generation chunks into the single ChatGenerationChunk
// callers consume. Content, text, tool calls, and additional kwargs each use
// their own combiner; generationInfo is taken from the last content chunk.
function combineGenerations(generations, response) {
  const gen = splitGenerationTypes(generations, response);
  const combinedContent = combineContent(gen.content);
  const combinedText = combineText(gen.content);
  const combinedToolCalls = combineToolCalls(gen.content);
  const kwargs = combineAdditionalKwargs(gen.content);
  const lastContent = gen.content[gen.content.length - 1];
  const usage_metadata = responseToUsageMetadata(response);
  const message = new __langchain_core_messages.AIMessageChunk({
    content: combinedContent,
    additional_kwargs: kwargs,
    response_metadata: { model_provider: "google-vertexai" },
    usage_metadata,
    tool_calls: combinedToolCalls.tool_calls,
    invalid_tool_calls: combinedToolCalls.invalid_tool_calls
  });
  return [new __langchain_core_outputs.ChatGenerationChunk({
    message,
    text: combinedText,
    generationInfo: lastContent.generationInfo
  })];
}
749
// Partition generations into reasoning ("thought") chunks and regular content.
// The `_response` parameter is unused but kept for interface stability.
function splitGenerationTypes(generations, _response) {
  const isThought = (gen) => Boolean(gen?.generationInfo?.thought);
  const reasoning = generations.filter(isThought);
  const content = generations.filter((gen) => !isThought(gen));
  return {
    content,
    reasoning,
  };
}
761
/**
 * Although this returns an array, only the first (or maybe last)
 * element in the array is used. So we need to combine them into
 * just one element that contains everything we need.
 * @param response
 */
function responseToChatGenerations(response) {
  const generations = responseToGroundedChatGenerations(response);
  if (generations.length === 0) return [];
  const ret = combineGenerations(generations, response);
  const candidate = (response?.data)?.candidates?.[0];
  const avgLogprobs = candidate?.avgLogprobs;
  // Attach logprob data to the combined message's response_metadata.
  // NOTE(review): candidateToLogprobs always returns an object, so this
  // branch is effectively unconditional.
  const logprobs = candidateToLogprobs(candidate);
  if (logprobs) ret[0].message.response_metadata = {
    model_provider: "google-vertexai",
    ...ret[0].message.response_metadata,
    logprobs,
    avgLogprobs
  };
  return ret;
}
782
// Convert a response's parts into the field bag for an AI message chunk.
function responseToBaseMessageFields(response) {
  const parts = responseToParts(response);
  return partsToBaseMessageChunkFields(parts);
}
786
// One signature slot per part; parts without a thoughtSignature yield "".
// Keeping the slots positional lets roleMessageToContent re-attach them later.
function partsToSignatures(parts) {
  const signatures = [];
  for (const part of parts) {
    signatures.push(part?.thoughtSignature ?? "");
  }
  return signatures;
}
789
// Build the AIMessageChunk field bag for a list of Gemini parts: content
// blocks, tool-call chunks (raw string args), parsed tool_calls (or
// invalid_tool_calls when the args fail to parse), the raw tool calls in
// additional_kwargs, and positional thought signatures.
function partsToBaseMessageChunkFields(parts) {
  const fields = {
    content: partsToMessageContent(parts),
    tool_call_chunks: [],
    tool_calls: [],
    invalid_tool_calls: [],
    response_metadata: { model_provider: "google-vertexai" }
  };
  fields.additional_kwargs = {};
  const rawTools = partsToToolsRaw(parts);
  if (rawTools.length > 0) {
    const tools = toolsRawToTools(rawTools);
    for (const tool of tools) {
      fields.tool_call_chunks?.push({
        name: tool.function.name,
        args: tool.function.arguments,
        id: tool.id,
        type: "tool_call_chunk"
      });
      try {
        fields.tool_calls?.push({
          name: tool.function.name,
          args: JSON.parse(tool.function.arguments),
          id: tool.id
        });
      } catch (e) {
        // Unparseable args are preserved as an invalid tool call with the error.
        fields.invalid_tool_calls?.push({
          name: tool.function.name,
          args: tool.function.arguments,
          id: tool.id,
          error: e.message,
          type: "invalid_tool_call"
        });
      }
    }
    fields.additional_kwargs.tool_calls = tools;
  }
  fields.additional_kwargs.signatures = partsToSignatures(parts);
  return fields;
}
829
// Build a full (non-chunk) AIMessage from the response.
function responseToBaseMessage(response) {
  const fields = responseToBaseMessageFields(response);
  return new __langchain_core_messages.AIMessage(fields);
}
// Safety-checked variant of responseToBaseMessage.
function safeResponseToBaseMessage(response) {
  return safeResponseTo(response, responseToBaseMessage);
}
// Build the ChatResult (generations plus llmOutput metadata) for a response.
function responseToChatResult(response) {
  const generations = responseToChatGenerations(response);
  return {
    generations,
    llmOutput: responseToGenerationInfo(response)
  };
}
// Safety-checked variant of responseToChatResult.
function safeResponseToChatResult(response) {
  return safeResponseTo(response, responseToChatResult);
}
846
// Distinguish a BaseMessage[] from raw MessageContent. A string, or an array
// whose first element lacks a `content` property, counts as MessageContent.
function inputType(input) {
  if (typeof input === "string") return "MessageContent";
  const firstItem = input[0];
  // Fix: an empty array has no first item, and Object.hasOwn(undefined, ...)
  // throws a TypeError; treat empty input as MessageContent.
  if (firstItem === undefined || firstItem === null) return "MessageContent";
  return Object.hasOwn(firstItem, "content") ? "BaseMessageArray" : "MessageContent";
}
854
// Wrap raw MessageContent as a single user turn.
async function formatMessageContents(input, _parameters) {
  const parts = await messageContentToParts(input);
  const contents = [{
    role: "user",
    parts
  }];
  return contents;
}
// Convert a BaseMessage[] into Gemini contents. Entries that are entirely
// "system"-role are dropped here (they are carried via systemInstruction
// instead), and consecutive function-role entries are merged into a single
// content with combined parts.
async function formatBaseMessageContents(input, _parameters) {
  const inputPromises = input.map((msg, i) => baseMessageToContent(msg, input[i - 1]));
  const inputs = await Promise.all(inputPromises);
  return inputs.reduce((acc, cur) => {
    if (cur.every((content) => content.role === "system")) return acc;
    if (cur[0]?.role === "function" && acc.length > 0 && acc[acc.length - 1].role === "function") acc[acc.length - 1].parts = [...acc[acc.length - 1].parts, ...cur[0].parts];
    else acc.push(...cur);
    return acc;
  }, []);
}
872
// Route the input to the right contents formatter based on its detected type.
async function formatContents(input, parameters) {
  const it = inputType(input);
  switch (it) {
    case "MessageContent": return formatMessageContents(input, parameters);
    case "BaseMessageArray": return formatBaseMessageContents(input, parameters);
    default: throw new Error(`Unknown input type "${it}": ${input}`);
  }
}
880
// Build the generationConfig request section from call parameters.
// Logprob fields are set only when `logprobs` is defined; maxReasoningTokens
// maps onto thinkingConfig (a budget of 0 disables thought output). All
// undefined keys are stripped so the request payload stays minimal.
function formatGenerationConfig(parameters) {
  const ret = {
    temperature: parameters.temperature,
    topK: parameters.topK,
    topP: parameters.topP,
    seed: parameters.seed,
    presencePenalty: parameters.presencePenalty,
    frequencyPenalty: parameters.frequencyPenalty,
    maxOutputTokens: parameters.maxOutputTokens,
    stopSequences: parameters.stopSequences,
    responseMimeType: parameters.responseMimeType,
    responseModalities: parameters.responseModalities,
    speechConfig: normalizeSpeechConfig(parameters.speechConfig)
  };
  if (typeof parameters.logprobs !== "undefined") {
    ret.responseLogprobs = parameters.logprobs;
    if (parameters.logprobs && typeof parameters.topLogprobs !== "undefined") ret.logprobs = parameters.topLogprobs;
  }
  if (typeof parameters.maxReasoningTokens !== "undefined") {
    const includeThoughts = parameters.maxReasoningTokens !== 0;
    ret.thinkingConfig = {
      thinkingBudget: parameters.maxReasoningTokens,
      includeThoughts
    };
  }
  // Strip undefined entries so they are not serialized into the request.
  let attribute;
  for (attribute in ret) if (ret[attribute] === void 0) delete ret[attribute];
  return ret;
}
909
// Pass the caller's safety settings through, defaulting to an empty list.
function formatSafetySettings(parameters) {
  const { safetySettings } = parameters;
  return safetySettings ?? [];
}
912
// Extract the system instruction from a BaseMessage[]. Only the first
// message may be a system message; a system message anywhere else raises.
async function formatBaseMessageSystemInstruction(input) {
  let ret = {};
  for (let index = 0; index < input.length; index += 1) {
    const message = input[index];
    if (message.getType() === "system") if (index === 0) ret = (await baseMessageToContent(message, void 0))[0];
    else throw new Error("System messages are only permitted as the first passed message.");
  }
  return ret;
}
// Produce a systemInstruction only when enabled in config and the input is
// a message array; otherwise an empty object (which formatData will ignore).
async function formatSystemInstruction(input) {
  if (!config?.useSystemInstruction) return {};
  const it = inputType(input);
  switch (it) {
    case "BaseMessageArray": return formatBaseMessageSystemInstruction(input);
    default: return {};
  }
}
929
// Convert a LangChain structured tool into a Gemini functionDeclaration.
function structuredToolToFunctionDeclaration(tool) {
  const jsonSchema = require_zod_to_gemini_parameters.schemaToGeminiParameters(tool.schema);
  return {
    name: tool.name,
    description: tool.description ?? `A function available to call.`,
    parameters: jsonSchema
  };
}
// Return which Google-search tool attribute (if any) the tool object carries.
function searchToolName(tool) {
  for (const name of require_types.GeminiSearchToolAttributes) if (name in tool) return name;
  return void 0;
}
// Rewrite a search tool's attribute to the variant configured in
// googleSearchToolAdjustment, when one is set and differs from the original.
function cleanGeminiTool(tool) {
  const orig = searchToolName(tool);
  const adj = config?.googleSearchToolAdjustment;
  if (orig && adj && adj !== orig) return { [adj]: {} };
  else return tool;
}
// Split tools into LangChain tools (emitted as one functionDeclarations
// group) and provider-native tools (passed through, with the search-tool
// adjustment applied).
function formatTools(parameters) {
  const tools = parameters?.tools;
  if (!tools || tools.length === 0) return [];
  const langChainTools = [];
  const otherTools = [];
  tools.forEach((tool) => {
    if ((0, __langchain_core_utils_function_calling.isLangChainTool)(tool)) langChainTools.push(tool);
    else otherTools.push(cleanGeminiTool(tool));
  });
  const result = [...otherTools];
  if (langChainTools.length > 0) result.push({ functionDeclarations: langChainTools.map(structuredToolToFunctionDeclaration) });
  return result;
}
960
// Translate LangChain's tool_choice into Gemini's functionCallingConfig.
// "auto"/"any"/"none" map straight onto a calling mode; any other string
// names a specific tool the model must call.
function formatToolConfig(parameters) {
  const choice = parameters.tool_choice;
  if (!choice || typeof choice !== "string") {
    return void 0;
  }
  const knownModes = ["auto", "any", "none"];
  if (knownModes.includes(choice)) {
    return {
      functionCallingConfig: {
        mode: choice,
        allowedFunctionNames: parameters.allowed_function_names,
      },
    };
  }
  return {
    functionCallingConfig: {
      mode: "any",
      allowedFunctionNames: [choice],
    },
  };
}
975
// Assemble the full request body: contents and generationConfig always, plus
// tools/toolConfig/safetySettings/systemInstruction/cachedContent/labels only
// when they are present and non-empty.
async function formatData(input, parameters) {
  const typedInput = input;
  const contents = await formatContents(typedInput, parameters);
  const generationConfig = formatGenerationConfig(parameters);
  const tools = formatTools(parameters);
  const toolConfig = formatToolConfig(parameters);
  const safetySettings = formatSafetySettings(parameters);
  const systemInstruction = await formatSystemInstruction(typedInput);
  const ret = {
    contents,
    generationConfig
  };
  if (tools && tools.length) ret.tools = tools;
  if (toolConfig) ret.toolConfig = toolConfig;
  if (safetySettings && safetySettings.length) ret.safetySettings = safetySettings;
  // An empty {} system instruction (feature disabled / none found) is skipped.
  if (systemInstruction?.role && systemInstruction?.parts && systemInstruction?.parts?.length) ret.systemInstruction = systemInstruction;
  if (parameters.cachedContent) ret.cachedContent = parameters.cachedContent;
  if (parameters.labels && Object.keys(parameters.labels).length > 0) ret.labels = parameters.labels;
  return ret;
}
995
  // Public surface of the Gemini API helper. The safety-wrapped variants are
  // exposed under the plain names so callers get safety handling by default.
  return {
    messageContentToParts,
    baseMessageToContent,
    responseToString: safeResponseToString,
    responseToChatGeneration: safeResponseToChatGeneration,
    chunkToString,
    responseToBaseMessage: safeResponseToBaseMessage,
    responseToChatResult: safeResponseToChatResult,
    formatData
  };
1472
1005
  }
1473
1006
// Validate sampling/limit parameters before sending a Gemini request.
// Falsy values (0, undefined) deliberately skip their range checks.
function validateGeminiParams(params) {
  const { maxOutputTokens, maxReasoningTokens, temperature, topP, topK } = params;
  if (maxOutputTokens && maxOutputTokens < 0) {
    throw new Error("`maxOutputTokens` must be a positive integer");
  }
  if (maxReasoningTokens !== undefined && maxOutputTokens !== undefined && maxReasoningTokens >= maxOutputTokens) {
    throw new Error("`maxOutputTokens` must be greater than `maxReasoningTokens`");
  }
  if (temperature && (temperature < 0 || temperature > 2)) {
    throw new Error("`temperature` must be in the range of [0.0,2.0]");
  }
  if (topP && (topP < 0 || topP > 1)) {
    throw new Error("`topP` must be in the range of [0.0,1.0]");
  }
  if (topK && topK < 0) {
    throw new Error("`topK` must be a positive integer");
  }
}
1495
1017
// Gemini models are identified by a case-insensitive "gemini" name prefix.
function isModelGemini(modelName) {
  const normalized = modelName.toLowerCase();
  return normalized.startsWith("gemini");
}
1498
1020
// Gemma models are identified by a case-insensitive "gemma" name prefix.
function isModelGemma(modelName) {
  const normalized = modelName.toLowerCase();
  return normalized.startsWith("gemma");
}
1023

//#endregion
// CommonJS re-exports of this module's public API.
exports.DefaultGeminiSafetyHandler = DefaultGeminiSafetyHandler;
exports.MessageGeminiSafetyHandler = MessageGeminiSafetyHandler;
exports.getGeminiAPI = getGeminiAPI;
exports.isModelGemini = isModelGemini;
exports.isModelGemma = isModelGemma;
exports.normalizeSpeechConfig = normalizeSpeechConfig;
exports.validateGeminiParams = validateGeminiParams;
//# sourceMappingURL=gemini.cjs.map