@langchain/google-genai 0.2.18 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. package/CHANGELOG.md +11 -0
  2. package/LICENSE +6 -6
  3. package/README.md +8 -8
  4. package/dist/_virtual/rolldown_runtime.cjs +25 -0
  5. package/dist/chat_models.cjs +667 -847
  6. package/dist/chat_models.cjs.map +1 -0
  7. package/dist/chat_models.d.cts +556 -0
  8. package/dist/chat_models.d.cts.map +1 -0
  9. package/dist/chat_models.d.ts +171 -157
  10. package/dist/chat_models.d.ts.map +1 -0
  11. package/dist/chat_models.js +665 -842
  12. package/dist/chat_models.js.map +1 -0
  13. package/dist/embeddings.cjs +97 -151
  14. package/dist/embeddings.cjs.map +1 -0
  15. package/dist/embeddings.d.cts +104 -0
  16. package/dist/embeddings.d.cts.map +1 -0
  17. package/dist/embeddings.d.ts +76 -70
  18. package/dist/embeddings.d.ts.map +1 -0
  19. package/dist/embeddings.js +93 -144
  20. package/dist/embeddings.js.map +1 -0
  21. package/dist/index.cjs +5 -18
  22. package/dist/index.d.cts +3 -0
  23. package/dist/index.d.ts +3 -2
  24. package/dist/index.js +4 -2
  25. package/dist/output_parsers.cjs +47 -75
  26. package/dist/output_parsers.cjs.map +1 -0
  27. package/dist/output_parsers.js +47 -72
  28. package/dist/output_parsers.js.map +1 -0
  29. package/dist/types.d.cts +8 -0
  30. package/dist/types.d.cts.map +1 -0
  31. package/dist/types.d.ts +7 -2
  32. package/dist/types.d.ts.map +1 -0
  33. package/dist/utils/common.cjs +356 -549
  34. package/dist/utils/common.cjs.map +1 -0
  35. package/dist/utils/common.js +357 -545
  36. package/dist/utils/common.js.map +1 -0
  37. package/dist/utils/tools.cjs +65 -102
  38. package/dist/utils/tools.cjs.map +1 -0
  39. package/dist/utils/tools.js +64 -99
  40. package/dist/utils/tools.js.map +1 -0
  41. package/dist/utils/zod_to_genai_parameters.cjs +31 -49
  42. package/dist/utils/zod_to_genai_parameters.cjs.map +1 -0
  43. package/dist/utils/zod_to_genai_parameters.js +29 -45
  44. package/dist/utils/zod_to_genai_parameters.js.map +1 -0
  45. package/package.json +42 -51
  46. package/dist/output_parsers.d.ts +0 -20
  47. package/dist/types.cjs +0 -2
  48. package/dist/types.js +0 -1
  49. package/dist/utils/common.d.ts +0 -22
  50. package/dist/utils/tools.d.ts +0 -10
  51. package/dist/utils/zod_to_genai_parameters.d.ts +0 -14
  52. package/index.cjs +0 -1
  53. package/index.d.cts +0 -1
  54. package/index.d.ts +0 -1
  55. package/index.js +0 -1
@@ -1,572 +1,379 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.getMessageAuthor = getMessageAuthor;
4
- exports.convertAuthorToRole = convertAuthorToRole;
5
- exports.convertMessageContentToParts = convertMessageContentToParts;
6
- exports.convertBaseMessagesToContent = convertBaseMessagesToContent;
7
- exports.mapGenerateContentResultToChatResult = mapGenerateContentResultToChatResult;
8
- exports.convertResponseContentToChatGenerationChunk = convertResponseContentToChatGenerationChunk;
9
- exports.convertToGenerativeAITools = convertToGenerativeAITools;
10
- const messages_1 = require("@langchain/core/messages");
11
- const outputs_1 = require("@langchain/core/outputs");
12
- const function_calling_1 = require("@langchain/core/utils/function_calling");
13
- const base_1 = require("@langchain/core/language_models/base");
14
- const uuid_1 = require("uuid");
15
- const zod_to_genai_parameters_js_1 = require("./zod_to_genai_parameters.cjs");
1
+ const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
2
+ const require_zod_to_genai_parameters = require('./zod_to_genai_parameters.cjs');
3
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
4
+ const __langchain_core_outputs = require_rolldown_runtime.__toESM(require("@langchain/core/outputs"));
5
+ const __langchain_core_utils_function_calling = require_rolldown_runtime.__toESM(require("@langchain/core/utils/function_calling"));
6
+ const __langchain_core_language_models_base = require_rolldown_runtime.__toESM(require("@langchain/core/language_models/base"));
7
+ const uuid = require_rolldown_runtime.__toESM(require("uuid"));
8
+
9
+ //#region src/utils/common.ts
16
10
  function getMessageAuthor(message) {
17
- const type = message._getType();
18
- if (messages_1.ChatMessage.isInstance(message)) {
19
- return message.role;
20
- }
21
- if (type === "tool") {
22
- return type;
23
- }
24
- return message.name ?? type;
11
+ const type = message._getType();
12
+ if (__langchain_core_messages.ChatMessage.isInstance(message)) return message.role;
13
+ if (type === "tool") return type;
14
+ return message.name ?? type;
25
15
  }
26
16
  /**
27
- * Maps a message type to a Google Generative AI chat author.
28
- * @param message The message to map.
29
- * @param model The model to use for mapping.
30
- * @returns The message type mapped to a Google Generative AI chat author.
31
- */
17
+ * Maps a message type to a Google Generative AI chat author.
18
+ * @param message The message to map.
19
+ * @param model The model to use for mapping.
20
+ * @returns The message type mapped to a Google Generative AI chat author.
21
+ */
32
22
  function convertAuthorToRole(author) {
33
- switch (author) {
34
- /**
35
- * Note: Gemini currently is not supporting system messages
36
- * we will convert them to human messages and merge with following
37
- * */
38
- case "supervisor":
39
- case "ai":
40
- case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
41
- return "model";
42
- case "system":
43
- return "system";
44
- case "human":
45
- return "user";
46
- case "tool":
47
- case "function":
48
- return "function";
49
- default:
50
- throw new Error(`Unknown / unsupported author: ${author}`);
51
- }
23
+ switch (author) {
24
+ case "supervisor":
25
+ case "ai":
26
+ case "model": return "model";
27
+ case "system": return "system";
28
+ case "human": return "user";
29
+ case "tool":
30
+ case "function": return "function";
31
+ default: throw new Error(`Unknown / unsupported author: ${author}`);
32
+ }
52
33
  }
53
34
  function messageContentMedia(content) {
54
- if ("mimeType" in content && "data" in content) {
55
- return {
56
- inlineData: {
57
- mimeType: content.mimeType,
58
- data: content.data,
59
- },
60
- };
61
- }
62
- if ("mimeType" in content && "fileUri" in content) {
63
- return {
64
- fileData: {
65
- mimeType: content.mimeType,
66
- fileUri: content.fileUri,
67
- },
68
- };
69
- }
70
- throw new Error("Invalid media content");
35
+ if ("mimeType" in content && "data" in content) return { inlineData: {
36
+ mimeType: content.mimeType,
37
+ data: content.data
38
+ } };
39
+ if ("mimeType" in content && "fileUri" in content) return { fileData: {
40
+ mimeType: content.mimeType,
41
+ fileUri: content.fileUri
42
+ } };
43
+ throw new Error("Invalid media content");
71
44
  }
72
45
  function inferToolNameFromPreviousMessages(message, previousMessages) {
73
- return previousMessages
74
- .map((msg) => {
75
- if ((0, messages_1.isAIMessage)(msg)) {
76
- return msg.tool_calls ?? [];
77
- }
78
- return [];
79
- })
80
- .flat()
81
- .find((toolCall) => {
82
- return toolCall.id === message.tool_call_id;
83
- })?.name;
46
+ return previousMessages.map((msg) => {
47
+ if ((0, __langchain_core_messages.isAIMessage)(msg)) return msg.tool_calls ?? [];
48
+ return [];
49
+ }).flat().find((toolCall) => {
50
+ return toolCall.id === message.tool_call_id;
51
+ })?.name;
84
52
  }
85
53
  function _getStandardContentBlockConverter(isMultimodalModel) {
86
- const standardContentBlockConverter = {
87
- providerName: "Google Gemini",
88
- fromStandardTextBlock(block) {
89
- return {
90
- text: block.text,
91
- };
92
- },
93
- fromStandardImageBlock(block) {
94
- if (!isMultimodalModel) {
95
- throw new Error("This model does not support images");
96
- }
97
- if (block.source_type === "url") {
98
- const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
99
- if (data) {
100
- return {
101
- inlineData: {
102
- mimeType: data.mime_type,
103
- data: data.data,
104
- },
105
- };
106
- }
107
- else {
108
- return {
109
- fileData: {
110
- mimeType: block.mime_type ?? "",
111
- fileUri: block.url,
112
- },
113
- };
114
- }
115
- }
116
- if (block.source_type === "base64") {
117
- return {
118
- inlineData: {
119
- mimeType: block.mime_type ?? "",
120
- data: block.data,
121
- },
122
- };
123
- }
124
- throw new Error(`Unsupported source type: ${block.source_type}`);
125
- },
126
- fromStandardAudioBlock(block) {
127
- if (!isMultimodalModel) {
128
- throw new Error("This model does not support audio");
129
- }
130
- if (block.source_type === "url") {
131
- const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
132
- if (data) {
133
- return {
134
- inlineData: {
135
- mimeType: data.mime_type,
136
- data: data.data,
137
- },
138
- };
139
- }
140
- else {
141
- return {
142
- fileData: {
143
- mimeType: block.mime_type ?? "",
144
- fileUri: block.url,
145
- },
146
- };
147
- }
148
- }
149
- if (block.source_type === "base64") {
150
- return {
151
- inlineData: {
152
- mimeType: block.mime_type ?? "",
153
- data: block.data,
154
- },
155
- };
156
- }
157
- throw new Error(`Unsupported source type: ${block.source_type}`);
158
- },
159
- fromStandardFileBlock(block) {
160
- if (!isMultimodalModel) {
161
- throw new Error("This model does not support files");
162
- }
163
- if (block.source_type === "text") {
164
- return {
165
- text: block.text,
166
- };
167
- }
168
- if (block.source_type === "url") {
169
- const data = (0, messages_1.parseBase64DataUrl)({ dataUrl: block.url });
170
- if (data) {
171
- return {
172
- inlineData: {
173
- mimeType: data.mime_type,
174
- data: data.data,
175
- },
176
- };
177
- }
178
- else {
179
- return {
180
- fileData: {
181
- mimeType: block.mime_type ?? "",
182
- fileUri: block.url,
183
- },
184
- };
185
- }
186
- }
187
- if (block.source_type === "base64") {
188
- return {
189
- inlineData: {
190
- mimeType: block.mime_type ?? "",
191
- data: block.data,
192
- },
193
- };
194
- }
195
- throw new Error(`Unsupported source type: ${block.source_type}`);
196
- },
197
- };
198
- return standardContentBlockConverter;
54
+ const standardContentBlockConverter = {
55
+ providerName: "Google Gemini",
56
+ fromStandardTextBlock(block) {
57
+ return { text: block.text };
58
+ },
59
+ fromStandardImageBlock(block) {
60
+ if (!isMultimodalModel) throw new Error("This model does not support images");
61
+ if (block.source_type === "url") {
62
+ const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
63
+ if (data) return { inlineData: {
64
+ mimeType: data.mime_type,
65
+ data: data.data
66
+ } };
67
+ else return { fileData: {
68
+ mimeType: block.mime_type ?? "",
69
+ fileUri: block.url
70
+ } };
71
+ }
72
+ if (block.source_type === "base64") return { inlineData: {
73
+ mimeType: block.mime_type ?? "",
74
+ data: block.data
75
+ } };
76
+ throw new Error(`Unsupported source type: ${block.source_type}`);
77
+ },
78
+ fromStandardAudioBlock(block) {
79
+ if (!isMultimodalModel) throw new Error("This model does not support audio");
80
+ if (block.source_type === "url") {
81
+ const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
82
+ if (data) return { inlineData: {
83
+ mimeType: data.mime_type,
84
+ data: data.data
85
+ } };
86
+ else return { fileData: {
87
+ mimeType: block.mime_type ?? "",
88
+ fileUri: block.url
89
+ } };
90
+ }
91
+ if (block.source_type === "base64") return { inlineData: {
92
+ mimeType: block.mime_type ?? "",
93
+ data: block.data
94
+ } };
95
+ throw new Error(`Unsupported source type: ${block.source_type}`);
96
+ },
97
+ fromStandardFileBlock(block) {
98
+ if (!isMultimodalModel) throw new Error("This model does not support files");
99
+ if (block.source_type === "text") return { text: block.text };
100
+ if (block.source_type === "url") {
101
+ const data = (0, __langchain_core_messages.parseBase64DataUrl)({ dataUrl: block.url });
102
+ if (data) return { inlineData: {
103
+ mimeType: data.mime_type,
104
+ data: data.data
105
+ } };
106
+ else return { fileData: {
107
+ mimeType: block.mime_type ?? "",
108
+ fileUri: block.url
109
+ } };
110
+ }
111
+ if (block.source_type === "base64") return { inlineData: {
112
+ mimeType: block.mime_type ?? "",
113
+ data: block.data
114
+ } };
115
+ throw new Error(`Unsupported source type: ${block.source_type}`);
116
+ }
117
+ };
118
+ return standardContentBlockConverter;
199
119
  }
200
120
  function _convertLangChainContentToPart(content, isMultimodalModel) {
201
- if ((0, messages_1.isDataContentBlock)(content)) {
202
- return (0, messages_1.convertToProviderContentBlock)(content, _getStandardContentBlockConverter(isMultimodalModel));
203
- }
204
- if (content.type === "text") {
205
- return { text: content.text };
206
- }
207
- else if (content.type === "executableCode") {
208
- return { executableCode: content.executableCode };
209
- }
210
- else if (content.type === "codeExecutionResult") {
211
- return { codeExecutionResult: content.codeExecutionResult };
212
- }
213
- else if (content.type === "image_url") {
214
- if (!isMultimodalModel) {
215
- throw new Error(`This model does not support images`);
216
- }
217
- let source;
218
- if (typeof content.image_url === "string") {
219
- source = content.image_url;
220
- }
221
- else if (typeof content.image_url === "object" &&
222
- "url" in content.image_url) {
223
- source = content.image_url.url;
224
- }
225
- else {
226
- throw new Error("Please provide image as base64 encoded data URL");
227
- }
228
- const [dm, data] = source.split(",");
229
- if (!dm.startsWith("data:")) {
230
- throw new Error("Please provide image as base64 encoded data URL");
231
- }
232
- const [mimeType, encoding] = dm.replace(/^data:/, "").split(";");
233
- if (encoding !== "base64") {
234
- throw new Error("Please provide image as base64 encoded data URL");
235
- }
236
- return {
237
- inlineData: {
238
- data,
239
- mimeType,
240
- },
241
- };
242
- }
243
- else if (content.type === "media") {
244
- return messageContentMedia(content);
245
- }
246
- else if (content.type === "tool_use") {
247
- return {
248
- functionCall: {
249
- name: content.name,
250
- args: content.input,
251
- },
252
- };
253
- }
254
- else if (content.type?.includes("/") &&
255
- // Ensure it's a single slash.
256
- content.type.split("/").length === 2 &&
257
- "data" in content &&
258
- typeof content.data === "string") {
259
- return {
260
- inlineData: {
261
- mimeType: content.type,
262
- data: content.data,
263
- },
264
- };
265
- }
266
- else if ("functionCall" in content) {
267
- // No action needed here — function calls will be added later from message.tool_calls
268
- return undefined;
269
- }
270
- else {
271
- if ("type" in content) {
272
- throw new Error(`Unknown content type ${content.type}`);
273
- }
274
- else {
275
- throw new Error(`Unknown content ${JSON.stringify(content)}`);
276
- }
277
- }
121
+ if ((0, __langchain_core_messages.isDataContentBlock)(content)) return (0, __langchain_core_messages.convertToProviderContentBlock)(content, _getStandardContentBlockConverter(isMultimodalModel));
122
+ if (content.type === "text") return { text: content.text };
123
+ else if (content.type === "executableCode") return { executableCode: content.executableCode };
124
+ else if (content.type === "codeExecutionResult") return { codeExecutionResult: content.codeExecutionResult };
125
+ else if (content.type === "image_url") {
126
+ if (!isMultimodalModel) throw new Error(`This model does not support images`);
127
+ let source;
128
+ if (typeof content.image_url === "string") source = content.image_url;
129
+ else if (typeof content.image_url === "object" && "url" in content.image_url) source = content.image_url.url;
130
+ else throw new Error("Please provide image as base64 encoded data URL");
131
+ const [dm, data] = source.split(",");
132
+ if (!dm.startsWith("data:")) throw new Error("Please provide image as base64 encoded data URL");
133
+ const [mimeType, encoding] = dm.replace(/^data:/, "").split(";");
134
+ if (encoding !== "base64") throw new Error("Please provide image as base64 encoded data URL");
135
+ return { inlineData: {
136
+ data,
137
+ mimeType
138
+ } };
139
+ } else if (content.type === "media") return messageContentMedia(content);
140
+ else if (content.type === "tool_use") return { functionCall: {
141
+ name: content.name,
142
+ args: content.input
143
+ } };
144
+ else if (content.type?.includes("/") && content.type.split("/").length === 2 && "data" in content && typeof content.data === "string") return { inlineData: {
145
+ mimeType: content.type,
146
+ data: content.data
147
+ } };
148
+ else if ("functionCall" in content) return void 0;
149
+ else if ("type" in content) throw new Error(`Unknown content type ${content.type}`);
150
+ else throw new Error(`Unknown content ${JSON.stringify(content)}`);
278
151
  }
279
152
  function convertMessageContentToParts(message, isMultimodalModel, previousMessages) {
280
- if ((0, messages_1.isToolMessage)(message)) {
281
- const messageName = message.name ??
282
- inferToolNameFromPreviousMessages(message, previousMessages);
283
- if (messageName === undefined) {
284
- throw new Error(`Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "${message.id}" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.`);
285
- }
286
- const result = Array.isArray(message.content)
287
- ? message.content
288
- .map((c) => _convertLangChainContentToPart(c, isMultimodalModel))
289
- .filter((p) => p !== undefined)
290
- : message.content;
291
- if (message.status === "error") {
292
- return [
293
- {
294
- functionResponse: {
295
- name: messageName,
296
- // The API expects an object with an `error` field if the function call fails.
297
- // `error` must be a valid object (not a string or array), so we wrap `message.content` here
298
- response: { error: { details: result } },
299
- },
300
- },
301
- ];
302
- }
303
- return [
304
- {
305
- functionResponse: {
306
- name: messageName,
307
- // again, can't have a string or array value for `response`, so we wrap it as an object here
308
- response: { result },
309
- },
310
- },
311
- ];
312
- }
313
- let functionCalls = [];
314
- const messageParts = [];
315
- if (typeof message.content === "string" && message.content) {
316
- messageParts.push({ text: message.content });
317
- }
318
- if (Array.isArray(message.content)) {
319
- messageParts.push(...message.content
320
- .map((c) => _convertLangChainContentToPart(c, isMultimodalModel))
321
- .filter((p) => p !== undefined));
322
- }
323
- if ((0, messages_1.isAIMessage)(message) && message.tool_calls?.length) {
324
- functionCalls = message.tool_calls.map((tc) => {
325
- return {
326
- functionCall: {
327
- name: tc.name,
328
- args: tc.args,
329
- },
330
- };
331
- });
332
- }
333
- return [...messageParts, ...functionCalls];
153
+ if ((0, __langchain_core_messages.isToolMessage)(message)) {
154
+ const messageName = message.name ?? inferToolNameFromPreviousMessages(message, previousMessages);
155
+ if (messageName === void 0) throw new Error(`Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "${message.id}" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.`);
156
+ const result = Array.isArray(message.content) ? message.content.map((c) => _convertLangChainContentToPart(c, isMultimodalModel)).filter((p) => p !== void 0) : message.content;
157
+ if (message.status === "error") return [{ functionResponse: {
158
+ name: messageName,
159
+ response: { error: { details: result } }
160
+ } }];
161
+ return [{ functionResponse: {
162
+ name: messageName,
163
+ response: { result }
164
+ } }];
165
+ }
166
+ let functionCalls = [];
167
+ const messageParts = [];
168
+ if (typeof message.content === "string" && message.content) messageParts.push({ text: message.content });
169
+ if (Array.isArray(message.content)) messageParts.push(...message.content.map((c) => _convertLangChainContentToPart(c, isMultimodalModel)).filter((p) => p !== void 0));
170
+ if ((0, __langchain_core_messages.isAIMessage)(message) && message.tool_calls?.length) functionCalls = message.tool_calls.map((tc) => {
171
+ return { functionCall: {
172
+ name: tc.name,
173
+ args: tc.args
174
+ } };
175
+ });
176
+ return [...messageParts, ...functionCalls];
334
177
  }
335
178
  function convertBaseMessagesToContent(messages, isMultimodalModel, convertSystemMessageToHumanContent = false) {
336
- return messages.reduce((acc, message, index) => {
337
- if (!(0, messages_1.isBaseMessage)(message)) {
338
- throw new Error("Unsupported message input");
339
- }
340
- const author = getMessageAuthor(message);
341
- if (author === "system" && index !== 0) {
342
- throw new Error("System message should be the first one");
343
- }
344
- const role = convertAuthorToRole(author);
345
- const prevContent = acc.content[acc.content.length];
346
- if (!acc.mergeWithPreviousContent &&
347
- prevContent &&
348
- prevContent.role === role) {
349
- throw new Error("Google Generative AI requires alternate messages between authors");
350
- }
351
- const parts = convertMessageContentToParts(message, isMultimodalModel, messages.slice(0, index));
352
- if (acc.mergeWithPreviousContent) {
353
- const prevContent = acc.content[acc.content.length - 1];
354
- if (!prevContent) {
355
- throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
356
- }
357
- prevContent.parts.push(...parts);
358
- return {
359
- mergeWithPreviousContent: false,
360
- content: acc.content,
361
- };
362
- }
363
- let actualRole = role;
364
- if (actualRole === "function" ||
365
- (actualRole === "system" && !convertSystemMessageToHumanContent)) {
366
- // GenerativeAI API will throw an error if the role is not "user" or "model."
367
- actualRole = "user";
368
- }
369
- const content = {
370
- role: actualRole,
371
- parts,
372
- };
373
- return {
374
- mergeWithPreviousContent: author === "system" && !convertSystemMessageToHumanContent,
375
- content: [...acc.content, content],
376
- };
377
- }, { content: [], mergeWithPreviousContent: false }).content;
179
+ return messages.reduce((acc, message, index) => {
180
+ if (!(0, __langchain_core_messages.isBaseMessage)(message)) throw new Error("Unsupported message input");
181
+ const author = getMessageAuthor(message);
182
+ if (author === "system" && index !== 0) throw new Error("System message should be the first one");
183
+ const role = convertAuthorToRole(author);
184
+ const prevContent = acc.content[acc.content.length];
185
+ if (!acc.mergeWithPreviousContent && prevContent && prevContent.role === role) throw new Error("Google Generative AI requires alternate messages between authors");
186
+ const parts = convertMessageContentToParts(message, isMultimodalModel, messages.slice(0, index));
187
+ if (acc.mergeWithPreviousContent) {
188
+ const prevContent$1 = acc.content[acc.content.length - 1];
189
+ if (!prevContent$1) throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
190
+ prevContent$1.parts.push(...parts);
191
+ return {
192
+ mergeWithPreviousContent: false,
193
+ content: acc.content
194
+ };
195
+ }
196
+ let actualRole = role;
197
+ if (actualRole === "function" || actualRole === "system" && !convertSystemMessageToHumanContent) actualRole = "user";
198
+ const content = {
199
+ role: actualRole,
200
+ parts
201
+ };
202
+ return {
203
+ mergeWithPreviousContent: author === "system" && !convertSystemMessageToHumanContent,
204
+ content: [...acc.content, content]
205
+ };
206
+ }, {
207
+ content: [],
208
+ mergeWithPreviousContent: false
209
+ }).content;
378
210
  }
379
211
  function mapGenerateContentResultToChatResult(response, extra) {
380
- // if rejected or error, return empty generations with reason in filters
381
- if (!response.candidates ||
382
- response.candidates.length === 0 ||
383
- !response.candidates[0]) {
384
- return {
385
- generations: [],
386
- llmOutput: {
387
- filters: response.promptFeedback,
388
- },
389
- };
390
- }
391
- const functionCalls = response.functionCalls();
392
- const [candidate] = response.candidates;
393
- const { content: candidateContent, ...generationInfo } = candidate;
394
- let content;
395
- if (Array.isArray(candidateContent?.parts) &&
396
- candidateContent.parts.length === 1 &&
397
- candidateContent.parts[0].text) {
398
- content = candidateContent.parts[0].text;
399
- }
400
- else if (Array.isArray(candidateContent?.parts) &&
401
- candidateContent.parts.length > 0) {
402
- content = candidateContent.parts.map((p) => {
403
- if ("text" in p) {
404
- return {
405
- type: "text",
406
- text: p.text,
407
- };
408
- }
409
- else if ("executableCode" in p) {
410
- return {
411
- type: "executableCode",
412
- executableCode: p.executableCode,
413
- };
414
- }
415
- else if ("codeExecutionResult" in p) {
416
- return {
417
- type: "codeExecutionResult",
418
- codeExecutionResult: p.codeExecutionResult,
419
- };
420
- }
421
- return p;
422
- });
423
- }
424
- else {
425
- // no content returned - likely due to abnormal stop reason, e.g. malformed function call
426
- content = [];
427
- }
428
- let text = "";
429
- if (typeof content === "string") {
430
- text = content;
431
- }
432
- else if (Array.isArray(content) && content.length > 0) {
433
- const block = content.find((b) => "text" in b);
434
- text = block?.text ?? text;
435
- }
436
- const generation = {
437
- text,
438
- message: new messages_1.AIMessage({
439
- content: content ?? "",
440
- tool_calls: functionCalls?.map((fc) => {
441
- return {
442
- ...fc,
443
- type: "tool_call",
444
- id: "id" in fc && typeof fc.id === "string" ? fc.id : (0, uuid_1.v4)(),
445
- };
446
- }),
447
- additional_kwargs: {
448
- ...generationInfo,
449
- },
450
- usage_metadata: extra?.usageMetadata,
451
- }),
452
- generationInfo,
453
- };
454
- return {
455
- generations: [generation],
456
- llmOutput: {
457
- tokenUsage: {
458
- promptTokens: extra?.usageMetadata?.input_tokens,
459
- completionTokens: extra?.usageMetadata?.output_tokens,
460
- totalTokens: extra?.usageMetadata?.total_tokens,
461
- },
462
- },
463
- };
212
+ if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) return {
213
+ generations: [],
214
+ llmOutput: { filters: response.promptFeedback }
215
+ };
216
+ const functionCalls = response.functionCalls();
217
+ const [candidate] = response.candidates;
218
+ const { content: candidateContent,...generationInfo } = candidate;
219
+ let content;
220
+ if (Array.isArray(candidateContent?.parts) && candidateContent.parts.length === 1 && candidateContent.parts[0].text) content = candidateContent.parts[0].text;
221
+ else if (Array.isArray(candidateContent?.parts) && candidateContent.parts.length > 0) content = candidateContent.parts.map((p) => {
222
+ if ("text" in p) return {
223
+ type: "text",
224
+ text: p.text
225
+ };
226
+ else if ("inlineData" in p) return {
227
+ type: "inlineData",
228
+ inlineData: p.inlineData
229
+ };
230
+ else if ("functionCall" in p) return {
231
+ type: "functionCall",
232
+ functionCall: p.functionCall
233
+ };
234
+ else if ("functionResponse" in p) return {
235
+ type: "functionResponse",
236
+ functionResponse: p.functionResponse
237
+ };
238
+ else if ("fileData" in p) return {
239
+ type: "fileData",
240
+ fileData: p.fileData
241
+ };
242
+ else if ("executableCode" in p) return {
243
+ type: "executableCode",
244
+ executableCode: p.executableCode
245
+ };
246
+ else if ("codeExecutionResult" in p) return {
247
+ type: "codeExecutionResult",
248
+ codeExecutionResult: p.codeExecutionResult
249
+ };
250
+ return p;
251
+ });
252
+ else content = [];
253
+ let text = "";
254
+ if (typeof content === "string") text = content;
255
+ else if (Array.isArray(content) && content.length > 0) {
256
+ const block = content.find((b) => "text" in b);
257
+ text = block?.text ?? text;
258
+ }
259
+ const generation = {
260
+ text,
261
+ message: new __langchain_core_messages.AIMessage({
262
+ content: content ?? "",
263
+ tool_calls: functionCalls?.map((fc) => {
264
+ return {
265
+ ...fc,
266
+ type: "tool_call",
267
+ id: "id" in fc && typeof fc.id === "string" ? fc.id : (0, uuid.v4)()
268
+ };
269
+ }),
270
+ additional_kwargs: { ...generationInfo },
271
+ usage_metadata: extra?.usageMetadata
272
+ }),
273
+ generationInfo
274
+ };
275
+ return {
276
+ generations: [generation],
277
+ llmOutput: { tokenUsage: {
278
+ promptTokens: extra?.usageMetadata?.input_tokens,
279
+ completionTokens: extra?.usageMetadata?.output_tokens,
280
+ totalTokens: extra?.usageMetadata?.total_tokens
281
+ } }
282
+ };
464
283
  }
465
284
  function convertResponseContentToChatGenerationChunk(response, extra) {
466
- if (!response.candidates || response.candidates.length === 0) {
467
- return null;
468
- }
469
- const functionCalls = response.functionCalls();
470
- const [candidate] = response.candidates;
471
- const { content: candidateContent, ...generationInfo } = candidate;
472
- let content;
473
- // Checks if some parts do not have text. If false, it means that the content is a string.
474
- if (Array.isArray(candidateContent?.parts) &&
475
- candidateContent.parts.every((p) => "text" in p)) {
476
- content = candidateContent.parts.map((p) => p.text).join("");
477
- }
478
- else if (Array.isArray(candidateContent?.parts)) {
479
- content = candidateContent.parts.map((p) => {
480
- if ("text" in p) {
481
- return {
482
- type: "text",
483
- text: p.text,
484
- };
485
- }
486
- else if ("executableCode" in p) {
487
- return {
488
- type: "executableCode",
489
- executableCode: p.executableCode,
490
- };
491
- }
492
- else if ("codeExecutionResult" in p) {
493
- return {
494
- type: "codeExecutionResult",
495
- codeExecutionResult: p.codeExecutionResult,
496
- };
497
- }
498
- return p;
499
- });
500
- }
501
- else {
502
- // no content returned - likely due to abnormal stop reason, e.g. malformed function call
503
- content = [];
504
- }
505
- let text = "";
506
- if (content && typeof content === "string") {
507
- text = content;
508
- }
509
- else if (Array.isArray(content)) {
510
- const block = content.find((b) => "text" in b);
511
- text = block?.text ?? "";
512
- }
513
- const toolCallChunks = [];
514
- if (functionCalls) {
515
- toolCallChunks.push(...functionCalls.map((fc) => ({
516
- ...fc,
517
- args: JSON.stringify(fc.args),
518
- index: extra.index,
519
- type: "tool_call_chunk",
520
- id: "id" in fc && typeof fc.id === "string" ? fc.id : (0, uuid_1.v4)(),
521
- })));
522
- }
523
- return new outputs_1.ChatGenerationChunk({
524
- text,
525
- message: new messages_1.AIMessageChunk({
526
- content: content || "",
527
- name: !candidateContent ? undefined : candidateContent.role,
528
- tool_call_chunks: toolCallChunks,
529
- // Each chunk can have unique "generationInfo", and merging strategy is unclear,
530
- // so leave blank for now.
531
- additional_kwargs: {},
532
- usage_metadata: extra.usageMetadata,
533
- }),
534
- generationInfo,
535
- });
285
+ if (!response.candidates || response.candidates.length === 0) return null;
286
+ const functionCalls = response.functionCalls();
287
+ const [candidate] = response.candidates;
288
+ const { content: candidateContent,...generationInfo } = candidate;
289
+ let content;
290
+ if (Array.isArray(candidateContent?.parts) && candidateContent.parts.every((p) => "text" in p)) content = candidateContent.parts.map((p) => p.text).join("");
291
+ else if (Array.isArray(candidateContent?.parts)) content = candidateContent.parts.map((p) => {
292
+ if ("text" in p) return {
293
+ type: "text",
294
+ text: p.text
295
+ };
296
+ else if ("inlineData" in p) return {
297
+ type: "inlineData",
298
+ inlineData: p.inlineData
299
+ };
300
+ else if ("functionCall" in p) return {
301
+ type: "functionCall",
302
+ functionCall: p.functionCall
303
+ };
304
+ else if ("functionResponse" in p) return {
305
+ type: "functionResponse",
306
+ functionResponse: p.functionResponse
307
+ };
308
+ else if ("fileData" in p) return {
309
+ type: "fileData",
310
+ fileData: p.fileData
311
+ };
312
+ else if ("executableCode" in p) return {
313
+ type: "executableCode",
314
+ executableCode: p.executableCode
315
+ };
316
+ else if ("codeExecutionResult" in p) return {
317
+ type: "codeExecutionResult",
318
+ codeExecutionResult: p.codeExecutionResult
319
+ };
320
+ return p;
321
+ });
322
+ else content = [];
323
+ let text = "";
324
+ if (content && typeof content === "string") text = content;
325
+ else if (Array.isArray(content)) {
326
+ const block = content.find((b) => "text" in b);
327
+ text = block?.text ?? "";
328
+ }
329
+ const toolCallChunks = [];
330
+ if (functionCalls) toolCallChunks.push(...functionCalls.map((fc) => ({
331
+ ...fc,
332
+ args: JSON.stringify(fc.args),
333
+ index: extra.index,
334
+ type: "tool_call_chunk",
335
+ id: "id" in fc && typeof fc.id === "string" ? fc.id : (0, uuid.v4)()
336
+ })));
337
+ return new __langchain_core_outputs.ChatGenerationChunk({
338
+ text,
339
+ message: new __langchain_core_messages.AIMessageChunk({
340
+ content: content || "",
341
+ name: !candidateContent ? void 0 : candidateContent.role,
342
+ tool_call_chunks: toolCallChunks,
343
+ additional_kwargs: {},
344
+ response_metadata: { model_provider: "google-genai" },
345
+ usage_metadata: extra.usageMetadata
346
+ }),
347
+ generationInfo
348
+ });
536
349
  }
537
350
/**
 * Normalize a heterogeneous list of tools into the Google GenerativeAI
 * tool format.
 *
 * If every entry already carries a `functionDeclarations` array, the input
 * list is returned untouched. Otherwise each entry is converted into a
 * Gemini function declaration and the whole set is wrapped in a single
 * tool object.
 *
 * @param {Array} tools - LangChain tools, OpenAI-style tools, or pre-built
 *   GenerativeAI tool objects (unrecognized shapes are passed through as-is).
 * @returns {Array} Tools in the GenerativeAI SDK shape.
 */
function convertToGenerativeAITools(tools) {
	const alreadyConverted = tools.every((tool) => "functionDeclarations" in tool && Array.isArray(tool.functionDeclarations));
	if (alreadyConverted) return tools;
	// Map one tool of any supported flavor to a Gemini function declaration.
	const toDeclaration = (tool) => {
		if ((0, __langchain_core_utils_function_calling.isLangChainTool)(tool)) {
			const jsonSchema = require_zod_to_genai_parameters.schemaToGenerativeAIParameters(tool.schema);
			const hasNoProperties = jsonSchema.type === "object" && "properties" in jsonSchema && Object.keys(jsonSchema.properties).length === 0;
			if (hasNoProperties) {
				// NOTE(review): `parameters` is omitted for empty object schemas —
				// presumably the API rejects parameter schemas with no properties; confirm.
				return {
					name: tool.name,
					description: tool.description
				};
			}
			return {
				name: tool.name,
				description: tool.description,
				parameters: jsonSchema
			};
		}
		if ((0, __langchain_core_language_models_base.isOpenAITool)(tool)) {
			return {
				name: tool.function.name,
				description: tool.function.description ?? `A function available to call.`,
				parameters: require_zod_to_genai_parameters.jsonSchemaToGeminiParameters(tool.function.parameters)
			};
		}
		// Unknown shape: assume it is already a valid function declaration.
		return tool;
	};
	return [{ functionDeclarations: tools.map(toDeclaration) }];
}
373
+
374
//#endregion
// CommonJS re-exports of this module's public helpers.
exports.convertBaseMessagesToContent = convertBaseMessagesToContent;
exports.convertResponseContentToChatGenerationChunk = convertResponseContentToChatGenerationChunk;
exports.convertToGenerativeAITools = convertToGenerativeAITools;
exports.mapGenerateContentResultToChatResult = mapGenerateContentResultToChatResult;
//# sourceMappingURL=common.cjs.map