@blaxel/langgraph 0.2.49-dev.214 → 0.2.49-dev1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/.tsbuildinfo +1 -1
- package/dist/cjs/types/tools.d.ts +6 -2
- package/dist/esm/.tsbuildinfo +1 -1
- package/dist/index.d.ts +3 -0
- package/dist/index.js +19 -0
- package/dist/model/cohere.d.ts +6 -0
- package/dist/model/cohere.js +172 -0
- package/dist/model/google-genai/chat_models.d.ts +557 -0
- package/dist/model/google-genai/chat_models.js +755 -0
- package/dist/model/google-genai/embeddings.d.ts +94 -0
- package/dist/model/google-genai/embeddings.js +111 -0
- package/dist/model/google-genai/index.d.ts +2 -0
- package/dist/model/google-genai/index.js +18 -0
- package/dist/model/google-genai/output_parsers.d.ts +20 -0
- package/dist/model/google-genai/output_parsers.js +50 -0
- package/dist/model/google-genai/types.d.ts +3 -0
- package/dist/model/google-genai/types.js +2 -0
- package/dist/model/google-genai/utils/common.d.ts +22 -0
- package/dist/model/google-genai/utils/common.js +386 -0
- package/dist/model/google-genai/utils/tools.d.ts +10 -0
- package/dist/model/google-genai/utils/tools.js +110 -0
- package/dist/model/google-genai/utils/zod_to_genai_parameters.d.ts +13 -0
- package/dist/model/google-genai/utils/zod_to_genai_parameters.js +46 -0
- package/dist/model/google-genai.d.ts +11 -0
- package/dist/model/google-genai.js +30 -0
- package/dist/model/xai.d.ts +41 -0
- package/dist/model/xai.js +82 -0
- package/dist/model.d.ts +2 -0
- package/dist/model.js +141 -0
- package/dist/telemetry.d.ts +1 -0
- package/dist/telemetry.js +24 -0
- package/dist/tools.d.ts +15 -0
- package/dist/tools.js +24 -0
- package/package.json +2 -2

package/dist/model/google-genai/utils/common.js
@@ -0,0 +1,386 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getMessageAuthor = getMessageAuthor;
+exports.convertAuthorToRole = convertAuthorToRole;
+exports.convertMessageContentToParts = convertMessageContentToParts;
+exports.convertBaseMessagesToContent = convertBaseMessagesToContent;
+exports.mapGenerateContentResultToChatResult = mapGenerateContentResultToChatResult;
+exports.convertResponseContentToChatGenerationChunk = convertResponseContentToChatGenerationChunk;
+exports.convertToGenerativeAITools = convertToGenerativeAITools;
+const base_1 = require("@langchain/core/language_models/base");
+const messages_1 = require("@langchain/core/messages");
+const outputs_1 = require("@langchain/core/outputs");
+const function_calling_1 = require("@langchain/core/utils/function_calling");
+const zod_to_genai_parameters_js_1 = require("./zod_to_genai_parameters.js");
+function getMessageAuthor(message) {
+    const type = message._getType();
+    if (messages_1.ChatMessage.isInstance(message)) {
+        return message.role;
+    }
+    if (type === "tool") {
+        return type;
+    }
+    return message.name ?? type;
+}
+/**
+ * Maps a message type to a Google Generative AI chat author.
+ * @param message The message to map.
+ * @param model The model to use for mapping.
+ * @returns The message type mapped to a Google Generative AI chat author.
+ */
+function convertAuthorToRole(author) {
+    switch (author) {
+        /**
+         * Note: Gemini currently is not supporting system messages
+         * we will convert them to human messages and merge with following
+         * */
+        case "ai":
+        case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
+            return "model";
+        case "system":
+            return "system";
+        case "human":
+            return "user";
+        case "tool":
+        case "function":
+            return "function";
+        default:
+            throw new Error(`Unknown / unsupported author: ${author}`);
+    }
+}
+function messageContentMedia(content) {
+    if ("mimeType" in content && "data" in content) {
+        return {
+            inlineData: {
+                mimeType: content.mimeType,
+                data: content.data,
+            },
+        };
+    }
+    if ("mimeType" in content && "fileUri" in content) {
+        return {
+            fileData: {
+                mimeType: content.mimeType,
+                fileUri: content.fileUri,
+            },
+        };
+    }
+    throw new Error("Invalid media content");
+}
+function convertMessageContentToParts(message, isMultimodalModel) {
+    if (typeof message.content === "string" && message.content !== "") {
+        return [{ text: message.content }];
+    }
+    let functionCalls = [];
+    let functionResponses = [];
+    let messageParts = [];
+    if ("tool_calls" in message &&
+        Array.isArray(message.tool_calls) &&
+        message.tool_calls.length > 0) {
+        functionCalls = message.tool_calls
+            .map((tc) => {
+                if (typeof tc.name === "string") {
+                    return {
+                        functionCall: {
+                            name: tc.name,
+                            args: tc.args,
+                        },
+                    };
+                }
+                return null;
+            })
+            .filter(Boolean);
+    }
+    else if (message.getType() === "tool" && message.name && message.content) {
+        functionResponses = [
+            {
+                functionResponse: {
+                    name: message.name,
+                    response: message.content,
+                },
+            },
+        ];
+    }
+    else if (Array.isArray(message.content)) {
+        messageParts = message.content.map((c) => {
+            if (c.type === "text") {
+                return {
+                    text: c.text,
+                };
+            }
+            else if (c.type === "executableCode") {
+                return {
+                    executableCode: c.executableCode,
+                };
+            }
+            else if (c.type === "codeExecutionResult") {
+                return {
+                    codeExecutionResult: c.codeExecutionResult,
+                };
+            }
+            if (c.type === "image_url") {
+                if (!isMultimodalModel) {
+                    throw new Error(`This model does not support images`);
+                }
+                let source;
+                if (typeof c.image_url === "string") {
+                    source = c.image_url;
+                }
+                else if (typeof c.image_url === "object" && "url" in c.image_url) {
+                    source = c.image_url.url;
+                }
+                else {
+                    throw new Error("Please provide image as base64 encoded data URL");
+                }
+                const [dm, data] = source.split(",");
+                if (!dm.startsWith("data:")) {
+                    throw new Error("Please provide image as base64 encoded data URL");
+                }
+                const [mimeType, encoding] = dm.replace(/^data:/, "").split(";");
+                if (encoding !== "base64") {
+                    throw new Error("Please provide image as base64 encoded data URL");
+                }
+                return {
+                    inlineData: {
+                        data,
+                        mimeType,
+                    },
+                };
+            }
+            else if (c.type === "media") {
+                return messageContentMedia(c);
+            }
+            else if (c.type === "tool_use") {
+                return {
+                    functionCall: {
+                        name: c.name,
+                        args: c.input,
+                    },
+                };
+            }
+            else if (c.type?.includes("/") &&
+                // Ensure it's a single slash.
+                c.type.split("/").length === 2 &&
+                "data" in c &&
+                typeof c.data === "string") {
+                return {
+                    inlineData: {
+                        mimeType: c.type,
+                        data: c.data,
+                    },
+                };
+            }
+            throw new Error(`Unknown content type ${c.type}`);
+        });
+    }
+    return [...messageParts, ...functionCalls, ...functionResponses];
+}
+function convertBaseMessagesToContent(messages, isMultimodalModel, convertSystemMessageToHumanContent = false) {
+    return messages.reduce((acc, message, index) => {
+        if (!(0, messages_1.isBaseMessage)(message)) {
+            throw new Error("Unsupported message input");
+        }
+        const author = getMessageAuthor(message);
+        if (author === "system" && index !== 0) {
+            throw new Error("System message should be the first one");
+        }
+        const role = convertAuthorToRole(author);
+        const prevContent = acc.content[acc.content.length];
+        if (!acc.mergeWithPreviousContent &&
+            prevContent &&
+            prevContent.role === role) {
+            throw new Error("Google Generative AI requires alternate messages between authors");
+        }
+        const parts = convertMessageContentToParts(message, isMultimodalModel);
+        if (acc.mergeWithPreviousContent) {
+            const prevContent = acc.content[acc.content.length - 1];
+            if (!prevContent) {
+                throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
+            }
+            prevContent.parts.push(...parts);
+            return {
+                mergeWithPreviousContent: false,
+                content: acc.content,
+            };
+        }
+        let actualRole = role;
+        if (actualRole === "function" ||
+            (actualRole === "system" && !convertSystemMessageToHumanContent)) {
+            // GenerativeAI API will throw an error if the role is not "user" or "model."
+            actualRole = "user";
+        }
+        const content = {
+            role: actualRole,
+            parts,
+        };
+        return {
+            mergeWithPreviousContent: author === "system" && !convertSystemMessageToHumanContent,
+            content: [...acc.content, content],
+        };
+    }, { content: [], mergeWithPreviousContent: false }).content;
+}
+function mapGenerateContentResultToChatResult(response, extra) {
+    // if rejected or error, return empty generations with reason in filters
+    if (!response.candidates ||
+        response.candidates.length === 0 ||
+        !response.candidates[0]) {
+        return {
+            generations: [],
+            llmOutput: {
+                filters: response.promptFeedback,
+            },
+        };
+    }
+    const functionCalls = response.functionCalls();
+    const [candidate] = response.candidates;
+    const { content: candidateContent, ...generationInfo } = candidate;
+    let content;
+    if (candidateContent?.parts.length === 1 && candidateContent.parts[0].text) {
+        content = candidateContent.parts[0].text;
+    }
+    else {
+        content = candidateContent.parts.map((p) => {
+            if ("text" in p) {
+                return {
+                    type: "text",
+                    text: p.text,
+                };
+            }
+            else if ("executableCode" in p) {
+                return {
+                    type: "executableCode",
+                    executableCode: p.executableCode,
+                };
+            }
+            else if ("codeExecutionResult" in p) {
+                return {
+                    type: "codeExecutionResult",
+                    codeExecutionResult: p.codeExecutionResult,
+                };
+            }
+            return p;
+        });
+    }
+    let text = "";
+    if (typeof content === "string") {
+        text = content;
+    }
+    else if ("text" in content[0]) {
+        text = content[0].text;
+    }
+    const generation = {
+        text,
+        message: new messages_1.AIMessage({
+            content,
+            tool_calls: functionCalls?.map((fc) => ({
+                ...fc,
+                type: "tool_call",
+            })),
+            additional_kwargs: {
+                ...generationInfo,
+            },
+            usage_metadata: extra?.usageMetadata,
+        }),
+        generationInfo,
+    };
+    return {
+        generations: [generation],
+    };
+}
+function convertResponseContentToChatGenerationChunk(response, extra) {
+    if (!response.candidates || response.candidates.length === 0) {
+        return null;
+    }
+    const functionCalls = response.functionCalls();
+    const [candidate] = response.candidates;
+    const { content: candidateContent, ...generationInfo } = candidate;
+    let content;
+    // Checks if some parts do not have text. If false, it means that the content is a string.
+    if (candidateContent?.parts &&
+        candidateContent.parts.every((p) => "text" in p)) {
+        content = candidateContent.parts.map((p) => p.text).join("");
+    }
+    else if (candidateContent.parts) {
+        content = candidateContent.parts.map((p) => {
+            if ("text" in p) {
+                return {
+                    type: "text",
+                    text: p.text,
+                };
+            }
+            else if ("executableCode" in p) {
+                return {
+                    type: "executableCode",
+                    executableCode: p.executableCode,
+                };
+            }
+            else if ("codeExecutionResult" in p) {
+                return {
+                    type: "codeExecutionResult",
+                    codeExecutionResult: p.codeExecutionResult,
+                };
+            }
+            return p;
+        });
+    }
+    let text = "";
+    if (content && typeof content === "string") {
+        text = content;
+    }
+    else if (content && typeof content === "object" && "text" in content[0]) {
+        text = content[0].text;
+    }
+    const toolCallChunks = [];
+    if (functionCalls) {
+        toolCallChunks.push(...functionCalls.map((fc) => ({
+            ...fc,
+            args: JSON.stringify(fc.args),
+            index: extra.index,
+            type: "tool_call_chunk",
+        })));
+    }
+    return new outputs_1.ChatGenerationChunk({
+        text,
+        message: new messages_1.AIMessageChunk({
+            content: content || "",
+            name: !candidateContent ? undefined : candidateContent.role,
+            tool_call_chunks: toolCallChunks,
+            // Each chunk can have unique "generationInfo", and merging strategy is unclear,
+            // so leave blank for now.
+            additional_kwargs: {},
+            usage_metadata: extra.usageMetadata,
+        }),
+        generationInfo,
+    });
+}
+function isZodType(schema) {
+    return typeof schema === "object" && schema !== null && "_def" in schema;
+}
+function convertToGenerativeAITools(tools) {
+    if (tools.every((tool) => "functionDeclarations" in tool &&
+        Array.isArray(tool.functionDeclarations))) {
+        return tools;
+    }
+    return [
+        {
+            functionDeclarations: tools.map((tool) => {
+                if ((0, function_calling_1.isLangChainTool)(tool) && isZodType(tool.schema)) {
+                    const jsonSchema = (0, zod_to_genai_parameters_js_1.zodToGenerativeAIParameters)(tool.schema);
+                    return {
+                        name: tool.name,
+                        description: tool.description,
+                        parameters: jsonSchema,
+                    };
+                }
+                if ((0, base_1.isOpenAITool)(tool)) {
+                    return {
+                        name: tool.function.name,
+                        description: tool.function.description ?? `A function available to call.`,
+                        parameters: (0, zod_to_genai_parameters_js_1.jsonSchemaToGeminiParameters)(tool.function.parameters),
+                    };
+                }
+                return tool;
+            }),
+        },
+    ];
+}
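
For orientation, a minimal sketch of how these helpers fit together. The deep import path below is an assumption (these are compiled internals, not necessarily a public entry point); the message and tool classes come from @langchain/core.

// Assumed deep import into the package's compiled output.
import { convertBaseMessagesToContent, convertToGenerativeAITools } from "@blaxel/langgraph/dist/model/google-genai/utils/common.js";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// With the default convertSystemMessageToHumanContent = false, a leading system
// message is given the "user" role and merged into the following human turn.
const contents = convertBaseMessagesToContent(
    [new SystemMessage("You are terse."), new HumanMessage("Hi")],
    true, // isMultimodalModel
);
// => [{ role: "user", parts: [{ text: "You are terse." }, { text: "Hi" }] }]

// A LangChain tool with a zod schema becomes a Gemini functionDeclarations entry.
const getWeather = tool(async ({ city }) => `Sunny in ${city}`, {
    name: "get_weather",
    description: "Get the weather for a city",
    schema: z.object({ city: z.string() }),
});
const genAITools = convertToGenerativeAITools([getWeather]);
// => [{ functionDeclarations: [{ name: "get_weather", description: "...", parameters: { type: "object", ... } }] }]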

package/dist/model/google-genai/utils/tools.d.ts
@@ -0,0 +1,10 @@
+import { Tool as GenerativeAITool, ToolConfig } from "@google/generative-ai";
+import { ToolChoice } from "@langchain/core/language_models/chat_models";
+import { GoogleGenerativeAIToolType } from "../types.js";
+export declare function convertToolsToGenAI(tools: GoogleGenerativeAIToolType[], extra?: {
+    toolChoice?: ToolChoice;
+    allowedFunctionNames?: string[];
+}): {
+    tools: GenerativeAITool[];
+    toolConfig?: ToolConfig;
+};

package/dist/model/google-genai/utils/tools.js
@@ -0,0 +1,110 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.convertToolsToGenAI = convertToolsToGenAI;
+const generative_ai_1 = require("@google/generative-ai");
+const function_calling_1 = require("@langchain/core/utils/function_calling");
+const base_1 = require("@langchain/core/language_models/base");
+const common_js_1 = require("./common.js");
+const zod_to_genai_parameters_js_1 = require("./zod_to_genai_parameters.js");
+function convertToolsToGenAI(tools, extra) {
+    // Extract function declaration processing to a separate function
+    const genAITools = processTools(tools);
+    // Simplify tool config creation
+    const toolConfig = createToolConfig(genAITools, extra);
+    return { tools: genAITools, toolConfig };
+}
+function processTools(tools) {
+    let functionDeclarationTools = [];
+    const genAITools = [];
+    tools.forEach((tool) => {
+        if ((0, function_calling_1.isLangChainTool)(tool)) {
+            const [convertedTool] = (0, common_js_1.convertToGenerativeAITools)([
+                tool,
+            ]);
+            if (convertedTool.functionDeclarations) {
+                functionDeclarationTools.push(...convertedTool.functionDeclarations);
+            }
+        }
+        else if ((0, base_1.isOpenAITool)(tool)) {
+            const { functionDeclarations } = convertOpenAIToolToGenAI(tool);
+            if (functionDeclarations) {
+                functionDeclarationTools.push(...functionDeclarations);
+            }
+            else {
+                throw new Error("Failed to convert OpenAI structured tool to GenerativeAI tool");
+            }
+        }
+        else {
+            genAITools.push(tool);
+        }
+    });
+    const genAIFunctionDeclaration = genAITools.find((t) => "functionDeclarations" in t);
+    if (genAIFunctionDeclaration) {
+        return genAITools.map((tool) => {
+            if (functionDeclarationTools?.length > 0 &&
+                "functionDeclarations" in tool) {
+                const newTool = {
+                    functionDeclarations: [
+                        ...(tool.functionDeclarations || []),
+                        ...functionDeclarationTools,
+                    ],
+                };
+                // Clear the functionDeclarationTools array so it is not passed again
+                functionDeclarationTools = [];
+                return newTool;
+            }
+            return tool;
+        });
+    }
+    return [
+        ...genAITools,
+        ...(functionDeclarationTools.length > 0
+            ? [
+                {
+                    functionDeclarations: functionDeclarationTools,
+                },
+            ]
+            : []),
+    ];
+}
+function convertOpenAIToolToGenAI(tool) {
+    return {
+        functionDeclarations: [
+            {
+                name: tool.function.name,
+                description: tool.function.description,
+                parameters: (0, zod_to_genai_parameters_js_1.removeAdditionalProperties)(tool.function.parameters),
+            },
+        ],
+    };
+}
+function createToolConfig(genAITools, extra) {
+    if (!genAITools.length || !extra)
+        return undefined;
+    const { toolChoice, allowedFunctionNames } = extra;
+    const modeMap = {
+        any: generative_ai_1.FunctionCallingMode.ANY,
+        auto: generative_ai_1.FunctionCallingMode.AUTO,
+        none: generative_ai_1.FunctionCallingMode.NONE,
+    };
+    if (toolChoice && ["any", "auto", "none"].includes(toolChoice)) {
+        return {
+            functionCallingConfig: {
+                mode: modeMap[toolChoice] ?? "MODE_UNSPECIFIED",
+                allowedFunctionNames,
+            },
+        };
+    }
+    if (typeof toolChoice === "string" || allowedFunctionNames) {
+        return {
+            functionCallingConfig: {
+                mode: generative_ai_1.FunctionCallingMode.ANY,
+                allowedFunctionNames: [
+                    ...(allowedFunctionNames ?? []),
+                    ...(toolChoice && typeof toolChoice === "string" ? [toolChoice] : []),
+                ],
+            },
+        };
+    }
+    return undefined;
+}
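
A short sketch of the tool-conversion entry point, under the same deep-import assumption; getWeather is the zod-based LangChain tool from the sketch after common.js. convertToolsToGenAI bundles LangChain and OpenAI-format tools into a single functionDeclarations tool and, when toolChoice or allowedFunctionNames is given, derives a ToolConfig:

// Assumed deep import into the package's compiled output.
import { convertToolsToGenAI } from "@blaxel/langgraph/dist/model/google-genai/utils/tools.js";

const { tools, toolConfig } = convertToolsToGenAI([getWeather], { toolChoice: "any" });
// tools      => [{ functionDeclarations: [{ name: "get_weather", ... }] }]
// toolConfig => { functionCallingConfig: { mode: FunctionCallingMode.ANY, allowedFunctionNames: undefined } }

// Passing a function name instead of "any" / "auto" / "none" forces that function:
const forced = convertToolsToGenAI([getWeather], { toolChoice: "get_weather" });
// forced.toolConfig => { functionCallingConfig: { mode: FunctionCallingMode.ANY, allowedFunctionNames: ["get_weather"] } }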

package/dist/model/google-genai/utils/zod_to_genai_parameters.d.ts
@@ -0,0 +1,13 @@
+import { type SchemaType as FunctionDeclarationSchemaType, type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema } from "@google/generative-ai";
+import type { z } from "zod";
+export interface GenerativeAIJsonSchema extends Record<string, unknown> {
+    properties?: Record<string, GenerativeAIJsonSchema>;
+    type: FunctionDeclarationSchemaType;
+}
+export interface GenerativeAIJsonSchemaDirty extends GenerativeAIJsonSchema {
+    properties?: Record<string, GenerativeAIJsonSchemaDirty>;
+    additionalProperties?: boolean;
+}
+export declare function removeAdditionalProperties(obj: Record<string, unknown>): GenerativeAIJsonSchema;
+export declare function zodToGenerativeAIParameters(zodObj: z.ZodType<any>): GenerativeAIFunctionDeclarationSchema;
+export declare function jsonSchemaToGeminiParameters(schema: Record<string, any>): GenerativeAIFunctionDeclarationSchema;

package/dist/model/google-genai/utils/zod_to_genai_parameters.js
@@ -0,0 +1,46 @@
+"use strict";
+/* eslint-disable @typescript-eslint/no-unused-vars */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.removeAdditionalProperties = removeAdditionalProperties;
+exports.zodToGenerativeAIParameters = zodToGenerativeAIParameters;
+exports.jsonSchemaToGeminiParameters = jsonSchemaToGeminiParameters;
+const zod_to_json_schema_1 = require("zod-to-json-schema");
+function removeAdditionalProperties(
+// eslint-disable @typescript-eslint/no-explicit-any
+obj) {
+    if (typeof obj === "object" && obj !== null) {
+        const newObj = { ...obj };
+        if ("additionalProperties" in newObj) {
+            delete newObj.additionalProperties;
+        }
+        if ("$schema" in newObj) {
+            delete newObj.$schema;
+        }
+        for (const key in newObj) {
+            if (key in newObj) {
+                if (Array.isArray(newObj[key])) {
+                    newObj[key] = newObj[key].map(removeAdditionalProperties);
+                }
+                else if (typeof newObj[key] === "object" && newObj[key] !== null) {
+                    newObj[key] = removeAdditionalProperties(newObj[key]);
+                }
+            }
+        }
+        return newObj;
+    }
+    return obj;
+}
+function zodToGenerativeAIParameters(zodObj) {
+    // GenerativeAI doesn't accept either the $schema or additionalProperties
+    // attributes, so we need to explicitly remove them.
+    const jsonSchema = removeAdditionalProperties((0, zod_to_json_schema_1.zodToJsonSchema)(zodObj));
+    const { $schema, ...rest } = jsonSchema;
+    return rest;
+}
+function jsonSchemaToGeminiParameters(schema) {
+    // Gemini doesn't accept either the $schema or additionalProperties
+    // attributes, so we need to explicitly remove them.
+    const jsonSchema = removeAdditionalProperties(schema);
+    const { $schema, ...rest } = jsonSchema;
+    return rest;
+}
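
A small illustration of the schema cleanup these functions perform (the deep import path is again an assumption). zod-to-json-schema emits $schema and additionalProperties, which the Gemini API rejects, so both are stripped recursively:

import { zodToGenerativeAIParameters } from "@blaxel/langgraph/dist/model/google-genai/utils/zod_to_genai_parameters.js"; // assumed path
import { z } from "zod";

const params = zodToGenerativeAIParameters(
    z.object({ city: z.string(), days: z.number().optional() }),
);
// => { type: "object", properties: { city: { type: "string" }, days: { type: "number" } }, required: ["city"] }
//    (no $schema and no additionalProperties, at any nesting level)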

package/dist/model/google-genai.d.ts
@@ -0,0 +1,11 @@
+import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
+import { BaseMessage } from "@langchain/core/messages";
+import { ChatResult } from "@langchain/core/outputs";
+import { ChatGoogleGenerativeAI } from "./google-genai/index.js";
+/**
+ * Custom ChatGoogleGenerativeAI that ensures authentication before each request
+ */
+export declare class AuthenticatedChatGoogleGenerativeAI extends ChatGoogleGenerativeAI {
+    _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+    _streamResponseChunks(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<any>;
+}

package/dist/model/google-genai.js
@@ -0,0 +1,30 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AuthenticatedChatGoogleGenerativeAI = void 0;
+const core_1 = require("@blaxel/core");
+const index_js_1 = require("./google-genai/index.js");
+/**
+ * Custom ChatGoogleGenerativeAI that ensures authentication before each request
+ */
+class AuthenticatedChatGoogleGenerativeAI extends index_js_1.ChatGoogleGenerativeAI {
+    async _generate(messages, options, runManager) {
+        // Authenticate before making the request
+        await (0, core_1.authenticate)();
+        this.customHeaders = {};
+        for (const header in core_1.settings.headers) {
+            this.customHeaders[header] = core_1.settings.headers[header];
+        }
+        this.client = this.initClient();
+        return await super._generate(messages, options || {}, runManager);
+    }
+    async *_streamResponseChunks(messages, options, runManager) {
+        // Authenticate before making the request
+        await (0, core_1.authenticate)();
+        this.customHeaders = {};
+        for (const header in core_1.settings.headers) {
+            this.customHeaders[header] = core_1.settings.headers[header];
+        }
+        yield* super._streamResponseChunks(messages, options || {}, runManager);
+    }
+}
+exports.AuthenticatedChatGoogleGenerativeAI = AuthenticatedChatGoogleGenerativeAI;
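
In normal use this wrapper is built by the package's model loader (model.js in the file list above) rather than instantiated directly. A direct-use sketch, assuming the constructor accepts the same fields as the bundled ChatGoogleGenerativeAI port and that the deep import path and model id below are valid:

import { AuthenticatedChatGoogleGenerativeAI } from "@blaxel/langgraph/dist/model/google-genai.js"; // assumed path
import { HumanMessage } from "@langchain/core/messages";

const llm = new AuthenticatedChatGoogleGenerativeAI({ model: "gemini-2.0-flash" }); // field names assumed
// Each _generate / _streamResponseChunks call re-authenticates via @blaxel/core,
// copies settings.headers into customHeaders, rebuilds the client, then defers to the parent class.
const result = await llm.invoke([new HumanMessage("Hello")]);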

package/dist/model/xai.d.ts
@@ -0,0 +1,41 @@
+import { Serialized } from "@langchain/core/load/serializable";
+import { ChatOpenAI } from "@langchain/openai";
+/**
+ * Extends the ChatOpenAI class to create a ChatXAI agent with additional configurations.
+ */
+export declare class ChatXAI extends ChatOpenAI {
+    /**
+     * Returns the name of the class for LangChain serialization.
+     * @returns The class name as a string.
+     */
+    static lc_name(): string;
+    /**
+     * Specifies the type of the language model.
+     * @returns The type of the LLM as a string.
+     */
+    _llmType(): string;
+    /**
+     * Specifies the secrets required for serialization.
+     * @returns An object mapping secret names to their keys.
+     */
+    get lc_secrets(): {
+        apiKey: string;
+    };
+    /**
+     * Constructs a new ChatXAI instance.
+     * @param fields - Configuration fields, including the API key.
+     * @throws If the API key is not provided.
+     */
+    constructor(fields: Record<string, unknown>);
+    /**
+     * Serializes the instance to JSON, removing sensitive information.
+     * @returns The serialized JSON object.
+     */
+    toJSON(): Serialized;
+    /**
+     * Retrieves parameters for LangChain based on provided options.
+     * @param options - Additional options for parameter retrieval.
+     * @returns An object containing LangChain parameters.
+     */
+    getLsParams(options: unknown): import("@langchain/core/language_models/chat_models", { with: { "resolution-mode": "import" } }).LangSmithParams;
+}
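
ChatXAI reuses ChatOpenAI's call path, so usage follows the ChatOpenAI pattern. A sketch under assumptions: the constructor takes an untyped Record<string, unknown>, and apiKey/model are assumed field names mirroring ChatOpenAI; the deep import path and model id are likewise assumptions.

import { ChatXAI } from "@blaxel/langgraph/dist/model/xai.js"; // assumed path

const grok = new ChatXAI({ apiKey: process.env.XAI_API_KEY, model: "grok-2" }); // assumed fields and model id
// lc_secrets maps apiKey to a secret key, so toJSON() omits it from the serialized output.
const reply = await grok.invoke("Say hello in one word.");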