@blaxel/langgraph 0.2.50-dev.215 → 0.2.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/README.md +1 -1
  2. package/dist/cjs/.tsbuildinfo +1 -1
  3. package/dist/cjs/model/cohere.js +74 -0
  4. package/dist/cjs/model/google-genai.js +75 -12
  5. package/dist/cjs/model.js +1 -1
  6. package/dist/cjs/types/model/google-genai.d.ts +20 -3
  7. package/dist/esm/.tsbuildinfo +1 -1
  8. package/dist/esm/model/cohere.js +74 -0
  9. package/dist/esm/model/google-genai.js +74 -11
  10. package/dist/esm/model.js +1 -1
  11. package/package.json +3 -2
  12. package/dist/cjs/model/google-genai/chat_models.js +0 -766
  13. package/dist/cjs/model/google-genai/embeddings.js +0 -111
  14. package/dist/cjs/model/google-genai/index.js +0 -18
  15. package/dist/cjs/model/google-genai/output_parsers.js +0 -51
  16. package/dist/cjs/model/google-genai/types.js +0 -2
  17. package/dist/cjs/model/google-genai/utils/common.js +0 -390
  18. package/dist/cjs/model/google-genai/utils/tools.js +0 -110
  19. package/dist/cjs/model/google-genai/utils/zod_to_genai_parameters.js +0 -46
  20. package/dist/cjs/types/model/google-genai/chat_models.d.ts +0 -557
  21. package/dist/cjs/types/model/google-genai/embeddings.d.ts +0 -94
  22. package/dist/cjs/types/model/google-genai/index.d.ts +0 -2
  23. package/dist/cjs/types/model/google-genai/output_parsers.d.ts +0 -20
  24. package/dist/cjs/types/model/google-genai/types.d.ts +0 -3
  25. package/dist/cjs/types/model/google-genai/utils/common.d.ts +0 -22
  26. package/dist/cjs/types/model/google-genai/utils/tools.d.ts +0 -10
  27. package/dist/cjs/types/model/google-genai/utils/zod_to_genai_parameters.d.ts +0 -13
  28. package/dist/esm/model/google-genai/chat_models.js +0 -762
  29. package/dist/esm/model/google-genai/embeddings.js +0 -107
  30. package/dist/esm/model/google-genai/index.js +0 -2
  31. package/dist/esm/model/google-genai/output_parsers.js +0 -47
  32. package/dist/esm/model/google-genai/types.js +0 -1
  33. package/dist/esm/model/google-genai/utils/common.js +0 -381
  34. package/dist/esm/model/google-genai/utils/tools.js +0 -107
  35. package/dist/esm/model/google-genai/utils/zod_to_genai_parameters.js +0 -41
@@ -1,107 +0,0 @@
1
- import { GoogleGenerativeAI, TaskType, } from "@google/generative-ai";
2
- import { Embeddings } from "@langchain/core/embeddings";
3
- import { chunkArray } from "@langchain/core/utils/chunk_array";
4
- import { getEnvironmentVariable } from "@langchain/core/utils/env";
5
- /**
6
- * Class that extends the Embeddings class and provides methods for
7
- * generating embeddings using the Google Palm API.
8
- * @example
9
- * ```typescript
10
- * const model = new GoogleGenerativeAIEmbeddings({
11
- * apiKey: "<YOUR API KEY>",
12
- * modelName: "embedding-001",
13
- * });
14
- *
15
- * // Embed a single query
16
- * const res = await model.embedQuery(
17
- * "What would be a good company name for a company that makes colorful socks?"
18
- * );
19
- * console.log({ res });
20
- *
21
- * // Embed multiple documents
22
- * const documentRes = await model.embedDocuments(["Hello world", "Bye bye"]);
23
- * console.log({ documentRes });
24
- * ```
25
- */
26
- export class GoogleGenerativeAIEmbeddings extends Embeddings {
27
- apiKey;
28
- modelName = "embedding-001";
29
- model = "embedding-001";
30
- taskType;
31
- title;
32
- stripNewLines = true;
33
- maxBatchSize = 100; // Max batch size for embedDocuments set by GenerativeModel client's batchEmbedContents call
34
- client;
35
- constructor(fields) {
36
- super(fields ?? {});
37
- this.modelName =
38
- fields?.model?.replace(/^models\//, "") ??
39
- fields?.modelName?.replace(/^models\//, "") ??
40
- this.modelName;
41
- this.model = this.modelName;
42
- this.taskType = fields?.taskType ?? this.taskType;
43
- this.title = fields?.title ?? this.title;
44
- if (this.title && this.taskType !== TaskType.RETRIEVAL_DOCUMENT) {
45
- throw new Error("title can only be specified with TaskType.RETRIEVAL_DOCUMENT");
46
- }
47
- this.apiKey = fields?.apiKey ?? getEnvironmentVariable("GOOGLE_API_KEY");
48
- if (!this.apiKey) {
49
- throw new Error("Please set an API key for Google GenerativeAI " +
50
- "in the environmentb variable GOOGLE_API_KEY " +
51
- "or in the `apiKey` field of the " +
52
- "GoogleGenerativeAIEmbeddings constructor");
53
- }
54
- this.client = new GoogleGenerativeAI(this.apiKey).getGenerativeModel({
55
- model: this.model,
56
- });
57
- }
58
- _convertToContent(text) {
59
- const cleanedText = this.stripNewLines ? text.replace(/\n/g, " ") : text;
60
- return {
61
- content: { role: "user", parts: [{ text: cleanedText }] },
62
- taskType: this.taskType,
63
- title: this.title,
64
- };
65
- }
66
- async _embedQueryContent(text) {
67
- const req = this._convertToContent(text);
68
- const res = await this.client.embedContent(req);
69
- return res.embedding.values ?? [];
70
- }
71
- async _embedDocumentsContent(documents) {
72
- const batchEmbedChunks = chunkArray(documents, this.maxBatchSize);
73
- const batchEmbedRequests = batchEmbedChunks.map((chunk) => ({
74
- requests: chunk.map((doc) => this._convertToContent(doc)),
75
- }));
76
- const responses = await Promise.allSettled(batchEmbedRequests.map((req) => this.client.batchEmbedContents(req)));
77
- const embeddings = responses.flatMap((res, idx) => {
78
- if (res.status === "fulfilled") {
79
- return res.value.embeddings.map((e) => e.values || []);
80
- }
81
- else {
82
- return Array(batchEmbedChunks[idx].length).fill([]);
83
- }
84
- });
85
- return embeddings;
86
- }
87
- /**
88
- * Method that takes a document as input and returns a promise that
89
- * resolves to an embedding for the document. It calls the _embedText
90
- * method with the document as the input.
91
- * @param document Document for which to generate an embedding.
92
- * @returns Promise that resolves to an embedding for the input document.
93
- */
94
- embedQuery(document) {
95
- return this.caller.call(this._embedQueryContent.bind(this), document);
96
- }
97
- /**
98
- * Method that takes an array of documents as input and returns a promise
99
- * that resolves to a 2D array of embeddings for each document. It calls
100
- * the _embedText method for each document in the array.
101
- * @param documents Array of documents for which to generate embeddings.
102
- * @returns Promise that resolves to a 2D array of embeddings for each input document.
103
- */
104
- embedDocuments(documents) {
105
- return this.caller.call(this._embedDocumentsContent.bind(this), documents);
106
- }
107
- }
@@ -1,2 +0,0 @@
1
- export * from "./chat_models.js";
2
- export * from "./embeddings.js";
@@ -1,47 +0,0 @@
1
- import { BaseLLMOutputParser, OutputParserException, } from "@langchain/core/output_parsers";
2
- export class GoogleGenerativeAIToolsOutputParser extends BaseLLMOutputParser {
3
- static lc_name() {
4
- return "GoogleGenerativeAIToolsOutputParser";
5
- }
6
- lc_namespace = ["langchain", "google_genai", "output_parsers"];
7
- returnId = false;
8
- /** The type of tool calls to return. */
9
- keyName;
10
- /** Whether to return only the first tool call. */
11
- returnSingle = false;
12
- zodSchema;
13
- constructor(params) {
14
- super(params);
15
- this.keyName = params.keyName;
16
- this.returnSingle = params.returnSingle ?? this.returnSingle;
17
- // @ts-ignore - Type instantiation depth issue with Zod schemas
18
- this.zodSchema = params.zodSchema;
19
- }
20
- async _validateResult(result) {
21
- if (this.zodSchema === undefined) {
22
- return result;
23
- }
24
- const zodParsedResult = await this.zodSchema.safeParseAsync(result);
25
- if (zodParsedResult.success) {
26
- return zodParsedResult.data;
27
- }
28
- else {
29
- throw new OutputParserException(`Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error.errors)}`, JSON.stringify(result, null, 2));
30
- }
31
- }
32
- async parseResult(generations) {
33
- const tools = generations.flatMap((generation) => {
34
- const { message } = generation;
35
- if (!("tool_calls" in message) || !Array.isArray(message.tool_calls)) {
36
- return [];
37
- }
38
- return message.tool_calls;
39
- });
40
- if (tools[0] === undefined) {
41
- throw new Error("No parseable tool calls provided to GoogleGenerativeAIToolsOutputParser.");
42
- }
43
- const [tool] = tools;
44
- const validatedResult = await this._validateResult(tool.args);
45
- return validatedResult;
46
- }
47
- }
@@ -1 +0,0 @@
1
- export {};
@@ -1,381 +0,0 @@
1
- import { isOpenAITool } from "@langchain/core/language_models/base";
2
- import { AIMessage, AIMessageChunk, ChatMessage, isBaseMessage, } from "@langchain/core/messages";
3
- import { ChatGenerationChunk, } from "@langchain/core/outputs";
4
- import { isLangChainTool } from "@langchain/core/utils/function_calling";
5
- import { jsonSchemaToGeminiParameters, zodToGenerativeAIParameters, } from "./zod_to_genai_parameters.js";
6
- export function getMessageAuthor(message) {
7
- const type = message._getType();
8
- if (ChatMessage.isInstance(message)) {
9
- return message.role;
10
- }
11
- if (type === "tool") {
12
- return type;
13
- }
14
- return message.name ?? type;
15
- }
16
- /**
17
- * Maps a message type to a Google Generative AI chat author.
18
- * @param message The message to map.
19
- * @param model The model to use for mapping.
20
- * @returns The message type mapped to a Google Generative AI chat author.
21
- */
22
- export function convertAuthorToRole(author) {
23
- switch (author) {
24
- /**
25
- * Note: Gemini currently is not supporting system messages
26
- * we will convert them to human messages and merge with following
27
- * */
28
- case "ai":
29
- case "model": // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
30
- return "model";
31
- case "system":
32
- return "system";
33
- case "human":
34
- return "user";
35
- case "tool":
36
- case "function":
37
- return "function";
38
- default:
39
- throw new Error(`Unknown / unsupported author: ${author}`);
40
- }
41
- }
42
- function messageContentMedia(content) {
43
- if ("mimeType" in content && "data" in content) {
44
- return {
45
- inlineData: {
46
- mimeType: content.mimeType,
47
- data: content.data,
48
- },
49
- };
50
- }
51
- if ("mimeType" in content && "fileUri" in content) {
52
- return {
53
- fileData: {
54
- mimeType: content.mimeType,
55
- fileUri: content.fileUri,
56
- },
57
- };
58
- }
59
- throw new Error("Invalid media content");
60
- }
61
- export function convertMessageContentToParts(message, isMultimodalModel) {
62
- if (typeof message.content === "string" && message.content !== "") {
63
- return [{ text: message.content }];
64
- }
65
- let functionCalls = [];
66
- let functionResponses = [];
67
- let messageParts = [];
68
- if ("tool_calls" in message &&
69
- Array.isArray(message.tool_calls) &&
70
- message.tool_calls.length > 0) {
71
- functionCalls = message.tool_calls
72
- .map((tc) => {
73
- if (typeof tc.name === "string") {
74
- return {
75
- functionCall: {
76
- name: tc.name,
77
- args: tc.args,
78
- },
79
- };
80
- }
81
- return null;
82
- })
83
- .filter(Boolean);
84
- }
85
- else if (message.getType() === "tool" && message.name && message.content) {
86
- functionResponses = [
87
- {
88
- functionResponse: {
89
- name: message.name,
90
- response: message.content,
91
- },
92
- },
93
- ];
94
- }
95
- else if (Array.isArray(message.content)) {
96
- messageParts = message.content.map((c) => {
97
- if (c.type === "text") {
98
- return {
99
- text: c.text,
100
- };
101
- }
102
- else if (c.type === "executableCode") {
103
- return {
104
- executableCode: c.executableCode,
105
- };
106
- }
107
- else if (c.type === "codeExecutionResult") {
108
- return {
109
- codeExecutionResult: c.codeExecutionResult,
110
- };
111
- }
112
- if (c.type === "image_url") {
113
- if (!isMultimodalModel) {
114
- throw new Error(`This model does not support images`);
115
- }
116
- if (!c.image_url) {
117
- throw new Error("Please provide image as base64 encoded data URL");
118
- }
119
- let source;
120
- if (typeof c.image_url === "string") {
121
- source = c.image_url;
122
- }
123
- else if (typeof c.image_url === "object" && "url" in c.image_url) {
124
- source = c.image_url.url;
125
- }
126
- else {
127
- throw new Error("Please provide image as base64 encoded data URL");
128
- }
129
- const [dm, data] = source.split(",");
130
- if (!dm.startsWith("data:")) {
131
- throw new Error("Please provide image as base64 encoded data URL");
132
- }
133
- const [mimeType, encoding] = dm.replace(/^data:/, "").split(";");
134
- if (encoding !== "base64") {
135
- throw new Error("Please provide image as base64 encoded data URL");
136
- }
137
- return {
138
- inlineData: {
139
- data,
140
- mimeType,
141
- },
142
- };
143
- }
144
- else if (c.type === "media") {
145
- return messageContentMedia(c);
146
- }
147
- else if (c.type === "tool_use") {
148
- return {
149
- functionCall: {
150
- name: c.name,
151
- args: c.input,
152
- },
153
- };
154
- }
155
- else if (c.type?.includes("/") &&
156
- // Ensure it's a single slash.
157
- c.type.split("/").length === 2 &&
158
- "data" in c &&
159
- typeof c.data === "string") {
160
- return {
161
- inlineData: {
162
- mimeType: c.type,
163
- data: c.data,
164
- },
165
- };
166
- }
167
- throw new Error(`Unknown content type ${c.type}`);
168
- });
169
- }
170
- return [...messageParts, ...functionCalls, ...functionResponses];
171
- }
172
- export function convertBaseMessagesToContent(messages, isMultimodalModel, convertSystemMessageToHumanContent = false) {
173
- return messages.reduce((acc, message, index) => {
174
- if (!isBaseMessage(message)) {
175
- throw new Error("Unsupported message input");
176
- }
177
- const author = getMessageAuthor(message);
178
- if (author === "system" && index !== 0) {
179
- throw new Error("System message should be the first one");
180
- }
181
- const role = convertAuthorToRole(author);
182
- const prevContent = acc.content[acc.content.length];
183
- if (!acc.mergeWithPreviousContent &&
184
- prevContent &&
185
- prevContent.role === role) {
186
- throw new Error("Google Generative AI requires alternate messages between authors");
187
- }
188
- const parts = convertMessageContentToParts(message, isMultimodalModel);
189
- if (acc.mergeWithPreviousContent) {
190
- const prevContent = acc.content[acc.content.length - 1];
191
- if (!prevContent) {
192
- throw new Error("There was a problem parsing your system message. Please try a prompt without one.");
193
- }
194
- prevContent.parts.push(...parts);
195
- return {
196
- mergeWithPreviousContent: false,
197
- content: acc.content,
198
- };
199
- }
200
- let actualRole = role;
201
- if (actualRole === "function" ||
202
- (actualRole === "system" && !convertSystemMessageToHumanContent)) {
203
- // GenerativeAI API will throw an error if the role is not "user" or "model."
204
- actualRole = "user";
205
- }
206
- const content = {
207
- role: actualRole,
208
- parts,
209
- };
210
- return {
211
- mergeWithPreviousContent: author === "system" && !convertSystemMessageToHumanContent,
212
- content: [...acc.content, content],
213
- };
214
- }, { content: [], mergeWithPreviousContent: false }).content;
215
- }
216
- export function mapGenerateContentResultToChatResult(response, extra) {
217
- // if rejected or error, return empty generations with reason in filters
218
- if (!response.candidates ||
219
- response.candidates.length === 0 ||
220
- !response.candidates[0]) {
221
- return {
222
- generations: [],
223
- llmOutput: {
224
- filters: response.promptFeedback,
225
- },
226
- };
227
- }
228
- const functionCalls = response.functionCalls();
229
- const [candidate] = response.candidates;
230
- const { content: candidateContent, ...generationInfo } = candidate;
231
- let content;
232
- if (candidateContent?.parts.length === 1 && candidateContent.parts[0].text) {
233
- content = candidateContent.parts[0].text;
234
- }
235
- else {
236
- content = candidateContent.parts.map((p) => {
237
- if ("text" in p) {
238
- return {
239
- type: "text",
240
- text: p.text,
241
- };
242
- }
243
- else if ("executableCode" in p) {
244
- return {
245
- type: "executableCode",
246
- executableCode: p.executableCode,
247
- };
248
- }
249
- else if ("codeExecutionResult" in p) {
250
- return {
251
- type: "codeExecutionResult",
252
- codeExecutionResult: p.codeExecutionResult,
253
- };
254
- }
255
- return p;
256
- });
257
- }
258
- let text = "";
259
- if (typeof content === "string") {
260
- text = content;
261
- }
262
- else if ("text" in content[0]) {
263
- text = content[0].text;
264
- }
265
- const generation = {
266
- text,
267
- message: new AIMessage({
268
- content,
269
- tool_calls: functionCalls?.map((fc) => ({
270
- ...fc,
271
- type: "tool_call",
272
- })),
273
- additional_kwargs: {
274
- ...generationInfo,
275
- },
276
- usage_metadata: extra?.usageMetadata,
277
- }),
278
- generationInfo,
279
- };
280
- return {
281
- generations: [generation],
282
- };
283
- }
284
- export function convertResponseContentToChatGenerationChunk(response, extra) {
285
- if (!response.candidates || response.candidates.length === 0) {
286
- return null;
287
- }
288
- const functionCalls = response.functionCalls();
289
- const [candidate] = response.candidates;
290
- const { content: candidateContent, ...generationInfo } = candidate;
291
- let content;
292
- // Checks if some parts do not have text. If false, it means that the content is a string.
293
- if (candidateContent?.parts &&
294
- candidateContent.parts.every((p) => "text" in p)) {
295
- content = candidateContent.parts.map((p) => p.text).join("");
296
- }
297
- else if (candidateContent.parts) {
298
- content = candidateContent.parts.map((p) => {
299
- if ("text" in p) {
300
- return {
301
- type: "text",
302
- text: p.text,
303
- };
304
- }
305
- else if ("executableCode" in p) {
306
- return {
307
- type: "executableCode",
308
- executableCode: p.executableCode,
309
- };
310
- }
311
- else if ("codeExecutionResult" in p) {
312
- return {
313
- type: "codeExecutionResult",
314
- codeExecutionResult: p.codeExecutionResult,
315
- };
316
- }
317
- return p;
318
- });
319
- }
320
- let text = "";
321
- if (content && typeof content === "string") {
322
- text = content;
323
- }
324
- else if (content && typeof content === "object" && "text" in content[0]) {
325
- text = content[0].text;
326
- }
327
- const toolCallChunks = [];
328
- if (functionCalls) {
329
- toolCallChunks.push(...functionCalls.map((fc) => ({
330
- ...fc,
331
- args: JSON.stringify(fc.args),
332
- index: extra.index,
333
- type: "tool_call_chunk",
334
- })));
335
- }
336
- return new ChatGenerationChunk({
337
- text,
338
- message: new AIMessageChunk({
339
- content: content || "",
340
- name: !candidateContent ? undefined : candidateContent.role,
341
- tool_call_chunks: toolCallChunks,
342
- // Each chunk can have unique "generationInfo", and merging strategy is unclear,
343
- // so leave blank for now.
344
- additional_kwargs: {},
345
- usage_metadata: extra.usageMetadata,
346
- }),
347
- generationInfo,
348
- });
349
- }
350
- function isZodType(schema) {
351
- return typeof schema === "object" && schema !== null && "_def" in schema;
352
- }
353
- export function convertToGenerativeAITools(tools) {
354
- if (tools.every((tool) => "functionDeclarations" in tool &&
355
- Array.isArray(tool.functionDeclarations))) {
356
- return tools;
357
- }
358
- return [
359
- {
360
- functionDeclarations: tools.map((tool) => {
361
- if (isLangChainTool(tool) && isZodType(tool.schema)) {
362
- // @ts-ignore - Type instantiation depth issue with Zod schemas
363
- const jsonSchema = zodToGenerativeAIParameters(tool.schema);
364
- return {
365
- name: tool.name,
366
- description: tool.description,
367
- parameters: jsonSchema,
368
- };
369
- }
370
- if (isOpenAITool(tool)) {
371
- return {
372
- name: tool.function.name,
373
- description: tool.function.description ?? `A function available to call.`,
374
- parameters: jsonSchemaToGeminiParameters(tool.function.parameters),
375
- };
376
- }
377
- return tool;
378
- }),
379
- },
380
- ];
381
- }
@@ -1,107 +0,0 @@
1
- import { FunctionCallingMode, } from "@google/generative-ai";
2
- import { isLangChainTool } from "@langchain/core/utils/function_calling";
3
- import { isOpenAITool, } from "@langchain/core/language_models/base";
4
- import { convertToGenerativeAITools } from "./common.js";
5
- import { removeAdditionalProperties } from "./zod_to_genai_parameters.js";
6
- export function convertToolsToGenAI(tools, extra) {
7
- // Extract function declaration processing to a separate function
8
- const genAITools = processTools(tools);
9
- // Simplify tool config creation
10
- const toolConfig = createToolConfig(genAITools, extra);
11
- return { tools: genAITools, toolConfig };
12
- }
13
- function processTools(tools) {
14
- let functionDeclarationTools = [];
15
- const genAITools = [];
16
- tools.forEach((tool) => {
17
- if (isLangChainTool(tool)) {
18
- const [convertedTool] = convertToGenerativeAITools([
19
- tool,
20
- ]);
21
- if (convertedTool.functionDeclarations) {
22
- functionDeclarationTools.push(...convertedTool.functionDeclarations);
23
- }
24
- }
25
- else if (isOpenAITool(tool)) {
26
- const { functionDeclarations } = convertOpenAIToolToGenAI(tool);
27
- if (functionDeclarations) {
28
- functionDeclarationTools.push(...functionDeclarations);
29
- }
30
- else {
31
- throw new Error("Failed to convert OpenAI structured tool to GenerativeAI tool");
32
- }
33
- }
34
- else {
35
- genAITools.push(tool);
36
- }
37
- });
38
- const genAIFunctionDeclaration = genAITools.find((t) => "functionDeclarations" in t);
39
- if (genAIFunctionDeclaration) {
40
- return genAITools.map((tool) => {
41
- if (functionDeclarationTools?.length > 0 &&
42
- "functionDeclarations" in tool) {
43
- const newTool = {
44
- functionDeclarations: [
45
- ...(tool.functionDeclarations || []),
46
- ...functionDeclarationTools,
47
- ],
48
- };
49
- // Clear the functionDeclarationTools array so it is not passed again
50
- functionDeclarationTools = [];
51
- return newTool;
52
- }
53
- return tool;
54
- });
55
- }
56
- return [
57
- ...genAITools,
58
- ...(functionDeclarationTools.length > 0
59
- ? [
60
- {
61
- functionDeclarations: functionDeclarationTools,
62
- },
63
- ]
64
- : []),
65
- ];
66
- }
67
- function convertOpenAIToolToGenAI(tool) {
68
- return {
69
- functionDeclarations: [
70
- {
71
- name: tool.function.name,
72
- description: tool.function.description,
73
- parameters: removeAdditionalProperties(tool.function.parameters),
74
- },
75
- ],
76
- };
77
- }
78
- function createToolConfig(genAITools, extra) {
79
- if (!genAITools.length || !extra)
80
- return undefined;
81
- const { toolChoice, allowedFunctionNames } = extra;
82
- const modeMap = {
83
- any: FunctionCallingMode.ANY,
84
- auto: FunctionCallingMode.AUTO,
85
- none: FunctionCallingMode.NONE,
86
- };
87
- if (toolChoice && ["any", "auto", "none"].includes(toolChoice)) {
88
- return {
89
- functionCallingConfig: {
90
- mode: modeMap[toolChoice] ?? "MODE_UNSPECIFIED",
91
- allowedFunctionNames,
92
- },
93
- };
94
- }
95
- if (typeof toolChoice === "string" || allowedFunctionNames) {
96
- return {
97
- functionCallingConfig: {
98
- mode: FunctionCallingMode.ANY,
99
- allowedFunctionNames: [
100
- ...(allowedFunctionNames ?? []),
101
- ...(toolChoice && typeof toolChoice === "string" ? [toolChoice] : []),
102
- ],
103
- },
104
- };
105
- }
106
- return undefined;
107
- }
@@ -1,41 +0,0 @@
1
- /* eslint-disable @typescript-eslint/no-unused-vars */
2
- import { zodToJsonSchema } from "zod-to-json-schema";
3
- export function removeAdditionalProperties(
4
- // eslint-disable @typescript-eslint/no-explicit-any
5
- obj) {
6
- if (typeof obj === "object" && obj !== null) {
7
- const newObj = { ...obj };
8
- if ("additionalProperties" in newObj) {
9
- delete newObj.additionalProperties;
10
- }
11
- if ("$schema" in newObj) {
12
- delete newObj.$schema;
13
- }
14
- for (const key in newObj) {
15
- if (key in newObj) {
16
- if (Array.isArray(newObj[key])) {
17
- newObj[key] = newObj[key].map(removeAdditionalProperties);
18
- }
19
- else if (typeof newObj[key] === "object" && newObj[key] !== null) {
20
- newObj[key] = removeAdditionalProperties(newObj[key]);
21
- }
22
- }
23
- }
24
- return newObj;
25
- }
26
- return obj;
27
- }
28
- export function zodToGenerativeAIParameters(zodObj) {
29
- // GenerativeAI doesn't accept either the $schema or additionalProperties
30
- // attributes, so we need to explicitly remove them.
31
- const jsonSchema = removeAdditionalProperties(zodToJsonSchema(zodObj));
32
- const { $schema, ...rest } = jsonSchema;
33
- return rest;
34
- }
35
- export function jsonSchemaToGeminiParameters(schema) {
36
- // Gemini doesn't accept either the $schema or additionalProperties
37
- // attributes, so we need to explicitly remove them.
38
- const jsonSchema = removeAdditionalProperties(schema);
39
- const { $schema, ...rest } = jsonSchema;
40
- return rest;
41
- }