@langchain/google-common 0.0.21 → 0.0.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +42 -34
- package/dist/chat_models.d.ts +7 -7
- package/dist/chat_models.js +43 -35
- package/dist/connection.cjs +21 -8
- package/dist/connection.d.ts +4 -4
- package/dist/connection.js +21 -8
- package/dist/types.d.ts +34 -4
- package/dist/utils/common.cjs +72 -53
- package/dist/utils/common.d.ts +2 -1
- package/dist/utils/common.js +70 -52
- package/dist/utils/gemini.cjs +13 -6
- package/dist/utils/gemini.d.ts +2 -2
- package/dist/utils/gemini.js +11 -4
- package/package.json +5 -5
package/dist/chat_models.cjs
CHANGED
|
@@ -5,10 +5,9 @@ const env_1 = require("@langchain/core/utils/env");
|
|
|
5
5
|
const chat_models_1 = require("@langchain/core/language_models/chat_models");
|
|
6
6
|
const outputs_1 = require("@langchain/core/outputs");
|
|
7
7
|
const messages_1 = require("@langchain/core/messages");
|
|
8
|
-
const base_1 = require("@langchain/core/language_models/base");
|
|
9
8
|
const runnables_1 = require("@langchain/core/runnables");
|
|
10
9
|
const openai_tools_1 = require("@langchain/core/output_parsers/openai_tools");
|
|
11
|
-
const
|
|
10
|
+
const stream_1 = require("@langchain/core/utils/stream");
|
|
12
11
|
const common_js_1 = require("./utils/common.cjs");
|
|
13
12
|
const connection_js_1 = require("./connection.cjs");
|
|
14
13
|
const gemini_js_1 = require("./utils/gemini.cjs");
|
|
@@ -58,10 +57,23 @@ class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
|
|
|
58
57
|
return input
|
|
59
58
|
.map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1], this.useSystemInstruction))
|
|
60
59
|
.reduce((acc, cur) => {
|
|
61
|
-
// Filter out the system content
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
60
|
+
// Filter out the system content
|
|
61
|
+
if (cur.every((content) => content.role === "system")) {
|
|
62
|
+
return acc;
|
|
63
|
+
}
|
|
64
|
+
// Combine adjacent function messages
|
|
65
|
+
if (cur[0]?.role === "function" &&
|
|
66
|
+
acc.length > 0 &&
|
|
67
|
+
acc[acc.length - 1].role === "function") {
|
|
68
|
+
acc[acc.length - 1].parts = [
|
|
69
|
+
...acc[acc.length - 1].parts,
|
|
70
|
+
...cur[0].parts,
|
|
71
|
+
];
|
|
72
|
+
}
|
|
73
|
+
else {
|
|
74
|
+
acc.push(...cur);
|
|
75
|
+
}
|
|
76
|
+
return acc;
|
|
65
77
|
}, []);
|
|
66
78
|
}
|
|
67
79
|
formatSystemInstruction(input, _parameters) {
|
|
@@ -85,31 +97,6 @@ class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
|
|
|
85
97
|
return ret;
|
|
86
98
|
}
|
|
87
99
|
}
|
|
88
|
-
function convertToGeminiTools(structuredTools) {
|
|
89
|
-
return [
|
|
90
|
-
{
|
|
91
|
-
functionDeclarations: structuredTools.map((structuredTool) => {
|
|
92
|
-
if ((0, function_calling_1.isStructuredTool)(structuredTool)) {
|
|
93
|
-
const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(structuredTool.schema);
|
|
94
|
-
return {
|
|
95
|
-
name: structuredTool.name,
|
|
96
|
-
description: structuredTool.description,
|
|
97
|
-
parameters: jsonSchema,
|
|
98
|
-
};
|
|
99
|
-
}
|
|
100
|
-
if ((0, base_1.isOpenAITool)(structuredTool)) {
|
|
101
|
-
return {
|
|
102
|
-
name: structuredTool.function.name,
|
|
103
|
-
description: structuredTool.function.description ??
|
|
104
|
-
`A function available to call.`,
|
|
105
|
-
parameters: (0, zod_to_gemini_parameters_js_1.jsonSchemaToGeminiParameters)(structuredTool.function.parameters),
|
|
106
|
-
};
|
|
107
|
-
}
|
|
108
|
-
return structuredTool;
|
|
109
|
-
}),
|
|
110
|
-
},
|
|
111
|
-
];
|
|
112
|
-
}
|
|
113
100
|
/**
|
|
114
101
|
* Integration with a chat model.
|
|
115
102
|
*/
|
|
@@ -199,6 +186,12 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
|
|
|
199
186
|
writable: true,
|
|
200
187
|
value: true
|
|
201
188
|
});
|
|
189
|
+
Object.defineProperty(this, "streaming", {
|
|
190
|
+
enumerable: true,
|
|
191
|
+
configurable: true,
|
|
192
|
+
writable: true,
|
|
193
|
+
value: false
|
|
194
|
+
});
|
|
202
195
|
Object.defineProperty(this, "connection", {
|
|
203
196
|
enumerable: true,
|
|
204
197
|
configurable: true,
|
|
@@ -252,7 +245,7 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
|
|
|
252
245
|
return this.connection.platform;
|
|
253
246
|
}
|
|
254
247
|
bindTools(tools, kwargs) {
|
|
255
|
-
return this.bind({ tools: convertToGeminiTools(tools), ...kwargs });
|
|
248
|
+
return this.bind({ tools: (0, common_js_1.convertToGeminiTools)(tools), ...kwargs });
|
|
256
249
|
}
|
|
257
250
|
// Replace
|
|
258
251
|
_llmType() {
|
|
@@ -264,13 +257,27 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
|
|
|
264
257
|
invocationParams(options) {
|
|
265
258
|
return (0, common_js_1.copyAIModelParams)(this, options);
|
|
266
259
|
}
|
|
267
|
-
async _generate(messages, options,
|
|
260
|
+
async _generate(messages, options, runManager) {
|
|
268
261
|
const parameters = this.invocationParams(options);
|
|
262
|
+
if (this.streaming) {
|
|
263
|
+
const stream = this._streamResponseChunks(messages, options, runManager);
|
|
264
|
+
let finalChunk = null;
|
|
265
|
+
for await (const chunk of stream) {
|
|
266
|
+
finalChunk = !finalChunk ? chunk : (0, stream_1.concat)(finalChunk, chunk);
|
|
267
|
+
}
|
|
268
|
+
if (!finalChunk) {
|
|
269
|
+
throw new Error("No chunks were returned from the stream.");
|
|
270
|
+
}
|
|
271
|
+
return {
|
|
272
|
+
generations: [finalChunk],
|
|
273
|
+
};
|
|
274
|
+
}
|
|
269
275
|
const response = await this.connection.request(messages, parameters, options);
|
|
270
276
|
const ret = (0, gemini_js_1.safeResponseToChatResult)(response, this.safetyHandler);
|
|
277
|
+
await runManager?.handleLLMNewToken(ret.generations[0].text);
|
|
271
278
|
return ret;
|
|
272
279
|
}
|
|
273
|
-
async *_streamResponseChunks(_messages, options,
|
|
280
|
+
async *_streamResponseChunks(_messages, options, runManager) {
|
|
274
281
|
// Make the call as a streaming request
|
|
275
282
|
const parameters = this.invocationParams(options);
|
|
276
283
|
const response = await this.streamedConnection.request(_messages, parameters, options);
|
|
@@ -303,6 +310,7 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
|
|
|
303
310
|
}),
|
|
304
311
|
});
|
|
305
312
|
yield chunk;
|
|
313
|
+
await runManager?.handleLLMNewToken(chunk.text);
|
|
306
314
|
}
|
|
307
315
|
}
|
|
308
316
|
/** @ignore */
|
package/dist/chat_models.d.ts
CHANGED
|
@@ -3,15 +3,14 @@ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
|
|
|
3
3
|
import { BaseChatModel, LangSmithParams, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
|
|
4
4
|
import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
|
|
5
5
|
import { AIMessageChunk } from "@langchain/core/messages";
|
|
6
|
-
import { BaseLanguageModelInput, StructuredOutputMethodOptions
|
|
6
|
+
import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
|
|
7
7
|
import type { z } from "zod";
|
|
8
|
-
import { Runnable
|
|
8
|
+
import { Runnable } from "@langchain/core/runnables";
|
|
9
9
|
import { AsyncCaller } from "@langchain/core/utils/async_caller";
|
|
10
|
-
import { StructuredToolInterface } from "@langchain/core/tools";
|
|
11
10
|
import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
|
|
12
11
|
import { AbstractGoogleLLMConnection } from "./connection.js";
|
|
13
12
|
import { GoogleAbstractedClient } from "./auth.js";
|
|
14
|
-
import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams } from "./types.js";
|
|
13
|
+
import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams, GoogleAIToolType } from "./types.js";
|
|
15
14
|
declare class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<BaseMessage[], AuthOptions> {
|
|
16
15
|
convertSystemMessageToHumanContent: boolean | undefined;
|
|
17
16
|
constructor(fields: GoogleAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming: boolean);
|
|
@@ -45,6 +44,7 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
|
|
|
45
44
|
convertSystemMessageToHumanContent: boolean | undefined;
|
|
46
45
|
safetyHandler: GoogleAISafetyHandler;
|
|
47
46
|
streamUsage: boolean;
|
|
47
|
+
streaming: boolean;
|
|
48
48
|
protected connection: ChatConnection<AuthOptions>;
|
|
49
49
|
protected streamedConnection: ChatConnection<AuthOptions>;
|
|
50
50
|
constructor(fields?: ChatGoogleBaseInput<AuthOptions>);
|
|
@@ -55,14 +55,14 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
|
|
|
55
55
|
buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
|
|
56
56
|
buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
|
|
57
57
|
get platform(): GooglePlatformType;
|
|
58
|
-
bindTools(tools:
|
|
58
|
+
bindTools(tools: GoogleAIToolType[], kwargs?: Partial<GoogleAIBaseLanguageModelCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, GoogleAIBaseLanguageModelCallOptions>;
|
|
59
59
|
_llmType(): string;
|
|
60
60
|
/**
|
|
61
61
|
* Get the parameters used to invoke the model
|
|
62
62
|
*/
|
|
63
63
|
invocationParams(options?: this["ParsedCallOptions"]): import("./types.js").GoogleAIModelRequestParams;
|
|
64
|
-
_generate(messages: BaseMessage[], options: this["ParsedCallOptions"],
|
|
65
|
-
_streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"],
|
|
64
|
+
_generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
|
|
65
|
+
_streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
|
|
66
66
|
/** @ignore */
|
|
67
67
|
_combineLLMOutput(): never[];
|
|
68
68
|
withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
|
package/dist/chat_models.js
CHANGED
|
@@ -2,16 +2,15 @@ import { getEnvironmentVariable } from "@langchain/core/utils/env";
|
|
|
2
2
|
import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
|
|
3
3
|
import { ChatGenerationChunk } from "@langchain/core/outputs";
|
|
4
4
|
import { AIMessageChunk } from "@langchain/core/messages";
|
|
5
|
-
import { isOpenAITool, } from "@langchain/core/language_models/base";
|
|
6
5
|
import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
|
|
7
6
|
import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
|
|
8
|
-
import {
|
|
9
|
-
import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
|
|
7
|
+
import { concat } from "@langchain/core/utils/stream";
|
|
8
|
+
import { convertToGeminiTools, copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
|
|
10
9
|
import { AbstractGoogleLLMConnection } from "./connection.js";
|
|
11
10
|
import { baseMessageToContent, safeResponseToChatGeneration, safeResponseToChatResult, DefaultGeminiSafetyHandler, } from "./utils/gemini.js";
|
|
12
11
|
import { ApiKeyGoogleAuth } from "./auth.js";
|
|
13
12
|
import { ensureParams } from "./utils/failed_handler.js";
|
|
14
|
-
import {
|
|
13
|
+
import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
|
|
15
14
|
class ChatConnection extends AbstractGoogleLLMConnection {
|
|
16
15
|
constructor(fields, caller, client, streaming) {
|
|
17
16
|
super(fields, caller, client, streaming);
|
|
@@ -55,10 +54,23 @@ class ChatConnection extends AbstractGoogleLLMConnection {
|
|
|
55
54
|
return input
|
|
56
55
|
.map((msg, i) => baseMessageToContent(msg, input[i - 1], this.useSystemInstruction))
|
|
57
56
|
.reduce((acc, cur) => {
|
|
58
|
-
// Filter out the system content
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
57
|
+
// Filter out the system content
|
|
58
|
+
if (cur.every((content) => content.role === "system")) {
|
|
59
|
+
return acc;
|
|
60
|
+
}
|
|
61
|
+
// Combine adjacent function messages
|
|
62
|
+
if (cur[0]?.role === "function" &&
|
|
63
|
+
acc.length > 0 &&
|
|
64
|
+
acc[acc.length - 1].role === "function") {
|
|
65
|
+
acc[acc.length - 1].parts = [
|
|
66
|
+
...acc[acc.length - 1].parts,
|
|
67
|
+
...cur[0].parts,
|
|
68
|
+
];
|
|
69
|
+
}
|
|
70
|
+
else {
|
|
71
|
+
acc.push(...cur);
|
|
72
|
+
}
|
|
73
|
+
return acc;
|
|
62
74
|
}, []);
|
|
63
75
|
}
|
|
64
76
|
formatSystemInstruction(input, _parameters) {
|
|
@@ -82,31 +94,6 @@ class ChatConnection extends AbstractGoogleLLMConnection {
|
|
|
82
94
|
return ret;
|
|
83
95
|
}
|
|
84
96
|
}
|
|
85
|
-
function convertToGeminiTools(structuredTools) {
|
|
86
|
-
return [
|
|
87
|
-
{
|
|
88
|
-
functionDeclarations: structuredTools.map((structuredTool) => {
|
|
89
|
-
if (isStructuredTool(structuredTool)) {
|
|
90
|
-
const jsonSchema = zodToGeminiParameters(structuredTool.schema);
|
|
91
|
-
return {
|
|
92
|
-
name: structuredTool.name,
|
|
93
|
-
description: structuredTool.description,
|
|
94
|
-
parameters: jsonSchema,
|
|
95
|
-
};
|
|
96
|
-
}
|
|
97
|
-
if (isOpenAITool(structuredTool)) {
|
|
98
|
-
return {
|
|
99
|
-
name: structuredTool.function.name,
|
|
100
|
-
description: structuredTool.function.description ??
|
|
101
|
-
`A function available to call.`,
|
|
102
|
-
parameters: jsonSchemaToGeminiParameters(structuredTool.function.parameters),
|
|
103
|
-
};
|
|
104
|
-
}
|
|
105
|
-
return structuredTool;
|
|
106
|
-
}),
|
|
107
|
-
},
|
|
108
|
-
];
|
|
109
|
-
}
|
|
110
97
|
/**
|
|
111
98
|
* Integration with a chat model.
|
|
112
99
|
*/
|
|
@@ -196,6 +183,12 @@ export class ChatGoogleBase extends BaseChatModel {
|
|
|
196
183
|
writable: true,
|
|
197
184
|
value: true
|
|
198
185
|
});
|
|
186
|
+
Object.defineProperty(this, "streaming", {
|
|
187
|
+
enumerable: true,
|
|
188
|
+
configurable: true,
|
|
189
|
+
writable: true,
|
|
190
|
+
value: false
|
|
191
|
+
});
|
|
199
192
|
Object.defineProperty(this, "connection", {
|
|
200
193
|
enumerable: true,
|
|
201
194
|
configurable: true,
|
|
@@ -261,13 +254,27 @@ export class ChatGoogleBase extends BaseChatModel {
|
|
|
261
254
|
invocationParams(options) {
|
|
262
255
|
return copyAIModelParams(this, options);
|
|
263
256
|
}
|
|
264
|
-
async _generate(messages, options,
|
|
257
|
+
async _generate(messages, options, runManager) {
|
|
265
258
|
const parameters = this.invocationParams(options);
|
|
259
|
+
if (this.streaming) {
|
|
260
|
+
const stream = this._streamResponseChunks(messages, options, runManager);
|
|
261
|
+
let finalChunk = null;
|
|
262
|
+
for await (const chunk of stream) {
|
|
263
|
+
finalChunk = !finalChunk ? chunk : concat(finalChunk, chunk);
|
|
264
|
+
}
|
|
265
|
+
if (!finalChunk) {
|
|
266
|
+
throw new Error("No chunks were returned from the stream.");
|
|
267
|
+
}
|
|
268
|
+
return {
|
|
269
|
+
generations: [finalChunk],
|
|
270
|
+
};
|
|
271
|
+
}
|
|
266
272
|
const response = await this.connection.request(messages, parameters, options);
|
|
267
273
|
const ret = safeResponseToChatResult(response, this.safetyHandler);
|
|
274
|
+
await runManager?.handleLLMNewToken(ret.generations[0].text);
|
|
268
275
|
return ret;
|
|
269
276
|
}
|
|
270
|
-
async *_streamResponseChunks(_messages, options,
|
|
277
|
+
async *_streamResponseChunks(_messages, options, runManager) {
|
|
271
278
|
// Make the call as a streaming request
|
|
272
279
|
const parameters = this.invocationParams(options);
|
|
273
280
|
const response = await this.streamedConnection.request(_messages, parameters, options);
|
|
@@ -300,6 +307,7 @@ export class ChatGoogleBase extends BaseChatModel {
|
|
|
300
307
|
}),
|
|
301
308
|
});
|
|
302
309
|
yield chunk;
|
|
310
|
+
await runManager?.handleLLMNewToken(chunk.text);
|
|
303
311
|
}
|
|
304
312
|
}
|
|
305
313
|
/** @ignore */
|
package/dist/connection.cjs
CHANGED
|
@@ -2,6 +2,7 @@
|
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
exports.AbstractGoogleLLMConnection = exports.GoogleAIConnection = exports.GoogleHostConnection = exports.GoogleConnection = void 0;
|
|
4
4
|
const env_1 = require("@langchain/core/utils/env");
|
|
5
|
+
const function_calling_1 = require("@langchain/core/utils/function_calling");
|
|
5
6
|
const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.cjs");
|
|
6
7
|
class GoogleConnection {
|
|
7
8
|
constructor(caller, client, streaming) {
|
|
@@ -221,16 +222,11 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
|
|
|
221
222
|
formatSystemInstruction(_input, _parameters) {
|
|
222
223
|
return {};
|
|
223
224
|
}
|
|
224
|
-
// Borrowed from the OpenAI invocation params test
|
|
225
|
-
isStructuredToolArray(tools) {
|
|
226
|
-
return (tools !== undefined &&
|
|
227
|
-
tools.every((tool) => Array.isArray(tool.lc_namespace)));
|
|
228
|
-
}
|
|
229
225
|
structuredToolToFunctionDeclaration(tool) {
|
|
230
226
|
const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(tool.schema);
|
|
231
227
|
return {
|
|
232
228
|
name: tool.name,
|
|
233
|
-
description: tool.description
|
|
229
|
+
description: tool.description ?? `A function available to call.`,
|
|
234
230
|
parameters: jsonSchema,
|
|
235
231
|
};
|
|
236
232
|
}
|
|
@@ -246,20 +242,34 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
|
|
|
246
242
|
if (!tools || tools.length === 0) {
|
|
247
243
|
return [];
|
|
248
244
|
}
|
|
249
|
-
if (
|
|
245
|
+
if (tools.every(function_calling_1.isLangChainTool)) {
|
|
250
246
|
return this.structuredToolsToGeminiTools(tools);
|
|
251
247
|
}
|
|
252
248
|
else {
|
|
253
|
-
if (tools.length === 1 &&
|
|
249
|
+
if (tools.length === 1 &&
|
|
250
|
+
(!("functionDeclarations" in tools[0]) ||
|
|
251
|
+
!tools[0].functionDeclarations?.length)) {
|
|
254
252
|
return [];
|
|
255
253
|
}
|
|
256
254
|
return tools;
|
|
257
255
|
}
|
|
258
256
|
}
|
|
257
|
+
formatToolConfig(parameters) {
|
|
258
|
+
if (!parameters.tool_choice || typeof parameters.tool_choice !== "string") {
|
|
259
|
+
return undefined;
|
|
260
|
+
}
|
|
261
|
+
return {
|
|
262
|
+
functionCallingConfig: {
|
|
263
|
+
mode: parameters.tool_choice,
|
|
264
|
+
allowedFunctionNames: parameters.allowed_function_names,
|
|
265
|
+
},
|
|
266
|
+
};
|
|
267
|
+
}
|
|
259
268
|
formatData(input, parameters) {
|
|
260
269
|
const contents = this.formatContents(input, parameters);
|
|
261
270
|
const generationConfig = this.formatGenerationConfig(input, parameters);
|
|
262
271
|
const tools = this.formatTools(input, parameters);
|
|
272
|
+
const toolConfig = this.formatToolConfig(parameters);
|
|
263
273
|
const safetySettings = this.formatSafetySettings(input, parameters);
|
|
264
274
|
const systemInstruction = this.formatSystemInstruction(input, parameters);
|
|
265
275
|
const ret = {
|
|
@@ -269,6 +279,9 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
|
|
|
269
279
|
if (tools && tools.length) {
|
|
270
280
|
ret.tools = tools;
|
|
271
281
|
}
|
|
282
|
+
if (toolConfig) {
|
|
283
|
+
ret.toolConfig = toolConfig;
|
|
284
|
+
}
|
|
272
285
|
if (safetySettings && safetySettings.length) {
|
|
273
286
|
ret.safetySettings = safetySettings;
|
|
274
287
|
}
|
package/dist/connection.d.ts
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
|
|
2
2
|
import { AsyncCaller, AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
|
|
3
|
-
import {
|
|
3
|
+
import { StructuredToolParams } from "@langchain/core/tools";
|
|
4
4
|
import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GeminiContent, GeminiGenerationConfig, GeminiRequest, GeminiSafetySetting, GeminiTool, GeminiFunctionDeclaration, GoogleAIModelRequestParams } from "./types.js";
|
|
5
5
|
import { GoogleAbstractedClient, GoogleAbstractedClientOpsMethod } from "./auth.js";
|
|
6
6
|
export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse> {
|
|
@@ -49,9 +49,9 @@ export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptio
|
|
|
49
49
|
formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiGenerationConfig;
|
|
50
50
|
formatSafetySettings(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiSafetySetting[];
|
|
51
51
|
formatSystemInstruction(_input: MessageType, _parameters: GoogleAIModelRequestParams): GeminiContent;
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[];
|
|
52
|
+
structuredToolToFunctionDeclaration(tool: StructuredToolParams): GeminiFunctionDeclaration;
|
|
53
|
+
structuredToolsToGeminiTools(tools: StructuredToolParams[]): GeminiTool[];
|
|
55
54
|
formatTools(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiTool[];
|
|
55
|
+
formatToolConfig(parameters: GoogleAIModelRequestParams): GeminiRequest["toolConfig"] | undefined;
|
|
56
56
|
formatData(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiRequest;
|
|
57
57
|
}
|
package/dist/connection.js
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { getRuntimeEnvironment } from "@langchain/core/utils/env";
|
|
2
|
+
import { isLangChainTool } from "@langchain/core/utils/function_calling";
|
|
2
3
|
import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
|
|
3
4
|
export class GoogleConnection {
|
|
4
5
|
constructor(caller, client, streaming) {
|
|
@@ -215,16 +216,11 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
|
|
|
215
216
|
formatSystemInstruction(_input, _parameters) {
|
|
216
217
|
return {};
|
|
217
218
|
}
|
|
218
|
-
// Borrowed from the OpenAI invocation params test
|
|
219
|
-
isStructuredToolArray(tools) {
|
|
220
|
-
return (tools !== undefined &&
|
|
221
|
-
tools.every((tool) => Array.isArray(tool.lc_namespace)));
|
|
222
|
-
}
|
|
223
219
|
structuredToolToFunctionDeclaration(tool) {
|
|
224
220
|
const jsonSchema = zodToGeminiParameters(tool.schema);
|
|
225
221
|
return {
|
|
226
222
|
name: tool.name,
|
|
227
|
-
description: tool.description
|
|
223
|
+
description: tool.description ?? `A function available to call.`,
|
|
228
224
|
parameters: jsonSchema,
|
|
229
225
|
};
|
|
230
226
|
}
|
|
@@ -240,20 +236,34 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
|
|
|
240
236
|
if (!tools || tools.length === 0) {
|
|
241
237
|
return [];
|
|
242
238
|
}
|
|
243
|
-
if (
|
|
239
|
+
if (tools.every(isLangChainTool)) {
|
|
244
240
|
return this.structuredToolsToGeminiTools(tools);
|
|
245
241
|
}
|
|
246
242
|
else {
|
|
247
|
-
if (tools.length === 1 &&
|
|
243
|
+
if (tools.length === 1 &&
|
|
244
|
+
(!("functionDeclarations" in tools[0]) ||
|
|
245
|
+
!tools[0].functionDeclarations?.length)) {
|
|
248
246
|
return [];
|
|
249
247
|
}
|
|
250
248
|
return tools;
|
|
251
249
|
}
|
|
252
250
|
}
|
|
251
|
+
formatToolConfig(parameters) {
|
|
252
|
+
if (!parameters.tool_choice || typeof parameters.tool_choice !== "string") {
|
|
253
|
+
return undefined;
|
|
254
|
+
}
|
|
255
|
+
return {
|
|
256
|
+
functionCallingConfig: {
|
|
257
|
+
mode: parameters.tool_choice,
|
|
258
|
+
allowedFunctionNames: parameters.allowed_function_names,
|
|
259
|
+
},
|
|
260
|
+
};
|
|
261
|
+
}
|
|
253
262
|
formatData(input, parameters) {
|
|
254
263
|
const contents = this.formatContents(input, parameters);
|
|
255
264
|
const generationConfig = this.formatGenerationConfig(input, parameters);
|
|
256
265
|
const tools = this.formatTools(input, parameters);
|
|
266
|
+
const toolConfig = this.formatToolConfig(parameters);
|
|
257
267
|
const safetySettings = this.formatSafetySettings(input, parameters);
|
|
258
268
|
const systemInstruction = this.formatSystemInstruction(input, parameters);
|
|
259
269
|
const ret = {
|
|
@@ -263,6 +273,9 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
|
|
|
263
273
|
if (tools && tools.length) {
|
|
264
274
|
ret.tools = tools;
|
|
265
275
|
}
|
|
276
|
+
if (toolConfig) {
|
|
277
|
+
ret.toolConfig = toolConfig;
|
|
278
|
+
}
|
|
266
279
|
if (safetySettings && safetySettings.length) {
|
|
267
280
|
ret.safetySettings = safetySettings;
|
|
268
281
|
}
|
package/dist/types.d.ts
CHANGED
|
@@ -1,6 +1,5 @@
|
|
|
1
1
|
import type { BaseLLMParams } from "@langchain/core/language_models/llms";
|
|
2
|
-
import {
|
|
3
|
-
import { StructuredToolInterface } from "@langchain/core/tools";
|
|
2
|
+
import type { BaseChatModelCallOptions, BindToolsInput } from "@langchain/core/language_models/chat_models";
|
|
4
3
|
import type { JsonStream } from "./utils/stream.js";
|
|
5
4
|
/**
|
|
6
5
|
* Parameters needed to setup the client connection.
|
|
@@ -36,6 +35,7 @@ export interface GoogleConnectionParams<AuthOptions> extends GoogleClientParams<
|
|
|
36
35
|
export interface GoogleAISafetySetting {
|
|
37
36
|
category: string;
|
|
38
37
|
threshold: string;
|
|
38
|
+
method?: string;
|
|
39
39
|
}
|
|
40
40
|
export type GoogleAIResponseMimeType = "text/plain" | "application/json";
|
|
41
41
|
export interface GoogleAIModelParams {
|
|
@@ -85,16 +85,40 @@ export interface GoogleAIModelParams {
|
|
|
85
85
|
* @default "text/plain"
|
|
86
86
|
*/
|
|
87
87
|
responseMimeType?: GoogleAIResponseMimeType;
|
|
88
|
+
/**
|
|
89
|
+
* Whether or not to stream.
|
|
90
|
+
* @default false
|
|
91
|
+
*/
|
|
92
|
+
streaming?: boolean;
|
|
88
93
|
}
|
|
94
|
+
export type GoogleAIToolType = BindToolsInput | GeminiTool;
|
|
89
95
|
/**
|
|
90
96
|
* The params which can be passed to the API at request time.
|
|
91
97
|
*/
|
|
92
98
|
export interface GoogleAIModelRequestParams extends GoogleAIModelParams {
|
|
93
|
-
tools?:
|
|
99
|
+
tools?: GoogleAIToolType[];
|
|
100
|
+
/**
|
|
101
|
+
* Force the model to use tools in a specific way.
|
|
102
|
+
*
|
|
103
|
+
* | Mode | Description |
|
|
104
|
+
* |----------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
105
|
+
* | "auto" | The default model behavior. The model decides whether to predict a function call or a natural language response. |
|
|
106
|
+
* | "any" | The model must predict only function calls. To limit the model to a subset of functions, define the allowed function names in `allowed_function_names`. |
|
|
107
|
+
* | "none" | The model must not predict function calls. This behavior is equivalent to a model request without any associated function declarations. |
|
|
108
|
+
* | string | The string value must be one of the function names. This will force the model to predict the specified function call. |
|
|
109
|
+
*
|
|
110
|
+
* The tool configuration's "any" mode ("forced function calling") is supported for Gemini 1.5 Pro models only.
|
|
111
|
+
*/
|
|
112
|
+
tool_choice?: string | "auto" | "any" | "none" | Record<string, any>;
|
|
113
|
+
/**
|
|
114
|
+
* Allowed functions to call when the mode is "any".
|
|
115
|
+
* If empty, any one of the provided functions are called.
|
|
116
|
+
*/
|
|
117
|
+
allowed_function_names?: string[];
|
|
94
118
|
}
|
|
95
119
|
export interface GoogleAIBaseLLMInput<AuthOptions> extends BaseLLMParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams, GoogleAISafetyParams {
|
|
96
120
|
}
|
|
97
|
-
export interface GoogleAIBaseLanguageModelCallOptions extends
|
|
121
|
+
export interface GoogleAIBaseLanguageModelCallOptions extends BaseChatModelCallOptions, GoogleAIModelRequestParams, GoogleAISafetyParams {
|
|
98
122
|
/**
|
|
99
123
|
* Whether or not to include usage data, like token counts
|
|
100
124
|
* in the streamed response chunks.
|
|
@@ -183,6 +207,12 @@ export interface GeminiRequest {
|
|
|
183
207
|
contents?: GeminiContent[];
|
|
184
208
|
systemInstruction?: GeminiContent;
|
|
185
209
|
tools?: GeminiTool[];
|
|
210
|
+
toolConfig?: {
|
|
211
|
+
functionCallingConfig: {
|
|
212
|
+
mode: "auto" | "any" | "none";
|
|
213
|
+
allowedFunctionNames?: string[];
|
|
214
|
+
};
|
|
215
|
+
};
|
|
186
216
|
safetySettings?: GeminiSafetySetting[];
|
|
187
217
|
generationConfig?: GeminiGenerationConfig;
|
|
188
218
|
}
|
package/dist/utils/common.cjs
CHANGED
|
@@ -1,11 +1,72 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.copyAndValidateModelParamsInto = exports.validateModelParams = exports.modelToFamily = exports.copyAIModelParamsInto = exports.copyAIModelParams = void 0;
|
|
3
|
+
exports.copyAndValidateModelParamsInto = exports.validateModelParams = exports.modelToFamily = exports.copyAIModelParamsInto = exports.convertToGeminiTools = exports.copyAIModelParams = void 0;
|
|
4
|
+
const base_1 = require("@langchain/core/language_models/base");
|
|
5
|
+
const function_calling_1 = require("@langchain/core/utils/function_calling");
|
|
4
6
|
const gemini_js_1 = require("./gemini.cjs");
|
|
7
|
+
const zod_to_gemini_parameters_js_1 = require("./zod_to_gemini_parameters.cjs");
|
|
5
8
|
function copyAIModelParams(params, options) {
|
|
6
9
|
return copyAIModelParamsInto(params, options, {});
|
|
7
10
|
}
|
|
8
11
|
exports.copyAIModelParams = copyAIModelParams;
|
|
12
|
+
function processToolChoice(toolChoice, allowedFunctionNames) {
|
|
13
|
+
if (!toolChoice) {
|
|
14
|
+
if (allowedFunctionNames) {
|
|
15
|
+
// Allowed func names is passed, return 'any' so it forces the model to use a tool.
|
|
16
|
+
return {
|
|
17
|
+
tool_choice: "any",
|
|
18
|
+
allowed_function_names: allowedFunctionNames,
|
|
19
|
+
};
|
|
20
|
+
}
|
|
21
|
+
return undefined;
|
|
22
|
+
}
|
|
23
|
+
if (toolChoice === "any" || toolChoice === "auto" || toolChoice === "none") {
|
|
24
|
+
return {
|
|
25
|
+
tool_choice: toolChoice,
|
|
26
|
+
allowed_function_names: allowedFunctionNames,
|
|
27
|
+
};
|
|
28
|
+
}
|
|
29
|
+
if (typeof toolChoice === "string") {
|
|
30
|
+
// String representing the function name.
|
|
31
|
+
// Return any to force the model to predict the specified function call.
|
|
32
|
+
return {
|
|
33
|
+
tool_choice: "any",
|
|
34
|
+
allowed_function_names: [...(allowedFunctionNames ?? []), toolChoice],
|
|
35
|
+
};
|
|
36
|
+
}
|
|
37
|
+
throw new Error("Object inputs for tool_choice not supported.");
|
|
38
|
+
}
|
|
39
|
+
/**
 * Collapse a heterogeneous list of tools (raw Gemini tools, LangChain tools,
 * or OpenAI-format tools) into a single-element GeminiTool array whose
 * functionDeclarations aggregates every function found.
 * Entries matching none of the known shapes are skipped without error.
 */
function convertToGeminiTools(tools) {
    const functionDeclarations = [];
    for (const tool of tools) {
        if ("functionDeclarations" in tool &&
            Array.isArray(tool.functionDeclarations)) {
            // Already Gemini-shaped: merge its declarations wholesale.
            functionDeclarations.push(...tool.functionDeclarations);
        }
        else if ((0, function_calling_1.isLangChainTool)(tool)) {
            // LangChain tool: derive Gemini parameters from its zod schema.
            functionDeclarations.push({
                name: tool.name,
                description: tool.description ?? `A function available to call.`,
                parameters: (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(tool.schema),
            });
        }
        else if ((0, base_1.isOpenAITool)(tool)) {
            // OpenAI tool: translate its JSON-schema parameter definition.
            functionDeclarations.push({
                name: tool.function.name,
                description: tool.function.description ?? `A function available to call.`,
                parameters: (0, zod_to_gemini_parameters_js_1.jsonSchemaToGeminiParameters)(tool.function.parameters),
            });
        }
    }
    return [{ functionDeclarations }];
}
exports.convertToGeminiTools = convertToGeminiTools;
|
|
9
70
|
function copyAIModelParamsInto(params, options, target) {
|
|
10
71
|
const ret = target || {};
|
|
11
72
|
const model = options?.model ?? params?.model ?? target.model;
|
|
@@ -32,59 +93,17 @@ function copyAIModelParamsInto(params, options, target) {
|
|
|
32
93
|
options?.responseMimeType ??
|
|
33
94
|
params?.responseMimeType ??
|
|
34
95
|
target?.responseMimeType;
|
|
35
|
-
ret.
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
const cleanedParameters = castTool.parameters;
|
|
46
|
-
if ("$schema" in cleanedParameters) {
|
|
47
|
-
delete cleanedParameters.$schema;
|
|
48
|
-
}
|
|
49
|
-
if ("additionalProperties" in cleanedParameters) {
|
|
50
|
-
delete cleanedParameters.additionalProperties;
|
|
51
|
-
}
|
|
52
|
-
const toolInGeminiFormat = {
|
|
53
|
-
functionDeclarations: [
|
|
54
|
-
{
|
|
55
|
-
name: castTool.name,
|
|
56
|
-
description: castTool.description,
|
|
57
|
-
parameters: cleanedParameters,
|
|
58
|
-
},
|
|
59
|
-
],
|
|
60
|
-
};
|
|
61
|
-
return toolInGeminiFormat;
|
|
62
|
-
}
|
|
63
|
-
else if ("functionDeclarations" in tool) {
|
|
64
|
-
return tool;
|
|
65
|
-
}
|
|
66
|
-
else {
|
|
67
|
-
return null;
|
|
68
|
-
}
|
|
69
|
-
})
|
|
70
|
-
.filter((tool) => tool !== null);
|
|
71
|
-
const structuredOutputTools = options?.tools
|
|
72
|
-
?.map((tool) => {
|
|
73
|
-
if ("lc_namespace" in tool) {
|
|
74
|
-
return tool;
|
|
75
|
-
}
|
|
76
|
-
else {
|
|
77
|
-
return null;
|
|
78
|
-
}
|
|
79
|
-
})
|
|
80
|
-
.filter((tool) => tool !== null);
|
|
81
|
-
if (structuredOutputTools &&
|
|
82
|
-
structuredOutputTools.length > 0 &&
|
|
83
|
-
geminiTools &&
|
|
84
|
-
geminiTools.length > 0) {
|
|
85
|
-
throw new Error(`Cannot mix structured tools with Gemini tools.\nReceived ${structuredOutputTools.length} structured tools and ${geminiTools.length} Gemini tools.`);
|
|
96
|
+
ret.streaming = options?.streaming ?? params?.streaming ?? target?.streaming;
|
|
97
|
+
const toolChoice = processToolChoice(options?.tool_choice, options?.allowed_function_names);
|
|
98
|
+
if (toolChoice) {
|
|
99
|
+
ret.tool_choice = toolChoice.tool_choice;
|
|
100
|
+
ret.allowed_function_names = toolChoice.allowed_function_names;
|
|
101
|
+
}
|
|
102
|
+
const tools = options?.tools;
|
|
103
|
+
if (tools) {
|
|
104
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
105
|
+
ret.tools = convertToGeminiTools(tools);
|
|
86
106
|
}
|
|
87
|
-
ret.tools = geminiTools ?? structuredOutputTools;
|
|
88
107
|
return ret;
|
|
89
108
|
}
|
|
90
109
|
exports.copyAIModelParamsInto = copyAIModelParamsInto;
|
package/dist/utils/common.d.ts
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
|
-
import type { GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleLLMModelFamily } from "../types.js";
|
|
1
|
+
import type { GeminiTool, GoogleAIBaseLanguageModelCallOptions, GoogleAIModelParams, GoogleAIModelRequestParams, GoogleAIToolType, GoogleLLMModelFamily } from "../types.js";
|
|
2
2
|
export declare function copyAIModelParams(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined): GoogleAIModelRequestParams;
|
|
3
|
+
export declare function convertToGeminiTools(tools: GoogleAIToolType[]): GeminiTool[];
|
|
3
4
|
export declare function copyAIModelParamsInto(params: GoogleAIModelParams | undefined, options: GoogleAIBaseLanguageModelCallOptions | undefined, target: GoogleAIModelParams): GoogleAIModelRequestParams;
|
|
4
5
|
export declare function modelToFamily(modelName: string | undefined): GoogleLLMModelFamily;
|
|
5
6
|
export declare function validateModelParams(params: GoogleAIModelParams | undefined): void;
|
package/dist/utils/common.js
CHANGED
|
@@ -1,7 +1,67 @@
|
|
|
1
|
+
import { isOpenAITool } from "@langchain/core/language_models/base";
|
|
2
|
+
import { isLangChainTool } from "@langchain/core/utils/function_calling";
|
|
1
3
|
import { isModelGemini, validateGeminiParams } from "./gemini.js";
|
|
4
|
+
import { jsonSchemaToGeminiParameters, zodToGeminiParameters, } from "./zod_to_gemini_parameters.js";
|
|
2
5
|
// Produce request parameters from model params + call options; delegates the
// merge to copyAIModelParamsInto with an empty target so inputs stay untouched.
export function copyAIModelParams(params, options) {
    return copyAIModelParamsInto(params, options, Object.create(Object.prototype, {}));
}
|
|
8
|
+
/**
 * Resolve the user-facing tool_choice / allowed_function_names options into
 * the Gemini tool-config shape, or undefined when nothing was specified.
 *
 * Rules:
 *  - falsy choice with an allow-list forces tool use ("any");
 *  - the literals "any", "auto", "none" pass through unchanged;
 *  - any other string is read as a function name ("any" + name appended);
 *  - object inputs are rejected with an Error.
 */
function processToolChoice(toolChoice, allowedFunctionNames) {
    if (!toolChoice) {
        if (!allowedFunctionNames) {
            return undefined;
        }
        // An allow-list with no explicit choice still forces tool use.
        return {
            tool_choice: "any",
            allowed_function_names: allowedFunctionNames,
        };
    }
    const passthrough = ["any", "auto", "none"];
    if (passthrough.includes(toolChoice)) {
        return {
            tool_choice: toolChoice,
            allowed_function_names: allowedFunctionNames,
        };
    }
    if (typeof toolChoice === "string") {
        // Bare function name: force the model to predict this specific call.
        const names = allowedFunctionNames ?? [];
        return {
            tool_choice: "any",
            allowed_function_names: [...names, toolChoice],
        };
    }
    throw new Error("Object inputs for tool_choice not supported.");
}
|
|
35
|
+
/**
 * Merge a mixed list of tool definitions (raw Gemini tools, LangChain tools,
 * or OpenAI-format tools) into a single-element GeminiTool array whose
 * functionDeclarations collects every function encountered.
 * Unrecognized entries are silently skipped.
 */
export function convertToGeminiTools(tools) {
    const declarations = [];
    for (const tool of tools) {
        if ("functionDeclarations" in tool &&
            Array.isArray(tool.functionDeclarations)) {
            // Already Gemini-shaped: merge its declarations wholesale.
            declarations.push(...tool.functionDeclarations);
        }
        else if (isLangChainTool(tool)) {
            // LangChain tool: derive Gemini parameters from its zod schema.
            declarations.push({
                name: tool.name,
                description: tool.description ?? `A function available to call.`,
                parameters: zodToGeminiParameters(tool.schema),
            });
        }
        else if (isOpenAITool(tool)) {
            // OpenAI tool: translate its JSON-schema parameter definition.
            declarations.push({
                name: tool.function.name,
                description: tool.function.description ?? `A function available to call.`,
                parameters: jsonSchemaToGeminiParameters(tool.function.parameters),
            });
        }
    }
    return [{ functionDeclarations: declarations }];
}
|
|
5
65
|
export function copyAIModelParamsInto(params, options, target) {
|
|
6
66
|
const ret = target || {};
|
|
7
67
|
const model = options?.model ?? params?.model ?? target.model;
|
|
@@ -28,59 +88,17 @@ export function copyAIModelParamsInto(params, options, target) {
|
|
|
28
88
|
options?.responseMimeType ??
|
|
29
89
|
params?.responseMimeType ??
|
|
30
90
|
target?.responseMimeType;
|
|
31
|
-
ret.
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
const cleanedParameters = castTool.parameters;
|
|
42
|
-
if ("$schema" in cleanedParameters) {
|
|
43
|
-
delete cleanedParameters.$schema;
|
|
44
|
-
}
|
|
45
|
-
if ("additionalProperties" in cleanedParameters) {
|
|
46
|
-
delete cleanedParameters.additionalProperties;
|
|
47
|
-
}
|
|
48
|
-
const toolInGeminiFormat = {
|
|
49
|
-
functionDeclarations: [
|
|
50
|
-
{
|
|
51
|
-
name: castTool.name,
|
|
52
|
-
description: castTool.description,
|
|
53
|
-
parameters: cleanedParameters,
|
|
54
|
-
},
|
|
55
|
-
],
|
|
56
|
-
};
|
|
57
|
-
return toolInGeminiFormat;
|
|
58
|
-
}
|
|
59
|
-
else if ("functionDeclarations" in tool) {
|
|
60
|
-
return tool;
|
|
61
|
-
}
|
|
62
|
-
else {
|
|
63
|
-
return null;
|
|
64
|
-
}
|
|
65
|
-
})
|
|
66
|
-
.filter((tool) => tool !== null);
|
|
67
|
-
const structuredOutputTools = options?.tools
|
|
68
|
-
?.map((tool) => {
|
|
69
|
-
if ("lc_namespace" in tool) {
|
|
70
|
-
return tool;
|
|
71
|
-
}
|
|
72
|
-
else {
|
|
73
|
-
return null;
|
|
74
|
-
}
|
|
75
|
-
})
|
|
76
|
-
.filter((tool) => tool !== null);
|
|
77
|
-
if (structuredOutputTools &&
|
|
78
|
-
structuredOutputTools.length > 0 &&
|
|
79
|
-
geminiTools &&
|
|
80
|
-
geminiTools.length > 0) {
|
|
81
|
-
throw new Error(`Cannot mix structured tools with Gemini tools.\nReceived ${structuredOutputTools.length} structured tools and ${geminiTools.length} Gemini tools.`);
|
|
91
|
+
ret.streaming = options?.streaming ?? params?.streaming ?? target?.streaming;
|
|
92
|
+
const toolChoice = processToolChoice(options?.tool_choice, options?.allowed_function_names);
|
|
93
|
+
if (toolChoice) {
|
|
94
|
+
ret.tool_choice = toolChoice.tool_choice;
|
|
95
|
+
ret.allowed_function_names = toolChoice.allowed_function_names;
|
|
96
|
+
}
|
|
97
|
+
const tools = options?.tools;
|
|
98
|
+
if (tools) {
|
|
99
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
100
|
+
ret.tools = convertToGeminiTools(tools);
|
|
82
101
|
}
|
|
83
|
-
ret.tools = geminiTools ?? structuredOutputTools;
|
|
84
102
|
return ret;
|
|
85
103
|
}
|
|
86
104
|
export function modelToFamily(modelName) {
|
package/dist/utils/gemini.cjs
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.MessageGeminiSafetyHandler = exports.DefaultGeminiSafetyHandler = exports.isModelGemini = exports.validateGeminiParams = exports.safeResponseToChatResult = exports.responseToChatResult = exports.safeResponseToBaseMessage = exports.responseToBaseMessage = exports.
|
|
3
|
+
exports.MessageGeminiSafetyHandler = exports.DefaultGeminiSafetyHandler = exports.isModelGemini = exports.validateGeminiParams = exports.safeResponseToChatResult = exports.responseToChatResult = exports.safeResponseToBaseMessage = exports.responseToBaseMessage = exports.partsToBaseMessageChunkFields = exports.responseToBaseMessageFields = exports.responseToChatGenerations = exports.partToChatGeneration = exports.partToMessageChunk = exports.chunkToString = exports.safeResponseToChatGeneration = exports.responseToChatGeneration = exports.safeResponseToGeneration = exports.responseToGeneration = exports.responseToGenerationInfo = exports.safeResponseToString = exports.responseToString = exports.partToText = exports.responseToParts = exports.responseToGenerateContentResponseData = exports.toolsRawToTools = exports.partsToToolsRaw = exports.partsToMessageContent = exports.baseMessageToContent = exports.messageContentToParts = void 0;
|
|
4
4
|
const uuid_1 = require("uuid");
|
|
5
5
|
const messages_1 = require("@langchain/core/messages");
|
|
6
6
|
const outputs_1 = require("@langchain/core/outputs");
|
|
@@ -445,7 +445,7 @@ function chunkToString(chunk) {
|
|
|
445
445
|
}
|
|
446
446
|
exports.chunkToString = chunkToString;
|
|
447
447
|
function partToMessageChunk(part) {
|
|
448
|
-
const fields =
|
|
448
|
+
const fields = partsToBaseMessageChunkFields([part]);
|
|
449
449
|
if (typeof fields.content === "string") {
|
|
450
450
|
return new messages_1.AIMessageChunk(fields);
|
|
451
451
|
}
|
|
@@ -510,12 +510,13 @@ function responseToChatGenerations(response) {
|
|
|
510
510
|
exports.responseToChatGenerations = responseToChatGenerations;
|
|
511
511
|
// Split a raw Gemini response into parts and fold them into the field bag
// used to construct a BaseMessage / AIMessageChunk.
function responseToBaseMessageFields(response) {
    return partsToBaseMessageChunkFields(responseToParts(response));
}
exports.responseToBaseMessageFields = responseToBaseMessageFields;
|
|
516
|
-
function
|
|
516
|
+
function partsToBaseMessageChunkFields(parts) {
|
|
517
517
|
const fields = {
|
|
518
518
|
content: partsToMessageContent(parts),
|
|
519
|
+
tool_call_chunks: [],
|
|
519
520
|
tool_calls: [],
|
|
520
521
|
invalid_tool_calls: [],
|
|
521
522
|
};
|
|
@@ -523,6 +524,12 @@ function partsToBaseMessageFields(parts) {
|
|
|
523
524
|
if (rawTools.length > 0) {
|
|
524
525
|
const tools = toolsRawToTools(rawTools);
|
|
525
526
|
for (const tool of tools) {
|
|
527
|
+
fields.tool_call_chunks?.push({
|
|
528
|
+
name: tool.function.name,
|
|
529
|
+
args: tool.function.arguments,
|
|
530
|
+
id: tool.id,
|
|
531
|
+
type: "tool_call_chunk",
|
|
532
|
+
});
|
|
526
533
|
try {
|
|
527
534
|
fields.tool_calls?.push({
|
|
528
535
|
name: tool.function.name,
|
|
@@ -535,7 +542,7 @@ function partsToBaseMessageFields(parts) {
|
|
|
535
542
|
catch (e) {
|
|
536
543
|
fields.invalid_tool_calls?.push({
|
|
537
544
|
name: tool.function.name,
|
|
538
|
-
args:
|
|
545
|
+
args: tool.function.arguments,
|
|
539
546
|
id: tool.id,
|
|
540
547
|
error: e.message,
|
|
541
548
|
type: "invalid_tool_call",
|
|
@@ -548,7 +555,7 @@ function partsToBaseMessageFields(parts) {
|
|
|
548
555
|
}
|
|
549
556
|
return fields;
|
|
550
557
|
}
|
|
551
|
-
exports.
|
|
558
|
+
exports.partsToBaseMessageChunkFields = partsToBaseMessageChunkFields;
|
|
552
559
|
function responseToBaseMessage(response) {
|
|
553
560
|
const fields = responseToBaseMessageFields(response);
|
|
554
561
|
return new messages_1.AIMessage(fields);
|
package/dist/utils/gemini.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import {
|
|
1
|
+
import { AIMessageChunkFields, BaseMessage, BaseMessageChunk, BaseMessageFields, MessageContent } from "@langchain/core/messages";
|
|
2
2
|
import { ChatGeneration, ChatGenerationChunk, ChatResult, Generation } from "@langchain/core/outputs";
|
|
3
3
|
import type { GoogleLLMResponse, GoogleAIModelParams, GeminiPart, GeminiContent, GenerateContentResponseData, GoogleAISafetyHandler } from "../types.js";
|
|
4
4
|
export declare function messageContentToParts(content: MessageContent): GeminiPart[];
|
|
@@ -57,7 +57,7 @@ export declare function partToMessageChunk(part: GeminiPart): BaseMessageChunk;
|
|
|
57
57
|
export declare function partToChatGeneration(part: GeminiPart): ChatGeneration;
|
|
58
58
|
export declare function responseToChatGenerations(response: GoogleLLMResponse): ChatGeneration[];
|
|
59
59
|
export declare function responseToBaseMessageFields(response: GoogleLLMResponse): BaseMessageFields;
|
|
60
|
-
export declare function
|
|
60
|
+
export declare function partsToBaseMessageChunkFields(parts: GeminiPart[]): AIMessageChunkFields;
|
|
61
61
|
export declare function responseToBaseMessage(response: GoogleLLMResponse): BaseMessage;
|
|
62
62
|
export declare function safeResponseToBaseMessage(response: GoogleLLMResponse, safetyHandler: GoogleAISafetyHandler): BaseMessage;
|
|
63
63
|
export declare function responseToChatResult(response: GoogleLLMResponse): ChatResult;
|
package/dist/utils/gemini.js
CHANGED
|
@@ -426,7 +426,7 @@ export function chunkToString(chunk) {
|
|
|
426
426
|
}
|
|
427
427
|
}
|
|
428
428
|
export function partToMessageChunk(part) {
|
|
429
|
-
const fields =
|
|
429
|
+
const fields = partsToBaseMessageChunkFields([part]);
|
|
430
430
|
if (typeof fields.content === "string") {
|
|
431
431
|
return new AIMessageChunk(fields);
|
|
432
432
|
}
|
|
@@ -488,11 +488,12 @@ export function responseToChatGenerations(response) {
|
|
|
488
488
|
}
|
|
489
489
|
// Convert a raw Gemini response into message fields: first extract its parts,
// then fold those parts into AIMessageChunk-compatible fields.
export function responseToBaseMessageFields(response) {
    const extractedParts = responseToParts(response);
    return partsToBaseMessageChunkFields(extractedParts);
}
|
|
493
|
-
export function
|
|
493
|
+
export function partsToBaseMessageChunkFields(parts) {
|
|
494
494
|
const fields = {
|
|
495
495
|
content: partsToMessageContent(parts),
|
|
496
|
+
tool_call_chunks: [],
|
|
496
497
|
tool_calls: [],
|
|
497
498
|
invalid_tool_calls: [],
|
|
498
499
|
};
|
|
@@ -500,6 +501,12 @@ export function partsToBaseMessageFields(parts) {
|
|
|
500
501
|
if (rawTools.length > 0) {
|
|
501
502
|
const tools = toolsRawToTools(rawTools);
|
|
502
503
|
for (const tool of tools) {
|
|
504
|
+
fields.tool_call_chunks?.push({
|
|
505
|
+
name: tool.function.name,
|
|
506
|
+
args: tool.function.arguments,
|
|
507
|
+
id: tool.id,
|
|
508
|
+
type: "tool_call_chunk",
|
|
509
|
+
});
|
|
503
510
|
try {
|
|
504
511
|
fields.tool_calls?.push({
|
|
505
512
|
name: tool.function.name,
|
|
@@ -512,7 +519,7 @@ export function partsToBaseMessageFields(parts) {
|
|
|
512
519
|
catch (e) {
|
|
513
520
|
fields.invalid_tool_calls?.push({
|
|
514
521
|
name: tool.function.name,
|
|
515
|
-
args:
|
|
522
|
+
args: tool.function.arguments,
|
|
516
523
|
id: tool.id,
|
|
517
524
|
error: e.message,
|
|
518
525
|
type: "invalid_tool_call",
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@langchain/google-common",
|
|
3
|
-
"version": "0.0.
|
|
3
|
+
"version": "0.0.23",
|
|
4
4
|
"description": "Core types and classes for Google services.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"engines": {
|
|
@@ -15,7 +15,7 @@
|
|
|
15
15
|
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-google-common/",
|
|
16
16
|
"scripts": {
|
|
17
17
|
"build": "yarn turbo:command build:internal --filter=@langchain/google-common",
|
|
18
|
-
"build:internal": "yarn
|
|
18
|
+
"build:internal": "yarn lc_build_v2 --create-entrypoints --pre --tree-shaking",
|
|
19
19
|
"build:deps": "yarn run turbo:command build --filter=@langchain/core",
|
|
20
20
|
"build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rm -rf dist/tests dist/**/tests",
|
|
21
21
|
"build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && yarn move-cjs-to-dist && rm -rf dist-cjs",
|
|
@@ -40,13 +40,13 @@
|
|
|
40
40
|
"author": "LangChain",
|
|
41
41
|
"license": "MIT",
|
|
42
42
|
"dependencies": {
|
|
43
|
-
"@langchain/core": ">=0.2.
|
|
43
|
+
"@langchain/core": ">=0.2.21 <0.3.0",
|
|
44
44
|
"uuid": "^10.0.0",
|
|
45
45
|
"zod-to-json-schema": "^3.22.4"
|
|
46
46
|
},
|
|
47
47
|
"devDependencies": {
|
|
48
48
|
"@jest/globals": "^29.5.0",
|
|
49
|
-
"@langchain/scripts": "~0.0.
|
|
49
|
+
"@langchain/scripts": "~0.0.20",
|
|
50
50
|
"@swc/core": "^1.3.90",
|
|
51
51
|
"@swc/jest": "^0.2.29",
|
|
52
52
|
"@tsconfig/recommended": "^1.0.3",
|
|
@@ -63,7 +63,7 @@
|
|
|
63
63
|
"jest": "^29.5.0",
|
|
64
64
|
"jest-environment-node": "^29.6.4",
|
|
65
65
|
"prettier": "^2.8.3",
|
|
66
|
-
"release-it": "^
|
|
66
|
+
"release-it": "^17.6.0",
|
|
67
67
|
"rollup": "^4.5.2",
|
|
68
68
|
"ts-jest": "^29.1.0",
|
|
69
69
|
"typescript": "<5.2.0",
|