@langchain/google-common 0.0.10 → 0.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +74 -3
- package/dist/chat_models.d.ts +7 -0
- package/dist/chat_models.js +74 -3
- package/dist/connection.cjs +22 -7
- package/dist/connection.d.ts +5 -1
- package/dist/connection.js +22 -7
- package/dist/llms.cjs +6 -0
- package/dist/llms.d.ts +2 -1
- package/dist/llms.js +6 -0
- package/dist/types.d.ts +23 -1
- package/dist/utils/common.cjs +8 -0
- package/dist/utils/common.js +8 -0
- package/dist/utils/gemini.cjs +9 -7
- package/dist/utils/gemini.d.ts +1 -1
- package/dist/utils/gemini.js +9 -7
- package/dist/utils/zod_to_gemini_parameters.cjs +23 -2
- package/dist/utils/zod_to_gemini_parameters.js +23 -2
- package/package.json +1 -1
package/dist/chat_models.cjs
CHANGED
@@ -15,10 +15,73 @@ const auth_js_1 = require("./auth.cjs");
 const failed_handler_js_1 = require("./utils/failed_handler.cjs");
 const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.cjs");
 class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
+    constructor(fields, caller, client, streaming) {
+        super(fields, caller, client, streaming);
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.convertSystemMessageToHumanContent =
+            fields?.convertSystemMessageToHumanContent;
+    }
+    get useSystemInstruction() {
+        return typeof this.convertSystemMessageToHumanContent === "boolean"
+            ? !this.convertSystemMessageToHumanContent
+            : this.computeUseSystemInstruction;
+    }
+    get computeUseSystemInstruction() {
+        // This works on models from April 2024 and later
+        // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later
+        // AI Studio: gemini-1.5-pro-latest
+        if (this.modelFamily === "palm") {
+            return false;
+        }
+        else if (this.modelName === "gemini-1.0-pro-001") {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-1.0-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName === "gemini-pro" && this.platform === "gai") {
+            // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001
+            return false;
+        }
+        return true;
+    }
     formatContents(input, _parameters) {
         return input
-            .map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1]))
-            .reduce((acc, cur) =>
+            .map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1], this.useSystemInstruction))
+            .reduce((acc, cur) => {
+            // Filter out the system content, since those don't belong
+            // in the actual content.
+            const hasNoSystem = cur.every((content) => content.role !== "system");
+            return hasNoSystem ? [...acc, ...cur] : acc;
+        }, []);
+    }
+    formatSystemInstruction(input, _parameters) {
+        if (!this.useSystemInstruction) {
+            return {};
+        }
+        let ret = {};
+        input.forEach((message, index) => {
+            if (message._getType() === "system") {
+                // For system types, we only want it if it is the first message,
+                // if it appears anywhere else, it should be an error.
+                if (index === 0) {
+                    // eslint-disable-next-line prefer-destructuring
+                    ret = (0, gemini_js_1.baseMessageToContent)(message, undefined, true)[0];
+                }
+                else {
+                    throw new Error("System messages are only permitted as the first passed message.");
+                }
+            }
+        });
+        return ret;
     }
 }
 function convertToGeminiTools(structuredTools) {
@@ -54,11 +117,12 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
             writable: true,
             value: true
         });
+        // Set based on modelName
         Object.defineProperty(this, "model", {
             enumerable: true,
             configurable: true,
             writable: true,
-            value:
+            value: void 0
         });
         Object.defineProperty(this, "modelName", {
             enumerable: true,
@@ -102,6 +166,13 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
             writable: true,
             value: []
         });
+        // May intentionally be undefined, meaning to compute this.
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "safetyHandler", {
             enumerable: true,
             configurable: true,
package/dist/chat_models.d.ts
CHANGED
@@ -6,13 +6,19 @@ import { AIMessageChunk } from "@langchain/core/messages";
 import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
 import type { z } from "zod";
 import { Runnable, RunnableInterface } from "@langchain/core/runnables";
+import { AsyncCaller } from "@langchain/core/utils/async_caller";
 import { StructuredToolInterface } from "@langchain/core/tools";
 import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
 import { AbstractGoogleLLMConnection } from "./connection.js";
 import { GoogleAbstractedClient } from "./auth.js";
 import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams } from "./types.js";
 declare class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<BaseMessage[], AuthOptions> {
+    convertSystemMessageToHumanContent: boolean | undefined;
+    constructor(fields: GoogleAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming: boolean);
+    get useSystemInstruction(): boolean;
+    get computeUseSystemInstruction(): boolean;
     formatContents(input: BaseMessage[], _parameters: GoogleAIModelParams): GeminiContent[];
+    formatSystemInstruction(input: BaseMessage[], _parameters: GoogleAIModelParams): GeminiContent;
 }
 /**
  * Input to chat model class.
@@ -33,6 +39,7 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
     topK: number;
     stopSequences: string[];
     safetySettings: GoogleAISafetySetting[];
+    convertSystemMessageToHumanContent: boolean | undefined;
     safetyHandler: GoogleAISafetyHandler;
     protected connection: ChatConnection<AuthOptions>;
     protected streamedConnection: ChatConnection<AuthOptions>;
package/dist/chat_models.js
CHANGED
@@ -12,10 +12,73 @@ import { ApiKeyGoogleAuth } from "./auth.js";
 import { ensureParams } from "./utils/failed_handler.js";
 import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
 class ChatConnection extends AbstractGoogleLLMConnection {
+    constructor(fields, caller, client, streaming) {
+        super(fields, caller, client, streaming);
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.convertSystemMessageToHumanContent =
+            fields?.convertSystemMessageToHumanContent;
+    }
+    get useSystemInstruction() {
+        return typeof this.convertSystemMessageToHumanContent === "boolean"
+            ? !this.convertSystemMessageToHumanContent
+            : this.computeUseSystemInstruction;
+    }
+    get computeUseSystemInstruction() {
+        // This works on models from April 2024 and later
+        // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later
+        // AI Studio: gemini-1.5-pro-latest
+        if (this.modelFamily === "palm") {
+            return false;
+        }
+        else if (this.modelName === "gemini-1.0-pro-001") {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-1.0-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName === "gemini-pro" && this.platform === "gai") {
+            // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001
+            return false;
+        }
+        return true;
+    }
     formatContents(input, _parameters) {
         return input
-            .map((msg, i) => baseMessageToContent(msg, input[i - 1]))
-            .reduce((acc, cur) =>
+            .map((msg, i) => baseMessageToContent(msg, input[i - 1], this.useSystemInstruction))
+            .reduce((acc, cur) => {
+            // Filter out the system content, since those don't belong
+            // in the actual content.
+            const hasNoSystem = cur.every((content) => content.role !== "system");
+            return hasNoSystem ? [...acc, ...cur] : acc;
+        }, []);
+    }
+    formatSystemInstruction(input, _parameters) {
+        if (!this.useSystemInstruction) {
+            return {};
+        }
+        let ret = {};
+        input.forEach((message, index) => {
+            if (message._getType() === "system") {
+                // For system types, we only want it if it is the first message,
+                // if it appears anywhere else, it should be an error.
+                if (index === 0) {
+                    // eslint-disable-next-line prefer-destructuring
+                    ret = baseMessageToContent(message, undefined, true)[0];
+                }
+                else {
+                    throw new Error("System messages are only permitted as the first passed message.");
+                }
+            }
+        });
+        return ret;
     }
 }
 function convertToGeminiTools(structuredTools) {
@@ -51,11 +114,12 @@ export class ChatGoogleBase extends BaseChatModel {
             writable: true,
             value: true
         });
+        // Set based on modelName
         Object.defineProperty(this, "model", {
             enumerable: true,
             configurable: true,
             writable: true,
-            value:
+            value: void 0
         });
         Object.defineProperty(this, "modelName", {
             enumerable: true,
@@ -99,6 +163,13 @@ export class ChatGoogleBase extends BaseChatModel {
             writable: true,
             value: []
         });
+        // May intentionally be undefined, meaning to compute this.
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "safetyHandler", {
             enumerable: true,
             configurable: true,
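formatSystemInstruction accepts a system message only in the first position and throws otherwise. A minimal sketch of that rule using @langchain/core message classes; extractSystemPrompt is illustrative, not a package export.

    import { BaseMessage, HumanMessage, SystemMessage } from "@langchain/core/messages";

    function extractSystemPrompt(messages: BaseMessage[]): string | undefined {
      let system: string | undefined;
      messages.forEach((message, index) => {
        if (message._getType() === "system") {
          if (index === 0) {
            // Only the leading system message becomes the system instruction.
            system = message.content as string; // assumes a plain-string prompt
          } else {
            throw new Error("System messages are only permitted as the first passed message.");
          }
        }
      });
      return system;
    }

    // Accepted: the system prompt leads the conversation.
    extractSystemPrompt([new SystemMessage("Answer tersely."), new HumanMessage("Hi")]);
    // Throws: a system message appears mid-conversation.
    // extractSystemPrompt([new HumanMessage("Hi"), new SystemMessage("Answer tersely.")]);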
package/dist/connection.cjs
CHANGED
@@ -28,21 +28,26 @@ class GoogleConnection {
         this.streaming = streaming ?? false;
     }
     async _clientInfoHeaders() {
-        const clientLibraryVersion = await this.
+        const { userAgent, clientLibraryVersion } = await this._getClientInfo();
         return {
-            "User-Agent":
+            "User-Agent": userAgent,
+            "Client-Info": clientLibraryVersion,
         };
     }
-    async
+    async _getClientInfo() {
         const env = await (0, env_1.getRuntimeEnvironment)();
         const langchain = env?.library ?? "langchain-js";
-
+        // TODO: Add an API for getting the current LangChain version
+        const langchainVersion = "0";
         const moduleName = await this._moduleName();
-        let
+        let clientLibraryVersion = `${langchain}/${langchainVersion}`;
         if (moduleName && moduleName.length) {
-
+            clientLibraryVersion = `${clientLibraryVersion}-${moduleName}`;
         }
-        return
+        return {
+            userAgent: clientLibraryVersion,
+            clientLibraryVersion: `${langchainVersion}-${moduleName}`,
+        };
     }
     async _moduleName() {
         return this.constructor.name;
@@ -208,11 +213,15 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
             topP: parameters.topP,
             maxOutputTokens: parameters.maxOutputTokens,
             stopSequences: parameters.stopSequences,
+            responseMimeType: parameters.responseMimeType,
         };
     }
     formatSafetySettings(_input, parameters) {
         return parameters.safetySettings ?? [];
     }
+    formatSystemInstruction(_input, _parameters) {
+        return {};
+    }
     // Borrowed from the OpenAI invocation params test
     isStructuredToolArray(tools) {
         return (tools !== undefined &&
@@ -253,6 +262,7 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
         const generationConfig = this.formatGenerationConfig(input, parameters);
         const tools = this.formatTools(input, parameters);
         const safetySettings = this.formatSafetySettings(input, parameters);
+        const systemInstruction = this.formatSystemInstruction(input, parameters);
         const ret = {
             contents,
             generationConfig,
@@ -263,6 +273,11 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
         if (safetySettings && safetySettings.length) {
             ret.safetySettings = safetySettings;
         }
+        if (systemInstruction?.role &&
+            systemInstruction?.parts &&
+            systemInstruction?.parts?.length) {
+            ret.systemInstruction = systemInstruction;
+        }
         return ret;
     }
 }
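formatData now asks formatSystemInstruction for an instruction block and attaches it only when it is non-empty (the abstract base returns {}). A sketch of that guard, using locally declared stand-ins for the package's Gemini types.

    // Sketch only: mirrors the attach-if-non-empty check added to formatData.
    interface GeminiContentSketch { role: string; parts: { text: string }[] }
    interface GeminiRequestSketch {
      contents: GeminiContentSketch[];
      systemInstruction?: GeminiContentSketch;
    }

    function buildRequest(
      contents: GeminiContentSketch[],
      systemInstruction: Partial<GeminiContentSketch>
    ): GeminiRequestSketch {
      const ret: GeminiRequestSketch = { contents };
      // Only attach an instruction that actually has a role and at least one part.
      if (systemInstruction.role && systemInstruction.parts && systemInstruction.parts.length) {
        ret.systemInstruction = systemInstruction as GeminiContentSketch;
      }
      return ret;
    }

    // With the base-class default of {}, nothing is attached:
    buildRequest([{ role: "user", parts: [{ text: "Hi" }] }], {});
    // The chat connection supplies a real instruction when useSystemInstruction is true:
    buildRequest(
      [{ role: "user", parts: [{ text: "Hi" }] }],
      { role: "system", parts: [{ text: "Answer tersely." }] }
    );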
package/dist/connection.d.ts
CHANGED
@@ -11,7 +11,10 @@ export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCa
     abstract buildUrl(): Promise<string>;
     abstract buildMethod(): GoogleAbstractedClientOpsMethod;
     _clientInfoHeaders(): Promise<Record<string, string>>;
-
+    _getClientInfo(): Promise<{
+        userAgent: string;
+        clientLibraryVersion: string;
+    }>;
     _moduleName(): Promise<string>;
     _request(data: unknown | undefined, options: CallOptions): Promise<ResponseType>;
 }
@@ -45,6 +48,7 @@ export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptio
     abstract formatContents(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiContent[];
     formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiGenerationConfig;
     formatSafetySettings(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiSafetySetting[];
+    formatSystemInstruction(_input: MessageType, _parameters: GoogleAIModelRequestParams): GeminiContent;
     isStructuredToolArray(tools?: unknown[]): tools is StructuredToolInterface[];
     structuredToolToFunctionDeclaration(tool: StructuredToolInterface): GeminiFunctionDeclaration;
     structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[];
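The declaration now exposes _getClientInfo alongside _clientInfoHeaders. A sketch of the header shape it produces, assuming the hard-coded langchainVersion of "0" noted in the TODO; the helper below is illustrative, not a package export.

    // Sketch only: reproduces the User-Agent / Client-Info values built above.
    async function clientInfoHeaders(moduleName: string): Promise<Record<string, string>> {
      const langchain = "langchain-js";        // fallback library name from the source
      const langchainVersion = "0";            // hard-coded pending a version API
      let clientLibraryVersion = `${langchain}/${langchainVersion}`;
      if (moduleName.length) {
        clientLibraryVersion = `${clientLibraryVersion}-${moduleName}`;
      }
      return {
        "User-Agent": clientLibraryVersion,
        "Client-Info": `${langchainVersion}-${moduleName}`,
      };
    }

    // e.g. { "User-Agent": "langchain-js/0-ChatConnection", "Client-Info": "0-ChatConnection" }
    clientInfoHeaders("ChatConnection").then(console.log);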
package/dist/connection.js
CHANGED
@@ -25,21 +25,26 @@ export class GoogleConnection {
         this.streaming = streaming ?? false;
     }
     async _clientInfoHeaders() {
-        const clientLibraryVersion = await this.
+        const { userAgent, clientLibraryVersion } = await this._getClientInfo();
         return {
-            "User-Agent":
+            "User-Agent": userAgent,
+            "Client-Info": clientLibraryVersion,
         };
     }
-    async
+    async _getClientInfo() {
         const env = await getRuntimeEnvironment();
         const langchain = env?.library ?? "langchain-js";
-
+        // TODO: Add an API for getting the current LangChain version
+        const langchainVersion = "0";
         const moduleName = await this._moduleName();
-        let
+        let clientLibraryVersion = `${langchain}/${langchainVersion}`;
         if (moduleName && moduleName.length) {
-
+            clientLibraryVersion = `${clientLibraryVersion}-${moduleName}`;
         }
-        return
+        return {
+            userAgent: clientLibraryVersion,
+            clientLibraryVersion: `${langchainVersion}-${moduleName}`,
+        };
     }
     async _moduleName() {
         return this.constructor.name;
@@ -202,11 +207,15 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
             topP: parameters.topP,
             maxOutputTokens: parameters.maxOutputTokens,
             stopSequences: parameters.stopSequences,
+            responseMimeType: parameters.responseMimeType,
         };
     }
     formatSafetySettings(_input, parameters) {
         return parameters.safetySettings ?? [];
     }
+    formatSystemInstruction(_input, _parameters) {
+        return {};
+    }
     // Borrowed from the OpenAI invocation params test
     isStructuredToolArray(tools) {
         return (tools !== undefined &&
@@ -247,6 +256,7 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
         const generationConfig = this.formatGenerationConfig(input, parameters);
         const tools = this.formatTools(input, parameters);
         const safetySettings = this.formatSafetySettings(input, parameters);
+        const systemInstruction = this.formatSystemInstruction(input, parameters);
         const ret = {
             contents,
             generationConfig,
@@ -257,6 +267,11 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
         if (safetySettings && safetySettings.length) {
             ret.safetySettings = safetySettings;
         }
+        if (systemInstruction?.role &&
+            systemInstruction?.parts &&
+            systemInstruction?.parts?.length) {
+            ret.systemInstruction = systemInstruction;
+        }
         return ret;
     }
 }
package/dist/llms.cjs
CHANGED
@@ -107,6 +107,12 @@ class GoogleBaseLLM extends llms_1.LLM {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "responseMimeType", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "text/plain"
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
package/dist/llms.d.ts
CHANGED
@@ -3,7 +3,7 @@ import { LLM } from "@langchain/core/language_models/llms";
 import { type BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
 import { BaseMessage, MessageContent } from "@langchain/core/messages";
 import { AbstractGoogleLLMConnection } from "./connection.js";
-import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent } from "./types.js";
+import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GooglePlatformType, GeminiContent, GoogleAIResponseMimeType } from "./types.js";
 import { GoogleAbstractedClient } from "./auth.js";
 import { ChatGoogleBase } from "./chat_models.js";
 import type { GoogleBaseLLMInput, GoogleAISafetyHandler } from "./types.js";
@@ -27,6 +27,7 @@ export declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguag
     stopSequences: string[];
     safetySettings: GoogleAISafetySetting[];
     safetyHandler: GoogleAISafetyHandler;
+    responseMimeType: GoogleAIResponseMimeType;
     protected connection: GoogleLLMConnection<AuthOptions>;
     protected streamedConnection: GoogleLLMConnection<AuthOptions>;
     constructor(fields?: GoogleBaseLLMInput<AuthOptions>);
package/dist/llms.js
CHANGED
@@ -104,6 +104,12 @@ export class GoogleBaseLLM extends LLM {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "responseMimeType", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "text/plain"
+        });
         Object.defineProperty(this, "connection", {
             enumerable: true,
             configurable: true,
package/dist/types.d.ts
CHANGED
@@ -37,6 +37,7 @@ export interface GoogleAISafetySetting {
     category: string;
     threshold: string;
 }
+export type GoogleAIResponseMimeType = "text/plain" | "application/json";
 export interface GoogleAIModelParams {
     /** Model to use */
     model?: string;
@@ -73,6 +74,17 @@ export interface GoogleAIModelParams {
     topK?: number;
     stopSequences?: string[];
     safetySettings?: GoogleAISafetySetting[];
+    convertSystemMessageToHumanContent?: boolean;
+    /**
+     * Available for `gemini-1.5-pro`.
+     * The output format of the generated candidate text.
+     * Supported MIME types:
+     *   - `text/plain`: Text output.
+     *   - `application/json`: JSON response in the candidates.
+     *
+     * @default "text/plain"
+     */
+    responseMimeType?: GoogleAIResponseMimeType;
 }
 /**
  * The params which can be passed to the API at request time.
@@ -128,7 +140,7 @@ export type GeminiSafetyRating = {
     category: string;
     probability: string;
 } & Record<string, unknown>;
-export type GeminiRole = "user" | "model" | "function";
+export type GeminiRole = "system" | "user" | "model" | "function";
 export interface GeminiContent {
     parts: GeminiPart[];
     role: GeminiRole;
@@ -159,9 +171,11 @@ export interface GeminiGenerationConfig {
     temperature?: number;
     topP?: number;
     topK?: number;
+    responseMimeType?: GoogleAIResponseMimeType;
 }
 export interface GeminiRequest {
     contents?: GeminiContent[];
+    systemInstruction?: GeminiContent;
     tools?: GeminiTool[];
     safetySettings?: GeminiSafetySetting[];
     generationConfig?: GeminiGenerationConfig;
@@ -202,4 +216,12 @@ export interface GoogleAISafetyHandler {
 export interface GoogleAISafetyParams {
     safetyHandler?: GoogleAISafetyHandler;
 }
+export type GeminiJsonSchema = Record<string, unknown> & {
+    properties?: Record<string, GeminiJsonSchema>;
+    type: GeminiFunctionSchemaType;
+};
+export interface GeminiJsonSchemaDirty extends GeminiJsonSchema {
+    properties?: Record<string, GeminiJsonSchemaDirty>;
+    additionalProperties?: boolean;
+}
 export {};
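Putting the type additions together, a request body can now carry a systemInstruction block and a responseMimeType in its generationConfig. The interfaces below are locally re-declared, trimmed copies of the ones above, used only to type the example literal.

    // Sketch only: where the two new fields sit in a Gemini request.
    type GeminiRole = "system" | "user" | "model" | "function";
    interface GeminiContentSketch { parts: { text: string }[]; role: GeminiRole }
    interface GeminiGenerationConfigSketch {
      temperature?: number;
      responseMimeType?: "text/plain" | "application/json";
    }
    interface GeminiRequestSketch {
      contents?: GeminiContentSketch[];
      systemInstruction?: GeminiContentSketch;
      generationConfig?: GeminiGenerationConfigSketch;
    }

    const request: GeminiRequestSketch = {
      systemInstruction: { role: "system", parts: [{ text: "Reply with JSON only." }] },
      contents: [{ role: "user", parts: [{ text: "List three colors." }] }],
      generationConfig: { temperature: 0, responseMimeType: "application/json" },
    };
    console.log(JSON.stringify(request, null, 2));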
package/dist/utils/common.cjs
CHANGED
@@ -24,6 +24,14 @@ function copyAIModelParamsInto(params, options, target) {
         options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
     ret.safetySettings =
         options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
+    ret.convertSystemMessageToHumanContent =
+        options?.convertSystemMessageToHumanContent ??
+            params?.convertSystemMessageToHumanContent ??
+            target?.convertSystemMessageToHumanContent;
+    ret.responseMimeType =
+        options?.responseMimeType ??
+            params?.responseMimeType ??
+            target?.responseMimeType;
     ret.tools = options?.tools;
     // Ensure tools are formatted properly for Gemini
     const geminiTools = options?.tools
package/dist/utils/common.js
CHANGED
@@ -20,6 +20,14 @@ export function copyAIModelParamsInto(params, options, target) {
         options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
     ret.safetySettings =
         options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
+    ret.convertSystemMessageToHumanContent =
+        options?.convertSystemMessageToHumanContent ??
+            params?.convertSystemMessageToHumanContent ??
+            target?.convertSystemMessageToHumanContent;
+    ret.responseMimeType =
+        options?.responseMimeType ??
+            params?.responseMimeType ??
+            target?.responseMimeType;
     ret.tools = options?.tools;
     // Ensure tools are formatted properly for Gemini
     const geminiTools = options?.tools
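copyAIModelParamsInto copies the two new fields with the same precedence as the existing ones: the options argument first, then params, then whatever is already set on the target. A standalone sketch; mergeParams is illustrative, not a package export.

    // Sketch only: the options ?? params ?? target fallback chain shown above.
    interface ParamsSketch {
      convertSystemMessageToHumanContent?: boolean;
      responseMimeType?: "text/plain" | "application/json";
    }

    function mergeParams(
      options: ParamsSketch | undefined,
      params: ParamsSketch | undefined,
      target: ParamsSketch
    ): ParamsSketch {
      return {
        convertSystemMessageToHumanContent:
          options?.convertSystemMessageToHumanContent ??
          params?.convertSystemMessageToHumanContent ??
          target.convertSystemMessageToHumanContent,
        responseMimeType:
          options?.responseMimeType ?? params?.responseMimeType ?? target.responseMimeType,
      };
    }

    // The options value wins over the params argument and the target's current value:
    console.log(mergeParams({ responseMimeType: "application/json" }, undefined, { responseMimeType: "text/plain" }));
    // => { convertSystemMessageToHumanContent: undefined, responseMimeType: "application/json" }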
package/dist/utils/gemini.cjs
CHANGED
@@ -158,11 +158,13 @@ function roleMessageToContent(role, message) {
         },
     ];
 }
-function systemMessageToContent(message) {
-    return
-
-
-
+function systemMessageToContent(message, useSystemInstruction) {
+    return useSystemInstruction
+        ? roleMessageToContent("system", message)
+        : [
+            ...roleMessageToContent("user", message),
+            ...roleMessageToContent("model", new messages_1.AIMessage("Ok")),
+        ];
 }
 function toolMessageToContent(message, prevMessage) {
     const contentStr = typeof message.content === "string"
@@ -211,11 +213,11 @@ function toolMessageToContent(message, prevMessage) {
         ];
     }
 }
-function baseMessageToContent(message, prevMessage) {
+function baseMessageToContent(message, prevMessage, useSystemInstruction) {
     const type = message._getType();
     switch (type) {
         case "system":
-            return systemMessageToContent(message);
+            return systemMessageToContent(message, useSystemInstruction);
         case "human":
             return roleMessageToContent("user", message);
         case "ai":
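systemMessageToContent now produces one of two shapes: with useSystemInstruction=true a single "system" content block, with false the legacy user/model pair ending in an "Ok" acknowledgement. A sketch of both outputs; the function and SketchContent type are illustrative, not package exports.

    import { SystemMessage } from "@langchain/core/messages";

    type SketchContent = { role: "system" | "user" | "model"; parts: { text: string }[] };

    function systemMessageToContentSketch(message: SystemMessage, useSystemInstruction: boolean): SketchContent[] {
      const text = message.content as string; // assumes a plain-string system prompt
      return useSystemInstruction
        ? [{ role: "system", parts: [{ text }] }]
        : [
            { role: "user", parts: [{ text }] },
            // the legacy path injects the model acknowledgement built from new AIMessage("Ok") above
            { role: "model", parts: [{ text: "Ok" }] },
          ];
    }

    console.log(systemMessageToContentSketch(new SystemMessage("Be brief."), true));
    // -> [{ role: "system", parts: [{ text: "Be brief." }] }]
    console.log(systemMessageToContentSketch(new SystemMessage("Be brief."), false));
    // -> a user copy of the prompt followed by { role: "model", parts: [{ text: "Ok" }] }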
package/dist/utils/gemini.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { AIMessageFields, BaseMessage, BaseMessageChunk, BaseMessageFields, Mess
 import { ChatGeneration, ChatGenerationChunk, ChatResult, Generation } from "@langchain/core/outputs";
 import type { GoogleLLMResponse, GoogleAIModelParams, GeminiPart, GeminiContent, GenerateContentResponseData, GoogleAISafetyHandler } from "../types.js";
 export declare function messageContentToParts(content: MessageContent): GeminiPart[];
-export declare function baseMessageToContent(message: BaseMessage, prevMessage
+export declare function baseMessageToContent(message: BaseMessage, prevMessage: BaseMessage | undefined, useSystemInstruction: boolean): GeminiContent[];
 export declare function partsToMessageContent(parts: GeminiPart[]): MessageContent;
 interface FunctionCall {
     name: string;
package/dist/utils/gemini.js
CHANGED
@@ -154,11 +154,13 @@ function roleMessageToContent(role, message) {
         },
     ];
 }
-function systemMessageToContent(message) {
-    return
-
-
-
+function systemMessageToContent(message, useSystemInstruction) {
+    return useSystemInstruction
+        ? roleMessageToContent("system", message)
+        : [
+            ...roleMessageToContent("user", message),
+            ...roleMessageToContent("model", new AIMessage("Ok")),
+        ];
 }
 function toolMessageToContent(message, prevMessage) {
     const contentStr = typeof message.content === "string"
@@ -207,11 +209,11 @@ function toolMessageToContent(message, prevMessage) {
         ];
     }
 }
-export function baseMessageToContent(message, prevMessage) {
+export function baseMessageToContent(message, prevMessage, useSystemInstruction) {
     const type = message._getType();
     switch (type) {
         case "system":
-            return systemMessageToContent(message);
+            return systemMessageToContent(message, useSystemInstruction);
         case "human":
             return roleMessageToContent("user", message);
         case "ai":
package/dist/utils/zod_to_gemini_parameters.cjs
CHANGED
@@ -3,14 +3,35 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.zodToGeminiParameters = void 0;
 const zod_to_json_schema_1 = require("zod-to-json-schema");
+function removeAdditionalProperties(schema) {
+    const updatedSchema = { ...schema };
+    if (Object.hasOwn(updatedSchema, "additionalProperties")) {
+        delete updatedSchema.additionalProperties;
+    }
+    if (updatedSchema.properties) {
+        const keys = Object.keys(updatedSchema.properties);
+        removeProperties(updatedSchema.properties, keys, 0);
+    }
+    return updatedSchema;
+}
+function removeProperties(properties, keys, index) {
+    if (index >= keys.length) {
+        return;
+    }
+    const key = keys[index];
+    // eslint-disable-next-line no-param-reassign
+    properties[key] = removeAdditionalProperties(properties[key]);
+    removeProperties(properties, keys, index + 1);
+}
 function zodToGeminiParameters(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 zodObj) {
     // Gemini doesn't accept either the $schema or additionalProperties
     // attributes, so we need to explicitly remove them.
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    const jsonSchema =
-    const
+    // const jsonSchema = zodToJsonSchema(zodObj) as any;
+    const jsonSchema = removeAdditionalProperties((0, zod_to_json_schema_1.zodToJsonSchema)(zodObj));
+    const { $schema, ...rest } = jsonSchema;
     return rest;
 }
 exports.zodToGeminiParameters = zodToGeminiParameters;
package/dist/utils/zod_to_gemini_parameters.js
CHANGED
@@ -1,12 +1,33 @@
 /* eslint-disable @typescript-eslint/no-unused-vars */
 import { zodToJsonSchema } from "zod-to-json-schema";
+function removeAdditionalProperties(schema) {
+    const updatedSchema = { ...schema };
+    if (Object.hasOwn(updatedSchema, "additionalProperties")) {
+        delete updatedSchema.additionalProperties;
+    }
+    if (updatedSchema.properties) {
+        const keys = Object.keys(updatedSchema.properties);
+        removeProperties(updatedSchema.properties, keys, 0);
+    }
+    return updatedSchema;
+}
+function removeProperties(properties, keys, index) {
+    if (index >= keys.length) {
+        return;
+    }
+    const key = keys[index];
+    // eslint-disable-next-line no-param-reassign
+    properties[key] = removeAdditionalProperties(properties[key]);
+    removeProperties(properties, keys, index + 1);
+}
 export function zodToGeminiParameters(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 zodObj) {
     // Gemini doesn't accept either the $schema or additionalProperties
     // attributes, so we need to explicitly remove them.
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    const jsonSchema = zodToJsonSchema(zodObj);
-    const
+    // const jsonSchema = zodToJsonSchema(zodObj) as any;
+    const jsonSchema = removeAdditionalProperties(zodToJsonSchema(zodObj));
+    const { $schema, ...rest } = jsonSchema;
     return rest;
 }