@langchain/google-common 0.0.10 → 0.0.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +74 -3
- package/dist/chat_models.d.ts +7 -0
- package/dist/chat_models.js +74 -3
- package/dist/connection.cjs +9 -0
- package/dist/connection.d.ts +1 -0
- package/dist/connection.js +9 -0
- package/dist/types.d.ts +3 -1
- package/dist/utils/common.cjs +4 -0
- package/dist/utils/common.js +4 -0
- package/dist/utils/gemini.cjs +9 -7
- package/dist/utils/gemini.d.ts +1 -1
- package/dist/utils/gemini.js +9 -7
- package/package.json +1 -1
package/dist/chat_models.cjs
CHANGED
@@ -15,10 +15,73 @@ const auth_js_1 = require("./auth.cjs");
 const failed_handler_js_1 = require("./utils/failed_handler.cjs");
 const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.cjs");
 class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
+    constructor(fields, caller, client, streaming) {
+        super(fields, caller, client, streaming);
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.convertSystemMessageToHumanContent =
+            fields?.convertSystemMessageToHumanContent;
+    }
+    get useSystemInstruction() {
+        return typeof this.convertSystemMessageToHumanContent === "boolean"
+            ? !this.convertSystemMessageToHumanContent
+            : this.computeUseSystemInstruction;
+    }
+    get computeUseSystemInstruction() {
+        // This works on models from April 2024 and later
+        // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later
+        // AI Studio: gemini-1.5-pro-latest
+        if (this.modelFamily === "palm") {
+            return false;
+        }
+        else if (this.modelName === "gemini-1.0-pro-001") {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-1.0-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName === "gemini-pro" && this.platform === "gai") {
+            // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001
+            return false;
+        }
+        return true;
+    }
     formatContents(input, _parameters) {
         return input
-            .map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1]))
-            .reduce((acc, cur) =>
+            .map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1], this.useSystemInstruction))
+            .reduce((acc, cur) => {
+            // Filter out the system content, since those don't belong
+            // in the actual content.
+            const hasNoSystem = cur.every((content) => content.role !== "system");
+            return hasNoSystem ? [...acc, ...cur] : acc;
+        }, []);
+    }
+    formatSystemInstruction(input, _parameters) {
+        if (!this.useSystemInstruction) {
+            return {};
+        }
+        let ret = {};
+        input.forEach((message, index) => {
+            if (message._getType() === "system") {
+                // For system types, we only want it if it is the first message,
+                // if it appears anywhere else, it should be an error.
+                if (index === 0) {
+                    // eslint-disable-next-line prefer-destructuring
+                    ret = (0, gemini_js_1.baseMessageToContent)(message, undefined, true)[0];
+                }
+                else {
+                    throw new Error("System messages are only permitted as the first passed message.");
+                }
+            }
+        });
+        return ret;
     }
 }
 function convertToGeminiTools(structuredTools) {
@@ -54,11 +117,12 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
             writable: true,
             value: true
         });
+        // Set based on modelName
         Object.defineProperty(this, "model", {
             enumerable: true,
             configurable: true,
             writable: true,
-            value:
+            value: void 0
         });
         Object.defineProperty(this, "modelName", {
             enumerable: true,
@@ -102,6 +166,13 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
             writable: true,
             value: []
         });
+        // May intentionally be undefined, meaning to compute this.
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "safetyHandler", {
             enumerable: true,
             configurable: true,

package/dist/chat_models.d.ts
CHANGED
@@ -6,13 +6,19 @@ import { AIMessageChunk } from "@langchain/core/messages";
 import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
 import type { z } from "zod";
 import { Runnable, RunnableInterface } from "@langchain/core/runnables";
+import { AsyncCaller } from "@langchain/core/utils/async_caller";
 import { StructuredToolInterface } from "@langchain/core/tools";
 import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
 import { AbstractGoogleLLMConnection } from "./connection.js";
 import { GoogleAbstractedClient } from "./auth.js";
 import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams } from "./types.js";
 declare class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<BaseMessage[], AuthOptions> {
+    convertSystemMessageToHumanContent: boolean | undefined;
+    constructor(fields: GoogleAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming: boolean);
+    get useSystemInstruction(): boolean;
+    get computeUseSystemInstruction(): boolean;
     formatContents(input: BaseMessage[], _parameters: GoogleAIModelParams): GeminiContent[];
+    formatSystemInstruction(input: BaseMessage[], _parameters: GoogleAIModelParams): GeminiContent;
 }
 /**
  * Input to chat model class.
@@ -33,6 +39,7 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
     topK: number;
     stopSequences: string[];
     safetySettings: GoogleAISafetySetting[];
+    convertSystemMessageToHumanContent: boolean | undefined;
     safetyHandler: GoogleAISafetyHandler;
     protected connection: ChatConnection<AuthOptions>;
     protected streamedConnection: ChatConnection<AuthOptions>;

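The new `convertSystemMessageToHumanContent` field is exposed on `ChatGoogleBase`, so any concrete chat model built on this package can opt in or out of native system instructions. A minimal usage sketch follows; the concrete subclass `ChatVertexAI` (from `@langchain/google-vertexai`), the model name, and the message texts are assumptions for illustration and are not part of this diff:

```ts
import { ChatVertexAI } from "@langchain/google-vertexai"; // assumed concrete subclass
import { HumanMessage, SystemMessage } from "@langchain/core/messages";

const model = new ChatVertexAI({
  modelName: "gemini-1.5-pro",
  // false  => send the leading SystemMessage as a native systemInstruction
  // true   => rewrite it into a "user" turn followed by a model "Ok" turn
  // unset  => let computeUseSystemInstruction decide from model name / platform
  convertSystemMessageToHumanContent: false,
});

const result = await model.invoke([
  new SystemMessage("You are a terse assistant. Answer in one sentence."),
  new HumanMessage("What does a system instruction do?"),
]);
```
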
package/dist/chat_models.js
CHANGED
@@ -12,10 +12,73 @@ import { ApiKeyGoogleAuth } from "./auth.js";
 import { ensureParams } from "./utils/failed_handler.js";
 import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
 class ChatConnection extends AbstractGoogleLLMConnection {
+    constructor(fields, caller, client, streaming) {
+        super(fields, caller, client, streaming);
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.convertSystemMessageToHumanContent =
+            fields?.convertSystemMessageToHumanContent;
+    }
+    get useSystemInstruction() {
+        return typeof this.convertSystemMessageToHumanContent === "boolean"
+            ? !this.convertSystemMessageToHumanContent
+            : this.computeUseSystemInstruction;
+    }
+    get computeUseSystemInstruction() {
+        // This works on models from April 2024 and later
+        // Vertex AI: gemini-1.5-pro and gemini-1.0-002 and later
+        // AI Studio: gemini-1.5-pro-latest
+        if (this.modelFamily === "palm") {
+            return false;
+        }
+        else if (this.modelName === "gemini-1.0-pro-001") {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName.startsWith("gemini-1.0-pro-vision")) {
+            return false;
+        }
+        else if (this.modelName === "gemini-pro" && this.platform === "gai") {
+            // on AI Studio gemini-pro is still pointing at gemini-1.0-pro-001
+            return false;
+        }
+        return true;
+    }
     formatContents(input, _parameters) {
         return input
-            .map((msg, i) => baseMessageToContent(msg, input[i - 1]))
-            .reduce((acc, cur) =>
+            .map((msg, i) => baseMessageToContent(msg, input[i - 1], this.useSystemInstruction))
+            .reduce((acc, cur) => {
+            // Filter out the system content, since those don't belong
+            // in the actual content.
+            const hasNoSystem = cur.every((content) => content.role !== "system");
+            return hasNoSystem ? [...acc, ...cur] : acc;
+        }, []);
+    }
+    formatSystemInstruction(input, _parameters) {
+        if (!this.useSystemInstruction) {
+            return {};
+        }
+        let ret = {};
+        input.forEach((message, index) => {
+            if (message._getType() === "system") {
+                // For system types, we only want it if it is the first message,
+                // if it appears anywhere else, it should be an error.
+                if (index === 0) {
+                    // eslint-disable-next-line prefer-destructuring
+                    ret = baseMessageToContent(message, undefined, true)[0];
+                }
+                else {
+                    throw new Error("System messages are only permitted as the first passed message.");
+                }
+            }
+        });
+        return ret;
     }
 }
 function convertToGeminiTools(structuredTools) {
@@ -51,11 +114,12 @@ export class ChatGoogleBase extends BaseChatModel {
             writable: true,
             value: true
         });
+        // Set based on modelName
         Object.defineProperty(this, "model", {
             enumerable: true,
             configurable: true,
             writable: true,
-            value:
+            value: void 0
         });
         Object.defineProperty(this, "modelName", {
             enumerable: true,
@@ -99,6 +163,13 @@ export class ChatGoogleBase extends BaseChatModel {
             writable: true,
             value: []
         });
+        // May intentionally be undefined, meaning to compute this.
+        Object.defineProperty(this, "convertSystemMessageToHumanContent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "safetyHandler", {
             enumerable: true,
             configurable: true,

package/dist/connection.cjs
CHANGED
@@ -213,6 +213,9 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
     formatSafetySettings(_input, parameters) {
         return parameters.safetySettings ?? [];
     }
+    formatSystemInstruction(_input, _parameters) {
+        return {};
+    }
     // Borrowed from the OpenAI invocation params test
     isStructuredToolArray(tools) {
         return (tools !== undefined &&
@@ -253,6 +256,7 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
         const generationConfig = this.formatGenerationConfig(input, parameters);
         const tools = this.formatTools(input, parameters);
         const safetySettings = this.formatSafetySettings(input, parameters);
+        const systemInstruction = this.formatSystemInstruction(input, parameters);
         const ret = {
             contents,
             generationConfig,
@@ -263,6 +267,11 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
         if (safetySettings && safetySettings.length) {
             ret.safetySettings = safetySettings;
         }
+        if (systemInstruction?.role &&
+            systemInstruction?.parts &&
+            systemInstruction?.parts?.length) {
+            ret.systemInstruction = systemInstruction;
+        }
         return ret;
     }
 }

package/dist/connection.d.ts
CHANGED
@@ -45,6 +45,7 @@ export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptio
     abstract formatContents(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiContent[];
     formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiGenerationConfig;
     formatSafetySettings(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiSafetySetting[];
+    formatSystemInstruction(_input: MessageType, _parameters: GoogleAIModelRequestParams): GeminiContent;
     isStructuredToolArray(tools?: unknown[]): tools is StructuredToolInterface[];
     structuredToolToFunctionDeclaration(tool: StructuredToolInterface): GeminiFunctionDeclaration;
     structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[];

package/dist/connection.js
CHANGED
@@ -207,6 +207,9 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
     formatSafetySettings(_input, parameters) {
         return parameters.safetySettings ?? [];
     }
+    formatSystemInstruction(_input, _parameters) {
+        return {};
+    }
     // Borrowed from the OpenAI invocation params test
     isStructuredToolArray(tools) {
         return (tools !== undefined &&
@@ -247,6 +250,7 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
         const generationConfig = this.formatGenerationConfig(input, parameters);
         const tools = this.formatTools(input, parameters);
         const safetySettings = this.formatSafetySettings(input, parameters);
+        const systemInstruction = this.formatSystemInstruction(input, parameters);
         const ret = {
             contents,
             generationConfig,
@@ -257,6 +261,11 @@ export class AbstractGoogleLLMConnection extends GoogleAIConnection {
         if (safetySettings && safetySettings.length) {
             ret.safetySettings = safetySettings;
         }
+        if (systemInstruction?.role &&
+            systemInstruction?.parts &&
+            systemInstruction?.parts?.length) {
+            ret.systemInstruction = systemInstruction;
+        }
         return ret;
     }
 }

package/dist/types.d.ts
CHANGED
@@ -73,6 +73,7 @@ export interface GoogleAIModelParams {
     topK?: number;
     stopSequences?: string[];
     safetySettings?: GoogleAISafetySetting[];
+    convertSystemMessageToHumanContent?: boolean;
 }
 /**
  * The params which can be passed to the API at request time.
@@ -128,7 +129,7 @@ export type GeminiSafetyRating = {
     category: string;
     probability: string;
 } & Record<string, unknown>;
-export type GeminiRole = "user" | "model" | "function";
+export type GeminiRole = "system" | "user" | "model" | "function";
 export interface GeminiContent {
     parts: GeminiPart[];
     role: GeminiRole;
@@ -162,6 +163,7 @@ export interface GeminiGenerationConfig {
 }
 export interface GeminiRequest {
     contents?: GeminiContent[];
+    systemInstruction?: GeminiContent;
     tools?: GeminiTool[];
     safetySettings?: GeminiSafetySetting[];
     generationConfig?: GeminiGenerationConfig;

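With the widened `GeminiRole` and the new `systemInstruction` field, a request that uses a native system instruction looks roughly like the sketch below. The types are copied locally in simplified form for illustration (this diff does not show how, or whether, they are re-exported from the package root), and the prompt texts are assumptions:

```ts
// Simplified local mirrors of the shapes declared in types.d.ts above.
type GeminiRole = "system" | "user" | "model" | "function";
interface GeminiContent {
  role: GeminiRole;
  parts: { text: string }[];
}
interface GeminiRequest {
  contents?: GeminiContent[];
  systemInstruction?: GeminiContent;
}

// When useSystemInstruction is on, the system prompt travels outside `contents`.
const request: GeminiRequest = {
  systemInstruction: {
    role: "system",
    parts: [{ text: "You are a terse assistant." }],
  },
  contents: [
    { role: "user", parts: [{ text: "Summarize this diff in one line." }] },
  ],
};
```
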
package/dist/utils/common.cjs
CHANGED
@@ -24,6 +24,10 @@ function copyAIModelParamsInto(params, options, target) {
         options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
     ret.safetySettings =
         options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
+    ret.convertSystemMessageToHumanContent =
+        options?.convertSystemMessageToHumanContent ??
+            params?.convertSystemMessageToHumanContent ??
+            target?.convertSystemMessageToHumanContent;
     ret.tools = options?.tools;
     // Ensure tools are formatted properly for Gemini
     const geminiTools = options?.tools

package/dist/utils/common.js
CHANGED
@@ -20,6 +20,10 @@ export function copyAIModelParamsInto(params, options, target) {
         options?.stopSequences ?? params?.stopSequences ?? target.stopSequences;
     ret.safetySettings =
         options?.safetySettings ?? params?.safetySettings ?? target.safetySettings;
+    ret.convertSystemMessageToHumanContent =
+        options?.convertSystemMessageToHumanContent ??
+            params?.convertSystemMessageToHumanContent ??
+            target?.convertSystemMessageToHumanContent;
     ret.tools = options?.tools;
     // Ensure tools are formatted properly for Gemini
     const geminiTools = options?.tools

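The `??` chain added above resolves the flag with the usual precedence: per-call options first, then constructor params, then the value already on the target model. A small standalone sketch of that resolution, with made-up values:

```ts
// Standalone illustration of the precedence implemented above.
const options = { convertSystemMessageToHumanContent: undefined };
const params = { convertSystemMessageToHumanContent: true };
const target = { convertSystemMessageToHumanContent: false };

const resolved =
  options?.convertSystemMessageToHumanContent ??
  params?.convertSystemMessageToHumanContent ??
  target?.convertSystemMessageToHumanContent;

console.log(resolved); // true — the call options left it unset, so params win
```
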
package/dist/utils/gemini.cjs
CHANGED
@@ -158,11 +158,13 @@ function roleMessageToContent(role, message) {
         },
     ];
 }
-function systemMessageToContent(message) {
-    return [
-        ...roleMessageToContent("user", message),
-        ...roleMessageToContent("model", new messages_1.AIMessage("Ok")),
-    ];
+function systemMessageToContent(message, useSystemInstruction) {
+    return useSystemInstruction
+        ? roleMessageToContent("system", message)
+        : [
+            ...roleMessageToContent("user", message),
+            ...roleMessageToContent("model", new messages_1.AIMessage("Ok")),
+        ];
 }
 function toolMessageToContent(message, prevMessage) {
     const contentStr = typeof message.content === "string"
@@ -211,11 +213,11 @@ function toolMessageToContent(message, prevMessage) {
         ];
     }
 }
-function baseMessageToContent(message, prevMessage) {
+function baseMessageToContent(message, prevMessage, useSystemInstruction) {
     const type = message._getType();
     switch (type) {
         case "system":
-            return systemMessageToContent(message);
+            return systemMessageToContent(message, useSystemInstruction);
         case "human":
             return roleMessageToContent("user", message);
         case "ai":

package/dist/utils/gemini.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { AIMessageFields, BaseMessage, BaseMessageChunk, BaseMessageFields, Mess
 import { ChatGeneration, ChatGenerationChunk, ChatResult, Generation } from "@langchain/core/outputs";
 import type { GoogleLLMResponse, GoogleAIModelParams, GeminiPart, GeminiContent, GenerateContentResponseData, GoogleAISafetyHandler } from "../types.js";
 export declare function messageContentToParts(content: MessageContent): GeminiPart[];
-export declare function baseMessageToContent(message: BaseMessage, prevMessage
+export declare function baseMessageToContent(message: BaseMessage, prevMessage: BaseMessage | undefined, useSystemInstruction: boolean): GeminiContent[];
 export declare function partsToMessageContent(parts: GeminiPart[]): MessageContent;
 interface FunctionCall {
     name: string;

package/dist/utils/gemini.js
CHANGED
@@ -154,11 +154,13 @@ function roleMessageToContent(role, message) {
         },
     ];
 }
-function systemMessageToContent(message) {
-    return [
-        ...roleMessageToContent("user", message),
-        ...roleMessageToContent("model", new AIMessage("Ok")),
-    ];
+function systemMessageToContent(message, useSystemInstruction) {
+    return useSystemInstruction
+        ? roleMessageToContent("system", message)
+        : [
+            ...roleMessageToContent("user", message),
+            ...roleMessageToContent("model", new AIMessage("Ok")),
+        ];
 }
 function toolMessageToContent(message, prevMessage) {
     const contentStr = typeof message.content === "string"
@@ -207,11 +209,11 @@ function toolMessageToContent(message, prevMessage) {
         ];
     }
 }
-export function baseMessageToContent(message, prevMessage) {
+export function baseMessageToContent(message, prevMessage, useSystemInstruction) {
     const type = message._getType();
     switch (type) {
         case "system":
-            return systemMessageToContent(message);
+            return systemMessageToContent(message, useSystemInstruction);
         case "human":
             return roleMessageToContent("user", message);
         case "ai":

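The effect of the `useSystemInstruction` flag on `systemMessageToContent` is easiest to see from the two shapes it can produce. A sketch of both outcomes, with content simplified to text parts and an assumed system prompt:

```ts
// With useSystemInstruction === true, the SystemMessage becomes a single
// "system" content block, which the connection later lifts into
// request.systemInstruction rather than leaving it in `contents`:
const asSystemInstruction = [
  { role: "system", parts: [{ text: "You are a terse assistant." }] },
];

// With useSystemInstruction === false (older Gemini / PaLM-family models),
// it becomes the legacy user/"Ok" pair that stays inside `contents`:
const asHumanContent = [
  { role: "user", parts: [{ text: "You are a terse assistant." }] },
  { role: "model", parts: [{ text: "Ok" }] },
];
```
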