@langchain/google-genai 0.0.17 → 0.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +72 -23
- package/dist/chat_models.d.ts +10 -2
- package/dist/chat_models.js +72 -23
- package/dist/utils/common.cjs +65 -8
- package/dist/utils/common.d.ts +11 -6
- package/dist/utils/common.js +65 -8
- package/dist/utils/zod_to_genai_parameters.cjs +23 -19
- package/dist/utils/zod_to_genai_parameters.d.ts +1 -0
- package/dist/utils/zod_to_genai_parameters.js +21 -18
- package/package.json +3 -3
package/dist/chat_models.cjs
CHANGED
@@ -118,6 +118,12 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "streamUsage", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
         Object.defineProperty(this, "client", {
             enumerable: true,
             configurable: true,
@@ -179,6 +185,7 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
             apiVersion: fields?.apiVersion,
             baseUrl: fields?.baseUrl,
         });
+        this.streamUsage = fields?.streamUsage ?? this.streamUsage;
     }
     getLsParams(options) {
         return {
@@ -236,40 +243,67 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
                 .map(([_, value]) => value);
             return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };
         }
-        const res = await this.
-        … (removed lines collapsed in the registry diff view)
-            }
-            return output;
+        const res = await this.completionWithRetry({
+            ...parameters,
+            contents: prompt,
+        });
+        let usageMetadata;
+        if ("usageMetadata" in res.response) {
+            const genAIUsageMetadata = res.response.usageMetadata;
+            usageMetadata = {
+                input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
+            };
+        }
+        const generationResult = (0, common_js_1.mapGenerateContentResultToChatResult)(res.response, {
+            usageMetadata,
         });
-        const generationResult = (0, common_js_1.mapGenerateContentResultToChatResult)(res.response);
         await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");
         return generationResult;
     }
     async *_streamResponseChunks(messages, options, runManager) {
         const prompt = (0, common_js_1.convertBaseMessagesToContent)(messages, this._isMultimodalModel);
         const parameters = this.invocationParams(options);
+        const request = {
+            ...parameters,
+            contents: prompt,
+        };
         const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
-            const { stream } = await this.client.generateContentStream({
-                ...parameters,
-                contents: prompt,
-            });
+            const { stream } = await this.client.generateContentStream(request);
             return stream;
         });
+        let usageMetadata;
+        let index = 0;
         for await (const response of stream) {
-            … (removed line collapsed in the registry diff view)
+            if ("usageMetadata" in response &&
+                this.streamUsage !== false &&
+                options.streamUsage !== false) {
+                const genAIUsageMetadata = response.usageMetadata;
+                if (!usageMetadata) {
+                    usageMetadata = {
+                        input_tokens: genAIUsageMetadata.promptTokenCount,
+                        output_tokens: genAIUsageMetadata.candidatesTokenCount,
+                        total_tokens: genAIUsageMetadata.totalTokenCount,
+                    };
+                }
+                else {
+                    // Under the hood, LangChain combines the prompt tokens. Google returns the updated
+                    // total each time, so we need to find the difference between the tokens.
+                    const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+                        usageMetadata.output_tokens;
+                    usageMetadata = {
+                        input_tokens: 0,
+                        output_tokens: outputTokenDiff,
+                        total_tokens: outputTokenDiff,
+                    };
+                }
+            }
+            const chunk = (0, common_js_1.convertResponseContentToChatGenerationChunk)(response, {
+                usageMetadata,
+                index,
+            });
+            index += 1;
             if (!chunk) {
                 continue;
            }
@@ -277,6 +311,21 @@ class ChatGoogleGenerativeAI extends chat_models_1.BaseChatModel {
            await runManager?.handleLLMNewToken(chunk.text ?? "");
        }
    }
+    async completionWithRetry(request, options) {
+        return this.caller.callWithOptions({ signal: options?.signal }, async () => {
+            try {
+                return this.client.generateContent(request);
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            }
+            catch (e) {
+                // TODO: Improve error handling
+                if (e.message?.includes("400 Bad Request")) {
+                    e.status = 400;
+                }
+                throw e;
+            }
+        });
+    }
    withStructuredOutput(outputSchema, config) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const schema = outputSchema;
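
The net effect of the chat_models changes is a new streamUsage switch (a constructor field and a call option, defaulting to true) plus usage metadata on generated messages. A minimal consumer-side sketch in TypeScript; the model name and API-key wiring below are illustrative assumptions, not taken from this diff:

import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

async function main() {
  const model = new ChatGoogleGenerativeAI({
    modelName: "gemini-pro", // illustrative model name
    apiKey: process.env.GOOGLE_API_KEY, // assumed to be set in the environment
    streamUsage: true, // new in this release; true is already the default
  });
  const stream = await model.stream("Why is the sky blue?");
  for await (const chunk of stream) {
    // With streamUsage enabled, chunks can carry usage_metadata of the shape
    // { input_tokens, output_tokens, total_tokens }.
    console.log(chunk.content, chunk.usage_metadata);
  }
}

main();

Per-call opt-out should also work through the new call option, for example via model.bind({ streamUsage: false }).
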
package/dist/chat_models.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, GenerateContentRequest, SafetySetting } from "@google/generative-ai";
+import { FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, GenerateContentRequest, SafetySetting, Part as GenerativeAIPart } from "@google/generative-ai";
 import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { AIMessageChunk, BaseMessage } from "@langchain/core/messages";
 import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
@@ -13,11 +13,17 @@ export type BaseMessageExamplePair = {
 };
 export interface GoogleGenerativeAIChatCallOptions extends BaseLanguageModelCallOptions {
     tools?: StructuredToolInterface[] | GoogleGenerativeAIFunctionDeclarationsTool[];
+    /**
+     * Whether or not to include usage data, like token counts,
+     * in the streamed response chunks.
+     * @default true
+     */
+    streamUsage?: boolean;
 }
 /**
  * An interface defining the input to the ChatGoogleGenerativeAI class.
  */
-export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
+export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, "streamUsage"> {
     /**
      * Model Name to use
      *
@@ -147,6 +153,7 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerati
     safetySettings?: SafetySetting[];
     apiKey?: string;
     streaming: boolean;
+    streamUsage: boolean;
     private client;
     get _isMultimodalModel(): boolean;
     constructor(fields?: GoogleGenerativeAIChatInput);
@@ -157,6 +164,7 @@ export declare class ChatGoogleGenerativeAI extends BaseChatModel<GoogleGenerati
     invocationParams(options?: this["ParsedCallOptions"]): Omit<GenerateContentRequest, "contents">;
     _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
     _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+    completionWithRetry(request: string | GenerateContentRequest | (string | GenerativeAIPart)[], options?: this["ParsedCallOptions"]): Promise<import("@google/generative-ai").GenerateContentResult>;
     withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
     withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
         raw: BaseMessage;
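
Because GoogleGenerativeAIChatInput now extends Pick<GoogleGenerativeAIChatCallOptions, "streamUsage">, the flag type-checks both at construction time and per call. A type-level sketch, assuming both interfaces are re-exported from the package entry point:

import type {
  GoogleGenerativeAIChatInput,
  GoogleGenerativeAIChatCallOptions,
} from "@langchain/google-genai";

// Constructor-level default for the whole model instance:
const fields: Partial<GoogleGenerativeAIChatInput> = { streamUsage: false };

// Per-call override, consulted alongside the constructor default
// inside _streamResponseChunks:
const callOptions: Partial<GoogleGenerativeAIChatCallOptions> = { streamUsage: true };
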
package/dist/chat_models.js
CHANGED
@@ -115,6 +115,12 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "streamUsage", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
         Object.defineProperty(this, "client", {
             enumerable: true,
             configurable: true,
@@ -176,6 +182,7 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
             apiVersion: fields?.apiVersion,
             baseUrl: fields?.baseUrl,
         });
+        this.streamUsage = fields?.streamUsage ?? this.streamUsage;
     }
     getLsParams(options) {
         return {
@@ -233,40 +240,67 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
                 .map(([_, value]) => value);
             return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } };
         }
-        const res = await this.
-        … (removed lines collapsed in the registry diff view)
-            }
-            return output;
+        const res = await this.completionWithRetry({
+            ...parameters,
+            contents: prompt,
+        });
+        let usageMetadata;
+        if ("usageMetadata" in res.response) {
+            const genAIUsageMetadata = res.response.usageMetadata;
+            usageMetadata = {
+                input_tokens: genAIUsageMetadata.promptTokenCount ?? 0,
+                output_tokens: genAIUsageMetadata.candidatesTokenCount ?? 0,
+                total_tokens: genAIUsageMetadata.totalTokenCount ?? 0,
+            };
+        }
+        const generationResult = mapGenerateContentResultToChatResult(res.response, {
+            usageMetadata,
         });
-        const generationResult = mapGenerateContentResultToChatResult(res.response);
         await runManager?.handleLLMNewToken(generationResult.generations[0].text ?? "");
         return generationResult;
     }
     async *_streamResponseChunks(messages, options, runManager) {
         const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
         const parameters = this.invocationParams(options);
+        const request = {
+            ...parameters,
+            contents: prompt,
+        };
         const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
-            const { stream } = await this.client.generateContentStream({
-                ...parameters,
-                contents: prompt,
-            });
+            const { stream } = await this.client.generateContentStream(request);
             return stream;
         });
+        let usageMetadata;
+        let index = 0;
         for await (const response of stream) {
-            … (removed line collapsed in the registry diff view)
+            if ("usageMetadata" in response &&
+                this.streamUsage !== false &&
+                options.streamUsage !== false) {
+                const genAIUsageMetadata = response.usageMetadata;
+                if (!usageMetadata) {
+                    usageMetadata = {
+                        input_tokens: genAIUsageMetadata.promptTokenCount,
+                        output_tokens: genAIUsageMetadata.candidatesTokenCount,
+                        total_tokens: genAIUsageMetadata.totalTokenCount,
+                    };
+                }
+                else {
+                    // Under the hood, LangChain combines the prompt tokens. Google returns the updated
+                    // total each time, so we need to find the difference between the tokens.
+                    const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount -
+                        usageMetadata.output_tokens;
+                    usageMetadata = {
+                        input_tokens: 0,
+                        output_tokens: outputTokenDiff,
+                        total_tokens: outputTokenDiff,
+                    };
+                }
+            }
+            const chunk = convertResponseContentToChatGenerationChunk(response, {
+                usageMetadata,
+                index,
+            });
+            index += 1;
             if (!chunk) {
                 continue;
            }
@@ -274,6 +308,21 @@ export class ChatGoogleGenerativeAI extends BaseChatModel {
            await runManager?.handleLLMNewToken(chunk.text ?? "");
        }
    }
+    async completionWithRetry(request, options) {
+        return this.caller.callWithOptions({ signal: options?.signal }, async () => {
+            try {
+                return this.client.generateContent(request);
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            }
+            catch (e) {
+                // TODO: Improve error handling
+                if (e.message?.includes("400 Bad Request")) {
+                    e.status = 400;
+                }
+                throw e;
+            }
+        });
+    }
    withStructuredOutput(outputSchema, config) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const schema = outputSchema;
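
The delta bookkeeping inside _streamResponseChunks exists because @langchain/core sums usage_metadata field-wise when message chunks are concatenated: the stream reports the full counts once, then only the new output tokens, so the aggregate comes out right. A small illustration; this is hedged on AIMessageChunk.concat behavior in @langchain/core 0.2.x, not on code in this diff:

import { AIMessageChunk } from "@langchain/core/messages";

const first = new AIMessageChunk({
  content: "Hello",
  usage_metadata: { input_tokens: 5, output_tokens: 2, total_tokens: 7 },
});
const second = new AIMessageChunk({
  content: " world",
  // later chunks carry zero input tokens and only the output-token delta
  usage_metadata: { input_tokens: 0, output_tokens: 3, total_tokens: 3 },
});
const merged = first.concat(second);
// merged.usage_metadata sums field-wise:
// { input_tokens: 5, output_tokens: 5, total_tokens: 10 }
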
package/dist/utils/common.cjs
CHANGED
@@ -10,6 +10,9 @@ function getMessageAuthor(message) {
     if (messages_1.ChatMessage.isInstance(message)) {
         return message.role;
     }
+    if (type === "tool") {
+        return type;
+    }
     return message.name ?? type;
 }
 exports.getMessageAuthor = getMessageAuthor;
@@ -31,6 +34,9 @@ function convertAuthorToRole(author) {
         case "system":
         case "human":
             return "user";
+        case "tool":
+        case "function":
+            return "function";
         default:
             throw new Error(`Unknown / unsupported author: ${author}`);
     }
@@ -47,11 +53,36 @@ function messageContentMedia(content) {
     }
     throw new Error("Invalid media content");
 }
-function convertMessageContentToParts(content, isMultimodalModel) {
-    if (typeof content === "string") {
-        return [{ text: content }];
+function convertMessageContentToParts(message, isMultimodalModel, role) {
+    if (typeof message.content === "string") {
+        return [{ text: message.content }];
+    }
+    let functionCallParts = [];
+    if (role === "function") {
+        if (message.name && typeof message.content === "string") {
+            functionCallParts.push({
+                functionResponse: {
+                    name: message.name,
+                    response: message.content,
+                },
+            });
+        }
+        else {
+            throw new Error("ChatGoogleGenerativeAI requires tool messages to contain the tool name, and a string content.");
+        }
+    }
+    if ("tool_calls" in message) {
+        const castMessage = message;
+        if (castMessage.tool_calls && castMessage.tool_calls.length > 0) {
+            functionCallParts = castMessage.tool_calls.map((tc) => ({
+                functionCall: {
+                    name: tc.name,
+                    args: tc.args,
+                },
+            }));
+        }
     }
-    … (removed line collapsed in the registry diff view)
+    const messageContentParts = message.content.map((c) => {
         if (c.type === "text") {
             return {
                 text: c.text,
@@ -89,8 +120,17 @@ function convertMessageContentToParts(content, isMultimodalModel) {
         else if (c.type === "media") {
             return messageContentMedia(c);
         }
+        else if (c.type === "tool_use") {
+            return {
+                functionCall: {
+                    name: c.name,
+                    args: c.input,
+                },
+            };
+        }
         throw new Error(`Unknown content type ${c.type}`);
     });
+    return [...messageContentParts, ...functionCallParts];
 }
 exports.convertMessageContentToParts = convertMessageContentToParts;
 function convertBaseMessagesToContent(messages, isMultimodalModel) {
@@ -109,7 +149,7 @@ function convertBaseMessagesToContent(messages, isMultimodalModel) {
             prevContent.role === role) {
             throw new Error("Google Generative AI requires alternate messages between authors");
         }
-        const parts = convertMessageContentToParts(message.content, isMultimodalModel);
+        const parts = convertMessageContentToParts(message, isMultimodalModel, role);
         if (acc.mergeWithPreviousContent) {
             const prevContent = acc.content[acc.content.length - 1];
             if (!prevContent) {
@@ -121,8 +161,13 @@ function convertBaseMessagesToContent(messages, isMultimodalModel) {
                 content: acc.content,
             };
         }
+        let actualRole = role;
+        if (actualRole === "function") {
+            // GenerativeAI API will throw an error if the role is not "user" or "model."
+            actualRole = "user";
+        }
         const content = {
-            role,
+            role: actualRole,
             parts,
         };
         return {
@@ -132,7 +177,7 @@ function convertBaseMessagesToContent(messages, isMultimodalModel) {
     }, { content: [], mergeWithPreviousContent: false }).content;
 }
 exports.convertBaseMessagesToContent = convertBaseMessagesToContent;
-function mapGenerateContentResultToChatResult(response) {
+function mapGenerateContentResultToChatResult(response, extra) {
     // if rejected or error, return empty generations with reason in filters
     if (!response.candidates ||
         response.candidates.length === 0 ||
@@ -156,6 +201,7 @@ function mapGenerateContentResultToChatResult(response) {
             additional_kwargs: {
                 ...generationInfo,
             },
+            usage_metadata: extra?.usageMetadata,
         }),
         generationInfo,
     };
@@ -164,21 +210,32 @@ function mapGenerateContentResultToChatResult(response) {
     };
 }
 exports.mapGenerateContentResultToChatResult = mapGenerateContentResultToChatResult;
-function convertResponseContentToChatGenerationChunk(response) {
+function convertResponseContentToChatGenerationChunk(response, extra) {
     if (!response.candidates || response.candidates.length === 0) {
         return null;
     }
+    const functionCalls = response.functionCalls();
     const [candidate] = response.candidates;
     const { content, ...generationInfo } = candidate;
     const text = content?.parts[0]?.text ?? "";
+    const toolCallChunks = [];
+    if (functionCalls) {
+        toolCallChunks.push(...functionCalls.map((fc) => ({
+            ...fc,
+            args: JSON.stringify(fc.args),
+            index: extra.index,
+        })));
+    }
     return new outputs_1.ChatGenerationChunk({
         text,
         message: new messages_1.AIMessageChunk({
             content: text,
             name: !content ? undefined : content.role,
+            tool_call_chunks: toolCallChunks,
             // Each chunk can have unique "generationInfo", and merging strategy is unclear,
             // so leave blank for now.
             additional_kwargs: {},
+            usage_metadata: extra.usageMetadata,
         }),
         generationInfo,
     });
package/dist/utils/common.d.ts
CHANGED
@@ -1,5 +1,5 @@
-import { EnhancedGenerateContentResponse, Content, Part, type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool } from "@google/generative-ai";
-import { BaseMessage, …
+import { EnhancedGenerateContentResponse, Content, Part, type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool, POSSIBLE_ROLES } from "@google/generative-ai";
+import { BaseMessage, UsageMetadata } from "@langchain/core/messages";
 import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
 import { StructuredToolInterface } from "@langchain/core/tools";
 export declare function getMessageAuthor(message: BaseMessage): string;
@@ -9,9 +9,14 @@ export declare function getMessageAuthor(message: BaseMessage): string;
 * @param model The model to use for mapping.
 * @returns The message type mapped to a Google Generative AI chat author.
 */
-export declare function convertAuthorToRole(author: string): …
-export declare function convertMessageContentToParts(…
+export declare function convertAuthorToRole(author: string): (typeof POSSIBLE_ROLES)[number];
+export declare function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean, role: (typeof POSSIBLE_ROLES)[number]): Part[];
 export declare function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean): Content[];
-export declare function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse…
-… (removed line collapsed in the registry diff view)
+export declare function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse, extra?: {
+    usageMetadata: UsageMetadata | undefined;
+}): ChatResult;
+export declare function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse, extra: {
+    usageMetadata?: UsageMetadata | undefined;
+    index: number;
+}): ChatGenerationChunk | null;
 export declare function convertToGenerativeAITools(structuredTools: (StructuredToolInterface | Record<string, unknown>)[]): GoogleGenerativeAIFunctionDeclarationsTool[];
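
The (typeof POSSIBLE_ROLES)[number] type in the new signatures is the element union of the SDK's readonly tuple of role names. A quick sketch of the pattern; the exact tuple members come from @google/generative-ai, so treat the union in the comment as indicative rather than exhaustive:

import { POSSIBLE_ROLES } from "@google/generative-ai";

// Indexing a readonly tuple type with `number` yields the union of its
// members, e.g. "user" | "model" | "function" | …
type Role = (typeof POSSIBLE_ROLES)[number];

const role: Role = "function"; // the role used for tool results before the "user" remap
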
package/dist/utils/common.js
CHANGED
@@ -7,6 +7,9 @@ export function getMessageAuthor(message) {
     if (ChatMessage.isInstance(message)) {
         return message.role;
     }
+    if (type === "tool") {
+        return type;
+    }
     return message.name ?? type;
 }
 /**
@@ -27,6 +30,9 @@ export function convertAuthorToRole(author) {
         case "system":
         case "human":
             return "user";
+        case "tool":
+        case "function":
+            return "function";
         default:
             throw new Error(`Unknown / unsupported author: ${author}`);
     }
@@ -42,11 +48,36 @@ function messageContentMedia(content) {
     }
     throw new Error("Invalid media content");
 }
-export function convertMessageContentToParts(content, isMultimodalModel) {
-    if (typeof content === "string") {
-        return [{ text: content }];
+export function convertMessageContentToParts(message, isMultimodalModel, role) {
+    if (typeof message.content === "string") {
+        return [{ text: message.content }];
+    }
+    let functionCallParts = [];
+    if (role === "function") {
+        if (message.name && typeof message.content === "string") {
+            functionCallParts.push({
+                functionResponse: {
+                    name: message.name,
+                    response: message.content,
+                },
+            });
+        }
+        else {
+            throw new Error("ChatGoogleGenerativeAI requires tool messages to contain the tool name, and a string content.");
+        }
+    }
+    if ("tool_calls" in message) {
+        const castMessage = message;
+        if (castMessage.tool_calls && castMessage.tool_calls.length > 0) {
+            functionCallParts = castMessage.tool_calls.map((tc) => ({
+                functionCall: {
+                    name: tc.name,
+                    args: tc.args,
+                },
+            }));
+        }
     }
-    … (removed line collapsed in the registry diff view)
+    const messageContentParts = message.content.map((c) => {
         if (c.type === "text") {
             return {
                 text: c.text,
@@ -84,8 +115,17 @@ export function convertMessageContentToParts(content, isMultimodalModel) {
         else if (c.type === "media") {
             return messageContentMedia(c);
         }
+        else if (c.type === "tool_use") {
+            return {
+                functionCall: {
+                    name: c.name,
+                    args: c.input,
+                },
+            };
+        }
         throw new Error(`Unknown content type ${c.type}`);
     });
+    return [...messageContentParts, ...functionCallParts];
 }
 export function convertBaseMessagesToContent(messages, isMultimodalModel) {
     return messages.reduce((acc, message, index) => {
@@ -103,7 +143,7 @@ export function convertBaseMessagesToContent(messages, isMultimodalModel) {
             prevContent.role === role) {
             throw new Error("Google Generative AI requires alternate messages between authors");
         }
-        const parts = convertMessageContentToParts(message.content, isMultimodalModel);
+        const parts = convertMessageContentToParts(message, isMultimodalModel, role);
         if (acc.mergeWithPreviousContent) {
             const prevContent = acc.content[acc.content.length - 1];
             if (!prevContent) {
@@ -115,8 +155,13 @@ export function convertBaseMessagesToContent(messages, isMultimodalModel) {
                 content: acc.content,
             };
         }
+        let actualRole = role;
+        if (actualRole === "function") {
+            // GenerativeAI API will throw an error if the role is not "user" or "model."
+            actualRole = "user";
+        }
         const content = {
-            role,
+            role: actualRole,
             parts,
         };
         return {
@@ -125,7 +170,7 @@ export function convertBaseMessagesToContent(messages, isMultimodalModel) {
         };
     }, { content: [], mergeWithPreviousContent: false }).content;
 }
-export function mapGenerateContentResultToChatResult(response) {
+export function mapGenerateContentResultToChatResult(response, extra) {
     // if rejected or error, return empty generations with reason in filters
     if (!response.candidates ||
         response.candidates.length === 0 ||
@@ -149,6 +194,7 @@ export function mapGenerateContentResultToChatResult(response) {
             additional_kwargs: {
                 ...generationInfo,
             },
+            usage_metadata: extra?.usageMetadata,
         }),
         generationInfo,
     };
@@ -156,21 +202,32 @@ export function mapGenerateContentResultToChatResult(response) {
         generations: [generation],
     };
 }
-export function convertResponseContentToChatGenerationChunk(response) {
+export function convertResponseContentToChatGenerationChunk(response, extra) {
     if (!response.candidates || response.candidates.length === 0) {
         return null;
     }
+    const functionCalls = response.functionCalls();
     const [candidate] = response.candidates;
     const { content, ...generationInfo } = candidate;
     const text = content?.parts[0]?.text ?? "";
+    const toolCallChunks = [];
+    if (functionCalls) {
+        toolCallChunks.push(...functionCalls.map((fc) => ({
+            ...fc,
+            args: JSON.stringify(fc.args),
+            index: extra.index,
+        })));
+    }
     return new ChatGenerationChunk({
         text,
         message: new AIMessageChunk({
             content: text,
             name: !content ? undefined : content.role,
+            tool_call_chunks: toolCallChunks,
             // Each chunk can have unique "generationInfo", and merging strategy is unclear,
             // so leave blank for now.
             additional_kwargs: {},
+            usage_metadata: extra.usageMetadata,
         }),
         generationInfo,
     });
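
On the streaming side, each Gemini function call is turned into a tool-call chunk whose args are a JSON string and which carries the running chunk index, the format @langchain/core uses to merge partial tool calls across a stream. A sketch of one resulting entry, with invented values:

// Shape of one tool_call_chunks entry produced by the mapping above (sketch):
const toolCallChunk = {
  name: "get_weather",                     // from the functionCall name
  args: JSON.stringify({ city: "Paris" }), // args re-serialized per chunk
  index: 0,                                // position in the stream, used for merging
};
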
package/dist/utils/zod_to_genai_parameters.cjs
CHANGED
@@ -1,28 +1,32 @@
 "use strict";
 /* eslint-disable @typescript-eslint/no-unused-vars */
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.zodToGenerativeAIParameters = void 0;
+exports.zodToGenerativeAIParameters = exports.removeAdditionalProperties = void 0;
 const zod_to_json_schema_1 = require("zod-to-json-schema");
-function removeAdditionalProperties(
-… (removed lines collapsed in the registry diff view)
+function removeAdditionalProperties(
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+obj) {
+    if (typeof obj === "object" && obj !== null) {
+        const newObj = { ...obj };
+        if ("additionalProperties" in newObj &&
+            typeof newObj.additionalProperties === "boolean") {
+            delete newObj.additionalProperties;
+        }
+        for (const key in newObj) {
+            if (key in newObj) {
+                if (Array.isArray(newObj[key])) {
+                    newObj[key] = newObj[key].map(removeAdditionalProperties);
+                }
+                else if (typeof newObj[key] === "object" && newObj[key] !== null) {
+                    newObj[key] = removeAdditionalProperties(newObj[key]);
+                }
+            }
+        }
+        return newObj;
     }
-    … (removed line collapsed in the registry diff view)
-    // eslint-disable-next-line no-param-reassign
-    properties[key] = removeAdditionalProperties(properties[key]);
-    removeProperties(properties, keys, index + 1);
+    return obj;
 }
+exports.removeAdditionalProperties = removeAdditionalProperties;
 function zodToGenerativeAIParameters(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 zodObj) {
package/dist/utils/zod_to_genai_parameters.d.ts
CHANGED
@@ -8,4 +8,5 @@ export interface GenerativeAIJsonSchemaDirty extends GenerativeAIJsonSchema {
     properties?: Record<string, GenerativeAIJsonSchemaDirty>;
     additionalProperties?: boolean;
 }
+export declare function removeAdditionalProperties(obj: Record<string, any>): GenerativeAIJsonSchema;
 export declare function zodToGenerativeAIParameters(zodObj: z.ZodType<any>): GenerativeAIFunctionDeclarationSchema;
package/dist/utils/zod_to_genai_parameters.js
CHANGED
@@ -1,24 +1,27 @@
 /* eslint-disable @typescript-eslint/no-unused-vars */
 import { zodToJsonSchema } from "zod-to-json-schema";
-function removeAdditionalProperties(
-… (removed lines collapsed in the registry diff view)
+export function removeAdditionalProperties(
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+obj) {
+    if (typeof obj === "object" && obj !== null) {
+        const newObj = { ...obj };
+        if ("additionalProperties" in newObj &&
+            typeof newObj.additionalProperties === "boolean") {
+            delete newObj.additionalProperties;
+        }
+        for (const key in newObj) {
+            if (key in newObj) {
+                if (Array.isArray(newObj[key])) {
+                    newObj[key] = newObj[key].map(removeAdditionalProperties);
+                }
+                else if (typeof newObj[key] === "object" && newObj[key] !== null) {
+                    newObj[key] = removeAdditionalProperties(newObj[key]);
+                }
+            }
+        }
+        return newObj;
     }
-    … (removed line collapsed in the registry diff view)
-    // eslint-disable-next-line no-param-reassign
-    properties[key] = removeAdditionalProperties(properties[key]);
-    removeProperties(properties, keys, index + 1);
+    return obj;
 }
 export function zodToGenerativeAIParameters(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
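
The rewritten helper walks a JSON schema and strips boolean additionalProperties flags at every nesting level, which Google's function-declaration schema format does not accept. A before/after sketch built on zod-to-json-schema output; the schema is illustrative, and removeAdditionalProperties itself lives under dist/utils rather than a documented entry point:

import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";

const jsonSchema = zodToJsonSchema(
  z.object({ city: z.string(), days: z.number().optional() })
);
// zod-to-json-schema emits, roughly:
//   { type: "object", properties: { ... }, required: ["city"],
//     additionalProperties: false, $schema: "..." }
// removeAdditionalProperties(jsonSchema) returns a copy with every boolean
// "additionalProperties" key removed, recursing into nested objects and
// mapping over arrays.
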
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/google-genai",
-  "version": "0.0.17",
+  "version": "0.0.19",
   "description": "Sample integration for LangChain.js",
   "type": "module",
   "engines": {
@@ -36,13 +36,13 @@
   "license": "MIT",
   "dependencies": {
     "@google/generative-ai": "^0.7.0",
-    "@langchain/core": "…
+    "@langchain/core": ">=0.2.5 <0.3.0",
     "zod-to-json-schema": "^3.22.4"
   },
   "devDependencies": {
     "@jest/globals": "^29.5.0",
     "@langchain/scripts": "~0.0.14",
-    "@langchain/standard-tests": "…
+    "@langchain/standard-tests": "0.0.0",
     "@swc/core": "^1.3.90",
     "@swc/jest": "^0.2.29",
     "@tsconfig/recommended": "^1.0.3",