@librechat/agents 3.0.25 → 3.0.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/llm/google/index.cjs +78 -9
- package/dist/cjs/llm/google/index.cjs.map +1 -1
- package/dist/cjs/llm/google/utils/common.cjs +185 -15
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
- package/dist/esm/llm/google/index.mjs +79 -10
- package/dist/esm/llm/google/index.mjs.map +1 -1
- package/dist/esm/llm/google/utils/common.mjs +184 -17
- package/dist/esm/llm/google/utils/common.mjs.map +1 -1
- package/dist/types/llm/google/index.d.ts +10 -0
- package/dist/types/llm/google/types.d.ts +11 -1
- package/dist/types/llm/google/utils/common.d.ts +17 -2
- package/package.json +1 -1
- package/src/llm/google/data/gettysburg10.wav +0 -0
- package/src/llm/google/data/hotdog.jpg +0 -0
- package/src/llm/google/index.ts +129 -14
- package/src/llm/google/llm.spec.ts +932 -0
- package/src/llm/google/types.ts +14 -1
- package/src/llm/google/utils/common.ts +262 -35
@@ -0,0 +1,932 @@
+import { config } from 'dotenv';
+config();
+import { test } from '@jest/globals';
+import * as fs from 'node:fs/promises';
+import * as path from 'node:path';
+import {
+  AIMessage,
+  AIMessageChunk,
+  HumanMessage,
+  SystemMessage,
+  ToolMessage,
+} from '@langchain/core/messages';
+import {
+  ChatPromptTemplate,
+  MessagesPlaceholder,
+} from '@langchain/core/prompts';
+import { StructuredTool, tool } from '@langchain/core/tools';
+import { z } from 'zod/v3';
+import {
+  CodeExecutionTool,
+  DynamicRetrievalMode,
+  SchemaType as FunctionDeclarationSchemaType,
+  GoogleSearchRetrievalTool,
+} from '@google/generative-ai';
+import { concat } from '@langchain/core/utils/stream';
+import { CustomChatGoogleGenerativeAI as ChatGoogleGenerativeAI } from './index';
+import { _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY } from './utils/common';
+
+// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
+const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
+
+const dummyToolResponse =
+  "[{\"title\":\"Weather in New York City\",\"url\":\"https://www.weatherapi.com/\",\"content\":\"{'location': {'name': 'New York', 'region': 'New York', 'country': 'United States of America', 'lat': 40.71, 'lon': -74.01, 'tz_id': 'America/New_York', 'localtime_epoch': 1718659486, 'localtime': '2024-06-17 17:24'}, 'current': {'last_updated_epoch': 1718658900, 'last_updated': '2024-06-17 17:15', 'temp_c': 27.8, 'temp_f': 82.0, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 159, 'wind_dir': 'SSE', 'pressure_mb': 1021.0, 'pressure_in': 30.15, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 58, 'cloud': 25, 'feelslike_c': 29.0, 'feelslike_f': 84.2, 'windchill_c': 26.9, 'windchill_f': 80.5, 'heatindex_c': 27.9, 'heatindex_f': 82.2, 'dewpoint_c': 17.1, 'dewpoint_f': 62.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 18.3, 'gust_kph': 29.4}}\",\"score\":0.98192,\"raw_content\":null},{\"title\":\"New York, NY Monthly Weather | AccuWeather\",\"url\":\"https://www.accuweather.com/en/us/new-york/10021/june-weather/349727\",\"content\":\"Get the monthly weather forecast for New York, NY, including daily high/low, historical averages, to help you plan ahead.\",\"score\":0.97504,\"raw_content\":null}]";
+
+test('Test Google AI', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+  const res = await model.invoke('what is 1 + 1?');
+  expect(res).toBeTruthy();
+});
+
+test('Test Google AI generation', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+  const res = await model.generate([
+    [['human', 'Translate "I love programming" into Korean.']],
+  ]);
+  expect(res).toBeTruthy();
+});
+
+test('Test Google AI generation with a stop sequence', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.0-flash',
+    stopSequences: ['two', '2'],
+  });
+  const res = await model.invoke([
+    ['human', 'What are the first three positive whole numbers?'],
+  ]);
+  expect(res).toBeTruthy();
+  expect(res.additional_kwargs.finishReason).toBe('STOP');
+  expect(res.content).not.toContain('2');
+  expect(res.content).not.toContain('two');
+});
+
+test('Test Google AI generation with a system message', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+  const res = await model.generate([
+    [
+      ['system', 'You are an amazing translator.'],
+      ['human', 'Translate "I love programming" into Korean.'],
+    ],
+  ]);
+  expect(res).toBeTruthy();
+});
+
+test('Test Google AI multimodal generation', async () => {
+  const imageData = (
+    await fs.readFile(path.join(__dirname, '/data/hotdog.jpg'))
+  ).toString('base64');
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.0-flash',
+  });
+  const res = await model.invoke([
+    new HumanMessage({
+      content: [
+        {
+          type: 'text',
+          text: 'Describe the following image:',
+        },
+        {
+          type: 'image_url',
+          image_url: `data:image/png;base64,${imageData}`,
+        },
+      ],
+    }),
+  ]);
+  expect(res).toBeTruthy();
+});
+
+test('Test Google AI handleLLMNewToken callback', async () => {
+  // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+  // after the test/llm call has already finished & returned. Set that environment variable to false
+  // to prevent that from happening.
+  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = 'false';
+
+  try {
+    const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+    let tokens = '';
+    const res = await model.call(
+      [new HumanMessage('what is 1 + 1?')],
+      undefined,
+      [
+        {
+          handleLLMNewToken(token: string): void {
+            tokens += token;
+          },
+        },
+      ]
+    );
+    const responseContent = typeof res.content === 'string' ? res.content : '';
+    expect(tokens).toBe(responseContent);
+  } finally {
+    // Reset the environment variable
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+  }
+});
+
+test('Test Google AI handleLLMNewToken callback with streaming', async () => {
+  // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+  // after the test/llm call has already finished & returned. Set that environment variable to false
+  // to prevent that from happening.
+  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = 'false';
+
+  try {
+    const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+    let tokens = '';
+    const res = await model.stream([new HumanMessage('what is 1 + 1?')], {
+      callbacks: [
+        {
+          handleLLMNewToken(token: string): void {
+            tokens += token;
+          },
+        },
+      ],
+    });
+    let responseContent = '';
+    for await (const streamItem of res) {
+      responseContent += streamItem.content;
+    }
+    expect(tokens).toBe(responseContent);
+  } finally {
+    // Reset the environment variable
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+  }
+});
+
+test('Test Google AI in streaming mode', async () => {
+  // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+  // after the test/llm call has already finished & returned. Set that environment variable to false
+  // to prevent that from happening.
+  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = 'false';
+
+  try {
+    const model = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.0-flash',
+      streaming: true,
+    });
+    let tokens = '';
+    let nrNewTokens = 0;
+    const res = await model.invoke([new HumanMessage('Write a haiku?')], {
+      callbacks: [
+        {
+          handleLLMNewToken(token: string): void {
+            nrNewTokens += 1;
+            tokens += token;
+          },
+        },
+      ],
+    });
+    expect(nrNewTokens).toBeGreaterThanOrEqual(1);
+    expect(res.content).toBe(tokens);
+  } finally {
+    // Reset the environment variable
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+  }
+});
+
+async function fileToBase64(filePath: string): Promise<string> {
+  const fileData = await fs.readFile(filePath);
+  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+  /** @ts-ignore */
+  const base64String = Buffer.from(fileData).toString('base64');
+  return base64String;
+}
+
+test('Gemini can understand audio', async () => {
+  // Update this with the correct path to an audio file on your machine.
+  const audioPath = path.join(__dirname, 'data/gettysburg10.wav');
+  const audioMimeType = 'audio/wav';
+
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.0-flash',
+    temperature: 0,
+    maxRetries: 0,
+  });
+
+  const audioBase64 = await fileToBase64(audioPath);
+
+  const prompt = ChatPromptTemplate.fromMessages([
+    new MessagesPlaceholder('audio'),
+  ]);
+
+  const chain = prompt.pipe(model);
+  const response = await chain.invoke({
+    audio: new HumanMessage({
+      content: [
+        {
+          type: 'media',
+          mimeType: audioMimeType,
+          data: audioBase64,
+        },
+        {
+          type: 'text',
+          text: "Summarize the content in this audio. ALso, what is the speaker's tone?",
+        },
+      ],
+    }),
+  });
+
+  expect(typeof response.content).toBe('string');
+  expect((response.content as string).length).toBeGreaterThan(15);
+});
+
+class FakeBrowserTool extends StructuredTool {
+  schema = z.object({
+    url: z.string(),
+    query: z.string().optional(),
+  });
+
+  name = 'fake_browser_tool';
+
+  description =
+    'useful for when you need to find something on the web or summarize a webpage.';
+
+  async _call(_: z.infer<this['schema']>): Promise<string> {
+    return 'fake_browser_tool';
+  }
+}
+const googleGenAITool = {
+  functionDeclarations: [
+    {
+      name: 'fake_browser_tool',
+      description:
+        'useful for when you need to find something on the web or summarize a webpage.',
+      parameters: {
+        type: FunctionDeclarationSchemaType.OBJECT,
+        required: ['url'],
+        properties: {
+          url: {
+            type: FunctionDeclarationSchemaType.STRING,
+          },
+          query: {
+            type: FunctionDeclarationSchemaType.STRING,
+          },
+        },
+      },
+    },
+  ],
+};
+const prompt = new HumanMessage(
+  'Search the web and tell me what the weather will be like tonight in new york. use weather.com'
+);
+
+test('ChatGoogleGenerativeAI can bind and invoke langchain tools', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
+
+  const modelWithTools = model.bindTools([new FakeBrowserTool()]);
+  const res = await modelWithTools.invoke([prompt]);
+  const toolCalls = res.tool_calls;
+  expect(toolCalls).toBeDefined();
+  if (!toolCalls) {
+    throw new Error('tool_calls not in response');
+  }
+  expect(toolCalls.length).toBe(1);
+  expect(toolCalls[0].name).toBe('fake_browser_tool');
+  expect('url' in toolCalls[0].args).toBe(true);
+});
+
+test('ChatGoogleGenerativeAI can bind and stream langchain tools', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.5-flash',
+  });
+
+  const modelWithTools = model.bindTools([new FakeBrowserTool()]);
+  let finalChunk: AIMessageChunk | undefined;
+  for await (const chunk of await modelWithTools.stream([prompt])) {
+    if (!finalChunk) {
+      finalChunk = chunk;
+    } else {
+      finalChunk = finalChunk.concat(chunk);
+    }
+  }
+  if (!finalChunk) {
+    throw new Error('finalChunk is undefined');
+  }
+  const toolCalls = finalChunk.tool_calls;
+  expect(toolCalls).toBeDefined();
+  if (!toolCalls) {
+    throw new Error('tool_calls not in response');
+  }
+  expect(toolCalls.length).toBe(1);
+  expect(toolCalls[0].name).toBe('fake_browser_tool');
+  expect(toolCalls[0].id).toBeDefined();
+  expect('url' in toolCalls[0].args).toBe(true);
+});
+
+test('ChatGoogleGenerativeAI can handle streaming tool messages.', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.5-flash',
+    maxRetries: 1,
+  });
+
+  const browserTool = new FakeBrowserTool();
+
+  const modelWithTools = model.bindTools([browserTool]);
+  let finalChunk: AIMessageChunk | undefined;
+  const fullPrompt = [
+    new SystemMessage(
+      'You are a helpful assistant. If the chat history contains the tool results, you should use that and not call the tool again.'
+    ),
+    prompt,
+    new AIMessage({
+      content: '',
+      tool_calls: [
+        {
+          name: browserTool.name,
+          args: {
+            query: 'weather tonight new york',
+            url: 'https://weather.com',
+          },
+        },
+      ],
+    }),
+    new ToolMessage(dummyToolResponse, 'id', browserTool.name),
+  ];
+  for await (const chunk of await modelWithTools.stream(fullPrompt)) {
+    if (!finalChunk) {
+      finalChunk = chunk;
+    } else {
+      finalChunk = finalChunk.concat(chunk);
+    }
+  }
+  if (!finalChunk) {
+    throw new Error('finalChunk is undefined');
+  }
+  expect(typeof finalChunk.content).toBe('string');
+  expect(finalChunk.content.length).toBeGreaterThan(1);
+  expect(finalChunk.tool_calls).toHaveLength(0);
+});
+
+test('ChatGoogleGenerativeAI can handle invoking tool messages.', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.5-flash',
+    maxRetries: 1,
+  });
+
+  const browserTool = new FakeBrowserTool();
+
+  const modelWithTools = model.bindTools([browserTool]);
+  const fullPrompt = [
+    new SystemMessage(
+      'You are a helpful assistant. If the chat history contains the tool results, you should use that and not call the tool again.'
+    ),
+    prompt,
+    new AIMessage({
+      content: '',
+      tool_calls: [
+        {
+          name: browserTool.name,
+          args: {
+            query: 'weather tonight new york',
+            url: 'https://weather.com',
+          },
+        },
+      ],
+    }),
+    new ToolMessage(dummyToolResponse, 'id', browserTool.name),
+  ];
+  const response = await modelWithTools.invoke(fullPrompt);
+  expect(typeof response.content).toBe('string');
+  expect(response.content.length).toBeGreaterThan(1);
+  expect(response.tool_calls).toHaveLength(0);
+});
+
+test('ChatGoogleGenerativeAI can bind and invoke genai tools', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
+
+  const modelWithTools = model.bindTools([googleGenAITool]);
+  const res = await modelWithTools.invoke([prompt]);
+  const toolCalls = res.tool_calls;
+  expect(toolCalls).toBeDefined();
+  if (!toolCalls) {
+    throw new Error('tool_calls not in response');
+  }
+  expect(toolCalls.length).toBe(1);
+  expect(toolCalls[0].name).toBe('fake_browser_tool');
+  expect('url' in toolCalls[0].args).toBe(true);
+});
+
+test('ChatGoogleGenerativeAI can bindTools with langchain tools and invoke', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
+
+  const modelWithTools = model.bindTools([new FakeBrowserTool()]);
+  const res = await modelWithTools.invoke([prompt]);
+  const toolCalls = res.tool_calls;
+  expect(toolCalls).toBeDefined();
+  if (!toolCalls) {
+    throw new Error('tool_calls not in response');
+  }
+  expect(toolCalls.length).toBe(1);
+  expect(toolCalls[0].name).toBe('fake_browser_tool');
+  expect('url' in toolCalls[0].args).toBe(true);
+});
+
+test('ChatGoogleGenerativeAI can bindTools with genai tools and invoke', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
+
+  const modelWithTools = model.bindTools([googleGenAITool]);
+  const res = await modelWithTools.invoke([prompt]);
+  const toolCalls = res.tool_calls;
+  expect(toolCalls).toBeDefined();
+  if (!toolCalls) {
+    throw new Error('tool_calls not in response');
+  }
+  expect(toolCalls.length).toBe(1);
+  expect(toolCalls[0].name).toBe('fake_browser_tool');
+  expect('url' in toolCalls[0].args).toBe(true);
+});
+
+test('ChatGoogleGenerativeAI can call withStructuredOutput langchain tools and invoke', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+
+  const modelWithTools = model.withStructuredOutput(
+    z.object({
+      zomg: z.string(),
+      omg: z.number().optional(),
+    })
+  );
+  const res = await modelWithTools.invoke([prompt]);
+  expect(typeof res.zomg === 'string').toBe(true);
+});
+
+test('ChatGoogleGenerativeAI can call withStructuredOutput genai tools and invoke', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+
+  type GeminiTool = {
+    url: string;
+    query?: string;
+  };
+
+  const modelWithTools = model.withStructuredOutput<GeminiTool>(
+    googleGenAITool.functionDeclarations[0].parameters
+  );
+  const res = await modelWithTools.invoke([prompt]);
+  expect(typeof res.url === 'string').toBe(true);
+});
+
+test('Stream token count usage_metadata', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    temperature: 0,
+    model: 'gemini-2.0-flash',
+    maxOutputTokens: 10,
+  });
+  let res: AIMessageChunk | null = null;
+  for await (const chunk of await model.stream(
+    'Why is the sky blue? Be concise.'
+  )) {
+    if (!res) {
+      res = chunk;
+    } else {
+      res = res.concat(chunk);
+    }
+  }
+  expect(res?.usage_metadata).toBeDefined();
+  if (!res?.usage_metadata) {
+    return;
+  }
+  expect(res.usage_metadata.input_tokens).toBeGreaterThan(1);
+  expect(res.usage_metadata.output_tokens).toBeGreaterThan(1);
+  expect(res.usage_metadata.total_tokens).toBe(
+    res.usage_metadata.input_tokens + res.usage_metadata.output_tokens
+  );
+});
+
+describe('ChatGoogleGenerativeAI should count tokens correctly', () => {
+  describe('when streaming', () => {
+    test.each(['gemini-2.5-flash', 'gemini-2.5-pro'])(
+      'with %s',
+      async (modelName) => {
+        const model = new ChatGoogleGenerativeAI({
+          model: modelName,
+          temperature: 0,
+          maxRetries: 0,
+        });
+        const res = await model.stream('Why is the sky blue? Be concise.');
+        let full: AIMessageChunk | undefined;
+        for await (const chunk of res) {
+          full ??= chunk;
+          full = full.concat(chunk);
+        }
+        // expect(full?.usage_metadata);
+        // expect(res.usage_metadata).toBeDefined();
+      }
+    );
+  });
+});
+
+test('streamUsage excludes token usage', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    temperature: 0,
+    model: 'gemini-2.0-flash',
+    streamUsage: false,
+  });
+  let res: AIMessageChunk | null = null;
+  for await (const chunk of await model.stream(
+    'Why is the sky blue? Be concise.'
+  )) {
+    if (!res) {
+      res = chunk;
+    } else {
+      res = res.concat(chunk);
+    }
+  }
+  expect(res?.usage_metadata).not.toBeDefined();
+});
+
+test('Invoke token count usage_metadata', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    temperature: 0,
+    model: 'gemini-2.0-flash',
+    maxOutputTokens: 10,
+  });
+  const res = await model.invoke('Why is the sky blue? Be concise.');
+  expect(res?.usage_metadata).toBeDefined();
+  if (!res?.usage_metadata) {
+    return;
+  }
+  expect(res.usage_metadata.input_tokens).toBeGreaterThan(1);
+  expect(res.usage_metadata.output_tokens).toBeGreaterThan(1);
+  expect(res.usage_metadata.total_tokens).toBe(
+    res.usage_metadata.input_tokens + res.usage_metadata.output_tokens
+  );
+});
+
+test('Invoke with JSON mode', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.0-flash',
+    temperature: 0,
+    maxOutputTokens: 10,
+    json: true,
+  });
+  const res = await model.invoke('Why is the sky blue? Be concise.');
+  expect(res?.usage_metadata).toBeDefined();
+  if (!res?.usage_metadata) {
+    return;
+  }
+  expect(res.usage_metadata.input_tokens).toBeGreaterThan(1);
+  expect(res.usage_metadata.output_tokens).toBeGreaterThan(1);
+  expect(res.usage_metadata.total_tokens).toBe(
+    res.usage_metadata.input_tokens + res.usage_metadata.output_tokens
+  );
+});
+
+test('Supports tool_choice', async () => {
+  const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
+  const tools = [
+    {
+      name: 'get_weather',
+      description: 'Get the weather',
+      schema: z.object({
+        location: z.string(),
+      }),
+    },
+    {
+      name: 'calculator',
+      description: 'Preform calculations',
+      schema: z.object({
+        expression: z.string(),
+      }),
+    },
+  ];
+
+  const modelWithTools = model.bindTools(tools, {
+    tool_choice: 'calculator',
+    allowedFunctionNames: ['calculator'],
+  });
+  const response = await modelWithTools.invoke(
+    'What is 27725327 times 283683? Also whats the weather in New York?'
+  );
+  expect(response.tool_calls?.length).toBe(1);
+});
+
+describe('GoogleSearch (new API)', () => {
+  test('Supports GoogleSearch tool', async () => {
+    // New google_search tool for Gemini 2.0+ models
+    const googleSearchTool = {
+      googleSearch: {},
+    };
+    const model = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.5-flash',
+      temperature: 0,
+      maxRetries: 0,
+    }).bindTools([googleSearchTool]);
+
+    // Ask about something that requires current web data beyond training cutoff
+    const result = await model.invoke(
+      'What was the closing price of NVIDIA stock yesterday? Use web search to find the exact current price.'
+    );
+
+    expect(result.content).toBeDefined();
+    expect(
+      typeof result.content === 'string' || Array.isArray(result.content)
+    ).toBe(true);
+
+    // Grounding metadata should be present when Google Search is used
+    expect(result.response_metadata?.groundingMetadata).toBeDefined();
+    expect(result.response_metadata.groundingMetadata).toHaveProperty(
+      'groundingChunks'
+    );
+    expect(result.response_metadata.groundingMetadata).toHaveProperty(
+      'webSearchQueries'
+    );
+  });
+
+  test('Can stream GoogleSearch tool', async () => {
+    const googleSearchTool = {
+      googleSearch: {},
+    };
+    const model = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.5-flash',
+      temperature: 0,
+      maxRetries: 0,
+    }).bindTools([googleSearchTool]);
+
+    const stream = await model.stream(
+      'What was the closing price of NVIDIA stock yesterday? Use web search to find the exact current price.'
+    );
+    let finalMsg: AIMessageChunk | undefined;
+    for await (const msg of stream) {
+      finalMsg = finalMsg ? concat(finalMsg, msg) : msg;
+    }
+    if (!finalMsg) {
+      throw new Error('finalMsg is undefined');
+    }
+
+    expect(finalMsg.content).toBeDefined();
+    expect(
+      typeof finalMsg.content === 'string' || Array.isArray(finalMsg.content)
+    ).toBe(true);
+
+    // Grounding metadata should be present when Google Search is used
+    expect(finalMsg.response_metadata?.groundingMetadata).toBeDefined();
+    expect(finalMsg.response_metadata.groundingMetadata).toHaveProperty(
+      'groundingChunks'
+    );
+    expect(finalMsg.response_metadata.groundingMetadata).toHaveProperty(
+      'webSearchQueries'
+    );
+  });
+});
+
+describe('CodeExecutionTool', () => {
+  test('Supports CodeExecutionTool', async () => {
+    const codeExecutionTool: CodeExecutionTool = {
+      codeExecution: {}, // Simply pass an empty object to enable it.
+    };
+    const model = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.5-flash',
+      temperature: 0,
+      maxRetries: 0,
+    }).bindTools([codeExecutionTool]);
+
+    const result = await model.invoke(
+      'Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]'
+    );
+
+    expect(Array.isArray(result.content)).toBeTruthy();
+    if (!Array.isArray(result.content)) {
+      throw new Error('Content is not an array');
+    }
+    const texts = result.content
+      .flatMap((item) => ('text' in item ? [item.text] : []))
+      .join('\n');
+    expect(texts).toContain('21');
+
+    const executableCode = result.content.find(
+      (item) => item.type === 'executableCode'
+    );
+    expect(executableCode).toBeDefined();
+    const codeResult = result.content.find(
+      (item) => item.type === 'codeExecutionResult'
+    );
+    expect(codeResult).toBeDefined();
+  });
+
+  test('CodeExecutionTool contents can be passed in chat history', async () => {
+    const codeExecutionTool: CodeExecutionTool = {
+      codeExecution: {}, // Simply pass an empty object to enable it.
+    };
+    const model = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.5-flash',
+      temperature: 0,
+      maxRetries: 0,
+    }).bindTools([codeExecutionTool]);
+
+    const codeResult = await model.invoke(
+      'Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]'
+    );
+
+    const explanation = await model.invoke([
+      codeResult,
+      {
+        role: 'user',
+        content:
+          'Please explain the question I asked, the code you wrote, and the answer you got.',
+      },
+    ]);
+
+    // Content can be string or array depending on response format
+    if (typeof explanation.content === 'string') {
+      expect(explanation.content.length).toBeGreaterThan(10);
+    } else if (Array.isArray(explanation.content)) {
+      expect(explanation.content.length).toBeGreaterThan(0);
+    } else {
+      expect(explanation.content).toBeDefined();
+    }
+  });
+
+  test('Can stream CodeExecutionTool', async () => {
+    const codeExecutionTool: CodeExecutionTool = {
+      codeExecution: {}, // Simply pass an empty object to enable it.
+    };
+    const model = new ChatGoogleGenerativeAI({
+      model: 'gemini-2.5-flash',
+      temperature: 0,
+      maxRetries: 0,
+    }).bindTools([codeExecutionTool]);
+
+    const stream = await model.stream(
+      'Use code execution to find the sum of the first and last 3 numbers in the following list: [1, 2, 3, 72638, 8, 727, 4, 5, 6]'
+    );
+    let finalMsg: AIMessageChunk | undefined;
+    for await (const msg of stream) {
+      finalMsg = finalMsg ? concat(finalMsg, msg) : msg;
+    }
+
+    if (!finalMsg) {
+      throw new Error('finalMsg is undefined');
+    }
+    expect(Array.isArray(finalMsg.content)).toBeTruthy();
+    if (!Array.isArray(finalMsg.content)) {
+      throw new Error('Content is not an array');
+    }
+    const texts = finalMsg.content
+      .flatMap((item) => ('text' in item ? [item.text] : []))
+      .join('\n');
+    expect(texts).toContain('21');
+
+    const executableCode = finalMsg.content.find(
+      (item) => item.type === 'executableCode'
+    );
+    expect(executableCode).toBeDefined();
+    const codeResult = finalMsg.content.find(
+      (item) => item.type === 'codeExecutionResult'
+    );
+    expect(codeResult).toBeDefined();
+  });
+});
+
+test('pass pdf to request', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.0-flash-exp',
+    temperature: 0,
+    maxRetries: 0,
+  });
+  const pdfPath = path.join(
+    __dirname,
+    '../anthropic/Jacob_Lee_Resume_2023.pdf'
+  );
+  const pdfBase64 = await fs.readFile(pdfPath, 'base64');
+
+  const response = await model.invoke([
+    ['system', 'Use the provided documents to answer the question'],
+    [
+      'user',
+      [
+        {
+          type: 'application/pdf',
+          data: pdfBase64,
+        },
+        {
+          type: 'text',
+          text: 'Summarize the contents of this PDF',
+        },
+      ],
+    ],
+  ]);
+
+  expect(response.content.length).toBeGreaterThan(10);
+});
+
+test('calling tool with no args should work', async () => {
+  const llm = new ChatGoogleGenerativeAI({
+    model: 'gemini-2.0-flash',
+    maxRetries: 0,
+  });
+  const sfWeatherTool = tool(
+    async () => 'The weather is 80 degrees and sunny',
+    {
+      name: 'sf_weather',
+      description: 'Gets the weather in SF',
+      schema: z.object({}),
+    }
+  );
+  const llmWithTools = llm.bindTools([sfWeatherTool]);
+  const result = await llmWithTools.invoke([
+    {
+      role: 'user',
+      content: 'What is the current weather in SF?',
+    },
+  ]);
+  const nextMessage = await sfWeatherTool.invoke(result.tool_calls![0]);
+  delete nextMessage.name; // Should work even if name is not present
+  const finalResult = await llmWithTools.invoke([
+    {
+      role: 'user',
+      content: 'What is the current weather in SF?',
+    },
+    result,
+    nextMessage,
+  ]);
+  expect(finalResult.content).toContain('80');
+});
+
+describe('tool calling with thought signatures', () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-3-pro-preview',
+    maxRetries: 0,
+  });
+  const weatherTool = tool(async () => 'The weather is 80 degrees and sunny', {
+    name: 'weather',
+    description: 'Gets the weather in SF',
+    schema: z.object({}),
+  });
+  const modelWithTools = model.bindTools([weatherTool]);
+
+  test('works when invoking', async () => {
+    const result = await modelWithTools.invoke(
+      'What is the current weather in SF?'
+    );
+    expect(result.tool_calls).toBeDefined();
+    expect(result.tool_calls!.length).toBe(1);
+    expect(result.tool_calls![0].id).toBeDefined();
+
+    const toolMessage = new ToolMessage({
+      content: 'The weather is 80 degrees and sunny',
+      tool_call_id: result.tool_calls![0].id ?? '',
+    });
+
+    // Thought signatures are stored in additional_kwargs
+    const thoughtSignatures = result.additional_kwargs?.[
+      _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY
+    ] as Record<string, string> | undefined;
+
+    // Gemini 3 REQUIRES thought signatures - not optional
+    expect(thoughtSignatures).toBeDefined();
+    expect(thoughtSignatures![result.tool_calls![0].id ?? '']).toBeDefined();
+
+    const finalResult = await model.invoke([
+      new HumanMessage('What is the current weather in SF?'),
+      result,
+      toolMessage,
+    ]);
+    expect(finalResult.content).toBeDefined();
+  });
+
+  test('works when streaming', async () => {
+    let finalChunk: AIMessageChunk | undefined;
+    for await (const chunk of await modelWithTools.stream(
+      'What is the current weather in SF?'
+    )) {
+      finalChunk = finalChunk ? finalChunk.concat(chunk) : chunk;
+    }
+    expect(finalChunk).toBeDefined();
+    expect(finalChunk?.tool_calls).toBeDefined();
+    expect(finalChunk?.tool_calls!.length).toBe(1);
+    const toolMessage = new ToolMessage({
+      content: 'The weather is 80 degrees and sunny',
+      tool_call_id: finalChunk?.tool_calls![0].id ?? '',
+    });
+
+    // Thought signatures are stored in additional_kwargs
+    const thoughtSignatures = finalChunk?.additional_kwargs[
+      _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY
+    ] as Record<string, string> | undefined;
+
+    // Only check if thought signatures exist (may not be present for all models/responses)
+    if (thoughtSignatures) {
+      expect(
+        thoughtSignatures[finalChunk?.tool_calls![0].id ?? '']
+      ).toBeDefined();
+    }
+
+    const finalResult = await model.invoke([
+      new HumanMessage('What is the current weather in SF?'),
+      finalChunk!,
+      toolMessage,
+    ]);
+    expect(finalResult.content).toBeDefined();
+  });
+});
+
+test('works with thinking config', async () => {
+  const model = new ChatGoogleGenerativeAI({
+    model: 'gemini-3-pro-preview',
+    maxRetries: 0,
+    thinkingConfig: {
+      includeThoughts: true,
+      thinkingBudget: 100,
+    },
+  });
+  const result = await model.invoke('What is the current weather in SF?');
+  expect(result.content).toBeDefined();
+});