@librechat/agents 3.0.62 → 3.0.64
This diff shows the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
- package/dist/cjs/graphs/MultiAgentGraph.cjs +129 -34
- package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
- package/dist/cjs/tools/ToolNode.cjs +53 -0
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/esm/graphs/MultiAgentGraph.mjs +129 -34
- package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
- package/dist/esm/tools/ToolNode.mjs +54 -1
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/types/graphs/MultiAgentGraph.d.ts +3 -0
- package/package.json +1 -1
- package/src/graphs/MultiAgentGraph.ts +138 -33
- package/src/scripts/test-handoff-preamble.ts +2 -0
- package/src/scripts/test-parallel-handoffs.ts +293 -0
- package/src/tools/ToolNode.ts +58 -0
package/src/scripts/test-parallel-handoffs.ts
ADDED
@@ -0,0 +1,293 @@
+import { config } from 'dotenv';
+config();
+
+import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+import type * as t from '@/types';
+import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+import { ToolEndHandler } from '@/events';
+import { Providers, GraphEvents } from '@/common';
+import { sleep } from '@/utils/run';
+import { Run } from '@/run';
+
+const conversationHistory: BaseMessage[] = [];
+
+/**
+ * Test parallel handoffs - where an LLM calls multiple transfer tools simultaneously
+ *
+ * Graph structure:
+ * coordinator -> [researcher, writer] (via parallel handoff tools)
+ *
+ * The coordinator agent has two transfer tools:
+ * - transfer_to_researcher
+ * - transfer_to_writer
+ *
+ * When given a task that needs both, it should call both tools in parallel.
+ */
+async function testParallelHandoffs() {
+  console.log(
+    'Testing Parallel Handoffs (LLM calling multiple transfers)...\n'
+  );
+
+  const { contentParts, aggregateContent } = createContentAggregator();
+
+  const agents: t.AgentInputs[] = [
+    {
+      agentId: 'coordinator',
+      provider: Providers.OPENAI,
+      clientOptions: {
+        modelName: 'gpt-4o-mini',
+        apiKey: process.env.OPENAI_API_KEY,
+      },
+      instructions: `You are a COORDINATOR agent. Your job is to delegate tasks to specialized agents.
+
+You have access to two transfer tools:
+- transfer_to_researcher: For research and fact-finding tasks
+- transfer_to_writer: For content creation and writing tasks
+
+IMPORTANT: When a task requires BOTH research AND writing, you MUST call BOTH transfer tools SIMULTANEOUSLY in the same response. Do not call them sequentially.
+
+For example, if asked to "research and write about X", call both transfers at once to enable parallel work.
+
+When delegating, provide clear instructions to each agent about what they should do.`,
+    },
+    {
+      agentId: 'researcher',
+      provider: Providers.ANTHROPIC,
+      clientOptions: {
+        modelName: 'claude-haiku-4-5',
+        apiKey: process.env.ANTHROPIC_API_KEY,
+      },
+      instructions: `You are a RESEARCHER. When you receive a task:
+1. Acknowledge the handoff
+2. Provide concise research findings (100-150 words)
+3. Start your response with "📚 RESEARCH FINDINGS:"`,
+    },
+    {
+      agentId: 'writer',
+      provider: Providers.ANTHROPIC,
+      clientOptions: {
+        modelName: 'claude-haiku-4-5',
+        apiKey: process.env.ANTHROPIC_API_KEY,
+      },
+      instructions: `You are a WRITER. When you receive a task:
+1. Acknowledge the handoff
+2. Provide creative content (100-150 words)
+3. Start your response with "✍️ WRITTEN CONTENT:"`,
+    },
+  ];
+
+  /**
+   * Create handoff edges from coordinator to both researcher and writer.
+   * These are separate edges so the LLM sees both transfer tools.
+   */
+  const edges: t.GraphEdge[] = [
+    {
+      from: 'coordinator',
+      to: 'researcher',
+      edgeType: 'handoff',
+      description: 'Transfer to researcher for research and fact-finding tasks',
+      prompt: 'Research task instructions',
+    },
+    {
+      from: 'coordinator',
+      to: 'writer',
+      edgeType: 'handoff',
+      description: 'Transfer to writer for content creation and writing tasks',
+      prompt: 'Writing task instructions',
+    },
+  ];
+
+  /** Track which agents are active and their timing */
+  const activeAgents = new Set<string>();
+  const agentTimings: Record<string, { start?: number; end?: number }> = {};
+  const startTime = Date.now();
+
+  const customHandlers = {
+    [GraphEvents.TOOL_END]: new ToolEndHandler(),
+    [GraphEvents.CHAT_MODEL_END]: {
+      handle: (
+        _event: string,
+        _data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
+        const nodeName = metadata?.langgraph_node as string;
+        if (nodeName) {
+          const elapsed = Date.now() - startTime;
+          agentTimings[nodeName] = agentTimings[nodeName] || {};
+          agentTimings[nodeName].end = elapsed;
+          activeAgents.delete(nodeName);
+          console.log(`\n⏱️ [${nodeName}] COMPLETED at ${elapsed}ms`);
+        }
+      },
+    },
+    [GraphEvents.CHAT_MODEL_START]: {
+      handle: (
+        _event: string,
+        _data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
+        const nodeName = metadata?.langgraph_node as string;
+        if (nodeName) {
+          const elapsed = Date.now() - startTime;
+          /** Store first start time for parallel overlap calculation */
+          if (!agentTimings[nodeName]?.start) {
+            agentTimings[nodeName] = agentTimings[nodeName] || {};
+            agentTimings[nodeName].start = elapsed;
+          }
+          activeAgents.add(nodeName);
+          console.log(`\n⏱️ [${nodeName}] STARTED at ${elapsed}ms`);
+          console.log(
+            `  Active agents: ${Array.from(activeAgents).join(', ')}`
+          );
+        }
+      },
+    },
+    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+    [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
+    },
+    [GraphEvents.ON_RUN_STEP]: {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
+        aggregateContent({ event, data: data as t.RunStep });
+      },
+    },
+    [GraphEvents.ON_RUN_STEP_DELTA]: {
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
+        aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+      },
+    },
+    [GraphEvents.ON_MESSAGE_DELTA]: {
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
+        aggregateContent({ event, data: data as t.MessageDeltaEvent });
+      },
+    },
+  };
+
+  const runConfig: t.RunConfig = {
+    runId: `parallel-handoffs-${Date.now()}`,
+    graphConfig: {
+      type: 'multi-agent',
+      agents,
+      edges,
+    },
+    customHandlers,
+    returnContent: true,
+  };
+
+  try {
+    const run = await Run.create(runConfig);
+
+    /** Prompt designed to trigger parallel handoffs without confusing language */
+    const userMessage = `Help me with two topics:
+1. The history of the internet
+2. A short poem about technology
+
+I need information on both topics.`;
+
+    conversationHistory.push(new HumanMessage(userMessage));
+
+    console.log('User message:', userMessage);
+    console.log(
+      '\nInvoking multi-agent graph with parallel handoff request...\n'
+    );
+
+    const config = {
+      configurable: {
+        thread_id: 'parallel-handoffs-test-1',
+      },
+      streamMode: 'values',
+      version: 'v2' as const,
+    };
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    await run.processStream(inputs, config);
+    const finalMessages = run.getRunMessages();
+
+    if (finalMessages) {
+      conversationHistory.push(...finalMessages);
+    }
+
+    /** Analyze parallel execution */
+    console.log('\n\n========== TIMING SUMMARY ==========');
+    console.log('Available timing keys:', Object.keys(agentTimings));
+    for (const [agent, timing] of Object.entries(agentTimings)) {
+      const duration =
+        timing.end && timing.start ? timing.end - timing.start : 'N/A';
+      console.log(
+        `${agent}: started=${timing.start}ms, ended=${timing.end}ms, duration=${duration}ms`
+      );
+    }
+
+    /** Check if researcher and writer ran in parallel (handle key variations) */
+    const researcherKey = Object.keys(agentTimings).find((k) =>
+      k.includes('researcher')
+    );
+    const writerKey = Object.keys(agentTimings).find((k) =>
+      k.includes('writer')
+    );
+    const researcherTiming = researcherKey
+      ? agentTimings[researcherKey]
+      : undefined;
+    const writerTiming = writerKey ? agentTimings[writerKey] : undefined;
+
+    if (researcherTiming && writerTiming) {
+      const bothStarted = researcherTiming.start && writerTiming.start;
+      const bothEnded = researcherTiming.end && writerTiming.end;
+
+      if (bothStarted && bothEnded) {
+        const overlap =
+          Math.min(researcherTiming.end!, writerTiming.end!) -
+          Math.max(researcherTiming.start!, writerTiming.start!);
+
+        if (overlap > 0) {
+          console.log(
+            `\n✅ PARALLEL HANDOFFS SUCCESSFUL: ${overlap}ms overlap between researcher and writer`
+          );
+        } else {
+          console.log(
+            `\n⚠️ SEQUENTIAL EXECUTION: researcher and writer did not overlap`
+          );
+          console.log(
+            `  This may indicate the LLM called transfers sequentially, not in parallel`
+          );
+        }
+      }
+    } else {
+      console.log(
+        '\n⚠️ Not all agents were invoked. Check if handoffs occurred.'
+      );
+      console.log('  researcher timing:', researcherTiming);
+      console.log('  writer timing:', writerTiming);
+    }
+    console.log('====================================\n');
+
+    console.log('Final content parts:', contentParts.length, 'parts');
+    console.dir(contentParts, { depth: null });
+    await sleep(3000);
+  } catch (error) {
+    console.error('Error in parallel handoffs test:', error);
+    throw error;
+  }
+}
+
+testParallelHandoffs();
package/src/tools/ToolNode.ts
CHANGED
@@ -281,6 +281,13 @@ export class ToolNode<T = any> extends RunnableCallable<T, T> {
     )[] = [];
     let parentCommand: Command | null = null;

+    /**
+     * Collect handoff commands (Commands with string goto and Command.PARENT)
+     * for potential parallel handoff aggregation
+     */
+    const handoffCommands: Command[] = [];
+    const nonCommandOutputs: BaseMessage[] = [];
+
     for (const output of outputs) {
       if (isCommand(output)) {
         if (
@@ -288,6 +295,7 @@ export class ToolNode<T = any> extends RunnableCallable<T, T> {
           Array.isArray(output.goto) &&
           output.goto.every((send): send is Send => isSend(send))
         ) {
+          /** Aggregate Send-based commands */
           if (parentCommand) {
             (parentCommand.goto as Send[]).push(...(output.goto as Send[]));
           } else {
@@ -296,16 +304,66 @@ export class ToolNode<T = any> extends RunnableCallable<T, T> {
               goto: output.goto,
             });
           }
+        } else if (output.graph === Command.PARENT) {
+          /**
+           * Handoff Command with destination.
+           * Handle both string ('agent') and array (['agent']) formats.
+           * Collect for potential parallel aggregation.
+           */
+          const goto = output.goto;
+          const isSingleStringDest = typeof goto === 'string';
+          const isSingleArrayDest =
+            Array.isArray(goto) &&
+            goto.length === 1 &&
+            typeof goto[0] === 'string';
+
+          if (isSingleStringDest || isSingleArrayDest) {
+            handoffCommands.push(output);
+          } else {
+            /** Multi-destination or other command - pass through */
+            combinedOutputs.push(output);
+          }
         } else {
+          /** Other commands - pass through */
           combinedOutputs.push(output);
         }
       } else {
+        nonCommandOutputs.push(output);
         combinedOutputs.push(
           Array.isArray(input) ? [output] : { messages: [output] }
         );
       }
     }

+    /**
+     * Handle handoff commands - convert to Send objects for parallel execution
+     * when multiple handoffs are requested
+     */
+    if (handoffCommands.length > 1) {
+      /**
+       * Multiple parallel handoffs - convert to Send objects.
+       * Each Send carries its own state with the appropriate messages.
+       * This enables LLM-initiated parallel execution when calling multiple
+       * transfer tools simultaneously.
+       */
+      const sends = handoffCommands.map((cmd) => {
+        /** Extract destination - handle both string and array formats */
+        const goto = cmd.goto;
+        const destination =
+          typeof goto === 'string' ? goto : (goto as string[])[0];
+        return new Send(destination, cmd.update);
+      });
+
+      const parallelCommand = new Command({
+        graph: Command.PARENT,
+        goto: sends,
+      });
+      combinedOutputs.push(parallelCommand);
+    } else if (handoffCommands.length === 1) {
+      /** Single handoff - pass through as-is */
+      combinedOutputs.push(handoffCommands[0]);
+    }
+
     if (parentCommand) {
       combinedOutputs.push(parentCommand);
     }
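Taken together, the added branch collects single-destination handoff Commands (string or one-element-array goto targeting the parent graph) and, when more than one arrives in the same turn, replaces them with a single parent Command whose goto is a list of Send objects, so both target agents run as parallel branches. Below is a condensed sketch of that transformation, assuming Command and Send from @langchain/langgraph as used in the diff; the update payloads and variable names are placeholders, not the ToolNode internals.

import { Command, Send } from '@langchain/langgraph';

/** What two transfer tools might each return in the same turn
 * (both string and single-element-array goto are accepted above). */
const handoffCommands = [
  new Command({
    graph: Command.PARENT,
    goto: 'researcher',
    update: { messages: [] },
  }),
  new Command({
    graph: Command.PARENT,
    goto: ['writer'],
    update: { messages: [] },
  }),
];

/** What the node now emits instead of forwarding them one by one:
 * one parent Command whose goto holds a Send per destination, so the
 * researcher and writer branches execute concurrently. */
const aggregated = new Command({
  graph: Command.PARENT,
  goto: handoffCommands.map((cmd) => {
    const goto = cmd.goto;
    const destination = typeof goto === 'string' ? goto : (goto as string[])[0];
    return new Send(destination, cmd.update);
  }),
});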