@librechat/agents 3.0.53 → 3.0.55

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dist/cjs/graphs/Graph.cjs +28 -2
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +108 -0
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/messages/format.cjs +22 -1
  6. package/dist/cjs/messages/format.cjs.map +1 -1
  7. package/dist/cjs/stream.cjs +22 -11
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/esm/graphs/Graph.mjs +28 -2
  10. package/dist/esm/graphs/Graph.mjs.map +1 -1
  11. package/dist/esm/graphs/MultiAgentGraph.mjs +108 -0
  12. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  13. package/dist/esm/messages/format.mjs +22 -1
  14. package/dist/esm/messages/format.mjs.map +1 -1
  15. package/dist/esm/stream.mjs +22 -11
  16. package/dist/esm/stream.mjs.map +1 -1
  17. package/dist/types/graphs/Graph.d.ts +14 -0
  18. package/dist/types/graphs/MultiAgentGraph.d.ts +41 -0
  19. package/dist/types/messages/format.d.ts +8 -2
  20. package/dist/types/types/stream.d.ts +22 -2
  21. package/package.json +3 -2
  22. package/src/graphs/Graph.ts +30 -2
  23. package/src/graphs/MultiAgentGraph.ts +119 -0
  24. package/src/messages/format.ts +33 -3
  25. package/src/messages/formatAgentMessages.test.ts +168 -0
  26. package/src/scripts/multi-agent-chain.ts +59 -6
  27. package/src/scripts/multi-agent-parallel-start.ts +39 -6
  28. package/src/scripts/multi-agent-parallel.ts +61 -10
  29. package/src/scripts/multi-agent-sequence.ts +6 -1
  30. package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
  31. package/src/scripts/parallel-full-metadata-test.ts +240 -0
  32. package/src/scripts/parallel-tools-test.ts +340 -0
  33. package/src/scripts/sequential-full-metadata-test.ts +197 -0
  34. package/src/scripts/single-agent-metadata-test.ts +198 -0
  35. package/src/scripts/test-thinking-handoff.ts +8 -0
  36. package/src/scripts/tools.ts +31 -11
  37. package/src/stream.ts +25 -14
  38. package/src/types/stream.ts +23 -4
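Before the raw hunks: the new and updated scripts in this diff all drive the multi-agent graph through the same run-configuration shape. The sketch below is condensed from the two scripts reproduced later in this section (parallel-tools-test.ts and sequential-full-metadata-test.ts); it is a reading aid under the assumption that a config this minimal is accepted, not authoritative API documentation. The empty `customHandlers` object stands in for the `GraphEvents` handlers the scripts actually register, and the API key is omitted here (the scripts read it from `process.env.ANTHROPIC_API_KEY`).

```typescript
import type * as t from '@/types';
import { HumanMessage } from '@langchain/core/messages';
import { Providers } from '@/common';
import { Run } from '@/run';

async function main() {
  // Two Anthropic-backed agents; see the scripts below for instructions/tools.
  const agents: t.AgentInputs[] = [
    { agentId: 'agent_a', provider: Providers.ANTHROPIC, clientOptions: { modelName: 'claude-haiku-4-5' } },
    { agentId: 'agent_b', provider: Providers.ANTHROPIC, clientOptions: { modelName: 'claude-haiku-4-5' } },
  ];
  // Empty array = both agents fan out from START; a direct edge chains them.
  const edges: t.GraphEdge[] = [{ from: 'agent_a', to: 'agent_b', edgeType: 'direct' }];

  const run = await Run.create({
    runId: `example-${Date.now()}`,
    graphConfig: { type: 'multi-agent', agents, edges },
    customHandlers: {}, // the scripts register GraphEvents handlers here
    returnContent: true,
  });

  await run.processStream(
    { messages: [new HumanMessage('Hi')] },
    { configurable: { thread_id: 'example' }, streamMode: 'values', version: 'v2' as const }
  );
}

main();
```

The two hunks that follow show the full scripts this shape is taken from.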
package/src/scripts/parallel-tools-test.ts
@@ -0,0 +1,340 @@
+ import { config } from 'dotenv';
+ config();
+
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { ToolEndHandler } from '@/events';
+ import { Providers, GraphEvents } from '@/common';
+ import { sleep } from '@/utils/run';
+ import { Run } from '@/run';
+ import { Calculator } from '@/tools/Calculator';
+ import { Tool } from '@langchain/core/tools';
+
+ const conversationHistory: BaseMessage[] = [];
+
+ // Create a simple "WordCount" tool for the second agent
+ class WordCounter extends Tool {
+   static lc_name(): string {
+     return 'WordCounter';
+   }
+
+   name = 'word_counter';
+
+   description =
+     'Useful for counting the number of words, characters, and sentences in a given text. Input should be the text to analyze.';
+
+   async _call(input: string): Promise<string> {
+     const words = input.trim().split(/\s+/).filter(Boolean).length;
+     const characters = input.length;
+     const sentences = input.split(/[.!?]+/).filter(Boolean).length;
+     return JSON.stringify({ words, characters, sentences });
+   }
+ }
+
+ /**
+  * Example of parallel multi-agent system with tools
+  *
+  * Graph structure:
+  * START -> [math_agent, text_agent] -> END (parallel from start, both run simultaneously)
+  *
+  * Both agents have tools they can use. This tests how langgraph_step behaves
+  * when parallel agents call tools.
+  */
+ async function testParallelWithTools() {
+   console.log('Testing Parallel From Start Multi-Agent System WITH TOOLS...\n');
+
+   // Set up content aggregator
+   const { contentParts, aggregateContent } = createContentAggregator();
+
+   // Track metadata for analysis
+   const metadataLog: Array<{
+     event: string;
+     langgraph_step: number;
+     langgraph_node: string;
+     timestamp: number;
+   }> = [];
+   const startTime = Date.now();
+
+   // Define two agents with different tools
+   const agents: t.AgentInputs[] = [
+     {
+       agentId: 'math_agent',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       tools: [new Calculator()],
+       instructions: `You are a MATH SPECIALIST. When asked about numbers or calculations, ALWAYS use the calculator tool to perform the calculation. Start your response with "🧮 MATH:". Keep your response concise.`,
+     },
+     {
+       agentId: 'text_agent',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       tools: [new WordCounter()],
+       instructions: `You are a TEXT ANALYST. When asked about text or content, ALWAYS use the word_counter tool to analyze the text. Start your response with "📝 TEXT:". Keep your response concise.`,
+     },
+   ];
+
+   // No edges - both agents run in parallel from start
+   const edges: t.GraphEdge[] = [];
+
+   // Track active agents and timing
+   const activeAgents = new Set<string>();
+   const agentTimings: Record<string, { start?: number; end?: number }> = {};
+
+   // Helper to log metadata
+   const logMetadata = (
+     eventName: string,
+     metadata?: Record<string, unknown>
+   ) => {
+     if (metadata) {
+       const entry = {
+         event: eventName,
+         langgraph_step: metadata.langgraph_step as number,
+         langgraph_node: metadata.langgraph_node as string,
+         timestamp: Date.now() - startTime,
+       };
+       metadataLog.push(entry);
+       console.log(
+         `📊 [${entry.timestamp}ms] ${eventName}: step=${entry.langgraph_step}, node=${entry.langgraph_node}`
+       );
+     }
+   };
+
+   // Create custom handlers with metadata logging
+   const customHandlers = {
+     [GraphEvents.TOOL_END]: {
+       handle: (
+         _event: string,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== TOOL_END ======');
+         logMetadata('TOOL_END', metadata);
+         console.dir(data, { depth: null });
+       },
+     },
+     [GraphEvents.TOOL_START]: {
+       handle: (
+         _event: string,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== TOOL_START ======');
+         logMetadata('TOOL_START', metadata);
+       },
+     },
+     [GraphEvents.CHAT_MODEL_END]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_END ======');
+         logMetadata('CHAT_MODEL_END', metadata);
+         const nodeName = metadata?.langgraph_node as string;
+         if (nodeName) {
+           const elapsed = Date.now() - startTime;
+           agentTimings[nodeName] = agentTimings[nodeName] || {};
+           agentTimings[nodeName].end = elapsed;
+         }
+       },
+     },
+     [GraphEvents.CHAT_MODEL_START]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_START ======');
+         logMetadata('CHAT_MODEL_START', metadata);
+         const nodeName = metadata?.langgraph_node as string;
+         if (nodeName) {
+           const elapsed = Date.now() - startTime;
+           agentTimings[nodeName] = agentTimings[nodeName] || {};
+           if (!agentTimings[nodeName].start) {
+             agentTimings[nodeName].start = elapsed;
+           }
+           activeAgents.add(nodeName);
+         }
+       },
+     },
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_COMPLETED,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== ON_RUN_STEP_COMPLETED ======');
+         logMetadata('ON_RUN_STEP_COMPLETED', metadata);
+         aggregateContent({
+           event,
+           data: data as unknown as { result: t.ToolEndEvent },
+         });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== ON_RUN_STEP ======');
+         logMetadata('ON_RUN_STEP', metadata);
+         aggregateContent({ event, data: data as t.RunStep });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         logMetadata('ON_RUN_STEP_DELTA', metadata);
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       },
+     },
+     [GraphEvents.ON_MESSAGE_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_MESSAGE_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         logMetadata('ON_MESSAGE_DELTA', metadata);
+         aggregateContent({ event, data: data as t.MessageDeltaEvent });
+       },
+     },
+   };
+
+   // Create multi-agent run configuration
+   const runConfig: t.RunConfig = {
+     runId: `parallel-tools-${Date.now()}`,
+     graphConfig: {
+       type: 'multi-agent',
+       agents,
+       edges,
+     },
+     customHandlers,
+     returnContent: true,
+   };
+
+   try {
+     // Create and execute the run
+     const run = await Run.create(runConfig);
+
+     // User message that should trigger both agents to use their tools
+     const userMessage = `I have two tasks:
+ 1. Calculate 1234 + 5678 * 2
+ 2. Analyze this text: "The quick brown fox jumps over the lazy dog"
+
+ Please help with both!`;
+
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     console.log(
+       'Invoking parallel-from-start multi-agent graph WITH TOOLS...\n'
+     );
+     console.log(
+       'Both math_agent and text_agent should start simultaneously and use tools!\n'
+     );
+
+     const config = {
+       configurable: {
+         thread_id: 'parallel-tools-test-1',
+       },
+       streamMode: 'values',
+       version: 'v2' as const,
+     };
+
+     // Process with streaming
+     const inputs = {
+       messages: conversationHistory,
+     };
+
+     const finalContentParts = await run.processStream(inputs, config);
+     const finalMessages = run.getRunMessages();
+
+     if (finalMessages) {
+       conversationHistory.push(...finalMessages);
+     }
+
+     // Analysis output
+     console.log('\n\n========== METADATA ANALYSIS ==========');
+     console.log('\nAll metadata entries by timestamp:');
+     console.table(metadataLog);
+
+     // Group by langgraph_step
+     const stepGroups = new Map<number, typeof metadataLog>();
+     for (const entry of metadataLog) {
+       if (!stepGroups.has(entry.langgraph_step)) {
+         stepGroups.set(entry.langgraph_step, []);
+       }
+       stepGroups.get(entry.langgraph_step)!.push(entry);
+     }
+
+     console.log('\n\nGrouped by langgraph_step:');
+     for (const [step, entries] of stepGroups) {
+       const nodes = [...new Set(entries.map((e) => e.langgraph_node))];
+       console.log(
+         `\n Step ${step}: ${nodes.length} unique nodes: ${nodes.join(', ')}`
+       );
+       console.log(` Events: ${entries.map((e) => e.event).join(', ')}`);
+     }
+
+     // Identify parallel groups (same step, different nodes)
+     console.log('\n\n========== PARALLEL DETECTION ANALYSIS ==========');
+     for (const [step, entries] of stepGroups) {
+       const uniqueNodes = [...new Set(entries.map((e) => e.langgraph_node))];
+       if (uniqueNodes.length > 1) {
+         console.log(
+           `✅ Step ${step} has MULTIPLE agents: ${uniqueNodes.join(', ')} - PARALLEL!`
+         );
+       } else {
+         console.log(` Step ${step} has single agent: ${uniqueNodes[0]}`);
+       }
+     }
+
+     console.log('\n\n========== TIMING SUMMARY ==========');
+     for (const [agent, timing] of Object.entries(agentTimings)) {
+       const duration =
+         timing.end && timing.start ? timing.end - timing.start : 'N/A';
+       console.log(
+         `${agent}: started=${timing.start}ms, ended=${timing.end}ms, duration=${duration}ms`
+       );
+     }
+
+     // Check overlap for parallel confirmation
+     const agentList = Object.keys(agentTimings);
+     if (agentList.length >= 2) {
+       const [a1, a2] = agentList;
+       const t1 = agentTimings[a1];
+       const t2 = agentTimings[a2];
+       if (t1.start && t2.start && t1.end && t2.end) {
+         const overlap = Math.min(t1.end, t2.end) - Math.max(t1.start, t2.start);
+         if (overlap > 0) {
+           console.log(
+             `\n✅ PARALLEL EXECUTION CONFIRMED: ${overlap}ms overlap`
+           );
+         } else {
+           console.log(`\n❌ SEQUENTIAL EXECUTION: no overlap`);
+         }
+       }
+     }
+     console.log('====================================\n');
+
+     console.log('Final content parts:', contentParts.length, 'parts');
+     console.dir(contentParts, { depth: null });
+     await sleep(2000);
+   } catch (error) {
+     console.error('Error in parallel-with-tools test:', error);
+   }
+ }
+
+ // Run the test
+ testParallelWithTools();
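The parallel check at the end of the script above treats each agent's model-call window as a closed interval and declares parallel execution when the intervals overlap: overlap = min(end1, end2) − max(start1, start2), with a positive value meaning both calls were in flight at the same time. Below is a minimal restatement of that logic; the helper name and the timings are illustrative, not part of the package.

```typescript
// Distilled from the overlap check in parallel-tools-test.ts above.
type Window = { start: number; end: number };

function overlapMs(a: Window, b: Window): number {
  // Positive when the two activity windows intersect, negative otherwise.
  return Math.min(a.end, b.end) - Math.max(a.start, b.start);
}

// Example: math_agent active 10–900 ms, text_agent active 15–870 ms.
const overlap = overlapMs({ start: 10, end: 900 }, { start: 15, end: 870 });
console.log(overlap > 0 ? `parallel (${overlap}ms overlap)` : 'sequential');
// -> "parallel (855ms overlap)"
```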
package/src/scripts/sequential-full-metadata-test.ts
@@ -0,0 +1,197 @@
+ import { config } from 'dotenv';
+ config();
+
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { Providers, GraphEvents } from '@/common';
+ import { sleep } from '@/utils/run';
+ import { Run } from '@/run';
+
+ const conversationHistory: BaseMessage[] = [];
+
+ /**
+  * Dump ALL metadata for SEQUENTIAL execution to compare with parallel
+  */
+ async function testSequentialMetadata() {
+   console.log('Dumping FULL metadata for SEQUENTIAL execution...\n');
+
+   const { contentParts, aggregateContent } = createContentAggregator();
+
+   const allMetadata: Array<{
+     event: string;
+     timestamp: number;
+     metadata: Record<string, unknown>;
+   }> = [];
+   const startTime = Date.now();
+
+   // Sequential chain: agent_a -> agent_b
+   const agents: t.AgentInputs[] = [
+     {
+       agentId: 'agent_a',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       instructions: `You are Agent A. Just say "Hello from A" in one sentence.`,
+     },
+     {
+       agentId: 'agent_b',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       instructions: `You are Agent B. Just say "Hello from B" in one sentence.`,
+     },
+   ];
+
+   // Sequential edge: A -> B (using edgeType not type)
+   const edges: t.GraphEdge[] = [
+     { from: 'agent_a', to: 'agent_b', edgeType: 'direct' },
+   ];
+
+   const captureMetadata = (
+     eventName: string,
+     metadata?: Record<string, unknown>
+   ) => {
+     if (metadata) {
+       allMetadata.push({
+         event: eventName,
+         timestamp: Date.now() - startTime,
+         metadata: { ...metadata },
+       });
+     }
+   };
+
+   const customHandlers = {
+     [GraphEvents.CHAT_MODEL_END]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         captureMetadata('CHAT_MODEL_END', metadata);
+       },
+     },
+     [GraphEvents.CHAT_MODEL_START]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         captureMetadata('CHAT_MODEL_START', metadata);
+       },
+     },
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         captureMetadata('ON_RUN_STEP', metadata);
+         aggregateContent({ event, data: data as t.RunStep });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       },
+     },
+     [GraphEvents.ON_MESSAGE_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_MESSAGE_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         aggregateContent({ event, data: data as t.MessageDeltaEvent });
+       },
+     },
+   };
+
+   const runConfig: t.RunConfig = {
+     runId: `sequential-metadata-${Date.now()}`,
+     graphConfig: {
+       type: 'multi-agent',
+       agents,
+       edges,
+     },
+     customHandlers,
+     returnContent: true,
+   };
+
+   try {
+     const run = await Run.create(runConfig);
+
+     const userMessage = `Hi`;
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     const config = {
+       configurable: {
+         thread_id: 'sequential-metadata-test-1',
+       },
+       streamMode: 'values',
+       version: 'v2' as const,
+     };
+
+     await run.processStream({ messages: conversationHistory }, config);
+
+     // Print ALL CHAT_MODEL_START metadata (don't dedupe)
+     console.log(
+       '\n\n========== ALL CHAT_MODEL_START EVENTS (SEQUENTIAL) ==========\n'
+     );
+     for (const entry of allMetadata) {
+       if (entry.event === 'CHAT_MODEL_START') {
+         const node = entry.metadata.langgraph_node as string;
+         console.log(`\n--- ${node} (at ${entry.timestamp}ms) ---`);
+         console.dir(entry.metadata, { depth: null });
+       }
+     }
+
+     console.log('\n\n========== ALL EVENTS ==========\n');
+     for (const entry of allMetadata) {
+       console.log(
+         `[${entry.timestamp}ms] ${entry.event}: ${entry.metadata.langgraph_node}`
+       );
+     }
+
+     // Key comparison
+     console.log(
+       '\n\n========== KEY FIELDS COMPARISON (SEQUENTIAL) ==========\n'
+     );
+
+     const agentMetadataMap = new Map<string, Record<string, unknown>>();
+     for (const entry of allMetadata) {
+       if (entry.event === 'CHAT_MODEL_START') {
+         const node = entry.metadata.langgraph_node as string;
+         if (!agentMetadataMap.has(node)) {
+           agentMetadataMap.set(node, entry.metadata);
+         }
+       }
+     }
+
+     for (const [node, meta] of agentMetadataMap) {
+       console.log(`${node}:`);
+       console.log(` langgraph_step: ${meta.langgraph_step}`);
+       console.log(
+         ` langgraph_triggers: ${JSON.stringify(meta.langgraph_triggers)}`
+       );
+       console.log(` checkpoint_ns: ${meta.checkpoint_ns}`);
+       console.log(` __pregel_task_id: ${meta.__pregel_task_id}`);
+       console.log(` langgraph_path: ${JSON.stringify(meta.langgraph_path)}`);
+       console.log();
+     }
+
+     await sleep(1000);
+   } catch (error) {
+     console.error('Error:', error);
+   }
+ }
+
+ testSequentialMetadata();
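Between these two scripts, the only structural difference in the graph wiring is the edges array: the parallel script passes no edges, so both agents hang off START, while this sequential script declares a direct edge from agent_a to agent_b (keyed as `edgeType`, as its in-code comment points out). A side-by-side sketch of the two shapes, copied from the scripts above; no other edge kinds are shown in this diff.

```typescript
import type * as t from '@/types';

// Parallel from START (parallel-tools-test.ts): no edges at all.
const parallelEdges: t.GraphEdge[] = [];

// Sequential chain (sequential-full-metadata-test.ts): a direct edge,
// keyed by `edgeType` rather than `type`.
const sequentialEdges: t.GraphEdge[] = [
  { from: 'agent_a', to: 'agent_b', edgeType: 'direct' },
];
```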