@librechat/agents 3.0.52 → 3.0.54

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,263 @@
+ import { config } from 'dotenv';
+ config();
+
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { ToolEndHandler } from '@/events';
+ import { Providers, GraphEvents } from '@/common';
+ import { sleep } from '@/utils/run';
+ import { Run } from '@/run';
+
+ const conversationHistory: BaseMessage[] = [];
+
+ /**
+  * Example of a parallel multi-agent system that begins parallel execution immediately.
+  *
+  * Graph structure:
+  * START -> [analyst1, analyst2] -> END (parallel from start, both run simultaneously)
+  *
+  * This demonstrates getting a parallel stream from the very beginning,
+  * with two agents running simultaneously. Useful for testing how different
+  * models respond to the same input.
+  */
+ async function testParallelFromStart() {
+   console.log('Testing Parallel From Start Multi-Agent System...\n');
+
+   // Set up content aggregator
+   const { contentParts, aggregateContent } = createContentAggregator();
+
+   // Define two agents - both have NO incoming edges, so they run in parallel from the start
+   const agents: t.AgentInputs[] = [
+     {
+       agentId: 'analyst1',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       instructions: `You are a CREATIVE ANALYST. Analyze the user's query from a creative and innovative perspective. Focus on novel ideas, unconventional approaches, and imaginative possibilities. Keep your response concise (100-150 words). Start with "🎨 CREATIVE:"`,
+     },
+     {
+       agentId: 'analyst2',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       instructions: `You are a PRACTICAL ANALYST. Analyze the user's query from a logical and practical perspective. Focus on feasibility, metrics, and actionable steps. Keep your response concise (100-150 words). Start with "📊 PRACTICAL:"`,
+     },
+   ];
+
+   // No edges needed - both agents have no incoming edges, so both are start nodes
+   // They will run in parallel and end when both complete
+   const edges: t.GraphEdge[] = [];
+
+   // Track which agents are active and their timing
+   const activeAgents = new Set<string>();
+   const agentTimings: Record<string, { start?: number; end?: number }> = {};
+   const startTime = Date.now();
+
+   // Create custom handlers with extensive metadata logging
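+   // Each handler receives (event, data, metadata); metadata.langgraph_node identifies which agent emitted the event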
+   const customHandlers = {
+     [GraphEvents.TOOL_END]: new ToolEndHandler(),
+     [GraphEvents.CHAT_MODEL_END]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_END METADATA ======');
+         console.dir(metadata, { depth: null });
+         const nodeName = metadata?.langgraph_node as string;
+         if (nodeName) {
+           const elapsed = Date.now() - startTime;
+           agentTimings[nodeName] = agentTimings[nodeName] || {};
+           agentTimings[nodeName].end = elapsed;
+           console.log(`⏱️ [${nodeName}] COMPLETED at ${elapsed}ms`);
+         }
+       },
+     },
+     [GraphEvents.CHAT_MODEL_START]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_START METADATA ======');
+         console.dir(metadata, { depth: null });
+         const nodeName = metadata?.langgraph_node as string;
+         if (nodeName) {
+           const elapsed = Date.now() - startTime;
+           agentTimings[nodeName] = agentTimings[nodeName] || {};
+           agentTimings[nodeName].start = elapsed;
+           activeAgents.add(nodeName);
+           console.log(`⏱️ [${nodeName}] STARTED at ${elapsed}ms`);
+         }
+       },
+     },
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_COMPLETED,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== ON_RUN_STEP_COMPLETED ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
+         aggregateContent({
+           event,
+           data: data as unknown as { result: t.ToolEndEvent },
+         });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== ON_RUN_STEP ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
+         aggregateContent({ event, data: data as t.RunStep });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== ON_RUN_STEP_DELTA ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       },
+     },
+     [GraphEvents.ON_MESSAGE_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_MESSAGE_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         // Log every delta's data and metadata (verbose, but useful for watching the parallel streams interleave)
+         console.log('\n====== ON_MESSAGE_DELTA ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
+         aggregateContent({ event, data: data as t.MessageDeltaEvent });
+       },
+     },
+   };
+
+   // Create multi-agent run configuration
+   const runConfig: t.RunConfig = {
+     runId: `parallel-start-${Date.now()}`,
+     graphConfig: {
+       type: 'multi-agent',
+       agents,
+       edges,
+     },
+     customHandlers,
+     returnContent: true,
+   };
+
+   try {
+     // Create and execute the run
+     const run = await Run.create(runConfig);
+
+     // Debug: Log the graph structure
+     console.log('=== DEBUG: Graph Structure ===');
+     const graph = (run as any).Graph;
+     console.log('Graph exists:', !!graph);
+     if (graph) {
+       console.log('Graph type:', graph.constructor.name);
+       console.log('AgentContexts exists:', !!graph.agentContexts);
+       if (graph.agentContexts) {
+         console.log('AgentContexts size:', graph.agentContexts.size);
+         for (const [agentId, context] of graph.agentContexts) {
+           console.log(`\nAgent: ${agentId}`);
+           console.log(
+             `Tools: ${context.tools?.map((t: any) => t.name || 'unnamed').join(', ') || 'none'}`
+           );
+         }
+       }
+     }
+     console.log('=== END DEBUG ===\n');
+
+     const userMessage = `What are the best approaches to learning a new programming language?`;
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     console.log('Invoking parallel-from-start multi-agent graph...\n');
+     console.log('Both analyst1 and analyst2 should start simultaneously!\n');
+
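+     // Stream configuration: thread_id identifies this conversation; stream 'values' using the events API v2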
+     const config = {
+       configurable: {
+         thread_id: 'parallel-start-conversation-1',
+       },
+       streamMode: 'values',
+       version: 'v2' as const,
+     };
+
+     // Process with streaming
+     const inputs = {
+       messages: conversationHistory,
+     };
+
+     const finalContentParts = await run.processStream(inputs, config);
+     const finalMessages = run.getRunMessages();
+
+     if (finalMessages) {
+       conversationHistory.push(...finalMessages);
+     }
+
+     console.log('\n\n========== TIMING SUMMARY ==========');
+     for (const [agent, timing] of Object.entries(agentTimings)) {
+       const duration =
+         timing.end && timing.start ? timing.end - timing.start : 'N/A';
+       console.log(
+         `${agent}: started=${timing.start}ms, ended=${timing.end}ms, duration=${duration}ms`
+       );
+     }
+
+     // Check if parallel
+     const agentNames = Object.keys(agentTimings);
+     if (agentNames.length >= 2) {
+       const [a1, a2] = agentNames;
+       const t1 = agentTimings[a1];
+       const t2 = agentTimings[a2];
+       if (t1.start && t2.start && t1.end && t2.end) {
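+         // Two timing windows overlap iff min(end1, end2) > max(start1, start2); a positive value means the agents ran concurrently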
+         const overlap = Math.min(t1.end, t2.end) - Math.max(t1.start, t2.start);
+         if (overlap > 0) {
+           console.log(
+             `\n✅ PARALLEL EXECUTION CONFIRMED: ${overlap}ms overlap`
+           );
+         } else {
+           console.log(`\n❌ SEQUENTIAL EXECUTION: no overlap`);
+         }
+       }
+     }
+     console.log('====================================\n');
+
+     console.log('Final content parts:', contentParts.length, 'parts');
+     console.dir(contentParts, { depth: null });
+
+     // groupId on each content part allows frontend to derive boundaries if needed
+
+     await sleep(3000);
+   } catch (error) {
+     console.error('Error in parallel-from-start multi-agent test:', error);
+   }
+ }
+
+ // Run the test
+ testParallelFromStart();
@@ -200,18 +200,50 @@ async function testParallelMultiAgent() {
 
    // Track which agents are active
    const activeAgents = new Set<string>();
+   const startTime = Date.now();
+   let messageCount = 0;
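+   // Counts ON_MESSAGE_DELTA events; used below to cap delta logging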
 
-   // Create custom handlers
+   // Create custom handlers with extensive metadata logging
    const customHandlers = {
      [GraphEvents.TOOL_END]: new ToolEndHandler(),
-     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
+     [GraphEvents.CHAT_MODEL_END]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_END METADATA ======');
+         console.dir(metadata, { depth: null });
+         const elapsed = Date.now() - startTime;
+         const nodeName = metadata?.langgraph_node as string;
+         console.log(`⏱️ [${nodeName || 'unknown'}] COMPLETED at ${elapsed}ms`);
+       },
+     },
+     [GraphEvents.CHAT_MODEL_START]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_START METADATA ======');
+         console.dir(metadata, { depth: null });
+         const elapsed = Date.now() - startTime;
+         const nodeName = metadata?.langgraph_node as string;
+         console.log(`⏱️ [${nodeName || 'unknown'}] STARTED at ${elapsed}ms`);
+       },
+     },
      [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
      [GraphEvents.ON_RUN_STEP_COMPLETED]: {
        handle: (
          event: GraphEvents.ON_RUN_STEP_COMPLETED,
-         data: t.StreamEventData
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
        ): void => {
-         console.log('====== ON_RUN_STEP_COMPLETED ======');
+         console.log('\n====== ON_RUN_STEP_COMPLETED ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
          const runStepData = data as any;
          if (runStepData?.name) {
            activeAgents.delete(runStepData.name);
@@ -226,9 +258,14 @@ async function testParallelMultiAgent() {
      [GraphEvents.ON_RUN_STEP]: {
        handle: (
          event: GraphEvents.ON_RUN_STEP,
-         data: t.StreamEventData
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
        ): void => {
-         console.log('====== ON_RUN_STEP ======');
+         console.log('\n====== ON_RUN_STEP ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
          const runStepData = data as any;
          if (runStepData?.name) {
            activeAgents.add(runStepData.name);
@@ -240,18 +277,32 @@ async function testParallelMultiAgent() {
      [GraphEvents.ON_RUN_STEP_DELTA]: {
        handle: (
          event: GraphEvents.ON_RUN_STEP_DELTA,
-         data: t.StreamEventData
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
        ): void => {
+         console.log('\n====== ON_RUN_STEP_DELTA ======');
+         console.log('DATA:');
+         console.dir(data, { depth: null });
+         console.log('METADATA:');
+         console.dir(metadata, { depth: null });
          aggregateContent({ event, data: data as t.RunStepDeltaEvent });
        },
      },
      [GraphEvents.ON_MESSAGE_DELTA]: {
        handle: (
          event: GraphEvents.ON_MESSAGE_DELTA,
-         data: t.StreamEventData
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
        ): void => {
-         console.log('====== ON_MESSAGE_DELTA ======');
-         console.dir(data, { depth: null });
+         messageCount++;
+         // Only log the first few message deltas (5 total) to avoid spam
+         if (messageCount <= 5) {
+           console.log('\n====== ON_MESSAGE_DELTA ======');
+           console.log('DATA:');
+           console.dir(data, { depth: null });
+           console.log('METADATA:');
+           console.dir(metadata, { depth: null });
+         }
          aggregateContent({ event, data: data as t.MessageDeltaEvent });
        },
      },
@@ -0,0 +1,274 @@
+ import { config } from 'dotenv';
+ config();
+
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { ToolEndHandler } from '@/events';
+ import { Providers, GraphEvents } from '@/common';
+ import { sleep } from '@/utils/run';
+ import { Run } from '@/run';
+ import { Calculator } from '@/tools/Calculator';
+
+ const conversationHistory: BaseMessage[] = [];
+
+ /**
+  * Test ASYMMETRIC parallel execution:
+  * - simple_agent: NO tools (will finish quickly in step 1)
+  * - math_agent: HAS tools (will go step 1 → step 2 → step 3)
+  *
+  * This tests whether langgraph_step can still detect parallel execution
+  * when agents have different tool-calling patterns.
+  */
+ async function testAsymmetricParallelTools() {
+   console.log(
+     'Testing ASYMMETRIC Parallel Agents (one with tools, one without)...\n'
+   );
+
+   const { contentParts, aggregateContent } = createContentAggregator();
+
+   // Track metadata for analysis
+   const metadataLog: Array<{
+     event: string;
+     langgraph_step: number;
+     langgraph_node: string;
+     timestamp: number;
+   }> = [];
+   const startTime = Date.now();
+
+   // Define two agents - one WITH tools, one WITHOUT
+   const agents: t.AgentInputs[] = [
+     {
+       agentId: 'simple_agent',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       // NO TOOLS - will complete in single step
+       instructions: `You are a simple assistant. Just answer the question directly in 1-2 sentences. Start with "🗣️ SIMPLE:". Do NOT try to use any tools.`,
+     },
+     {
+       agentId: 'math_agent',
+       provider: Providers.ANTHROPIC,
+       clientOptions: {
+         modelName: 'claude-haiku-4-5',
+         apiKey: process.env.ANTHROPIC_API_KEY,
+       },
+       tools: [new Calculator()],
+       instructions: `You are a MATH SPECIALIST. ALWAYS use the calculator tool to perform calculations, even simple ones. Start your response with "🧮 MATH:". Keep your response concise.`,
+     },
+   ];
+
+   // No edges - both run in parallel from start
+   const edges: t.GraphEdge[] = [];
+
+   const agentTimings: Record<string, { start?: number; end?: number }> = {};
+
+   // Helper to log metadata
+   const logMetadata = (
+     eventName: string,
+     metadata?: Record<string, unknown>
+   ) => {
+     if (metadata) {
+       const entry = {
+         event: eventName,
+         langgraph_step: metadata.langgraph_step as number,
+         langgraph_node: metadata.langgraph_node as string,
+         timestamp: Date.now() - startTime,
+       };
+       metadataLog.push(entry);
+       console.log(
+         `📊 [${entry.timestamp}ms] ${eventName}: step=${entry.langgraph_step}, node=${entry.langgraph_node}`
+       );
+     }
+   };
+
+   const customHandlers = {
+     [GraphEvents.TOOL_END]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== TOOL_END ======');
+         logMetadata('TOOL_END', metadata);
+       },
+     },
+     [GraphEvents.TOOL_START]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== TOOL_START ======');
+         logMetadata('TOOL_START', metadata);
+       },
+     },
+     [GraphEvents.CHAT_MODEL_END]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_END ======');
+         logMetadata('CHAT_MODEL_END', metadata);
+         const nodeName = metadata?.langgraph_node as string;
+         if (nodeName) {
+           const elapsed = Date.now() - startTime;
+           agentTimings[nodeName] = agentTimings[nodeName] || {};
+           agentTimings[nodeName].end = elapsed;
+         }
+       },
+     },
+     [GraphEvents.CHAT_MODEL_START]: {
+       handle: (
+         _event: string,
+         _data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== CHAT_MODEL_START ======');
+         logMetadata('CHAT_MODEL_START', metadata);
+         const nodeName = metadata?.langgraph_node as string;
+         if (nodeName) {
+           const elapsed = Date.now() - startTime;
+           agentTimings[nodeName] = agentTimings[nodeName] || {};
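+           // Keep only the first CHAT_MODEL_START time so multi-step agents record their earliest start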
+           if (!agentTimings[nodeName].start) {
+             agentTimings[nodeName].start = elapsed;
+           }
+         }
+       },
+     },
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('\n====== ON_RUN_STEP ======');
+         logMetadata('ON_RUN_STEP', metadata);
+         aggregateContent({ event, data: data as t.RunStep });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         // Don't log these to reduce noise
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       },
+     },
+     [GraphEvents.ON_MESSAGE_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_MESSAGE_DELTA,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         // Don't log these to reduce noise
+         aggregateContent({ event, data: data as t.MessageDeltaEvent });
+       },
+     },
+   };
+
+   const runConfig: t.RunConfig = {
+     runId: `asymmetric-parallel-${Date.now()}`,
+     graphConfig: {
+       type: 'multi-agent',
+       agents,
+       edges,
+     },
+     customHandlers,
+     returnContent: true,
+   };
+
+   try {
+     const run = await Run.create(runConfig);
+
+     // Ask a question that will trigger only the math agent to use tools
+     const userMessage = `What is 42 * 17?`;
+
+     conversationHistory.push(new HumanMessage(userMessage));
+
+     console.log('User message:', userMessage);
+     console.log('\nExpected behavior:');
+     console.log(' - simple_agent: Step 1 only (no tools)');
+     console.log(' - math_agent: Step 1 → Step 2 (tool) → Step 3 (response)');
+     console.log('\n');
+
+     const config = {
+       configurable: {
+         thread_id: 'asymmetric-test-1',
+       },
+       streamMode: 'values',
+       version: 'v2' as const,
+     };
+
+     const inputs = {
+       messages: conversationHistory,
+     };
+
+     await run.processStream(inputs, config);
+
+     // Analysis
+     console.log('\n\n========== METADATA ANALYSIS ==========');
+     console.log('\nAll events by step and node:');
+     console.table(metadataLog);
+
+     // Group by step
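+     // stepGroups: langgraph_step -> (node name -> events seen for that node at that step)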
+     const stepGroups = new Map<number, Map<string, string[]>>();
+     for (const entry of metadataLog) {
+       if (!stepGroups.has(entry.langgraph_step)) {
+         stepGroups.set(entry.langgraph_step, new Map());
+       }
+       const nodeMap = stepGroups.get(entry.langgraph_step)!;
+       if (!nodeMap.has(entry.langgraph_node)) {
+         nodeMap.set(entry.langgraph_node, []);
+       }
+       nodeMap.get(entry.langgraph_node)!.push(entry.event);
+     }
+
+     console.log('\n\n========== STEP BREAKDOWN ==========');
+     for (const [step, nodeMap] of stepGroups) {
+       console.log(`\nStep ${step}:`);
+       for (const [node, events] of nodeMap) {
+         console.log(` ${node}: ${events.join(', ')}`);
+       }
+       console.log(` → ${nodeMap.size} unique node(s) at this step`);
+     }
+
+     console.log('\n\n========== PARALLEL DETECTION CHALLENGE ==========');
+     console.log('\nAt which steps can we detect parallel execution?');
+     for (const [step, nodeMap] of stepGroups) {
+       if (nodeMap.size > 1) {
+         console.log(
+           ` ✅ Step ${step}: ${nodeMap.size} agents detected - PARALLEL`
+         );
+       } else {
+         const [nodeName] = nodeMap.keys();
+         console.log(
+           ` ⚠️ Step ${step}: Only 1 agent (${nodeName}) - looks sequential!`
+         );
+       }
+     }
+
+     console.log('\n\n========== KEY INSIGHT ==========');
+     console.log(
+       'If we only look at step 2 or 3, we miss the parallel context!'
+     );
+     console.log(
+       'We need to detect parallelism EARLY (at step 1) and carry that forward.'
+     );
+
+     console.log('\n\nFinal content parts:');
+     console.dir(contentParts, { depth: null });
+
+     await sleep(2000);
+   } catch (error) {
+     console.error('Error:', error);
+   }
+ }
+
+ testAsymmetricParallelTools();