@librechat/agents 3.0.53 → 3.0.55

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dist/cjs/graphs/Graph.cjs +28 -2
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +108 -0
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/messages/format.cjs +22 -1
  6. package/dist/cjs/messages/format.cjs.map +1 -1
  7. package/dist/cjs/stream.cjs +22 -11
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/esm/graphs/Graph.mjs +28 -2
  10. package/dist/esm/graphs/Graph.mjs.map +1 -1
  11. package/dist/esm/graphs/MultiAgentGraph.mjs +108 -0
  12. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  13. package/dist/esm/messages/format.mjs +22 -1
  14. package/dist/esm/messages/format.mjs.map +1 -1
  15. package/dist/esm/stream.mjs +22 -11
  16. package/dist/esm/stream.mjs.map +1 -1
  17. package/dist/types/graphs/Graph.d.ts +14 -0
  18. package/dist/types/graphs/MultiAgentGraph.d.ts +41 -0
  19. package/dist/types/messages/format.d.ts +8 -2
  20. package/dist/types/types/stream.d.ts +22 -2
  21. package/package.json +3 -2
  22. package/src/graphs/Graph.ts +30 -2
  23. package/src/graphs/MultiAgentGraph.ts +119 -0
  24. package/src/messages/format.ts +33 -3
  25. package/src/messages/formatAgentMessages.test.ts +168 -0
  26. package/src/scripts/multi-agent-chain.ts +59 -6
  27. package/src/scripts/multi-agent-parallel-start.ts +39 -6
  28. package/src/scripts/multi-agent-parallel.ts +61 -10
  29. package/src/scripts/multi-agent-sequence.ts +6 -1
  30. package/src/scripts/parallel-asymmetric-tools-test.ts +274 -0
  31. package/src/scripts/parallel-full-metadata-test.ts +240 -0
  32. package/src/scripts/parallel-tools-test.ts +340 -0
  33. package/src/scripts/sequential-full-metadata-test.ts +197 -0
  34. package/src/scripts/single-agent-metadata-test.ts +198 -0
  35. package/src/scripts/test-thinking-handoff.ts +8 -0
  36. package/src/scripts/tools.ts +31 -11
  37. package/src/stream.ts +25 -14
  38. package/src/types/stream.ts +23 -4
@@ -0,0 +1,274 @@
1
+ import { config } from 'dotenv';
2
+ config();
3
+
4
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
5
+ import type * as t from '@/types';
6
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
7
+ import { ToolEndHandler } from '@/events';
8
+ import { Providers, GraphEvents } from '@/common';
9
+ import { sleep } from '@/utils/run';
10
+ import { Run } from '@/run';
11
+ import { Calculator } from '@/tools/Calculator';
12
+
13
+ const conversationHistory: BaseMessage[] = [];
14
+
15
+ /**
16
+ * Test ASYMMETRIC parallel execution:
17
+ * - agent1: NO tools (will finish quickly in step 1)
18
+ * - agent2: HAS tools (will go step 1 → step 2 → step 3)
19
+ *
20
+ * This tests whether langgraph_step can still detect parallel execution
21
+ * when agents have different tool-calling patterns.
22
+ */
23
+ async function testAsymmetricParallelTools() {
24
+ console.log(
25
+ 'Testing ASYMMETRIC Parallel Agents (one with tools, one without)...\n'
26
+ );
27
+
28
+ const { contentParts, aggregateContent } = createContentAggregator();
29
+
30
+ // Track metadata for analysis
31
+ const metadataLog: Array<{
32
+ event: string;
33
+ langgraph_step: number;
34
+ langgraph_node: string;
35
+ timestamp: number;
36
+ }> = [];
37
+ const startTime = Date.now();
38
+
39
+ // Define two agents - one WITH tools, one WITHOUT
40
+ const agents: t.AgentInputs[] = [
41
+ {
42
+ agentId: 'simple_agent',
43
+ provider: Providers.ANTHROPIC,
44
+ clientOptions: {
45
+ modelName: 'claude-haiku-4-5',
46
+ apiKey: process.env.ANTHROPIC_API_KEY,
47
+ },
48
+ // NO TOOLS - will complete in single step
49
+ instructions: `You are a simple assistant. Just answer the question directly in 1-2 sentences. Start with "🗣️ SIMPLE:". Do NOT try to use any tools.`,
50
+ },
51
+ {
52
+ agentId: 'math_agent',
53
+ provider: Providers.ANTHROPIC,
54
+ clientOptions: {
55
+ modelName: 'claude-haiku-4-5',
56
+ apiKey: process.env.ANTHROPIC_API_KEY,
57
+ },
58
+ tools: [new Calculator()],
59
+ instructions: `You are a MATH SPECIALIST. ALWAYS use the calculator tool to perform calculations, even simple ones. Start your response with "🧮 MATH:". Keep your response concise.`,
60
+ },
61
+ ];
62
+
63
+ // No edges - both run in parallel from start
64
+ const edges: t.GraphEdge[] = [];
65
+
66
+ const agentTimings: Record<string, { start?: number; end?: number }> = {};
67
+
68
+ // Helper to log metadata
69
+ const logMetadata = (
70
+ eventName: string,
71
+ metadata?: Record<string, unknown>
72
+ ) => {
73
+ if (metadata) {
74
+ const entry = {
75
+ event: eventName,
76
+ langgraph_step: metadata.langgraph_step as number,
77
+ langgraph_node: metadata.langgraph_node as string,
78
+ timestamp: Date.now() - startTime,
79
+ };
80
+ metadataLog.push(entry);
81
+ console.log(
82
+ `📊 [${entry.timestamp}ms] ${eventName}: step=${entry.langgraph_step}, node=${entry.langgraph_node}`
83
+ );
84
+ }
85
+ };
86
+
87
+ const customHandlers = {
88
+ [GraphEvents.TOOL_END]: {
89
+ handle: (
90
+ _event: string,
91
+ data: t.StreamEventData,
92
+ metadata?: Record<string, unknown>
93
+ ): void => {
94
+ console.log('\n====== TOOL_END ======');
95
+ logMetadata('TOOL_END', metadata);
96
+ },
97
+ },
98
+ [GraphEvents.TOOL_START]: {
99
+ handle: (
100
+ _event: string,
101
+ _data: t.StreamEventData,
102
+ metadata?: Record<string, unknown>
103
+ ): void => {
104
+ console.log('\n====== TOOL_START ======');
105
+ logMetadata('TOOL_START', metadata);
106
+ },
107
+ },
108
+ [GraphEvents.CHAT_MODEL_END]: {
109
+ handle: (
110
+ _event: string,
111
+ _data: t.StreamEventData,
112
+ metadata?: Record<string, unknown>
113
+ ): void => {
114
+ console.log('\n====== CHAT_MODEL_END ======');
115
+ logMetadata('CHAT_MODEL_END', metadata);
116
+ const nodeName = metadata?.langgraph_node as string;
117
+ if (nodeName) {
118
+ const elapsed = Date.now() - startTime;
119
+ agentTimings[nodeName] = agentTimings[nodeName] || {};
120
+ agentTimings[nodeName].end = elapsed;
121
+ }
122
+ },
123
+ },
124
+ [GraphEvents.CHAT_MODEL_START]: {
125
+ handle: (
126
+ _event: string,
127
+ _data: t.StreamEventData,
128
+ metadata?: Record<string, unknown>
129
+ ): void => {
130
+ console.log('\n====== CHAT_MODEL_START ======');
131
+ logMetadata('CHAT_MODEL_START', metadata);
132
+ const nodeName = metadata?.langgraph_node as string;
133
+ if (nodeName) {
134
+ const elapsed = Date.now() - startTime;
135
+ agentTimings[nodeName] = agentTimings[nodeName] || {};
136
+ if (!agentTimings[nodeName].start) {
137
+ agentTimings[nodeName].start = elapsed;
138
+ }
139
+ }
140
+ },
141
+ },
142
+ [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
143
+ [GraphEvents.ON_RUN_STEP]: {
144
+ handle: (
145
+ event: GraphEvents.ON_RUN_STEP,
146
+ data: t.StreamEventData,
147
+ metadata?: Record<string, unknown>
148
+ ): void => {
149
+ console.log('\n====== ON_RUN_STEP ======');
150
+ logMetadata('ON_RUN_STEP', metadata);
151
+ aggregateContent({ event, data: data as t.RunStep });
152
+ },
153
+ },
154
+ [GraphEvents.ON_RUN_STEP_DELTA]: {
155
+ handle: (
156
+ event: GraphEvents.ON_RUN_STEP_DELTA,
157
+ data: t.StreamEventData,
158
+ metadata?: Record<string, unknown>
159
+ ): void => {
160
+ // Don't log these to reduce noise
161
+ aggregateContent({ event, data: data as t.RunStepDeltaEvent });
162
+ },
163
+ },
164
+ [GraphEvents.ON_MESSAGE_DELTA]: {
165
+ handle: (
166
+ event: GraphEvents.ON_MESSAGE_DELTA,
167
+ data: t.StreamEventData,
168
+ metadata?: Record<string, unknown>
169
+ ): void => {
170
+ // Don't log these to reduce noise
171
+ aggregateContent({ event, data: data as t.MessageDeltaEvent });
172
+ },
173
+ },
174
+ };
175
+
176
+ const runConfig: t.RunConfig = {
177
+ runId: `asymmetric-parallel-${Date.now()}`,
178
+ graphConfig: {
179
+ type: 'multi-agent',
180
+ agents,
181
+ edges,
182
+ },
183
+ customHandlers,
184
+ returnContent: true,
185
+ };
186
+
187
+ try {
188
+ const run = await Run.create(runConfig);
189
+
190
+ // Ask a question that will trigger only the math agent to use tools
191
+ const userMessage = `What is 42 * 17?`;
192
+
193
+ conversationHistory.push(new HumanMessage(userMessage));
194
+
195
+ console.log('User message:', userMessage);
196
+ console.log('\nExpected behavior:');
197
+ console.log(' - simple_agent: Step 1 only (no tools)');
198
+ console.log(' - math_agent: Step 1 → Step 2 (tool) → Step 3 (response)');
199
+ console.log('\n');
200
+
201
+ const config = {
202
+ configurable: {
203
+ thread_id: 'asymmetric-test-1',
204
+ },
205
+ streamMode: 'values',
206
+ version: 'v2' as const,
207
+ };
208
+
209
+ const inputs = {
210
+ messages: conversationHistory,
211
+ };
212
+
213
+ await run.processStream(inputs, config);
214
+
215
+ // Analysis
216
+ console.log('\n\n========== METADATA ANALYSIS ==========');
217
+ console.log('\nAll events by step and node:');
218
+ console.table(metadataLog);
219
+
220
+ // Group by step
221
+ const stepGroups = new Map<number, Map<string, string[]>>();
222
+ for (const entry of metadataLog) {
223
+ if (!stepGroups.has(entry.langgraph_step)) {
224
+ stepGroups.set(entry.langgraph_step, new Map());
225
+ }
226
+ const nodeMap = stepGroups.get(entry.langgraph_step)!;
227
+ if (!nodeMap.has(entry.langgraph_node)) {
228
+ nodeMap.set(entry.langgraph_node, []);
229
+ }
230
+ nodeMap.get(entry.langgraph_node)!.push(entry.event);
231
+ }
232
+
233
+ console.log('\n\n========== STEP BREAKDOWN ==========');
234
+ for (const [step, nodeMap] of stepGroups) {
235
+ console.log(`\nStep ${step}:`);
236
+ for (const [node, events] of nodeMap) {
237
+ console.log(` ${node}: ${events.join(', ')}`);
238
+ }
239
+ console.log(` → ${nodeMap.size} unique node(s) at this step`);
240
+ }
241
+
242
+ console.log('\n\n========== PARALLEL DETECTION CHALLENGE ==========');
243
+ console.log('\nAt which steps can we detect parallel execution?');
244
+ for (const [step, nodeMap] of stepGroups) {
245
+ if (nodeMap.size > 1) {
246
+ console.log(
247
+ ` ✅ Step ${step}: ${nodeMap.size} agents detected - PARALLEL`
248
+ );
249
+ } else {
250
+ const [nodeName] = nodeMap.keys();
251
+ console.log(
252
+ ` ⚠️ Step ${step}: Only 1 agent (${nodeName}) - looks sequential!`
253
+ );
254
+ }
255
+ }
256
+
257
+ console.log('\n\n========== KEY INSIGHT ==========');
258
+ console.log(
259
+ 'If we only look at step 2 or 3, we miss the parallel context!'
260
+ );
261
+ console.log(
262
+ 'We need to detect parallelism EARLY (at step 1) and carry that forward.'
263
+ );
264
+
265
+ console.log('\n\nFinal content parts:');
266
+ console.dir(contentParts, { depth: null });
267
+
268
+ await sleep(2000);
269
+ } catch (error) {
270
+ console.error('Error:', error);
271
+ }
272
+ }
273
+
274
+ testAsymmetricParallelTools();
@@ -0,0 +1,240 @@
1
+ import { config } from 'dotenv';
2
+ config();
3
+
4
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
5
+ import type * as t from '@/types';
6
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
7
+ import { Providers, GraphEvents } from '@/common';
8
+ import { sleep } from '@/utils/run';
9
+ import { Run } from '@/run';
10
+ import { Calculator } from '@/tools/Calculator';
11
+
12
+ const conversationHistory: BaseMessage[] = [];
13
+
14
+ /**
15
+ * Dump ALL metadata fields to understand what LangSmith uses
16
+ * to detect parallel execution
17
+ */
18
+ async function testFullMetadata() {
19
+ console.log('Dumping FULL metadata to find parallel detection fields...\n');
20
+
21
+ const { contentParts, aggregateContent } = createContentAggregator();
22
+
23
+ // Collect ALL metadata from all events
24
+ const allMetadata: Array<{
25
+ event: string;
26
+ timestamp: number;
27
+ metadata: Record<string, unknown>;
28
+ }> = [];
29
+ const startTime = Date.now();
30
+
31
+ const agents: t.AgentInputs[] = [
32
+ {
33
+ agentId: 'agent_a',
34
+ provider: Providers.ANTHROPIC,
35
+ clientOptions: {
36
+ modelName: 'claude-haiku-4-5',
37
+ apiKey: process.env.ANTHROPIC_API_KEY,
38
+ },
39
+ instructions: `You are Agent A. Just say "Hello from A" in one sentence.`,
40
+ },
41
+ {
42
+ agentId: 'agent_b',
43
+ provider: Providers.ANTHROPIC,
44
+ clientOptions: {
45
+ modelName: 'claude-haiku-4-5',
46
+ apiKey: process.env.ANTHROPIC_API_KEY,
47
+ },
48
+ tools: [new Calculator()],
49
+ instructions: `You are Agent B. Calculate 2+2 using the calculator tool.`,
50
+ },
51
+ ];
52
+
53
+ const edges: t.GraphEdge[] = [];
54
+
55
+ const captureMetadata = (
56
+ eventName: string,
57
+ metadata?: Record<string, unknown>
58
+ ) => {
59
+ if (metadata) {
60
+ allMetadata.push({
61
+ event: eventName,
62
+ timestamp: Date.now() - startTime,
63
+ metadata: { ...metadata },
64
+ });
65
+ }
66
+ };
67
+
68
+ const customHandlers = {
69
+ [GraphEvents.TOOL_END]: {
70
+ handle: (
71
+ _event: string,
72
+ _data: t.StreamEventData,
73
+ metadata?: Record<string, unknown>
74
+ ): void => {
75
+ captureMetadata('TOOL_END', metadata);
76
+ },
77
+ },
78
+ [GraphEvents.TOOL_START]: {
79
+ handle: (
80
+ _event: string,
81
+ _data: t.StreamEventData,
82
+ metadata?: Record<string, unknown>
83
+ ): void => {
84
+ captureMetadata('TOOL_START', metadata);
85
+ },
86
+ },
87
+ [GraphEvents.CHAT_MODEL_END]: {
88
+ handle: (
89
+ _event: string,
90
+ _data: t.StreamEventData,
91
+ metadata?: Record<string, unknown>
92
+ ): void => {
93
+ captureMetadata('CHAT_MODEL_END', metadata);
94
+ },
95
+ },
96
+ [GraphEvents.CHAT_MODEL_START]: {
97
+ handle: (
98
+ _event: string,
99
+ _data: t.StreamEventData,
100
+ metadata?: Record<string, unknown>
101
+ ): void => {
102
+ captureMetadata('CHAT_MODEL_START', metadata);
103
+ },
104
+ },
105
+ [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
106
+ [GraphEvents.ON_RUN_STEP]: {
107
+ handle: (
108
+ event: GraphEvents.ON_RUN_STEP,
109
+ data: t.StreamEventData,
110
+ metadata?: Record<string, unknown>
111
+ ): void => {
112
+ captureMetadata('ON_RUN_STEP', metadata);
113
+ const runStep = data as t.RunStep;
114
+ console.log(
115
+ `\n🔍 ON_RUN_STEP: agentId=${runStep.agentId}, groupId=${runStep.groupId}`
116
+ );
117
+ aggregateContent({ event, data: runStep });
118
+ },
119
+ },
120
+ [GraphEvents.ON_RUN_STEP_DELTA]: {
121
+ handle: (
122
+ event: GraphEvents.ON_RUN_STEP_DELTA,
123
+ data: t.StreamEventData,
124
+ metadata?: Record<string, unknown>
125
+ ): void => {
126
+ captureMetadata('ON_RUN_STEP_DELTA', metadata);
127
+ aggregateContent({ event, data: data as t.RunStepDeltaEvent });
128
+ },
129
+ },
130
+ [GraphEvents.ON_MESSAGE_DELTA]: {
131
+ handle: (
132
+ event: GraphEvents.ON_MESSAGE_DELTA,
133
+ data: t.StreamEventData,
134
+ metadata?: Record<string, unknown>
135
+ ): void => {
136
+ captureMetadata('ON_MESSAGE_DELTA', metadata);
137
+ aggregateContent({ event, data: data as t.MessageDeltaEvent });
138
+ },
139
+ },
140
+ };
141
+
142
+ const runConfig: t.RunConfig = {
143
+ runId: `full-metadata-${Date.now()}`,
144
+ graphConfig: {
145
+ type: 'multi-agent',
146
+ agents,
147
+ edges,
148
+ },
149
+ customHandlers,
150
+ returnContent: true,
151
+ };
152
+
153
+ try {
154
+ const run = await Run.create(runConfig);
155
+
156
+ const userMessage = `Hi, and calculate 2+2`;
157
+ conversationHistory.push(new HumanMessage(userMessage));
158
+
159
+ const config = {
160
+ configurable: {
161
+ thread_id: 'full-metadata-test-1',
162
+ },
163
+ streamMode: 'values',
164
+ version: 'v2' as const,
165
+ };
166
+
167
+ await run.processStream({ messages: conversationHistory }, config);
168
+
169
+ // Analysis - find ALL unique metadata keys
170
+ console.log('\n\n========== ALL UNIQUE METADATA KEYS ==========\n');
171
+ const allKeys = new Set<string>();
172
+ for (const entry of allMetadata) {
173
+ for (const key of Object.keys(entry.metadata)) {
174
+ allKeys.add(key);
175
+ }
176
+ }
177
+ console.log('Keys found:', [...allKeys].sort());
178
+
179
+ // Print first CHAT_MODEL_START for each agent with FULL metadata
180
+ console.log(
181
+ '\n\n========== FULL METADATA FROM CHAT_MODEL_START ==========\n'
182
+ );
183
+ const seenAgents = new Set<string>();
184
+ for (const entry of allMetadata) {
185
+ if (entry.event === 'CHAT_MODEL_START') {
186
+ const node = entry.metadata.langgraph_node as string;
187
+ if (!seenAgents.has(node)) {
188
+ seenAgents.add(node);
189
+ console.log(`\n--- ${node} ---`);
190
+ console.dir(entry.metadata, { depth: null });
191
+ }
192
+ }
193
+ }
194
+
195
+ // Look specifically at checkpoint_ns and __pregel_task_id
196
+ console.log('\n\n========== POTENTIAL PARALLEL INDICATORS ==========\n');
197
+ console.log(
198
+ 'Comparing checkpoint_ns and __pregel_task_id across agents:\n'
199
+ );
200
+
201
+ const agentMetadataMap = new Map<string, Record<string, unknown>>();
202
+ for (const entry of allMetadata) {
203
+ if (entry.event === 'CHAT_MODEL_START') {
204
+ const node = entry.metadata.langgraph_node as string;
205
+ if (!agentMetadataMap.has(node)) {
206
+ agentMetadataMap.set(node, entry.metadata);
207
+ }
208
+ }
209
+ }
210
+
211
+ for (const [node, meta] of agentMetadataMap) {
212
+ console.log(`${node}:`);
213
+ console.log(` langgraph_step: ${meta.langgraph_step}`);
214
+ console.log(
215
+ ` langgraph_triggers: ${JSON.stringify(meta.langgraph_triggers)}`
216
+ );
217
+ console.log(` checkpoint_ns: ${meta.checkpoint_ns}`);
218
+ console.log(` __pregel_task_id: ${meta.__pregel_task_id}`);
219
+ console.log(` langgraph_path: ${JSON.stringify(meta.langgraph_path)}`);
220
+ console.log(` langgraph_checkpoint_ns: ${meta.langgraph_checkpoint_ns}`);
221
+ console.log();
222
+ }
223
+
224
+ // Check langgraph_triggers specifically
225
+ console.log('\n========== LANGGRAPH_TRIGGERS ANALYSIS ==========\n');
226
+ for (const [node, meta] of agentMetadataMap) {
227
+ const triggers = meta.langgraph_triggers as string[];
228
+ console.log(`${node}: triggers = ${JSON.stringify(triggers)}`);
229
+ }
230
+
231
+ console.log('\n\nFinal content parts:');
232
+ console.dir(contentParts, { depth: null });
233
+
234
+ await sleep(1000);
235
+ } catch (error) {
236
+ console.error('Error:', error);
237
+ }
238
+ }
239
+
240
+ testFullMetadata();