@librechat/agents 3.0.16 → 3.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/cjs/graphs/Graph.cjs +78 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/main.cjs +1 -0
  4. package/dist/cjs/main.cjs.map +1 -1
  5. package/dist/cjs/messages/format.cjs +74 -0
  6. package/dist/cjs/messages/format.cjs.map +1 -1
  7. package/dist/cjs/stream.cjs +8 -8
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/cjs/tools/handlers.cjs +5 -5
  10. package/dist/cjs/tools/handlers.cjs.map +1 -1
  11. package/dist/esm/graphs/Graph.mjs +78 -1
  12. package/dist/esm/graphs/Graph.mjs.map +1 -1
  13. package/dist/esm/main.mjs +1 -1
  14. package/dist/esm/messages/format.mjs +75 -2
  15. package/dist/esm/messages/format.mjs.map +1 -1
  16. package/dist/esm/stream.mjs +8 -8
  17. package/dist/esm/stream.mjs.map +1 -1
  18. package/dist/esm/tools/handlers.mjs +5 -5
  19. package/dist/esm/tools/handlers.mjs.map +1 -1
  20. package/dist/types/graphs/Graph.d.ts +19 -2
  21. package/dist/types/messages/format.d.ts +11 -1
  22. package/dist/types/tools/handlers.d.ts +2 -1
  23. package/dist/types/types/stream.d.ts +2 -1
  24. package/package.json +8 -8
  25. package/src/graphs/Graph.ts +95 -2
  26. package/src/messages/ensureThinkingBlock.test.ts +393 -0
  27. package/src/messages/format.ts +95 -0
  28. package/src/messages/formatAgentMessages.test.ts +60 -0
  29. package/src/scripts/test-multi-agent-list-handoff.ts +53 -3
  30. package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
  31. package/src/scripts/test-thinking-handoff.ts +147 -0
  32. package/src/specs/thinking-handoff.test.ts +620 -0
  33. package/src/stream.ts +29 -22
  34. package/src/tools/handlers.ts +36 -18
  35. package/src/types/stream.ts +2 -1
@@ -71,6 +71,66 @@ describe('formatAgentMessages', () => {
71
71
  expect((result.messages[1] as ToolMessage).tool_call_id).toBe('123');
72
72
  });
73
73
 
74
+ it('should handle malformed tool call entries with missing tool_call property', () => {
75
+ const tools = new Set(['search']);
76
+ const payload = [
77
+ {
78
+ role: 'assistant',
79
+ content: [
80
+ {
81
+ type: ContentTypes.TEXT,
82
+ [ContentTypes.TEXT]: 'Let me check that.',
83
+ tool_call_ids: ['123'],
84
+ },
85
+ {
86
+ type: ContentTypes.TOOL_CALL,
87
+ // Missing tool_call property - should not crash
88
+ },
89
+ {
90
+ type: ContentTypes.TOOL_CALL,
91
+ tool_call: {
92
+ id: '123',
93
+ name: 'search',
94
+ args: '{"query":"test"}',
95
+ output: 'Result',
96
+ },
97
+ },
98
+ ],
99
+ },
100
+ ];
101
+ // Should not throw error
102
+ const result = formatAgentMessages(payload, undefined, tools);
103
+ expect(result.messages).toBeDefined();
104
+ expect(result.messages.length).toBeGreaterThan(0);
105
+ });
106
+
107
+ it('should handle malformed tool call entries with missing name', () => {
108
+ const tools = new Set(['search']);
109
+ const payload = [
110
+ {
111
+ role: 'assistant',
112
+ content: [
113
+ {
114
+ type: ContentTypes.TEXT,
115
+ [ContentTypes.TEXT]: 'Checking...',
116
+ },
117
+ {
118
+ type: ContentTypes.TOOL_CALL,
119
+ tool_call: {
120
+ id: '456',
121
+ // Missing name property
122
+ args: '{}',
123
+ },
124
+ },
125
+ ],
126
+ },
127
+ ];
128
+ // Should not throw error
129
+ const result = formatAgentMessages(payload, undefined, tools);
130
+ expect(result.messages).toBeDefined();
131
+ expect(result.messages.length).toBeGreaterThan(0);
132
+ });
133
+
74
134
  it('should handle multiple content parts in assistant messages', () => {
75
135
  const payload = [
76
136
  {
@@ -6,7 +6,7 @@ config();
6
6
  import { HumanMessage, BaseMessage } from '@langchain/core/messages';
7
7
  import { Run } from '@/run';
8
8
  import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
9
- import { Providers, GraphEvents, Constants } from '@/common';
9
+ import { Providers, GraphEvents, Constants, StepTypes } from '@/common';
10
10
  import { ToolEndHandler, ModelEndHandler } from '@/events';
11
11
  import type * as t from '@/types';
12
12
 
@@ -230,9 +230,59 @@ async function testSupervisorListHandoff() {
230
230
  conversationHistory.push(...finalMessages);
231
231
  }
232
232
 
233
- // Show summary
233
+ // Demo: Map contentParts to agentIds
234
234
  console.log(`\n${'─'.repeat(60)}`);
235
- console.log(`Graph structure:`);
235
+ console.log('CONTENT PARTS TO AGENT MAPPING:');
236
+ console.log('─'.repeat(60));
237
+
238
+ if (run.Graph) {
239
+ // Get the mapping of contentPart index to agentId
240
+ const contentPartAgentMap = run.Graph.getContentPartAgentMap();
241
+
242
+ console.log(`\nTotal content parts: ${contentParts.length}`);
243
+ console.log(`\nContent Part → Agent Mapping:`);
244
+
245
+ contentPartAgentMap.forEach((agentId, index) => {
246
+ const contentPart = contentParts[index];
247
+ const contentType = contentPart?.type || 'unknown';
248
+ const preview =
249
+ contentType === 'text'
250
+ ? (contentPart as any).text?.slice(0, 50) || ''
251
+ : contentType === 'tool_call'
252
+ ? `Tool: ${(contentPart as any).tool_call?.name || 'unknown'}`
253
+ : contentType;
254
+
255
+ console.log(
256
+ ` [${index}] ${agentId} → ${contentType}: ${preview}${preview.length >= 50 ? '...' : ''}`
257
+ );
258
+ });
259
+
260
+ // Show agent participation summary
261
+ console.log(`\n${'─'.repeat(60)}`);
262
+ console.log('AGENT PARTICIPATION SUMMARY:');
263
+ console.log('─'.repeat(60));
264
+
265
+ const activeAgents = run.Graph.getActiveAgentIds();
266
+ console.log(`\nActive agents (${activeAgents.length}):`, activeAgents);
267
+
268
+ const stepsByAgent = run.Graph.getRunStepsByAgent();
269
+ stepsByAgent.forEach((steps, agentId) => {
270
+ const toolCallSteps = steps.filter(
271
+ (s) => s.type === StepTypes.TOOL_CALLS
272
+ ).length;
273
+ const messageSteps = steps.filter(
274
+ (s) => s.type === StepTypes.MESSAGE_CREATION
275
+ ).length;
276
+ console.log(`\n ${agentId}:`);
277
+ console.log(` - Total steps: ${steps.length}`);
278
+ console.log(` - Message steps: ${messageSteps}`);
279
+ console.log(` - Tool call steps: ${toolCallSteps}`);
280
+ });
281
+ }
282
+
283
+ // Show graph structure summary
284
+ console.log(`\n${'─'.repeat(60)}`);
285
+ console.log(`GRAPH STRUCTURE:`);
236
286
  console.log(`- Agents: 6 total (supervisor + 5 specialists)`);
237
287
  console.log(`- Edges: 1 edge with multiple destinations`);
238
288
  console.log(
@@ -0,0 +1,153 @@
1
+ #!/usr/bin/env bun
2
+
3
+ import { config } from 'dotenv';
4
+ config();
5
+
6
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
7
+ import { Run } from '@/run';
8
+ import { ChatModelStreamHandler } from '@/stream';
9
+ import { Providers, GraphEvents } from '@/common';
10
+ import { ToolEndHandler, ModelEndHandler } from '@/events';
11
+ import type * as t from '@/types';
12
+
13
+ const conversationHistory: BaseMessage[] = [];
14
+
15
+ /**
16
+ * Test edge case: switching from OpenAI supervisor (no thinking) to Bedrock specialist (with thinking enabled)
17
+ * This should not throw an error about missing thinking blocks
18
+ */
19
+ async function testBedrockThinkingHandoff() {
20
+ console.log('Testing OpenAI → Bedrock (with thinking) handoff...\n');
21
+
22
+ // Create custom handlers
23
+ const customHandlers = {
24
+ [GraphEvents.TOOL_END]: new ToolEndHandler(),
25
+ [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
26
+ [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
27
+ [GraphEvents.TOOL_START]: {
28
+ handle: (_event: string, data: t.StreamEventData): void => {
29
+ const toolData = data as any;
30
+ if (toolData?.name) {
31
+ console.log(`\nšŸ”§ Tool called: ${toolData.name}`);
32
+ }
33
+ },
34
+ },
35
+ };
36
+
37
+ // Create the graph configuration
38
+ function createGraphConfig(): t.RunConfig {
39
+ console.log(
40
+ 'Creating graph with OpenAI supervisor and Bedrock specialist with thinking enabled.\n'
41
+ );
42
+
43
+ const agents: t.AgentInputs[] = [
44
+ {
45
+ agentId: 'supervisor',
46
+ provider: Providers.OPENAI,
47
+ clientOptions: {
48
+ modelName: 'gpt-4o-mini',
49
+ apiKey: process.env.OPENAI_API_KEY,
50
+ },
51
+ instructions: `You are a task supervisor. When the user asks about code review, use transfer_to_code_reviewer to hand off to the specialist.`,
52
+ maxContextTokens: 8000,
53
+ },
54
+ {
55
+ agentId: 'code_reviewer',
56
+ provider: Providers.BEDROCK,
57
+ clientOptions: {
58
+ region: process.env.BEDROCK_AWS_REGION || 'us-east-1',
59
+ model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
60
+ credentials: {
61
+ accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
62
+ secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
63
+ },
64
+ additionalModelRequestFields: {
65
+ thinking: {
66
+ type: 'enabled',
67
+ budget_tokens: 2000,
68
+ },
69
+ },
70
+ },
71
+ instructions: `You are a code review specialist using Bedrock with extended thinking. Think carefully about the code quality, best practices, and potential issues. Provide thoughtful feedback.`,
72
+ maxContextTokens: 8000,
73
+ },
74
+ ];
75
+
76
+ const edges: t.GraphEdge[] = [
77
+ {
78
+ from: 'supervisor',
79
+ to: ['code_reviewer'],
80
+ description: 'Transfer to code review specialist',
81
+ edgeType: 'handoff',
82
+ },
83
+ ];
84
+
85
+ return {
86
+ runId: `bedrock-thinking-handoff-test-${Date.now()}`,
87
+ graphConfig: {
88
+ type: 'multi-agent',
89
+ agents,
90
+ edges,
91
+ },
92
+ customHandlers,
93
+ returnContent: true,
94
+ };
95
+ }
96
+
97
+ try {
98
+ // Test query that should trigger a handoff
99
+ const query =
100
+ 'Can you review this function and tell me if there are any issues?\n\nfunction add(a, b) { return a + b; }';
101
+
102
+ console.log(`${'='.repeat(60)}`);
103
+ console.log(`USER QUERY: "${query}"`);
104
+ console.log('='.repeat(60));
105
+
106
+ // Initialize conversation
107
+ conversationHistory.push(new HumanMessage(query));
108
+
109
+ // Create and run the graph
110
+ const runConfig = createGraphConfig();
111
+ const run = await Run.create(runConfig);
112
+
113
+ const config = {
114
+ configurable: {
115
+ thread_id: 'bedrock-thinking-handoff-test-1',
116
+ },
117
+ streamMode: 'values',
118
+ version: 'v2' as const,
119
+ };
120
+
121
+ console.log('\nProcessing request...\n');
122
+
123
+ // Process with streaming
124
+ const inputs = {
125
+ messages: conversationHistory,
126
+ };
127
+
128
+ await run.processStream(inputs, config);
129
+ const finalMessages = run.getRunMessages();
130
+
131
+ if (finalMessages) {
132
+ conversationHistory.push(...finalMessages);
133
+ }
134
+
135
+ // Success!
136
+ console.log(`\n${'='.repeat(60)}`);
137
+ console.log('āœ… TEST PASSED');
138
+ console.log('='.repeat(60));
139
+ console.log('\nSuccessfully handed off from OpenAI (no thinking) to');
140
+ console.log('Bedrock with thinking enabled without errors!');
141
+ console.log('\nThe ensureThinkingBlockInMessages() function correctly');
142
+ console.log('handled the transition by converting tool sequences to');
143
+ console.log('HumanMessages before calling the Bedrock API.');
144
+ } catch (error) {
145
+ console.error('\nāŒ TEST FAILED');
146
+ console.error('='.repeat(60));
147
+ console.error('Error:', error);
148
+ process.exit(1);
149
+ }
150
+ }
151
+
152
+ // Run the test
153
+ testBedrockThinkingHandoff();
@@ -0,0 +1,147 @@
1
+ #!/usr/bin/env bun
2
+
3
+ import { config } from 'dotenv';
4
+ config();
5
+
6
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
7
+ import type * as t from '@/types';
8
+ import { ToolEndHandler, ModelEndHandler } from '@/events';
9
+ import { ChatModelStreamHandler } from '@/stream';
10
+ import { Providers, GraphEvents } from '@/common';
11
+ import { Run } from '@/run';
12
+
13
+ const conversationHistory: BaseMessage[] = [];
14
+
15
+ /**
16
+ * Test edge case: switching from OpenAI supervisor (no thinking) to Anthropic specialist (with thinking enabled)
17
+ * This should not throw an error about missing thinking blocks
18
+ */
19
+ async function testThinkingHandoff() {
20
+ console.log('Testing OpenAI → Anthropic (with thinking) handoff...\n');
21
+
22
+ // Create custom handlers
23
+ const customHandlers = {
24
+ [GraphEvents.TOOL_END]: new ToolEndHandler(),
25
+ [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
26
+ [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
27
+ [GraphEvents.TOOL_START]: {
28
+ handle: (_event: string, data: t.StreamEventData): void => {
29
+ const toolData = data as any;
30
+ if (toolData?.name) {
31
+ console.log(`\nšŸ”§ Tool called: ${toolData.name}`);
32
+ }
33
+ },
34
+ },
35
+ };
36
+
37
+ // Create the graph configuration
38
+ function createGraphConfig(): t.RunConfig {
39
+ console.log(
40
+ 'Creating graph with OpenAI supervisor and Anthropic specialist with thinking enabled.\n'
41
+ );
42
+
43
+ const agents: t.AgentInputs[] = [
44
+ {
45
+ agentId: 'supervisor',
46
+ provider: Providers.OPENAI,
47
+ clientOptions: {
48
+ modelName: 'gpt-4o-mini',
49
+ apiKey: process.env.OPENAI_API_KEY,
50
+ },
51
+ instructions: `You are a task supervisor. When the user asks about code review, use transfer_to_code_reviewer to hand off to the specialist.`,
52
+ maxContextTokens: 8000,
53
+ },
54
+ {
55
+ agentId: 'code_reviewer',
56
+ provider: Providers.ANTHROPIC,
57
+ clientOptions: {
58
+ modelName: 'claude-3-7-sonnet-20250219',
59
+ apiKey: process.env.ANTHROPIC_API_KEY,
60
+ thinking: {
61
+ type: 'enabled',
62
+ budget_tokens: 2000,
63
+ },
64
+ },
65
+ instructions: `You are a code review specialist. Think carefully about the code quality, best practices, and potential issues. Provide thoughtful feedback.`,
66
+ maxContextTokens: 8000,
67
+ },
68
+ ];
69
+
70
+ const edges: t.GraphEdge[] = [
71
+ {
72
+ from: 'supervisor',
73
+ to: ['code_reviewer'],
74
+ description: 'Transfer to code review specialist',
75
+ edgeType: 'handoff',
76
+ },
77
+ ];
78
+
79
+ return {
80
+ runId: `thinking-handoff-test-${Date.now()}`,
81
+ graphConfig: {
82
+ type: 'multi-agent',
83
+ agents,
84
+ edges,
85
+ },
86
+ customHandlers,
87
+ returnContent: true,
88
+ };
89
+ }
90
+
91
+ try {
92
+ // Test query that should trigger a handoff
93
+ const query =
94
+ 'Can you review this function and tell me if there are any issues?\n\nfunction add(a, b) { return a + b; }';
95
+
96
+ console.log(`${'='.repeat(60)}`);
97
+ console.log(`USER QUERY: "${query}"`);
98
+ console.log('='.repeat(60));
99
+
100
+ // Initialize conversation
101
+ conversationHistory.push(new HumanMessage(query));
102
+
103
+ // Create and run the graph
104
+ const runConfig = createGraphConfig();
105
+ const run = await Run.create(runConfig);
106
+
107
+ const config = {
108
+ configurable: {
109
+ thread_id: 'thinking-handoff-test-1',
110
+ },
111
+ streamMode: 'values',
112
+ version: 'v2' as const,
113
+ };
114
+
115
+ console.log('\nProcessing request...\n');
116
+
117
+ // Process with streaming
118
+ const inputs = {
119
+ messages: conversationHistory,
120
+ };
121
+
122
+ await run.processStream(inputs, config);
123
+ const finalMessages = run.getRunMessages();
124
+
125
+ if (finalMessages) {
126
+ conversationHistory.push(...finalMessages);
127
+ }
128
+
129
+ // Success!
130
+ console.log(`\n${'='.repeat(60)}`);
131
+ console.log('āœ… TEST PASSED');
132
+ console.log('='.repeat(60));
133
+ console.log('\nSuccessfully handed off from OpenAI (no thinking) to');
134
+ console.log('Anthropic with thinking enabled without errors!');
135
+ console.log('\nThe ensureThinkingBlockInMessages() function correctly');
136
+ console.log('added a placeholder thinking block to the last assistant');
137
+ console.log('message before calling the Anthropic API.');
138
+ } catch (error) {
139
+ console.error('\nāŒ TEST FAILED');
140
+ console.error('='.repeat(60));
141
+ console.error('Error:', error);
142
+ process.exit(1);
143
+ }
144
+ }
145
+
146
+ // Run the test
147
+ testThinkingHandoff();