@librechat/agents 3.0.17 → 3.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,393 @@
+import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
+import type { ExtendedMessageContent } from '@/types';
+import { ensureThinkingBlockInMessages } from './format';
+import { Providers, ContentTypes } from '@/common';
+
+describe('ensureThinkingBlockInMessages', () => {
+  describe('messages with thinking blocks (should not be modified)', () => {
+    test('should not modify AI message that already has thinking block', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({
+          content: [
+            { type: ContentTypes.THINKING, thinking: 'Let me think...' },
+            { type: 'text', text: 'Hi there!' },
+          ],
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect((result[1].content as ExtendedMessageContent[])[0].type).toBe(
+        ContentTypes.THINKING
+      );
+    });
+
+    test('should not modify AI message that has redacted_thinking block', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({
+          content: [
+            { type: 'redacted_thinking', data: 'redacted' },
+            { type: 'text', text: 'Hi there!' },
+          ],
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect((result[1].content as ExtendedMessageContent[])[0].type).toBe(
+        'redacted_thinking'
+      );
+    });
+  });
+
+  describe('messages with tool_calls (should be converted)', () => {
+    test('should convert AI message with tool_calls to HumanMessage', () => {
+      const messages = [
+        new HumanMessage({ content: 'What is the weather?' }),
+        new AIMessage({
+          content: 'Let me check the weather.',
+          tool_calls: [
+            {
+              id: 'call_123',
+              name: 'get_weather',
+              args: { location: 'NYC' },
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Sunny, 75°F',
+          tool_call_id: 'call_123',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Should have 2 messages: HumanMessage + converted HumanMessage
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[0].content).toBe('What is the weather?');
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+
+      // Check that the converted message includes the context prefix
+      expect(result[1].content).toContain('[Previous agent context]');
+      expect(result[1].content).toContain('Let me check the weather');
+      expect(result[1].content).toContain('Sunny, 75°F');
+    });
+
+    test('should convert AI message with tool_use in content to HumanMessage', () => {
+      const messages = [
+        new HumanMessage({ content: 'Search for something' }),
+        new AIMessage({
+          content: [
+            { type: 'text', text: 'Searching...' },
+            {
+              type: 'tool_use',
+              id: 'call_456',
+              name: 'search',
+              input: { query: 'test' },
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Found results',
+          tool_call_id: 'call_456',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+      expect(result[1].content).toContain('[Previous agent context]');
+      expect(result[1].content).toContain('Searching...');
+      expect(result[1].content).toContain('Found results');
+    });
+
+    test('should handle multiple tool messages in sequence', () => {
+      const messages = [
+        new HumanMessage({ content: 'Do multiple things' }),
+        new AIMessage({
+          content: 'I will perform multiple actions.',
+          tool_calls: [
+            {
+              id: 'call_1',
+              name: 'action1',
+              args: { param: 'a' },
+              type: 'tool_call',
+            },
+            {
+              id: 'call_2',
+              name: 'action2',
+              args: { param: 'b' },
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result 1',
+          tool_call_id: 'call_1',
+        }),
+        new ToolMessage({
+          content: 'Result 2',
+          tool_call_id: 'call_2',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Should combine all tool messages into one HumanMessage
+      expect(result).toHaveLength(2);
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+      expect(result[1].content).toContain('Result 1');
+      expect(result[1].content).toContain('Result 2');
+    });
+  });
+
+  describe('messages without tool calls (should pass through)', () => {
+    test('should not modify AI message without tool calls', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({ content: 'Hi there, how can I help?' }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[0].content).toBe('Hello');
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect(result[1].content).toBe('Hi there, how can I help?');
+    });
+
+    test('should preserve HumanMessages and other message types', () => {
+      const messages = [
+        new HumanMessage({ content: 'Question 1' }),
+        new AIMessage({ content: 'Answer 1' }),
+        new HumanMessage({ content: 'Question 2' }),
+        new AIMessage({ content: 'Answer 2' }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(4);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect(result[2]).toBeInstanceOf(HumanMessage);
+      expect(result[3]).toBeInstanceOf(AIMessage);
+    });
+  });
+
+  describe('mixed scenarios', () => {
+    test('should handle mix of normal and tool-using messages', () => {
+      const messages = [
+        new HumanMessage({ content: 'First question' }),
+        new AIMessage({ content: 'First answer without tools' }),
+        new HumanMessage({ content: 'Second question' }),
+        new AIMessage({
+          content: 'Using a tool',
+          tool_calls: [
+            {
+              id: 'call_abc',
+              name: 'some_tool',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Tool result',
+          tool_call_id: 'call_abc',
+        }),
+        new HumanMessage({ content: 'Third question' }),
+        new AIMessage({ content: 'Third answer without tools' }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Original message 1: HumanMessage (preserved)
+      // Original message 2: AIMessage without tools (preserved)
+      // Original message 3: HumanMessage (preserved)
+      // Original messages 4-5: AIMessage with tool + ToolMessage (converted to 1 HumanMessage)
+      // Original message 6: HumanMessage (preserved)
+      // Original message 7: AIMessage without tools (preserved)
+      expect(result).toHaveLength(6);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect(result[2]).toBeInstanceOf(HumanMessage);
+      expect(result[3]).toBeInstanceOf(HumanMessage); // Converted
+      expect(result[4]).toBeInstanceOf(HumanMessage);
+      expect(result[5]).toBeInstanceOf(AIMessage);
+    });
+
+    test('should handle multiple tool-using sequences', () => {
+      const messages = [
+        new HumanMessage({ content: 'Do task 1' }),
+        new AIMessage({
+          content: 'Doing task 1',
+          tool_calls: [
+            {
+              id: 'call_1',
+              name: 'tool1',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result 1',
+          tool_call_id: 'call_1',
+        }),
+        new HumanMessage({ content: 'Do task 2' }),
+        new AIMessage({
+          content: 'Doing task 2',
+          tool_calls: [
+            {
+              id: 'call_2',
+              name: 'tool2',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result 2',
+          tool_call_id: 'call_2',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Each tool sequence should be converted to a HumanMessage
+      expect(result).toHaveLength(4);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[0].content).toBe('Do task 1');
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+      expect(result[1].content).toContain('Doing task 1');
+      expect(result[2]).toBeInstanceOf(HumanMessage);
+      expect(result[2].content).toBe('Do task 2');
+      expect(result[3]).toBeInstanceOf(HumanMessage);
+      expect(result[3].content).toContain('Doing task 2');
+    });
+  });
+
+  describe('edge cases', () => {
+    test('should handle empty messages array', () => {
+      const messages: never[] = [];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(0);
+    });
+
+    test('should handle AI message with empty content array', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({ content: [] }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+    });
+
+    test('should work with different providers', () => {
+      const messages = [
+        new AIMessage({
+          content: 'Using tool',
+          tool_calls: [
+            {
+              id: 'call_x',
+              name: 'test',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result',
+          tool_call_id: 'call_x',
+        }),
+      ];
+
+      // Test with Anthropic
+      const resultAnthropic = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+      expect(resultAnthropic).toHaveLength(1);
+      expect(resultAnthropic[0]).toBeInstanceOf(HumanMessage);
+
+      // Test with Bedrock
+      const resultBedrock = ensureThinkingBlockInMessages(
+        messages,
+        Providers.BEDROCK
+      );
+      expect(resultBedrock).toHaveLength(1);
+      expect(resultBedrock[0]).toBeInstanceOf(HumanMessage);
+    });
+
+    test('should handle tool message without preceding AI message', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new ToolMessage({
+          content: 'Unexpected tool result',
+          tool_call_id: 'call_orphan',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Should preserve both messages as-is since tool message has no preceding AI message with tools
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(ToolMessage);
+    });
+  });
+});
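
For orientation: the `toContain` assertions above only pin down fragments of the converted message. `getBufferString` from `@langchain/core/messages` (used by the implementation in the next hunk) renders each message as a `Role: content` line, so the converted HumanMessage in the weather test would plausibly read as below — a sketch assuming that rendering, not output captured from this diff:

  [Previous agent context]
  AI: Let me check the weather.
  Tool: Sunny, 75°F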
@@ -1,6 +1,7 @@
 /* eslint-disable @typescript-eslint/no-explicit-any */
 import {
   AIMessage,
+  AIMessageChunk,
   ToolMessage,
   BaseMessage,
   HumanMessage,
@@ -10,6 +11,7 @@ import {
 import type { MessageContentImageUrl } from '@langchain/core/messages';
 import type { ToolCall } from '@langchain/core/messages/tool';
 import type {
+  ExtendedMessageContent,
   MessageContentComplex,
   ToolCallPart,
   TPayload,
@@ -609,3 +611,82 @@ export function shiftIndexTokenCountMap(
 
   return shiftedMap;
 }
+
+/**
+ * Ensures compatibility when switching from a non-thinking agent to a thinking-enabled agent.
+ * Converts AI messages with tool calls (that lack thinking blocks) into buffer strings,
+ * avoiding the thinking block signature requirement.
+ *
+ * @param messages - Array of messages to process
+ * @param provider - The provider being used (unused but kept for future compatibility)
+ * @returns The messages array with tool sequences converted to buffer strings if necessary
+ */
+export function ensureThinkingBlockInMessages(
+  messages: BaseMessage[],
+  _provider: Providers
+): BaseMessage[] {
+  const result: BaseMessage[] = [];
+  let i = 0;
+
+  while (i < messages.length) {
+    const msg = messages[i];
+    const isAI = msg instanceof AIMessage || msg instanceof AIMessageChunk;
+
+    if (!isAI) {
+      result.push(msg);
+      i++;
+      continue;
+    }
+
+    const aiMsg = msg as AIMessage | AIMessageChunk;
+    const hasToolCalls = aiMsg.tool_calls && aiMsg.tool_calls.length > 0;
+    const contentIsArray = Array.isArray(aiMsg.content);
+
+    // Check if the message has tool calls or tool_use content
+    let hasToolUse = hasToolCalls ?? false;
+    let firstContentType: string | undefined;
+
+    if (contentIsArray && aiMsg.content.length > 0) {
+      const content = aiMsg.content as ExtendedMessageContent[];
+      firstContentType = content[0]?.type;
+      hasToolUse =
+        hasToolUse ||
+        content.some((c) => typeof c === 'object' && c.type === 'tool_use');
+    }
+
+    // If message has tool use but no thinking block, convert to buffer string
+    if (
+      hasToolUse &&
+      firstContentType !== ContentTypes.THINKING &&
+      firstContentType !== 'redacted_thinking'
+    ) {
+      // Collect the AI message and any following tool messages
+      const toolSequence: BaseMessage[] = [msg];
+      let j = i + 1;
+
+      // Look ahead for tool messages that belong to this AI message
+      while (j < messages.length && messages[j] instanceof ToolMessage) {
+        toolSequence.push(messages[j]);
+        j++;
+      }
+
+      // Convert the sequence to a buffer string and wrap in a HumanMessage
+      // This avoids the thinking block requirement which only applies to AI messages
+      const bufferString = getBufferString(toolSequence);
+      result.push(
+        new HumanMessage({
+          content: `[Previous agent context]\n${bufferString}`,
+        })
+      );
+
+      // Skip the messages we've processed
+      i = j;
+    } else {
+      // Keep the message as is
+      result.push(msg);
+      i++;
+    }
+  }
+
+  return result;
+}
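
A minimal usage sketch, assuming a hypothetical call site (this diff only adds the helper; the names and import paths below mirror the test file above rather than a documented public entry point):

  // Sanitize prior turns before running a thinking-enabled Anthropic agent,
  // so tool-call turns produced by a non-thinking agent don't violate the
  // thinking-block signature requirement.
  import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
  import { ensureThinkingBlockInMessages } from './format';
  import { Providers } from '@/common';

  const history = [
    new HumanMessage({ content: 'What is the weather?' }),
    new AIMessage({
      content: 'Let me check the weather.',
      tool_calls: [
        { id: 'call_123', name: 'get_weather', args: { location: 'NYC' }, type: 'tool_call' },
      ],
    }),
    new ToolMessage({ content: 'Sunny, 75°F', tool_call_id: 'call_123' }),
  ];

  // The tool-call turn lacks a leading thinking block, so it and its tool
  // results are collapsed into one '[Previous agent context]' HumanMessage;
  // everything else passes through untouched.
  const payload = ensureThinkingBlockInMessages(history, Providers.ANTHROPIC);
  // payload.length === 2: the original question plus the converted context message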
@@ -6,7 +6,7 @@ config();
 import { HumanMessage, BaseMessage } from '@langchain/core/messages';
 import { Run } from '@/run';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { Providers, GraphEvents, Constants } from '@/common';
+import { Providers, GraphEvents, Constants, StepTypes } from '@/common';
 import { ToolEndHandler, ModelEndHandler } from '@/events';
 import type * as t from '@/types';
 
@@ -230,9 +230,59 @@ async function testSupervisorListHandoff() {
     conversationHistory.push(...finalMessages);
   }
 
-  // Show summary
+  // Demo: Map contentParts to agentIds
   console.log(`\n${'─'.repeat(60)}`);
-  console.log(`Graph structure:`);
+  console.log('CONTENT PARTS TO AGENT MAPPING:');
+  console.log('─'.repeat(60));
+
+  if (run.Graph) {
+    // Get the mapping of contentPart index to agentId
+    const contentPartAgentMap = run.Graph.getContentPartAgentMap();
+
+    console.log(`\nTotal content parts: ${contentParts.length}`);
+    console.log(`\nContent Part → Agent Mapping:`);
+
+    contentPartAgentMap.forEach((agentId, index) => {
+      const contentPart = contentParts[index];
+      const contentType = contentPart?.type || 'unknown';
+      const preview =
+        contentType === 'text'
+          ? (contentPart as any).text?.slice(0, 50) || ''
+          : contentType === 'tool_call'
+            ? `Tool: ${(contentPart as any).tool_call?.name || 'unknown'}`
+            : contentType;
+
+      console.log(
+        `  [${index}] ${agentId} → ${contentType}: ${preview}${preview.length >= 50 ? '...' : ''}`
+      );
+    });
+
+    // Show agent participation summary
+    console.log(`\n${'─'.repeat(60)}`);
+    console.log('AGENT PARTICIPATION SUMMARY:');
+    console.log('─'.repeat(60));
+
+    const activeAgents = run.Graph.getActiveAgentIds();
+    console.log(`\nActive agents (${activeAgents.length}):`, activeAgents);
+
+    const stepsByAgent = run.Graph.getRunStepsByAgent();
+    stepsByAgent.forEach((steps, agentId) => {
+      const toolCallSteps = steps.filter(
+        (s) => s.type === StepTypes.TOOL_CALLS
+      ).length;
+      const messageSteps = steps.filter(
+        (s) => s.type === StepTypes.MESSAGE_CREATION
+      ).length;
+      console.log(`\n  ${agentId}:`);
+      console.log(`    - Total steps: ${steps.length}`);
+      console.log(`    - Message steps: ${messageSteps}`);
+      console.log(`    - Tool call steps: ${toolCallSteps}`);
+    });
+  }
+
+  // Show graph structure summary
+  console.log(`\n${'─'.repeat(60)}`);
+  console.log(`GRAPH STRUCTURE:`);
   console.log(`- Agents: 6 total (supervisor + 5 specialists)`);
   console.log(`- Edges: 1 edge with multiple destinations`);
   console.log(