@librechat/agents 3.0.17 → 3.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/graphs/Graph.cjs +80 -1
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/main.cjs +2 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/messages/format.cjs +242 -6
- package/dist/cjs/messages/format.cjs.map +1 -1
- package/dist/cjs/stream.cjs +3 -2
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +5 -5
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +80 -1
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/main.mjs +1 -1
- package/dist/esm/messages/format.mjs +242 -8
- package/dist/esm/messages/format.mjs.map +1 -1
- package/dist/esm/stream.mjs +3 -2
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +5 -5
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/types/graphs/Graph.d.ts +19 -2
- package/dist/types/messages/format.d.ts +25 -1
- package/dist/types/tools/handlers.d.ts +2 -1
- package/dist/types/types/stream.d.ts +1 -0
- package/package.json +9 -8
- package/src/graphs/Graph.ts +99 -2
- package/src/messages/ensureThinkingBlock.test.ts +393 -0
- package/src/messages/format.ts +312 -6
- package/src/messages/labelContentByAgent.test.ts +887 -0
- package/src/scripts/test-multi-agent-list-handoff.ts +169 -13
- package/src/scripts/test-parallel-agent-labeling.ts +325 -0
- package/src/scripts/test-thinking-handoff-bedrock.ts +153 -0
- package/src/scripts/test-thinking-handoff.ts +147 -0
- package/src/specs/thinking-handoff.test.ts +620 -0
- package/src/stream.ts +19 -10
- package/src/tools/handlers.ts +36 -18
- package/src/types/stream.ts +1 -0
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/agents",
-  "version": "3.0.17",
+  "version": "3.0.19",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -64,6 +64,9 @@
     "multi-agent-conditional": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/multi-agent-conditional.ts",
     "multi-agent-supervisor": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/multi-agent-supervisor.ts",
     "multi-agent-list-handoff": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/test-multi-agent-list-handoff.ts",
+    "test-parallel-agent-labeling": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/test-parallel-agent-labeling.ts",
+    "test-thinking-handoff": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/test-thinking-handoff.ts",
+    "test-thinking-handoff-bedrock": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/test-thinking-handoff-bedrock.ts",
     "multi-agent-hybrid-flow": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/multi-agent-hybrid-flow.ts",
     "test-handoff-input": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/test-handoff-input.ts",
     "test-custom-prompt-key": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/test-custom-prompt-key.ts",
@@ -128,26 +131,24 @@
     "@rollup/plugin-commonjs": "^28.0.3",
     "@rollup/plugin-json": "^6.1.0",
     "@rollup/plugin-node-resolve": "^15.2.3",
-    "@rollup/plugin-terser": "^0.4.4",
     "@rollup/plugin-typescript": "^12.1.2",
     "@swc/core": "^1.6.13",
-    "@types/jest": "^
+    "@types/jest": "^30.0.0",
     "@types/node": "^20.14.11",
     "@types/node-fetch": "^2.6.13",
     "@types/yargs-parser": "^21.0.3",
     "@typescript-eslint/eslint-plugin": "^8.24.0",
     "@typescript-eslint/parser": "^8.24.0",
-    "eslint": "^9.
+    "eslint": "^9.39.1",
     "eslint-import-resolver-typescript": "^3.7.0",
     "eslint-plugin-import": "^2.31.0",
     "husky": "^9.1.7",
-    "jest": "^
+    "jest": "^30.2.0",
     "lint-staged": "^15.2.7",
-    "prettier": "^3.
+    "prettier": "^3.6.2",
     "rollup": "^4.34.6",
     "rollup-plugin-cleandir": "^2.0.0",
-    "
-    "ts-jest": "^29.3.1",
+    "ts-jest": "^29.4.5",
     "ts-node": "^10.9.2",
     "tsc-alias": "^1.8.10",
     "tsconfig-paths": "^4.2.0",
package/src/graphs/Graph.ts
CHANGED
@@ -38,6 +38,7 @@ import {
 } from '@/common';
 import {
   formatAnthropicArtifactContent,
+  ensureThinkingBlockInMessages,
   convertMessagesToContent,
   addBedrockCacheControl,
   modifyDeltaProperties,
@@ -96,7 +97,8 @@ export abstract class Graph<
   abstract getRunStep(stepId: string): t.RunStep | undefined;
   abstract dispatchRunStep(
     stepKey: string,
-    stepDetails: t.StepDetails
+    stepDetails: t.StepDetails,
+    metadata?: Record<string, unknown>
   ): Promise<string>;
   abstract dispatchRunStepDelta(
     id: string,
@@ -327,6 +329,66 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
     return convertMessagesToContent(this.messages.slice(this.startIndex));
   }
 
+  /**
+   * Get all run steps, optionally filtered by agent ID
+   */
+  getRunSteps(agentId?: string): t.RunStep[] {
+    if (agentId == null || agentId === '') {
+      return [...this.contentData];
+    }
+    return this.contentData.filter((step) => step.agentId === agentId);
+  }
+
+  /**
+   * Get run steps grouped by agent ID
+   */
+  getRunStepsByAgent(): Map<string, t.RunStep[]> {
+    const stepsByAgent = new Map<string, t.RunStep[]>();
+
+    for (const step of this.contentData) {
+      if (step.agentId == null || step.agentId === '') continue;
+
+      const steps = stepsByAgent.get(step.agentId) ?? [];
+      steps.push(step);
+      stepsByAgent.set(step.agentId, steps);
+    }
+
+    return stepsByAgent;
+  }
+
+  /**
+   * Get agent IDs that participated in this run
+   */
+  getActiveAgentIds(): string[] {
+    const agentIds = new Set<string>();
+    for (const step of this.contentData) {
+      if (step.agentId != null && step.agentId !== '') {
+        agentIds.add(step.agentId);
+      }
+    }
+    return Array.from(agentIds);
+  }
+
+  /**
+   * Maps contentPart indices to agent IDs for post-run analysis
+   * Returns a map where key is the contentPart index and value is the agentId
+   */
+  getContentPartAgentMap(): Map<number, string> {
+    const contentPartAgentMap = new Map<number, string>();
+
+    for (const step of this.contentData) {
+      if (
+        step.agentId != null &&
+        step.agentId !== '' &&
+        Number.isFinite(step.index)
+      ) {
+        contentPartAgentMap.set(step.index, step.agentId);
+      }
+    }
+
+    return contentPartAgentMap;
+  }
+
   /* Graph */
 
   createSystemRunnable({
@@ -672,6 +734,26 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
       }
     }
 
+    /**
+     * Handle edge case: when switching from a non-thinking agent to a thinking-enabled agent,
+     * convert AI messages with tool calls to HumanMessages to avoid thinking block requirements.
+     * This is required by Anthropic/Bedrock when thinking is enabled.
+     */
+    const isAnthropicWithThinking =
+      (agentContext.provider === Providers.ANTHROPIC &&
+        (agentContext.clientOptions as t.AnthropicClientOptions).thinking !=
+          null) ||
+      (agentContext.provider === Providers.BEDROCK &&
+        (agentContext.clientOptions as t.BedrockAnthropicInput)
+          .additionalModelRequestFields?.['thinking'] != null);
+
+    if (isAnthropicWithThinking) {
+      finalMessages = ensureThinkingBlockInMessages(
+        finalMessages,
+        agentContext.provider
+      );
+    }
+
     if (
       agentContext.lastStreamCall != null &&
       agentContext.streamBuffer != null
@@ -837,7 +919,8 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
    */
   async dispatchRunStep(
     stepKey: string,
-    stepDetails: t.StepDetails
+    stepDetails: t.StepDetails,
+    metadata?: Record<string, unknown>
   ): Promise<string> {
     if (!this.config) {
       throw new Error('No config provided');
@@ -868,6 +951,20 @@
       runStep.runId = runId;
     }
 
+    /**
+     * Extract and store agentId from metadata
+     */
+    if (metadata) {
+      try {
+        const agentContext = this.getAgentContext(metadata);
+        if (agentContext.agentId) {
+          runStep.agentId = agentContext.agentId;
+        }
+      } catch (_e) {
+        /** If we can't get agent context, that's okay - agentId remains undefined */
+      }
+    }
+
     this.contentData.push(runStep);
     this.contentIndexMap.set(stepId, runStep.index);
     await safeDispatchCustomEvent(
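
The run-step helpers added above are plain read-only accessors over StandardGraph's contentData, so agent attribution can be inspected after a run without reaching into graph internals. The sketch below is illustrative only and is not part of this diff; it assumes a StandardGraph instance that has already completed a multi-agent run, and the summarizeAgentAttribution name and the package-root import of the StandardGraph type are assumptions. Only the method names and return types shown in the diff above are used.

import type { StandardGraph } from '@librechat/agents'; // assumed export path, not confirmed by this diff

/**
 * Illustrative sketch (not from this package): report which agent produced
 * which run steps and content parts after a multi-agent run has finished.
 */
function summarizeAgentAttribution(graph: StandardGraph): void {
  // Agent IDs that contributed at least one run step in this run
  const agentIds = graph.getActiveAgentIds();

  // Run steps grouped per agent (steps without an agentId are skipped)
  const stepsByAgent = graph.getRunStepsByAgent();
  for (const agentId of agentIds) {
    console.log(`${agentId}: ${stepsByAgent.get(agentId)?.length ?? 0} run step(s)`);
  }

  // contentPart index -> agentId, e.g. for labeling the final content parts
  for (const [index, agentId] of graph.getContentPartAgentMap()) {
    console.log(`content part ${index} was produced by ${agentId}`);
  }

  // Steps for one specific agent; getRunSteps() with no argument returns all steps
  const [firstAgentId] = agentIds;
  if (firstAgentId != null) {
    console.log(`${firstAgentId} produced ${graph.getRunSteps(firstAgentId).length} step(s)`);
  }
}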
package/src/messages/ensureThinkingBlock.test.ts
ADDED
@@ -0,0 +1,393 @@
+import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
+import type { ExtendedMessageContent } from '@/types';
+import { ensureThinkingBlockInMessages } from './format';
+import { Providers, ContentTypes } from '@/common';
+
+describe('ensureThinkingBlockInMessages', () => {
+  describe('messages with thinking blocks (should not be modified)', () => {
+    test('should not modify AI message that already has thinking block', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({
+          content: [
+            { type: ContentTypes.THINKING, thinking: 'Let me think...' },
+            { type: 'text', text: 'Hi there!' },
+          ],
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect((result[1].content as ExtendedMessageContent[])[0].type).toBe(
+        ContentTypes.THINKING
+      );
+    });
+
+    test('should not modify AI message that has redacted_thinking block', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({
+          content: [
+            { type: 'redacted_thinking', data: 'redacted' },
+            { type: 'text', text: 'Hi there!' },
+          ],
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect((result[1].content as ExtendedMessageContent[])[0].type).toBe(
+        'redacted_thinking'
+      );
+    });
+  });
+
+  describe('messages with tool_calls (should be converted)', () => {
+    test('should convert AI message with tool_calls to HumanMessage', () => {
+      const messages = [
+        new HumanMessage({ content: 'What is the weather?' }),
+        new AIMessage({
+          content: 'Let me check the weather.',
+          tool_calls: [
+            {
+              id: 'call_123',
+              name: 'get_weather',
+              args: { location: 'NYC' },
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Sunny, 75°F',
+          tool_call_id: 'call_123',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Should have 2 messages: HumanMessage + converted HumanMessage
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[0].content).toBe('What is the weather?');
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+
+      // Check that the converted message includes the context prefix
+      expect(result[1].content).toContain('[Previous agent context]');
+      expect(result[1].content).toContain('Let me check the weather');
+      expect(result[1].content).toContain('Sunny, 75°F');
+    });
+
+    test('should convert AI message with tool_use in content to HumanMessage', () => {
+      const messages = [
+        new HumanMessage({ content: 'Search for something' }),
+        new AIMessage({
+          content: [
+            { type: 'text', text: 'Searching...' },
+            {
+              type: 'tool_use',
+              id: 'call_456',
+              name: 'search',
+              input: { query: 'test' },
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Found results',
+          tool_call_id: 'call_456',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+      expect(result[1].content).toContain('[Previous agent context]');
+      expect(result[1].content).toContain('Searching...');
+      expect(result[1].content).toContain('Found results');
+    });
+
+    test('should handle multiple tool messages in sequence', () => {
+      const messages = [
+        new HumanMessage({ content: 'Do multiple things' }),
+        new AIMessage({
+          content: 'I will perform multiple actions.',
+          tool_calls: [
+            {
+              id: 'call_1',
+              name: 'action1',
+              args: { param: 'a' },
+              type: 'tool_call',
+            },
+            {
+              id: 'call_2',
+              name: 'action2',
+              args: { param: 'b' },
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result 1',
+          tool_call_id: 'call_1',
+        }),
+        new ToolMessage({
+          content: 'Result 2',
+          tool_call_id: 'call_2',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Should combine all tool messages into one HumanMessage
+      expect(result).toHaveLength(2);
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+      expect(result[1].content).toContain('Result 1');
+      expect(result[1].content).toContain('Result 2');
+    });
+  });
+
+  describe('messages without tool calls (should pass through)', () => {
+    test('should not modify AI message without tool calls', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({ content: 'Hi there, how can I help?' }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[0].content).toBe('Hello');
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect(result[1].content).toBe('Hi there, how can I help?');
+    });
+
+    test('should preserve HumanMessages and other message types', () => {
+      const messages = [
+        new HumanMessage({ content: 'Question 1' }),
+        new AIMessage({ content: 'Answer 1' }),
+        new HumanMessage({ content: 'Question 2' }),
+        new AIMessage({ content: 'Answer 2' }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(4);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect(result[2]).toBeInstanceOf(HumanMessage);
+      expect(result[3]).toBeInstanceOf(AIMessage);
+    });
+  });
+
+  describe('mixed scenarios', () => {
+    test('should handle mix of normal and tool-using messages', () => {
+      const messages = [
+        new HumanMessage({ content: 'First question' }),
+        new AIMessage({ content: 'First answer without tools' }),
+        new HumanMessage({ content: 'Second question' }),
+        new AIMessage({
+          content: 'Using a tool',
+          tool_calls: [
+            {
+              id: 'call_abc',
+              name: 'some_tool',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Tool result',
+          tool_call_id: 'call_abc',
+        }),
+        new HumanMessage({ content: 'Third question' }),
+        new AIMessage({ content: 'Third answer without tools' }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Original message 1: HumanMessage (preserved)
+      // Original message 2: AIMessage without tools (preserved)
+      // Original message 3: HumanMessage (preserved)
+      // Original messages 4-5: AIMessage with tool + ToolMessage (converted to 1 HumanMessage)
+      // Original message 6: HumanMessage (preserved)
+      // Original message 7: AIMessage without tools (preserved)
+      expect(result).toHaveLength(6);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+      expect(result[2]).toBeInstanceOf(HumanMessage);
+      expect(result[3]).toBeInstanceOf(HumanMessage); // Converted
+      expect(result[4]).toBeInstanceOf(HumanMessage);
+      expect(result[5]).toBeInstanceOf(AIMessage);
+    });
+
+    test('should handle multiple tool-using sequences', () => {
+      const messages = [
+        new HumanMessage({ content: 'Do task 1' }),
+        new AIMessage({
+          content: 'Doing task 1',
+          tool_calls: [
+            {
+              id: 'call_1',
+              name: 'tool1',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result 1',
+          tool_call_id: 'call_1',
+        }),
+        new HumanMessage({ content: 'Do task 2' }),
+        new AIMessage({
+          content: 'Doing task 2',
+          tool_calls: [
+            {
+              id: 'call_2',
+              name: 'tool2',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result 2',
+          tool_call_id: 'call_2',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Each tool sequence should be converted to a HumanMessage
+      expect(result).toHaveLength(4);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[0].content).toBe('Do task 1');
+      expect(result[1]).toBeInstanceOf(HumanMessage);
+      expect(result[1].content).toContain('Doing task 1');
+      expect(result[2]).toBeInstanceOf(HumanMessage);
+      expect(result[2].content).toBe('Do task 2');
+      expect(result[3]).toBeInstanceOf(HumanMessage);
+      expect(result[3].content).toContain('Doing task 2');
+    });
+  });
+
+  describe('edge cases', () => {
+    test('should handle empty messages array', () => {
+      const messages: never[] = [];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(0);
+    });
+
+    test('should handle AI message with empty content array', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new AIMessage({ content: [] }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      expect(result).toHaveLength(2);
+      expect(result[1]).toBeInstanceOf(AIMessage);
+    });
+
+    test('should work with different providers', () => {
+      const messages = [
+        new AIMessage({
+          content: 'Using tool',
+          tool_calls: [
+            {
+              id: 'call_x',
+              name: 'test',
+              args: {},
+              type: 'tool_call',
+            },
+          ],
+        }),
+        new ToolMessage({
+          content: 'Result',
+          tool_call_id: 'call_x',
+        }),
+      ];
+
+      // Test with Anthropic
+      const resultAnthropic = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+      expect(resultAnthropic).toHaveLength(1);
+      expect(resultAnthropic[0]).toBeInstanceOf(HumanMessage);
+
+      // Test with Bedrock
+      const resultBedrock = ensureThinkingBlockInMessages(
+        messages,
+        Providers.BEDROCK
+      );
+      expect(resultBedrock).toHaveLength(1);
+      expect(resultBedrock[0]).toBeInstanceOf(HumanMessage);
+    });
+
+    test('should handle tool message without preceding AI message', () => {
+      const messages = [
+        new HumanMessage({ content: 'Hello' }),
+        new ToolMessage({
+          content: 'Unexpected tool result',
+          tool_call_id: 'call_orphan',
+        }),
+      ];
+
+      const result = ensureThinkingBlockInMessages(
+        messages,
+        Providers.ANTHROPIC
+      );
+
+      // Should preserve both messages as-is since tool message has no preceding AI message with tools
+      expect(result).toHaveLength(2);
+      expect(result[0]).toBeInstanceOf(HumanMessage);
+      expect(result[1]).toBeInstanceOf(ToolMessage);
+    });
+  });
+});
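
Read together with the Graph.ts change above, these tests pin down the handoff behavior: when the receiving agent is Anthropic or Bedrock with thinking configured, a prior AI tool-call turn plus its ToolMessage results is collapsed into a single HumanMessage prefixed with "[Previous agent context]", while plain AI replies and messages that already carry thinking/redacted_thinking blocks pass through untouched. The sketch below is an illustrative reconstruction of that call pattern, not code from the package; the prepareHandoffMessages helper, its loosely typed clientOptions parameter, the use of Providers as a type, and the exact thinking option value are assumptions, and the imports use the repo-internal paths shown in the test file.

import { AIMessage, HumanMessage, ToolMessage } from '@langchain/core/messages';
import type { BaseMessage } from '@langchain/core/messages';
import { ensureThinkingBlockInMessages } from './format'; // repo-internal path, as in the test above
import { Providers } from '@/common'; // assumed to be a TS enum, as its members are used as values above

/**
 * Illustrative sketch: mirror the gate added to StandardGraph — only rewrite
 * the history when the target provider is Anthropic/Bedrock with thinking on.
 * `clientOptions` is a loosely typed stand-in for the real client option types.
 */
function prepareHandoffMessages(
  messages: BaseMessage[],
  provider: Providers,
  clientOptions: Record<string, unknown>
): BaseMessage[] {
  const thinkingEnabled =
    (provider === Providers.ANTHROPIC && clientOptions.thinking != null) ||
    (provider === Providers.BEDROCK &&
      (clientOptions.additionalModelRequestFields as
        | Record<string, unknown>
        | undefined)?.['thinking'] != null);
  return thinkingEnabled
    ? ensureThinkingBlockInMessages(messages, provider)
    : messages;
}

// Per the tests above, the tool-call turn and its result collapse into one
// HumanMessage containing "[Previous agent context]", "Let me check the
// weather." and "Sunny, 75°F"; the leading HumanMessage is left untouched.
const handoff = prepareHandoffMessages(
  [
    new HumanMessage({ content: 'What is the weather?' }),
    new AIMessage({
      content: 'Let me check the weather.',
      tool_calls: [
        { id: 'call_123', name: 'get_weather', args: { location: 'NYC' }, type: 'tool_call' },
      ],
    }),
    new ToolMessage({ content: 'Sunny, 75°F', tool_call_id: 'call_123' }),
  ],
  Providers.ANTHROPIC,
  { thinking: { type: 'enabled', budget_tokens: 1024 } } // example config; any non-null `thinking` passes the gate
);
console.log(handoff.length); // 2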