@illuma-ai/agents 1.1.21 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (241)
  1. package/dist/cjs/graphs/Graph.cjs +12 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/run.cjs +20 -9
  6. package/dist/cjs/run.cjs.map +1 -1
  7. package/dist/esm/graphs/Graph.mjs +12 -1
  8. package/dist/esm/graphs/Graph.mjs.map +1 -1
  9. package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
  10. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  11. package/dist/esm/run.mjs +20 -9
  12. package/dist/esm/run.mjs.map +1 -1
  13. package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
  14. package/package.json +1 -1
  15. package/src/graphs/Graph.ts +12 -1
  16. package/src/graphs/MultiAgentGraph.ts +105 -1
  17. package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
  18. package/src/run.ts +20 -11
  19. package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
  20. package/src/agents/AgentContext.js +0 -782
  21. package/src/agents/AgentContext.test.js +0 -421
  22. package/src/agents/__tests__/AgentContext.test.js +0 -678
  23. package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
  24. package/src/common/enum.js +0 -192
  25. package/src/common/index.js +0 -3
  26. package/src/events.js +0 -166
  27. package/src/graphs/Graph.js +0 -1857
  28. package/src/graphs/MultiAgentGraph.js +0 -1092
  29. package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
  30. package/src/graphs/__tests__/structured-output.test.js +0 -144
  31. package/src/graphs/contextManagement.e2e.test.js +0 -718
  32. package/src/graphs/contextManagement.test.js +0 -485
  33. package/src/graphs/handoffValidation.test.js +0 -276
  34. package/src/graphs/index.js +0 -3
  35. package/src/index.js +0 -28
  36. package/src/instrumentation.js +0 -21
  37. package/src/llm/anthropic/index.js +0 -319
  38. package/src/llm/anthropic/types.js +0 -46
  39. package/src/llm/anthropic/utils/message_inputs.js +0 -627
  40. package/src/llm/anthropic/utils/message_outputs.js +0 -290
  41. package/src/llm/anthropic/utils/output_parsers.js +0 -89
  42. package/src/llm/anthropic/utils/tools.js +0 -25
  43. package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
  44. package/src/llm/bedrock/index.js +0 -303
  45. package/src/llm/bedrock/types.js +0 -2
  46. package/src/llm/bedrock/utils/index.js +0 -6
  47. package/src/llm/bedrock/utils/message_inputs.js +0 -463
  48. package/src/llm/bedrock/utils/message_outputs.js +0 -269
  49. package/src/llm/fake.js +0 -92
  50. package/src/llm/google/index.js +0 -215
  51. package/src/llm/google/types.js +0 -12
  52. package/src/llm/google/utils/common.js +0 -670
  53. package/src/llm/google/utils/tools.js +0 -111
  54. package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
  55. package/src/llm/openai/index.js +0 -1033
  56. package/src/llm/openai/types.js +0 -2
  57. package/src/llm/openai/utils/index.js +0 -756
  58. package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
  59. package/src/llm/openrouter/index.js +0 -261
  60. package/src/llm/openrouter/reasoning.test.js +0 -181
  61. package/src/llm/providers.js +0 -36
  62. package/src/llm/text.js +0 -65
  63. package/src/llm/vertexai/index.js +0 -402
  64. package/src/messages/__tests__/tools.test.js +0 -392
  65. package/src/messages/cache.js +0 -404
  66. package/src/messages/cache.test.js +0 -1167
  67. package/src/messages/content.js +0 -48
  68. package/src/messages/content.test.js +0 -314
  69. package/src/messages/core.js +0 -359
  70. package/src/messages/ensureThinkingBlock.test.js +0 -997
  71. package/src/messages/format.js +0 -973
  72. package/src/messages/formatAgentMessages.test.js +0 -2278
  73. package/src/messages/formatAgentMessages.tools.test.js +0 -362
  74. package/src/messages/formatMessage.test.js +0 -608
  75. package/src/messages/ids.js +0 -18
  76. package/src/messages/index.js +0 -9
  77. package/src/messages/labelContentByAgent.test.js +0 -725
  78. package/src/messages/prune.js +0 -438
  79. package/src/messages/reducer.js +0 -60
  80. package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
  81. package/src/messages/summarize.js +0 -146
  82. package/src/messages/summarize.test.js +0 -332
  83. package/src/messages/tools.js +0 -90
  84. package/src/mockStream.js +0 -81
  85. package/src/prompts/collab.js +0 -7
  86. package/src/prompts/index.js +0 -3
  87. package/src/prompts/taskmanager.js +0 -58
  88. package/src/run.js +0 -427
  89. package/src/schemas/index.js +0 -3
  90. package/src/schemas/schema-preparation.test.js +0 -370
  91. package/src/schemas/validate.js +0 -314
  92. package/src/schemas/validate.test.js +0 -264
  93. package/src/scripts/abort.js +0 -127
  94. package/src/scripts/ant_web_search.js +0 -130
  95. package/src/scripts/ant_web_search_edge_case.js +0 -133
  96. package/src/scripts/ant_web_search_error_edge_case.js +0 -119
  97. package/src/scripts/args.js +0 -41
  98. package/src/scripts/bedrock-cache-debug.js +0 -186
  99. package/src/scripts/bedrock-content-aggregation-test.js +0 -195
  100. package/src/scripts/bedrock-merge-test.js +0 -80
  101. package/src/scripts/bedrock-parallel-tools-test.js +0 -150
  102. package/src/scripts/caching.js +0 -106
  103. package/src/scripts/cli.js +0 -152
  104. package/src/scripts/cli2.js +0 -119
  105. package/src/scripts/cli3.js +0 -163
  106. package/src/scripts/cli4.js +0 -165
  107. package/src/scripts/cli5.js +0 -165
  108. package/src/scripts/code_exec.js +0 -171
  109. package/src/scripts/code_exec_files.js +0 -180
  110. package/src/scripts/code_exec_multi_session.js +0 -185
  111. package/src/scripts/code_exec_ptc.js +0 -265
  112. package/src/scripts/code_exec_session.js +0 -217
  113. package/src/scripts/code_exec_simple.js +0 -120
  114. package/src/scripts/content.js +0 -111
  115. package/src/scripts/empty_input.js +0 -125
  116. package/src/scripts/handoff-test.js +0 -96
  117. package/src/scripts/image.js +0 -138
  118. package/src/scripts/memory.js +0 -83
  119. package/src/scripts/multi-agent-chain.js +0 -271
  120. package/src/scripts/multi-agent-conditional.js +0 -185
  121. package/src/scripts/multi-agent-document-review-chain.js +0 -171
  122. package/src/scripts/multi-agent-hybrid-flow.js +0 -264
  123. package/src/scripts/multi-agent-parallel-start.js +0 -214
  124. package/src/scripts/multi-agent-parallel.js +0 -346
  125. package/src/scripts/multi-agent-sequence.js +0 -184
  126. package/src/scripts/multi-agent-supervisor.js +0 -324
  127. package/src/scripts/multi-agent-test.js +0 -147
  128. package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
  129. package/src/scripts/parallel-full-metadata-test.js +0 -176
  130. package/src/scripts/parallel-tools-test.js +0 -256
  131. package/src/scripts/programmatic_exec.js +0 -277
  132. package/src/scripts/programmatic_exec_agent.js +0 -168
  133. package/src/scripts/search.js +0 -118
  134. package/src/scripts/sequential-full-metadata-test.js +0 -143
  135. package/src/scripts/simple.js +0 -174
  136. package/src/scripts/single-agent-metadata-test.js +0 -152
  137. package/src/scripts/stream.js +0 -113
  138. package/src/scripts/test-custom-prompt-key.js +0 -132
  139. package/src/scripts/test-handoff-input.js +0 -143
  140. package/src/scripts/test-handoff-preamble.js +0 -227
  141. package/src/scripts/test-handoff-steering.js +0 -353
  142. package/src/scripts/test-multi-agent-list-handoff.js +0 -318
  143. package/src/scripts/test-parallel-agent-labeling.js +0 -253
  144. package/src/scripts/test-parallel-handoffs.js +0 -229
  145. package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
  146. package/src/scripts/test-thinking-handoff.js +0 -132
  147. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
  148. package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
  149. package/src/scripts/test-tools-before-handoff.js +0 -187
  150. package/src/scripts/test_code_api.js +0 -263
  151. package/src/scripts/thinking-bedrock.js +0 -128
  152. package/src/scripts/thinking-vertexai.js +0 -130
  153. package/src/scripts/thinking.js +0 -134
  154. package/src/scripts/tool_search.js +0 -114
  155. package/src/scripts/tools.js +0 -125
  156. package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
  157. package/src/specs/agent-handoffs.test.js +0 -924
  158. package/src/specs/anthropic.simple.test.js +0 -287
  159. package/src/specs/azure.simple.test.js +0 -381
  160. package/src/specs/cache.simple.test.js +0 -282
  161. package/src/specs/custom-event-await.test.js +0 -148
  162. package/src/specs/deepseek.simple.test.js +0 -189
  163. package/src/specs/emergency-prune.test.js +0 -308
  164. package/src/specs/moonshot.simple.test.js +0 -237
  165. package/src/specs/observability.integration.test.js +0 -1337
  166. package/src/specs/openai.simple.test.js +0 -233
  167. package/src/specs/openrouter.simple.test.js +0 -202
  168. package/src/specs/prune.test.js +0 -733
  169. package/src/specs/reasoning.test.js +0 -144
  170. package/src/specs/spec.utils.js +0 -4
  171. package/src/specs/thinking-handoff.test.js +0 -486
  172. package/src/specs/thinking-prune.test.js +0 -600
  173. package/src/specs/token-distribution-edge-case.test.js +0 -246
  174. package/src/specs/token-memoization.test.js +0 -32
  175. package/src/specs/tokens.test.js +0 -49
  176. package/src/specs/tool-error.test.js +0 -139
  177. package/src/splitStream.js +0 -204
  178. package/src/splitStream.test.js +0 -504
  179. package/src/stream.js +0 -650
  180. package/src/stream.test.js +0 -225
  181. package/src/test/mockTools.js +0 -340
  182. package/src/tools/BrowserTools.js +0 -245
  183. package/src/tools/Calculator.js +0 -38
  184. package/src/tools/Calculator.test.js +0 -225
  185. package/src/tools/CodeExecutor.js +0 -233
  186. package/src/tools/ProgrammaticToolCalling.js +0 -602
  187. package/src/tools/StreamingToolCallBuffer.js +0 -179
  188. package/src/tools/ToolNode.js +0 -930
  189. package/src/tools/ToolSearch.js +0 -904
  190. package/src/tools/__tests__/BrowserTools.test.js +0 -306
  191. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
  192. package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
  193. package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
  194. package/src/tools/__tests__/ToolApproval.test.js +0 -675
  195. package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
  196. package/src/tools/__tests__/ToolNode.session.test.js +0 -319
  197. package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
  198. package/src/tools/__tests__/ToolSearch.test.js +0 -812
  199. package/src/tools/__tests__/handlers.test.js +0 -799
  200. package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
  201. package/src/tools/handlers.js +0 -306
  202. package/src/tools/schema.js +0 -25
  203. package/src/tools/search/anthropic.js +0 -34
  204. package/src/tools/search/content.js +0 -116
  205. package/src/tools/search/content.test.js +0 -133
  206. package/src/tools/search/firecrawl.js +0 -173
  207. package/src/tools/search/format.js +0 -198
  208. package/src/tools/search/highlights.js +0 -241
  209. package/src/tools/search/index.js +0 -3
  210. package/src/tools/search/jina-reranker.test.js +0 -106
  211. package/src/tools/search/rerankers.js +0 -165
  212. package/src/tools/search/schema.js +0 -102
  213. package/src/tools/search/search.js +0 -561
  214. package/src/tools/search/serper-scraper.js +0 -126
  215. package/src/tools/search/test.js +0 -129
  216. package/src/tools/search/tool.js +0 -453
  217. package/src/tools/search/types.js +0 -2
  218. package/src/tools/search/utils.js +0 -59
  219. package/src/types/graph.js +0 -24
  220. package/src/types/graph.test.js +0 -192
  221. package/src/types/index.js +0 -7
  222. package/src/types/llm.js +0 -2
  223. package/src/types/messages.js +0 -2
  224. package/src/types/run.js +0 -2
  225. package/src/types/stream.js +0 -2
  226. package/src/types/tools.js +0 -2
  227. package/src/utils/contextAnalytics.js +0 -79
  228. package/src/utils/contextAnalytics.test.js +0 -166
  229. package/src/utils/events.js +0 -26
  230. package/src/utils/graph.js +0 -11
  231. package/src/utils/handlers.js +0 -65
  232. package/src/utils/index.js +0 -10
  233. package/src/utils/llm.js +0 -21
  234. package/src/utils/llmConfig.js +0 -205
  235. package/src/utils/logging.js +0 -37
  236. package/src/utils/misc.js +0 -51
  237. package/src/utils/run.js +0 -69
  238. package/src/utils/schema.js +0 -21
  239. package/src/utils/title.js +0 -119
  240. package/src/utils/tokens.js +0 -92
  241. package/src/utils/toonFormat.js +0 -379
package/src/specs/prune.test.js (deleted)
@@ -1,733 +0,0 @@
- // src/specs/prune.test.ts
- import { config } from 'dotenv';
- config();
- import { HumanMessage, AIMessage, SystemMessage, BaseMessage, ToolMessage, } from '@langchain/core/messages';
- import { createPruneMessages } from '@/messages/prune';
- import { getLLMConfig } from '@/utils/llmConfig';
- import { Providers } from '@/common';
- import { Run } from '@/run';
- // Create a simple token counter for testing
- const createTestTokenCounter = () => {
- // This simple token counter just counts characters as tokens for predictable testing
- return (message) => {
- // Use type assertion to help TypeScript understand the type
- const content = message.content;
- // Handle string content
- if (typeof content === 'string') {
- return content.length;
- }
- // Handle array content
- if (Array.isArray(content)) {
- let totalLength = 0;
- for (const item of content) {
- if (typeof item === 'string') {
- totalLength += item.length;
- }
- else if (typeof item === 'object') {
- if ('text' in item && typeof item.text === 'string') {
- totalLength += item.text.length;
- }
- }
- }
- return totalLength;
- }
- // Default case - if content is null, undefined, or any other type
- return 0;
- };
- };
- // Since the internal functions in prune.ts are not exported, we'll reimplement them here for testing
- // This is based on the implementation in src/messages/prune.ts
- function calculateTotalTokens(usage) {
- const baseInputTokens = Number(usage.input_tokens) || 0;
- const cacheCreation = Number(usage.input_token_details?.cache_creation) || 0;
- const cacheRead = Number(usage.input_token_details?.cache_read) || 0;
- const totalInputTokens = baseInputTokens + cacheCreation + cacheRead;
- const totalOutputTokens = Number(usage.output_tokens) || 0;
- return {
- input_tokens: totalInputTokens,
- output_tokens: totalOutputTokens,
- total_tokens: totalInputTokens + totalOutputTokens,
- };
- }
- function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType, }) {
- // Every reply is primed with <|start|>assistant<|message|>, so we
- // start with 3 tokens for the label after all messages have been counted.
- let summaryIndex = -1;
- let currentTokenCount = 3;
- const instructions = _messages[0]?.getType() === 'system' ? _messages[0] : undefined;
- const instructionsTokenCount = instructions != null ? indexTokenCountMap[0] : 0;
- let remainingContextTokens = maxContextTokens - instructionsTokenCount;
- const messages = [..._messages];
- const context = [];
- if (currentTokenCount < remainingContextTokens) {
- let currentIndex = messages.length;
- while (messages.length > 0 &&
- currentTokenCount < remainingContextTokens &&
- currentIndex > 1) {
- currentIndex--;
- if (messages.length === 1 && instructions) {
- break;
- }
- const poppedMessage = messages.pop();
- if (!poppedMessage)
- continue;
- const tokenCount = indexTokenCountMap[currentIndex] || 0;
- if (currentTokenCount + tokenCount <= remainingContextTokens) {
- context.push(poppedMessage);
- currentTokenCount += tokenCount;
- }
- else {
- messages.push(poppedMessage);
- break;
- }
- }
- // If startType is specified, discard messages until we find one of the required type
- if (startType != null && startType && context.length > 0) {
- const requiredTypeIndex = context.findIndex((msg) => msg.getType() === startType);
- if (requiredTypeIndex > 0) {
- // If we found a message of the required type, discard all messages before it
- const remainingMessages = context.slice(requiredTypeIndex);
- context.length = 0; // Clear the array
- context.push(...remainingMessages);
- }
- }
- }
- if (instructions && _messages.length > 0) {
- context.push(_messages[0]);
- messages.shift();
- }
- const prunedMemory = messages;
- summaryIndex = prunedMemory.length - 1;
- remainingContextTokens -= currentTokenCount;
- return {
- summaryIndex,
- remainingContextTokens,
- context: context.reverse(),
- messagesToRefine: prunedMemory,
- };
- }
- function checkValidNumber(value) {
- return typeof value === 'number' && !isNaN(value) && value > 0;
- }
- describe('Prune Messages Tests', () => {
- jest.setTimeout(30000);
- describe('calculateTotalTokens', () => {
- it('should calculate total tokens correctly with all fields present', () => {
- const usage = {
- input_tokens: 100,
- output_tokens: 50,
- input_token_details: {
- cache_creation: 10,
- cache_read: 5,
- },
- };
- const result = calculateTotalTokens(usage);
- expect(result.input_tokens).toBe(115); // 100 + 10 + 5
- expect(result.output_tokens).toBe(50);
- expect(result.total_tokens).toBe(165); // 115 + 50
- });
- it('should handle missing fields gracefully', () => {
- const usage = {
- input_tokens: 100,
- output_tokens: 50,
- };
- const result = calculateTotalTokens(usage);
- expect(result.input_tokens).toBe(100);
- expect(result.output_tokens).toBe(50);
- expect(result.total_tokens).toBe(150);
- });
- it('should handle empty usage object', () => {
- const usage = {};
- const result = calculateTotalTokens(usage);
- expect(result.input_tokens).toBe(0);
- expect(result.output_tokens).toBe(0);
- expect(result.total_tokens).toBe(0);
- });
- });
- describe('getMessagesWithinTokenLimit', () => {
- it('should include all messages when under token limit', () => {
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Hello'),
- new AIMessage('Hi there'),
- ];
- const indexTokenCountMap = {
- 0: 17, // "System instruction"
- 1: 5, // "Hello"
- 2: 8, // "Hi there"
- };
- const result = getMessagesWithinTokenLimit({
- messages,
- maxContextTokens: 100,
- indexTokenCountMap,
- });
- expect(result.context.length).toBe(3);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[0].getType()).toBe('system'); // System message
- expect(result.remainingContextTokens).toBe(100 - 17 - 5 - 8 - 3); // -3 for the assistant label tokens
- expect(result.messagesToRefine.length).toBe(0);
- });
- it('should prune oldest messages when over token limit', () => {
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Message 1'),
- new AIMessage('Response 1'),
- new HumanMessage('Message 2'),
- new AIMessage('Response 2'),
- ];
- const indexTokenCountMap = {
- 0: 17, // "System instruction"
- 1: 9, // "Message 1"
- 2: 10, // "Response 1"
- 3: 9, // "Message 2"
- 4: 10, // "Response 2"
- };
- // Set a limit that can only fit the system message and the last two messages
- const result = getMessagesWithinTokenLimit({
- messages,
- maxContextTokens: 40,
- indexTokenCountMap,
- });
- // Should include system message and the last two messages
- expect(result.context.length).toBe(3);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[0].getType()).toBe('system'); // System message
- expect(result.context[1]).toBe(messages[3]); // Message 2
- expect(result.context[2]).toBe(messages[4]); // Response 2
- // Should have the first two messages in messagesToRefine
- expect(result.messagesToRefine.length).toBe(2);
- expect(result.messagesToRefine[0]).toBe(messages[1]); // Message 1
- expect(result.messagesToRefine[1]).toBe(messages[2]); // Response 1
- });
- it('should always include system message even when at token limit', () => {
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Hello'),
- new AIMessage('Hi there'),
- ];
- const indexTokenCountMap = {
- 0: 17, // "System instruction"
- 1: 5, // "Hello"
- 2: 8, // "Hi there"
- };
- // Set a limit that can only fit the system message
- const result = getMessagesWithinTokenLimit({
- messages,
- maxContextTokens: 20,
- indexTokenCountMap,
- });
- expect(result.context.length).toBe(1);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.messagesToRefine.length).toBe(2);
- });
- it('should start context with a specific message type when startType is specified', () => {
- const messages = [
- new SystemMessage('System instruction'),
- new AIMessage('AI message 1'),
- new HumanMessage('Human message 1'),
- new AIMessage('AI message 2'),
- new HumanMessage('Human message 2'),
- ];
- const indexTokenCountMap = {
- 0: 17, // "System instruction"
- 1: 12, // "AI message 1"
- 2: 15, // "Human message 1"
- 3: 12, // "AI message 2"
- 4: 15, // "Human message 2"
- };
- // Set a limit that can fit all messages
- const result = getMessagesWithinTokenLimit({
- messages,
- maxContextTokens: 100,
- indexTokenCountMap,
- startType: 'human',
- });
- // All messages should be included since we're under the token limit
- expect(result.context.length).toBe(5);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[1]); // AI message 1
- expect(result.context[2]).toBe(messages[2]); // Human message 1
- expect(result.context[3]).toBe(messages[3]); // AI message 2
- expect(result.context[4]).toBe(messages[4]); // Human message 2
- // All messages should be included since we're under the token limit
- expect(result.messagesToRefine.length).toBe(0);
- });
- it('should keep all messages if no message of required type is found', () => {
- const messages = [
- new SystemMessage('System instruction'),
- new AIMessage('AI message 1'),
- new AIMessage('AI message 2'),
- ];
- const indexTokenCountMap = {
- 0: 17, // "System instruction"
- 1: 12, // "AI message 1"
- 2: 12, // "AI message 2"
- };
- // Set a limit that can fit all messages
- const result = getMessagesWithinTokenLimit({
- messages,
- maxContextTokens: 100,
- indexTokenCountMap,
- startType: 'human',
- });
- // Should include all messages since no human messages exist to start from
- expect(result.context.length).toBe(3);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[1]); // AI message 1
- expect(result.context[2]).toBe(messages[2]); // AI message 2
- expect(result.messagesToRefine.length).toBe(0);
- });
- });
- describe('checkValidNumber', () => {
- it('should return true for valid positive numbers', () => {
- expect(checkValidNumber(5)).toBe(true);
- expect(checkValidNumber(1.5)).toBe(true);
- expect(checkValidNumber(Number.MAX_SAFE_INTEGER)).toBe(true);
- });
- it('should return false for zero, negative numbers, and NaN', () => {
- expect(checkValidNumber(0)).toBe(false);
- expect(checkValidNumber(-5)).toBe(false);
- expect(checkValidNumber(NaN)).toBe(false);
- });
- it('should return false for non-number types', () => {
- expect(checkValidNumber('5')).toBe(false);
- expect(checkValidNumber(null)).toBe(false);
- expect(checkValidNumber(undefined)).toBe(false);
- expect(checkValidNumber({})).toBe(false);
- expect(checkValidNumber([])).toBe(false);
- });
- });
- describe('createPruneMessages', () => {
- it('should return all messages when under token limit', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Hello'),
- new AIMessage('Hi there'),
- ];
- const indexTokenCountMap = {
- 0: tokenCounter(messages[0]),
- 1: tokenCounter(messages[1]),
- 2: tokenCounter(messages[2]),
- };
- const pruneMessages = createPruneMessages({
- maxTokens: 100,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap,
- });
- const result = pruneMessages({ messages });
- expect(result.context.length).toBe(3);
- expect(result.context).toEqual(messages);
- });
- it('should prune messages when over token limit', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Message 1'),
- new AIMessage('Response 1'),
- new HumanMessage('Message 2'),
- new AIMessage('Response 2'),
- ];
- const indexTokenCountMap = {
- 0: tokenCounter(messages[0]),
- 1: tokenCounter(messages[1]),
- 2: tokenCounter(messages[2]),
- 3: tokenCounter(messages[3]),
- 4: tokenCounter(messages[4]),
- };
- // Set a limit that can only fit the system message and the last two messages
- const pruneMessages = createPruneMessages({
- maxTokens: 40,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap,
- });
- const result = pruneMessages({ messages });
- // Should include system message and the last two messages
- expect(result.context.length).toBe(3);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[3]); // Message 2
- expect(result.context[2]).toBe(messages[4]); // Response 2
- });
- it('should respect startType parameter', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new AIMessage('AI message 1'),
- new HumanMessage('Human message 1'),
- new AIMessage('AI message 2'),
- new HumanMessage('Human message 2'),
- ];
- const indexTokenCountMap = {
- 0: tokenCounter(messages[0]),
- 1: tokenCounter(messages[1]),
- 2: tokenCounter(messages[2]),
- 3: tokenCounter(messages[3]),
- 4: tokenCounter(messages[4]),
- };
- // Set a limit that can fit all messages
- const pruneMessages = createPruneMessages({
- maxTokens: 100,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({
- messages,
- startType: 'human',
- });
- // All messages should be included since we're under the token limit
- expect(result.context.length).toBe(5);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[1]); // AI message 1
- expect(result.context[2]).toBe(messages[2]); // Human message 1
- expect(result.context[3]).toBe(messages[3]); // AI message 2
- expect(result.context[4]).toBe(messages[4]); // Human message 2
- });
- it('should update token counts when usage metadata is provided', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Hello'),
- new AIMessage('Hi there'),
- ];
- const indexTokenCountMap = {
- 0: tokenCounter(messages[0]),
- 1: tokenCounter(messages[1]),
- 2: tokenCounter(messages[2]),
- };
- const pruneMessages = createPruneMessages({
- maxTokens: 100,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- // Provide usage metadata that indicates different token counts
- const usageMetadata = {
- input_tokens: 50,
- output_tokens: 25,
- total_tokens: 75,
- };
- const result = pruneMessages({
- messages,
- usageMetadata,
- });
- // The function should have updated the indexTokenCountMap based on the usage metadata
- expect(result.indexTokenCountMap).not.toEqual(indexTokenCountMap);
- // The total of all values in indexTokenCountMap should equal the total_tokens from usageMetadata
- const totalTokens = Object.values(result.indexTokenCountMap).reduce((a = 0, b = 0) => a + b, 0);
- expect(totalTokens).toBe(75);
- });
- });
- describe('Tool Message Handling', () => {
- it('should ensure context does not start with a tool message by finding an AI message', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new AIMessage('AI message 1'),
- new ToolMessage({ content: 'Tool result 1', tool_call_id: 'tool1' }),
- new AIMessage('AI message 2'),
- new ToolMessage({ content: 'Tool result 2', tool_call_id: 'tool2' }),
- ];
- const indexTokenCountMap = {
- 0: 17, // System instruction
- 1: 12, // AI message 1
- 2: 13, // Tool result 1
- 3: 12, // AI message 2
- 4: 13, // Tool result 2
- };
- // Create a pruneMessages function with a token limit that will only include the last few messages
- const pruneMessages = createPruneMessages({
- maxTokens: 58, // Only enough for system + last 3 messages + 3, but should not include a parent-less tool message
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- // The context should include the system message, AI message 2, and Tool result 2
- // It should NOT start with Tool result 2 alone
- expect(result.context.length).toBe(3);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[3]); // AI message 2
- expect(result.context[2]).toBe(messages[4]); // Tool result 2
- });
- it('should ensure context does not start with a tool message by finding a human message', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Human message 1'),
- new AIMessage('AI message 1'),
- new ToolMessage({ content: 'Tool result 1', tool_call_id: 'tool1' }),
- new HumanMessage('Human message 2'),
- new ToolMessage({ content: 'Tool result 2', tool_call_id: 'tool2' }),
- ];
- const indexTokenCountMap = {
- 0: 17, // System instruction
- 1: 15, // Human message 1
- 2: 12, // AI message 1
- 3: 13, // Tool result 1
- 4: 15, // Human message 2
- 5: 13, // Tool result 2
- };
- // Create a pruneMessages function with a token limit that will only include the last few messages
- const pruneMessages = createPruneMessages({
- maxTokens: 48, // Only enough for system + last 2 messages
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- // The context should include the system message, Human message 2, and Tool result 2
- // It should NOT start with Tool result 2 alone
- expect(result.context.length).toBe(3);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[4]); // Human message 2
- expect(result.context[2]).toBe(messages[5]); // Tool result 2
- });
- it('should handle the case where a tool message is followed by an AI message', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Human message'),
- new AIMessage('AI message with tool use'),
- new ToolMessage({ content: 'Tool result', tool_call_id: 'tool1' }),
- new AIMessage('AI message after tool'),
- ];
- const indexTokenCountMap = {
- 0: 17, // System instruction
- 1: 13, // Human message
- 2: 22, // AI message with tool use
- 3: 11, // Tool result
- 4: 19, // AI message after tool
- };
- const pruneMessages = createPruneMessages({
- maxTokens: 50,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- expect(result.context.length).toBe(2);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[4]); // AI message after tool
- });
- it('should handle the case where a tool message is followed by a human message', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Human message 1'),
- new AIMessage('AI message with tool use'),
- new ToolMessage({ content: 'Tool result', tool_call_id: 'tool1' }),
- new HumanMessage('Human message 2'),
- ];
- const indexTokenCountMap = {
- 0: 17, // System instruction
- 1: 15, // Human message 1
- 2: 22, // AI message with tool use
- 3: 11, // Tool result
- 4: 15, // Human message 2
- };
- const pruneMessages = createPruneMessages({
- maxTokens: 46,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- expect(result.context.length).toBe(2);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[4]); // Human message 2
- });
- it('should handle complex sequence with multiple tool messages', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Human message 1'),
- new AIMessage('AI message 1 with tool use'),
- new ToolMessage({ content: 'Tool result 1', tool_call_id: 'tool1' }),
- new AIMessage('AI message 2 with tool use'),
- new ToolMessage({ content: 'Tool result 2', tool_call_id: 'tool2' }),
- new AIMessage('AI message 3 with tool use'),
- new ToolMessage({ content: 'Tool result 3', tool_call_id: 'tool3' }),
- ];
- const indexTokenCountMap = {
- 0: 17, // System instruction
- 1: 15, // Human message 1
- 2: 26, // AI message 1 with tool use
- 3: 13, // Tool result 1
- 4: 26, // AI message 2 with tool use
- 5: 13, // Tool result 2
- 6: 26, // AI message 3 with tool use
- 7: 13, // Tool result 3
- };
- const pruneMessages = createPruneMessages({
- maxTokens: 111,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- expect(result.context.length).toBe(5);
- expect(result.context[0]).toBe(messages[0]); // System message
- expect(result.context[1]).toBe(messages[4]); // AI message 2 with tool use
- expect(result.context[2]).toBe(messages[5]); // Tool result 2
- expect(result.context[3]).toBe(messages[6]); // AI message 3 with tool use
- expect(result.context[4]).toBe(messages[7]); // Tool result 3
- });
- });
- describe('messagesToRefine return value', () => {
- it('should return empty messagesToRefine when all messages fit', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'),
- new HumanMessage('Hello'),
- new AIMessage('Hi there'),
- ];
- const indexTokenCountMap = {
- 0: tokenCounter(messages[0]),
- 1: tokenCounter(messages[1]),
- 2: tokenCounter(messages[2]),
- };
- const pruneMessages = createPruneMessages({
- maxTokens: 500,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- expect(result.messagesToRefine).toEqual([]);
- expect(Array.isArray(result.messagesToRefine)).toBe(true);
- expect(result.messagesToRefine.length).toBe(0);
- // All messages should be in context
- expect(result.context.length).toBe(3);
- expect(result.context).toEqual(messages);
- });
- it('should return pruned messages in messagesToRefine when context is exceeded', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('System instruction'), // 18 chars
- new HumanMessage('First message from user'), // 23 chars
- new AIMessage('First response from assistant'), // 29 chars
- new HumanMessage('Second message from user'), // 24 chars
- new AIMessage('Second response'), // 15 chars
- new HumanMessage('Third message'), // 13 chars
- new AIMessage('Third response'), // 14 chars
- ];
- const indexTokenCountMap = {};
- for (let i = 0; i < messages.length; i++) {
- indexTokenCountMap[i] = tokenCounter(messages[i]);
- }
- // Set a low maxTokens so that only system + last couple of messages fit
- // System instruction = 18, plus 3 base tokens = 21 reserved
- // Remaining budget = 55 - 18 = 37, minus 3 base = 34
- // Third response (14) + Third message (13) = 27, fits
- // Second response (15) would make 42, exceeds 34
- const pruneMessages = createPruneMessages({
- maxTokens: 55,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- // The context should contain the system message and the most recent messages that fit
- expect(result.context.length).toBeGreaterThanOrEqual(2);
- expect(result.context[0]).toBe(messages[0]); // System message always included
- // messagesToRefine should contain the oldest messages that were discarded
- expect(result.messagesToRefine.length).toBeGreaterThan(0);
- // The pruned messages should be the ones NOT in context (excluding system message)
- // Verify no overlap: messagesToRefine should not contain any message from context
- for (const prunedMsg of result.messagesToRefine) {
- expect(result.context).not.toContain(prunedMsg);
- }
- // messagesToRefine contains the boundary messages that triggered the overflow
- // (the pruning loop pops from newest to oldest, and stops when a message doesn't fit)
- // So messagesToRefine will have at least the first message that exceeded the budget
- expect(result.messagesToRefine.length).toBeGreaterThanOrEqual(1);
- // Each message in messagesToRefine should be a valid BaseMessage
- for (const msg of result.messagesToRefine) {
- expect(msg).toBeDefined();
- expect(typeof msg.getType()).toBe('string');
- }
- ;
- });
- it('should return messagesToRefine compatible with summarization callbacks', () => {
- const tokenCounter = createTestTokenCounter();
- const messages = [
- new SystemMessage('You are a helpful assistant'),
- new HumanMessage('Tell me about TypeScript'),
- new AIMessage('TypeScript is a typed superset of JavaScript'),
- new HumanMessage('What about generics?'),
- new AIMessage('Generics allow creating reusable components'),
- new HumanMessage('Show me an example'),
- new AIMessage('Here is a generic function example'),
- ];
- const indexTokenCountMap = {};
- for (let i = 0; i < messages.length; i++) {
- indexTokenCountMap[i] = tokenCounter(messages[i]);
- }
- // Use a low token limit to force pruning
- const pruneMessages = createPruneMessages({
- maxTokens: 80,
- startIndex: 0,
- tokenCounter,
- indexTokenCountMap: { ...indexTokenCountMap },
- });
- const result = pruneMessages({ messages });
- // Verify messagesToRefine contains valid BaseMessage instances
- expect(result.messagesToRefine.length).toBeGreaterThan(0);
- for (const msg of result.messagesToRefine) {
- // Each message should be an instance of BaseMessage
- expect(msg).toBeInstanceOf(BaseMessage);
- // Each message should have a valid type
- const msgType = msg.getType();
- expect(['human', 'ai', 'system', 'tool', 'generic', 'function']).toContain(msgType);
- // Each message should have accessible content (string or array)
- expect(msg.content).toBeDefined();
- expect(typeof msg.content === 'string' || Array.isArray(msg.content)).toBe(true);
- }
- // Verify the messages could be serialized (important for passing to callbacks)
- const serialized = result.messagesToRefine.map((msg) => ({
- type: msg.getType(),
- content: msg.content,
- }));
- expect(serialized.length).toBe(result.messagesToRefine.length);
- expect(serialized.every((s) => s.type && s.content !== undefined)).toBe(true);
- });
- });
- describe('Integration with Run', () => {
- it('should initialize Run with custom token counter and process messages', async () => {
- const provider = Providers.OPENAI;
- const llmConfig = getLLMConfig(provider);
- const tokenCounter = createTestTokenCounter();
- const run = await Run.create({
- runId: 'test-prune-run',
- graphConfig: {
- type: 'standard',
- llmConfig,
- instructions: 'You are a helpful assistant.',
- maxContextTokens: 1000,
- },
- returnContent: true,
- skipCleanup: true,
- tokenCounter,
- indexTokenCountMap: {},
- });
- // Override the model to use a fake LLM
- run.Graph?.overrideTestModel(['This is a test response'], 1);
- const messages = [new HumanMessage('Hello, how are you?')];
- const config = {
- configurable: {
- thread_id: 'test-thread',
- },
- streamMode: 'values',
- version: 'v2',
- };
- await run.processStream({ messages }, config);
- const finalMessages = run.getRunMessages();
- expect(finalMessages).toBeDefined();
- expect(finalMessages?.length).toBeGreaterThan(0);
- });
- });
- });
- //# sourceMappingURL=prune.test.js.map