@illuma-ai/agents 1.1.21 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (241)
  1. package/dist/cjs/graphs/Graph.cjs +12 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/run.cjs +20 -9
  6. package/dist/cjs/run.cjs.map +1 -1
  7. package/dist/esm/graphs/Graph.mjs +12 -1
  8. package/dist/esm/graphs/Graph.mjs.map +1 -1
  9. package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
  10. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  11. package/dist/esm/run.mjs +20 -9
  12. package/dist/esm/run.mjs.map +1 -1
  13. package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
  14. package/package.json +1 -1
  15. package/src/graphs/Graph.ts +12 -1
  16. package/src/graphs/MultiAgentGraph.ts +105 -1
  17. package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
  18. package/src/run.ts +20 -11
  19. package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
  20. package/src/agents/AgentContext.js +0 -782
  21. package/src/agents/AgentContext.test.js +0 -421
  22. package/src/agents/__tests__/AgentContext.test.js +0 -678
  23. package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
  24. package/src/common/enum.js +0 -192
  25. package/src/common/index.js +0 -3
  26. package/src/events.js +0 -166
  27. package/src/graphs/Graph.js +0 -1857
  28. package/src/graphs/MultiAgentGraph.js +0 -1092
  29. package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
  30. package/src/graphs/__tests__/structured-output.test.js +0 -144
  31. package/src/graphs/contextManagement.e2e.test.js +0 -718
  32. package/src/graphs/contextManagement.test.js +0 -485
  33. package/src/graphs/handoffValidation.test.js +0 -276
  34. package/src/graphs/index.js +0 -3
  35. package/src/index.js +0 -28
  36. package/src/instrumentation.js +0 -21
  37. package/src/llm/anthropic/index.js +0 -319
  38. package/src/llm/anthropic/types.js +0 -46
  39. package/src/llm/anthropic/utils/message_inputs.js +0 -627
  40. package/src/llm/anthropic/utils/message_outputs.js +0 -290
  41. package/src/llm/anthropic/utils/output_parsers.js +0 -89
  42. package/src/llm/anthropic/utils/tools.js +0 -25
  43. package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
  44. package/src/llm/bedrock/index.js +0 -303
  45. package/src/llm/bedrock/types.js +0 -2
  46. package/src/llm/bedrock/utils/index.js +0 -6
  47. package/src/llm/bedrock/utils/message_inputs.js +0 -463
  48. package/src/llm/bedrock/utils/message_outputs.js +0 -269
  49. package/src/llm/fake.js +0 -92
  50. package/src/llm/google/index.js +0 -215
  51. package/src/llm/google/types.js +0 -12
  52. package/src/llm/google/utils/common.js +0 -670
  53. package/src/llm/google/utils/tools.js +0 -111
  54. package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
  55. package/src/llm/openai/index.js +0 -1033
  56. package/src/llm/openai/types.js +0 -2
  57. package/src/llm/openai/utils/index.js +0 -756
  58. package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
  59. package/src/llm/openrouter/index.js +0 -261
  60. package/src/llm/openrouter/reasoning.test.js +0 -181
  61. package/src/llm/providers.js +0 -36
  62. package/src/llm/text.js +0 -65
  63. package/src/llm/vertexai/index.js +0 -402
  64. package/src/messages/__tests__/tools.test.js +0 -392
  65. package/src/messages/cache.js +0 -404
  66. package/src/messages/cache.test.js +0 -1167
  67. package/src/messages/content.js +0 -48
  68. package/src/messages/content.test.js +0 -314
  69. package/src/messages/core.js +0 -359
  70. package/src/messages/ensureThinkingBlock.test.js +0 -997
  71. package/src/messages/format.js +0 -973
  72. package/src/messages/formatAgentMessages.test.js +0 -2278
  73. package/src/messages/formatAgentMessages.tools.test.js +0 -362
  74. package/src/messages/formatMessage.test.js +0 -608
  75. package/src/messages/ids.js +0 -18
  76. package/src/messages/index.js +0 -9
  77. package/src/messages/labelContentByAgent.test.js +0 -725
  78. package/src/messages/prune.js +0 -438
  79. package/src/messages/reducer.js +0 -60
  80. package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
  81. package/src/messages/summarize.js +0 -146
  82. package/src/messages/summarize.test.js +0 -332
  83. package/src/messages/tools.js +0 -90
  84. package/src/mockStream.js +0 -81
  85. package/src/prompts/collab.js +0 -7
  86. package/src/prompts/index.js +0 -3
  87. package/src/prompts/taskmanager.js +0 -58
  88. package/src/run.js +0 -427
  89. package/src/schemas/index.js +0 -3
  90. package/src/schemas/schema-preparation.test.js +0 -370
  91. package/src/schemas/validate.js +0 -314
  92. package/src/schemas/validate.test.js +0 -264
  93. package/src/scripts/abort.js +0 -127
  94. package/src/scripts/ant_web_search.js +0 -130
  95. package/src/scripts/ant_web_search_edge_case.js +0 -133
  96. package/src/scripts/ant_web_search_error_edge_case.js +0 -119
  97. package/src/scripts/args.js +0 -41
  98. package/src/scripts/bedrock-cache-debug.js +0 -186
  99. package/src/scripts/bedrock-content-aggregation-test.js +0 -195
  100. package/src/scripts/bedrock-merge-test.js +0 -80
  101. package/src/scripts/bedrock-parallel-tools-test.js +0 -150
  102. package/src/scripts/caching.js +0 -106
  103. package/src/scripts/cli.js +0 -152
  104. package/src/scripts/cli2.js +0 -119
  105. package/src/scripts/cli3.js +0 -163
  106. package/src/scripts/cli4.js +0 -165
  107. package/src/scripts/cli5.js +0 -165
  108. package/src/scripts/code_exec.js +0 -171
  109. package/src/scripts/code_exec_files.js +0 -180
  110. package/src/scripts/code_exec_multi_session.js +0 -185
  111. package/src/scripts/code_exec_ptc.js +0 -265
  112. package/src/scripts/code_exec_session.js +0 -217
  113. package/src/scripts/code_exec_simple.js +0 -120
  114. package/src/scripts/content.js +0 -111
  115. package/src/scripts/empty_input.js +0 -125
  116. package/src/scripts/handoff-test.js +0 -96
  117. package/src/scripts/image.js +0 -138
  118. package/src/scripts/memory.js +0 -83
  119. package/src/scripts/multi-agent-chain.js +0 -271
  120. package/src/scripts/multi-agent-conditional.js +0 -185
  121. package/src/scripts/multi-agent-document-review-chain.js +0 -171
  122. package/src/scripts/multi-agent-hybrid-flow.js +0 -264
  123. package/src/scripts/multi-agent-parallel-start.js +0 -214
  124. package/src/scripts/multi-agent-parallel.js +0 -346
  125. package/src/scripts/multi-agent-sequence.js +0 -184
  126. package/src/scripts/multi-agent-supervisor.js +0 -324
  127. package/src/scripts/multi-agent-test.js +0 -147
  128. package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
  129. package/src/scripts/parallel-full-metadata-test.js +0 -176
  130. package/src/scripts/parallel-tools-test.js +0 -256
  131. package/src/scripts/programmatic_exec.js +0 -277
  132. package/src/scripts/programmatic_exec_agent.js +0 -168
  133. package/src/scripts/search.js +0 -118
  134. package/src/scripts/sequential-full-metadata-test.js +0 -143
  135. package/src/scripts/simple.js +0 -174
  136. package/src/scripts/single-agent-metadata-test.js +0 -152
  137. package/src/scripts/stream.js +0 -113
  138. package/src/scripts/test-custom-prompt-key.js +0 -132
  139. package/src/scripts/test-handoff-input.js +0 -143
  140. package/src/scripts/test-handoff-preamble.js +0 -227
  141. package/src/scripts/test-handoff-steering.js +0 -353
  142. package/src/scripts/test-multi-agent-list-handoff.js +0 -318
  143. package/src/scripts/test-parallel-agent-labeling.js +0 -253
  144. package/src/scripts/test-parallel-handoffs.js +0 -229
  145. package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
  146. package/src/scripts/test-thinking-handoff.js +0 -132
  147. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
  148. package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
  149. package/src/scripts/test-tools-before-handoff.js +0 -187
  150. package/src/scripts/test_code_api.js +0 -263
  151. package/src/scripts/thinking-bedrock.js +0 -128
  152. package/src/scripts/thinking-vertexai.js +0 -130
  153. package/src/scripts/thinking.js +0 -134
  154. package/src/scripts/tool_search.js +0 -114
  155. package/src/scripts/tools.js +0 -125
  156. package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
  157. package/src/specs/agent-handoffs.test.js +0 -924
  158. package/src/specs/anthropic.simple.test.js +0 -287
  159. package/src/specs/azure.simple.test.js +0 -381
  160. package/src/specs/cache.simple.test.js +0 -282
  161. package/src/specs/custom-event-await.test.js +0 -148
  162. package/src/specs/deepseek.simple.test.js +0 -189
  163. package/src/specs/emergency-prune.test.js +0 -308
  164. package/src/specs/moonshot.simple.test.js +0 -237
  165. package/src/specs/observability.integration.test.js +0 -1337
  166. package/src/specs/openai.simple.test.js +0 -233
  167. package/src/specs/openrouter.simple.test.js +0 -202
  168. package/src/specs/prune.test.js +0 -733
  169. package/src/specs/reasoning.test.js +0 -144
  170. package/src/specs/spec.utils.js +0 -4
  171. package/src/specs/thinking-handoff.test.js +0 -486
  172. package/src/specs/thinking-prune.test.js +0 -600
  173. package/src/specs/token-distribution-edge-case.test.js +0 -246
  174. package/src/specs/token-memoization.test.js +0 -32
  175. package/src/specs/tokens.test.js +0 -49
  176. package/src/specs/tool-error.test.js +0 -139
  177. package/src/splitStream.js +0 -204
  178. package/src/splitStream.test.js +0 -504
  179. package/src/stream.js +0 -650
  180. package/src/stream.test.js +0 -225
  181. package/src/test/mockTools.js +0 -340
  182. package/src/tools/BrowserTools.js +0 -245
  183. package/src/tools/Calculator.js +0 -38
  184. package/src/tools/Calculator.test.js +0 -225
  185. package/src/tools/CodeExecutor.js +0 -233
  186. package/src/tools/ProgrammaticToolCalling.js +0 -602
  187. package/src/tools/StreamingToolCallBuffer.js +0 -179
  188. package/src/tools/ToolNode.js +0 -930
  189. package/src/tools/ToolSearch.js +0 -904
  190. package/src/tools/__tests__/BrowserTools.test.js +0 -306
  191. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
  192. package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
  193. package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
  194. package/src/tools/__tests__/ToolApproval.test.js +0 -675
  195. package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
  196. package/src/tools/__tests__/ToolNode.session.test.js +0 -319
  197. package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
  198. package/src/tools/__tests__/ToolSearch.test.js +0 -812
  199. package/src/tools/__tests__/handlers.test.js +0 -799
  200. package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
  201. package/src/tools/handlers.js +0 -306
  202. package/src/tools/schema.js +0 -25
  203. package/src/tools/search/anthropic.js +0 -34
  204. package/src/tools/search/content.js +0 -116
  205. package/src/tools/search/content.test.js +0 -133
  206. package/src/tools/search/firecrawl.js +0 -173
  207. package/src/tools/search/format.js +0 -198
  208. package/src/tools/search/highlights.js +0 -241
  209. package/src/tools/search/index.js +0 -3
  210. package/src/tools/search/jina-reranker.test.js +0 -106
  211. package/src/tools/search/rerankers.js +0 -165
  212. package/src/tools/search/schema.js +0 -102
  213. package/src/tools/search/search.js +0 -561
  214. package/src/tools/search/serper-scraper.js +0 -126
  215. package/src/tools/search/test.js +0 -129
  216. package/src/tools/search/tool.js +0 -453
  217. package/src/tools/search/types.js +0 -2
  218. package/src/tools/search/utils.js +0 -59
  219. package/src/types/graph.js +0 -24
  220. package/src/types/graph.test.js +0 -192
  221. package/src/types/index.js +0 -7
  222. package/src/types/llm.js +0 -2
  223. package/src/types/messages.js +0 -2
  224. package/src/types/run.js +0 -2
  225. package/src/types/stream.js +0 -2
  226. package/src/types/tools.js +0 -2
  227. package/src/utils/contextAnalytics.js +0 -79
  228. package/src/utils/contextAnalytics.test.js +0 -166
  229. package/src/utils/events.js +0 -26
  230. package/src/utils/graph.js +0 -11
  231. package/src/utils/handlers.js +0 -65
  232. package/src/utils/index.js +0 -10
  233. package/src/utils/llm.js +0 -21
  234. package/src/utils/llmConfig.js +0 -205
  235. package/src/utils/logging.js +0 -37
  236. package/src/utils/misc.js +0 -51
  237. package/src/utils/run.js +0 -69
  238. package/src/utils/schema.js +0 -21
  239. package/src/utils/title.js +0 -119
  240. package/src/utils/tokens.js +0 -92
  241. package/src/utils/toonFormat.js +0 -379
@@ -1,308 +0,0 @@
1
- // src/specs/emergency-prune.test.ts
2
- /**
3
- * Tests for the emergency pruning feature that handles "input too long" errors
4
- * by retrying with more aggressive message pruning and adding a context notice.
5
- */
6
- import { HumanMessage, AIMessage, SystemMessage, } from '@langchain/core/messages';
7
- import { createPruneMessages } from '@/messages/prune';
8
- import { Providers } from '@/common';
9
- // Simple token counter for testing (1 character = 1 token)
10
- const createTestTokenCounter = () => {
11
- return (message) => {
12
- const content = message.content;
13
- if (typeof content === 'string') {
14
- return content.length;
15
- }
16
- if (Array.isArray(content)) {
17
- return content.reduce((total, item) => {
18
- if (typeof item === 'string')
19
- return total + item.length;
20
- if (typeof item === 'object' &&
21
- 'text' in item &&
22
- typeof item.text === 'string') {
23
- return total + item.text.length;
24
- }
25
- return total;
26
- }, 0);
27
- }
28
- return 0;
29
- };
30
- };
31
- // Helper to create test messages
32
- const createTestMessages = (count, tokensPer) => {
33
- const messages = [
34
- new SystemMessage('You are a helpful assistant.'),
35
- ];
36
- for (let i = 0; i < count; i++) {
37
- const content = 'x'.repeat(tokensPer);
38
- if (i % 2 === 0) {
39
- messages.push(new HumanMessage(content));
40
- }
41
- else {
42
- messages.push(new AIMessage(content));
43
- }
44
- }
45
- return messages;
46
- };
47
- // Helper to build indexTokenCountMap
48
- const buildIndexTokenCountMap = (messages, tokenCounter) => {
49
- const map = {};
50
- messages.forEach((msg, index) => {
51
- map[index] = tokenCounter(msg);
52
- });
53
- return map;
54
- };
55
- /**
56
- * Estimates a human-friendly description of the conversation timeframe based on message count.
57
- * This mirrors the implementation in Graph.ts
58
- */
59
- const getContextTimeframeDescription = (messageCount) => {
60
- if (messageCount <= 5) {
61
- return 'just the last few exchanges';
62
- }
63
- else if (messageCount <= 15) {
64
- return 'the last several minutes';
65
- }
66
- else if (messageCount <= 30) {
67
- return 'roughly the past hour';
68
- }
69
- else if (messageCount <= 60) {
70
- return 'the past couple of hours';
71
- }
72
- else if (messageCount <= 150) {
73
- return 'the past few hours';
74
- }
75
- else if (messageCount <= 300) {
76
- return 'roughly a day\'s worth';
77
- }
78
- else if (messageCount <= 700) {
79
- return 'the past few days';
80
- }
81
- else {
82
- return 'about a week or more';
83
- }
84
- };
85
- describe('Emergency Pruning Feature', () => {
86
- const tokenCounter = createTestTokenCounter();
87
- describe('Normal Pruning vs Emergency Pruning', () => {
88
- it('should prune more aggressively with 50% reduced context', () => {
89
- // Create 20 messages, each with 100 tokens = 2000 tokens total (excluding system)
90
- const messages = createTestMessages(20, 100);
91
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
92
- // Normal prune with 1500 token limit
93
- const normalMaxTokens = 1500;
94
- const normalPrune = createPruneMessages({
95
- startIndex: 0,
96
- provider: Providers.BEDROCK,
97
- tokenCounter,
98
- maxTokens: normalMaxTokens,
99
- thinkingEnabled: false,
100
- indexTokenCountMap,
101
- });
102
- const { context: normalContext } = normalPrune({ messages });
103
- // Emergency prune with 50% (750 tokens)
104
- const emergencyMaxTokens = Math.floor(normalMaxTokens * 0.5);
105
- const emergencyPrune = createPruneMessages({
106
- startIndex: 0,
107
- provider: Providers.BEDROCK,
108
- tokenCounter,
109
- maxTokens: emergencyMaxTokens,
110
- thinkingEnabled: false,
111
- indexTokenCountMap,
112
- });
113
- const { context: emergencyContext } = emergencyPrune({ messages });
114
- // Emergency should have fewer messages
115
- expect(emergencyContext.length).toBeLessThan(normalContext.length);
116
- console.log(`Normal prune: ${normalContext.length} messages, Emergency prune: ${emergencyContext.length} messages`);
117
- });
118
- it('should preserve system message and latest user message after emergency prune', () => {
119
- const messages = createTestMessages(10, 200);
120
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
121
- // Very aggressive prune - only 300 tokens
122
- const emergencyPrune = createPruneMessages({
123
- startIndex: 0,
124
- provider: Providers.BEDROCK,
125
- tokenCounter,
126
- maxTokens: 300,
127
- thinkingEnabled: false,
128
- indexTokenCountMap,
129
- });
130
- const { context } = emergencyPrune({ messages });
131
- // Should still have system message if it fits
132
- if (context.length > 0) {
133
- // Check that we have at least the most recent messages
134
- const lastMessage = context[context.length - 1];
135
- expect(lastMessage).toBeDefined();
136
- }
137
- });
138
- });
139
- describe('Pruning Notice Message Injection', () => {
140
- it('should calculate correct number of pruned messages', () => {
141
- const originalCount = 20;
142
- const messages = createTestMessages(originalCount, 100);
143
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
144
- const emergencyPrune = createPruneMessages({
145
- startIndex: 0,
146
- provider: Providers.BEDROCK,
147
- tokenCounter,
148
- maxTokens: 500, // Very small to force aggressive pruning
149
- thinkingEnabled: false,
150
- indexTokenCountMap,
151
- });
152
- const { context: reducedMessages } = emergencyPrune({ messages });
153
- // Calculate how many were pruned (this is what we inject in the notice)
154
- const prunedCount = messages.length - reducedMessages.length;
155
- expect(prunedCount).toBeGreaterThan(0);
156
- console.log(`Original: ${messages.length}, After prune: ${reducedMessages.length}, Pruned: ${prunedCount}`);
157
- });
158
- it('should inject personalized notice message after system message', () => {
159
- const messages = createTestMessages(10, 100);
160
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
161
- const emergencyPrune = createPruneMessages({
162
- startIndex: 0,
163
- provider: Providers.BEDROCK,
164
- tokenCounter,
165
- maxTokens: 800,
166
- thinkingEnabled: false,
167
- indexTokenCountMap,
168
- });
169
- const { context: reducedMessages } = emergencyPrune({ messages });
170
- // Simulate the notice injection logic from Graph.ts
171
- const prunedCount = messages.length - reducedMessages.length;
172
- const remainingCount = reducedMessages.length;
173
- const estimatedContextDescription = getContextTimeframeDescription(remainingCount);
174
- const pruneNoticeMessage = new HumanMessage({
175
- content: `[CONTEXT NOTICE]
176
- Our conversation has grown quite long, so I've focused on ${estimatedContextDescription} of our chat (${remainingCount} recent messages). ${prunedCount} earlier messages are no longer in my immediate memory.
177
-
178
- If I seem to be missing something we discussed earlier, just give me a quick reminder and I'll pick right back up! I'm still fully engaged and ready to help with whatever you need.`,
179
- });
180
- // Insert after system message
181
- const hasSystemMessage = reducedMessages[0]?.getType() === 'system';
182
- const insertIndex = hasSystemMessage ? 1 : 0;
183
- const messagesWithNotice = [
184
- ...reducedMessages.slice(0, insertIndex),
185
- pruneNoticeMessage,
186
- ...reducedMessages.slice(insertIndex),
187
- ];
188
- // Verify notice is in correct position
189
- if (hasSystemMessage) {
190
- expect(messagesWithNotice[0].getType()).toBe('system');
191
- expect(messagesWithNotice[1].getType()).toBe('human');
192
- expect(messagesWithNotice[1].content).toContain('[CONTEXT NOTICE]');
193
- expect(messagesWithNotice[1].content).toContain('recent messages');
194
- expect(messagesWithNotice[1].content).toContain('quick reminder');
195
- }
196
- else {
197
- expect(messagesWithNotice[0].getType()).toBe('human');
198
- expect(messagesWithNotice[0].content).toContain('[CONTEXT NOTICE]');
199
- }
200
- // Total messages should be reduced + 1 notice
201
- expect(messagesWithNotice.length).toBe(reducedMessages.length + 1);
202
- console.log(`Notice preview:\n${pruneNoticeMessage.content.substring(0, 200)}...`);
203
- });
204
- });
205
- describe('Context Timeframe Description', () => {
206
- it('should return appropriate descriptions for different message counts', () => {
207
- expect(getContextTimeframeDescription(3)).toBe('just the last few exchanges');
208
- expect(getContextTimeframeDescription(10)).toBe('the last several minutes');
209
- expect(getContextTimeframeDescription(25)).toBe('roughly the past hour');
210
- expect(getContextTimeframeDescription(45)).toBe('the past couple of hours');
211
- expect(getContextTimeframeDescription(100)).toBe('the past few hours');
212
- expect(getContextTimeframeDescription(200)).toBe('roughly a day\'s worth');
213
- expect(getContextTimeframeDescription(500)).toBe('the past few days');
214
- expect(getContextTimeframeDescription(1000)).toBe('about a week or more');
215
- });
216
- });
217
- describe('Error Detection Patterns', () => {
218
- const errorPatterns = [
219
- 'Input is too long for the model',
220
- 'context length exceeded',
221
- 'maximum context length',
222
- 'ValidationException: Input is too long',
223
- 'prompt is too long for this model',
224
- 'The input is too long',
225
- ];
226
- it('should detect various "input too long" error patterns', () => {
227
- const isInputTooLongError = (errorMessage) => {
228
- const lowerMessage = errorMessage.toLowerCase();
229
- return (lowerMessage.includes('too long') ||
230
- lowerMessage.includes('input is too long') ||
231
- lowerMessage.includes('context length') ||
232
- lowerMessage.includes('maximum context') ||
233
- lowerMessage.includes('validationexception') ||
234
- lowerMessage.includes('prompt is too long'));
235
- };
236
- for (const pattern of errorPatterns) {
237
- expect(isInputTooLongError(pattern)).toBe(true);
238
- console.log(`✓ Detected: "${pattern}"`);
239
- }
240
- // Should not match unrelated errors
241
- expect(isInputTooLongError('Network timeout')).toBe(false);
242
- expect(isInputTooLongError('Invalid API key')).toBe(false);
243
- expect(isInputTooLongError('Rate limit exceeded')).toBe(false);
244
- });
245
- });
246
- describe('Edge Cases', () => {
247
- it('should handle empty messages after pruning', () => {
248
- // Single very long message that exceeds the limit
249
- const messages = [
250
- new SystemMessage('System prompt'),
251
- new HumanMessage('x'.repeat(10000)), // Way too long
252
- ];
253
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
254
- const emergencyPrune = createPruneMessages({
255
- startIndex: 0,
256
- provider: Providers.BEDROCK,
257
- tokenCounter,
258
- maxTokens: 100, // Very small limit
259
- thinkingEnabled: false,
260
- indexTokenCountMap,
261
- });
262
- const { context } = emergencyPrune({ messages });
263
- // Should have at least tried to keep something or be empty
264
- // The key is it shouldn't throw
265
- expect(Array.isArray(context)).toBe(true);
266
- });
267
- it('should work with only system message and one user message', () => {
268
- const messages = [
269
- new SystemMessage('You are helpful.'),
270
- new HumanMessage('Hello'),
271
- ];
272
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
273
- const emergencyPrune = createPruneMessages({
274
- startIndex: 0,
275
- provider: Providers.BEDROCK,
276
- tokenCounter,
277
- maxTokens: 500,
278
- thinkingEnabled: false,
279
- indexTokenCountMap,
280
- });
281
- const { context } = emergencyPrune({ messages });
282
- expect(context.length).toBe(2);
283
- expect(context[0].getType()).toBe('system');
284
- expect(context[1].getType()).toBe('human');
285
- });
286
- it('should handle conversation without system message', () => {
287
- const messages = [
288
- new HumanMessage('Hello'),
289
- new AIMessage('Hi there!'),
290
- new HumanMessage('How are you?'),
291
- ];
292
- const indexTokenCountMap = buildIndexTokenCountMap(messages, tokenCounter);
293
- const emergencyPrune = createPruneMessages({
294
- startIndex: 0,
295
- provider: Providers.BEDROCK,
296
- tokenCounter,
297
- maxTokens: 100,
298
- thinkingEnabled: false,
299
- indexTokenCountMap,
300
- });
301
- const { context } = emergencyPrune({ messages });
302
- // Should keep the most recent messages that fit
303
- expect(context.length).toBeGreaterThan(0);
304
- expect(context[0].getType()).not.toBe('system');
305
- });
306
- });
307
- });
308
- //# sourceMappingURL=emergency-prune.test.js.map
@@ -1,237 +0,0 @@
1
- /* eslint-disable no-console */
2
- /* eslint-disable @typescript-eslint/no-explicit-any */
3
- import { config } from 'dotenv';
4
- config();
5
- import { Calculator } from '@/tools/Calculator';
6
- import { HumanMessage, } from '@langchain/core/messages';
7
- import { ToolEndHandler, ModelEndHandler } from '@/events';
8
- import { ContentTypes, GraphEvents, Providers } from '@/common';
9
- import { capitalizeFirstLetter } from './spec.utils';
10
- import { createContentAggregator } from '@/stream';
11
- import { Run } from '@/run';
12
- const provider = Providers.MOONSHOT;
13
- const llmConfig = {
14
- provider,
15
- model: 'kimi-k2.5',
16
- configuration: {
17
- apiKey: process.env.MOONSHOT_API_KEY,
18
- baseURL: 'https://api.moonshot.ai/v1',
19
- },
20
- };
21
- const skipTests = process.env.MOONSHOT_API_KEY == null;
22
- (skipTests ? describe.skip : describe)(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
23
- jest.setTimeout(120000);
24
- let run;
25
- let collectedUsage;
26
- let conversationHistory;
27
- let aggregateContent;
28
- let _contentParts;
29
- const testConfig = {
30
- configurable: {
31
- thread_id: 'moonshot-test-1',
32
- },
33
- streamMode: 'values',
34
- version: 'v2',
35
- };
36
- beforeEach(async () => {
37
- conversationHistory = [];
38
- collectedUsage = [];
39
- const { contentParts: cp, aggregateContent: ac } = createContentAggregator();
40
- _contentParts = cp;
41
- aggregateContent = ac;
42
- });
43
- const onMessageDeltaSpy = jest.fn();
44
- const onReasoningDeltaSpy = jest.fn();
45
- const onRunStepSpy = jest.fn();
46
- afterAll(() => {
47
- onMessageDeltaSpy.mockReset();
48
- onReasoningDeltaSpy.mockReset();
49
- onRunStepSpy.mockReset();
50
- });
51
- const setupCustomHandlers = () => ({
52
- [GraphEvents.TOOL_END]: new ToolEndHandler(),
53
- [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
54
- [GraphEvents.ON_RUN_STEP_COMPLETED]: {
55
- handle: (event, data) => {
56
- aggregateContent({
57
- event,
58
- data: data,
59
- });
60
- },
61
- },
62
- [GraphEvents.ON_RUN_STEP]: {
63
- handle: (event, data, metadata, graph) => {
64
- onRunStepSpy(event, data, metadata, graph);
65
- aggregateContent({ event, data: data });
66
- },
67
- },
68
- [GraphEvents.ON_RUN_STEP_DELTA]: {
69
- handle: (event, data) => {
70
- aggregateContent({ event, data: data });
71
- },
72
- },
73
- [GraphEvents.ON_MESSAGE_DELTA]: {
74
- handle: (event, data, metadata, graph) => {
75
- onMessageDeltaSpy(event, data, metadata, graph);
76
- aggregateContent({ event, data: data });
77
- },
78
- },
79
- [GraphEvents.ON_REASONING_DELTA]: {
80
- handle: (event, data) => {
81
- onReasoningDeltaSpy(event, data);
82
- },
83
- },
84
- [GraphEvents.TOOL_START]: {
85
- handle: (_event, _data, _metadata) => {
86
- // Handle tool start
87
- },
88
- },
89
- });
90
- test(`${capitalizeFirstLetter(provider)}: should handle tool calls with reasoning_content preservation`, async () => {
91
- const customHandlers = setupCustomHandlers();
92
- run = await Run.create({
93
- runId: 'moonshot-tool-test',
94
- graphConfig: {
95
- type: 'standard',
96
- llmConfig,
97
- tools: [new Calculator()],
98
- instructions: 'You are a helpful math assistant. Use the calculator tool to solve math problems.',
99
- },
100
- returnContent: true,
101
- skipCleanup: true,
102
- customHandlers,
103
- });
104
- const userMessage = 'What is 127 * 453?';
105
- conversationHistory.push(new HumanMessage(userMessage));
106
- const inputs = {
107
- messages: conversationHistory,
108
- };
109
- console.log('Starting Moonshot tool call test...');
110
- const finalContentParts = await run.processStream(inputs, testConfig);
111
- expect(finalContentParts).toBeDefined();
112
- console.log('Final content parts:', finalContentParts);
113
- const finalMessages = run.getRunMessages();
114
- expect(finalMessages).toBeDefined();
115
- expect(finalMessages?.length).toBeGreaterThan(0);
116
- const hasToolCall = finalMessages?.some((msg) => msg.getType() === 'ai' &&
117
- Array.isArray(msg.tool_calls) &&
118
- msg.tool_calls.length > 0);
119
- expect(hasToolCall).toBe(true);
120
- const hasToolResult = finalMessages?.some((msg) => msg.getType() === 'tool');
121
- expect(hasToolResult).toBe(true);
122
- console.log('Tool call test passed - reasoning_content was preserved');
123
- console.log('Final response:', finalMessages?.[finalMessages.length - 1]?.content);
124
- });
125
- test(`${capitalizeFirstLetter(provider)}: should handle multi-turn conversation with tools`, async () => {
126
- const customHandlers = setupCustomHandlers();
127
- run = await Run.create({
128
- runId: 'moonshot-multi-turn-test',
129
- graphConfig: {
130
- type: 'standard',
131
- llmConfig,
132
- tools: [new Calculator()],
133
- instructions: 'You are a helpful math assistant. Use the calculator tool when needed.',
134
- },
135
- returnContent: true,
136
- skipCleanup: true,
137
- customHandlers,
138
- });
139
- conversationHistory.push(new HumanMessage('What is 15 + 27?'));
140
- let finalContentParts = await run.processStream({ messages: conversationHistory }, testConfig);
141
- expect(finalContentParts).toBeDefined();
142
- let runMessages = run.getRunMessages();
143
- conversationHistory.push(...(runMessages ?? []));
144
- console.log('Turn 1 completed, conversation length:', conversationHistory.length);
145
- conversationHistory.push(new HumanMessage('Now multiply that result by 3'));
146
- run = await Run.create({
147
- runId: 'moonshot-multi-turn-test-2',
148
- graphConfig: {
149
- type: 'standard',
150
- llmConfig,
151
- tools: [new Calculator()],
152
- instructions: 'You are a helpful math assistant. Use the calculator tool when needed.',
153
- },
154
- returnContent: true,
155
- skipCleanup: true,
156
- customHandlers,
157
- });
158
- finalContentParts = await run.processStream({ messages: conversationHistory }, { ...testConfig, configurable: { thread_id: 'moonshot-test-2' } });
159
- expect(finalContentParts).toBeDefined();
160
- runMessages = run.getRunMessages();
161
- expect(runMessages).toBeDefined();
162
- console.log('Turn 2 completed');
163
- console.log('Final response:', runMessages?.[runMessages.length - 1]?.content);
164
- const textParts = finalContentParts?.filter((part) => part.type === ContentTypes.TEXT);
165
- expect(textParts?.length).toBeGreaterThan(0);
166
- });
167
- test(`${capitalizeFirstLetter(provider)}: should process simple message without tools`, async () => {
168
- const customHandlers = setupCustomHandlers();
169
- run = await Run.create({
170
- runId: 'moonshot-simple-test',
171
- graphConfig: {
172
- type: 'standard',
173
- llmConfig,
174
- tools: [],
175
- instructions: 'You are a friendly AI assistant.',
176
- },
177
- returnContent: true,
178
- skipCleanup: true,
179
- customHandlers,
180
- });
181
- const userMessage = 'Hello! How are you today?';
182
- conversationHistory.push(new HumanMessage(userMessage));
183
- const inputs = {
184
- messages: conversationHistory,
185
- };
186
- const finalContentParts = await run.processStream(inputs, testConfig);
187
- expect(finalContentParts).toBeDefined();
188
- const allTextParts = finalContentParts?.every((part) => part.type === ContentTypes.TEXT);
189
- expect(allTextParts).toBe(true);
190
- expect(collectedUsage.length).toBeGreaterThan(0);
191
- expect(collectedUsage[0].input_tokens).toBeGreaterThan(0);
192
- expect(collectedUsage[0].output_tokens).toBeGreaterThan(0);
193
- const finalMessages = run.getRunMessages();
194
- expect(finalMessages).toBeDefined();
195
- console.log(`${capitalizeFirstLetter(provider)} response:`, finalMessages?.[finalMessages.length - 1]?.content);
196
- });
197
- test(`${capitalizeFirstLetter(provider)}: should handle tool calls with disableStreaming`, async () => {
198
- const customHandlers = setupCustomHandlers();
199
- const nonStreamingLlmConfig = {
200
- ...llmConfig,
201
- disableStreaming: true,
202
- };
203
- run = await Run.create({
204
- runId: 'moonshot-non-streaming-tool-test',
205
- graphConfig: {
206
- type: 'standard',
207
- llmConfig: nonStreamingLlmConfig,
208
- tools: [new Calculator()],
209
- instructions: 'You are a helpful math assistant. Use the calculator tool to solve math problems.',
210
- },
211
- returnContent: true,
212
- skipCleanup: true,
213
- customHandlers,
214
- });
215
- const userMessage = 'What is 99 * 77?';
216
- conversationHistory.push(new HumanMessage(userMessage));
217
- const inputs = {
218
- messages: conversationHistory,
219
- };
220
- console.log('Starting Moonshot non-streaming tool call test...');
221
- const finalContentParts = await run.processStream(inputs, testConfig);
222
- expect(finalContentParts).toBeDefined();
223
- console.log('Final content parts (non-streaming):', finalContentParts);
224
- const finalMessages = run.getRunMessages();
225
- expect(finalMessages).toBeDefined();
226
- expect(finalMessages?.length).toBeGreaterThan(0);
227
- const hasToolCall = finalMessages?.some((msg) => msg.getType() === 'ai' &&
228
- Array.isArray(msg.tool_calls) &&
229
- msg.tool_calls.length > 0);
230
- expect(hasToolCall).toBe(true);
231
- const hasToolResult = finalMessages?.some((msg) => msg.getType() === 'tool');
232
- expect(hasToolResult).toBe(true);
233
- console.log('Non-streaming tool call test passed');
234
- console.log('Final response:', finalMessages?.[finalMessages.length - 1]?.content);
235
- });
236
- });
237
- //# sourceMappingURL=moonshot.simple.test.js.map