@illuma-ai/agents 1.1.20 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (246) hide show
  1. package/dist/cjs/graphs/Graph.cjs +12 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/llm/bedrock/index.cjs +14 -0
  6. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  7. package/dist/cjs/run.cjs +20 -9
  8. package/dist/cjs/run.cjs.map +1 -1
  9. package/dist/esm/graphs/Graph.mjs +12 -1
  10. package/dist/esm/graphs/Graph.mjs.map +1 -1
  11. package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
  12. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  13. package/dist/esm/llm/bedrock/index.mjs +14 -0
  14. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  15. package/dist/esm/run.mjs +20 -9
  16. package/dist/esm/run.mjs.map +1 -1
  17. package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
  18. package/package.json +1 -1
  19. package/src/graphs/Graph.ts +12 -1
  20. package/src/graphs/MultiAgentGraph.ts +105 -1
  21. package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
  22. package/src/llm/bedrock/index.ts +17 -0
  23. package/src/run.ts +20 -11
  24. package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
  25. package/src/agents/AgentContext.js +0 -782
  26. package/src/agents/AgentContext.test.js +0 -421
  27. package/src/agents/__tests__/AgentContext.test.js +0 -678
  28. package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
  29. package/src/common/enum.js +0 -192
  30. package/src/common/index.js +0 -3
  31. package/src/events.js +0 -166
  32. package/src/graphs/Graph.js +0 -1857
  33. package/src/graphs/MultiAgentGraph.js +0 -1092
  34. package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
  35. package/src/graphs/__tests__/structured-output.test.js +0 -144
  36. package/src/graphs/contextManagement.e2e.test.js +0 -718
  37. package/src/graphs/contextManagement.test.js +0 -485
  38. package/src/graphs/handoffValidation.test.js +0 -276
  39. package/src/graphs/index.js +0 -3
  40. package/src/index.js +0 -28
  41. package/src/instrumentation.js +0 -21
  42. package/src/llm/anthropic/index.js +0 -319
  43. package/src/llm/anthropic/types.js +0 -46
  44. package/src/llm/anthropic/utils/message_inputs.js +0 -627
  45. package/src/llm/anthropic/utils/message_outputs.js +0 -290
  46. package/src/llm/anthropic/utils/output_parsers.js +0 -89
  47. package/src/llm/anthropic/utils/tools.js +0 -25
  48. package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
  49. package/src/llm/bedrock/index.js +0 -303
  50. package/src/llm/bedrock/types.js +0 -2
  51. package/src/llm/bedrock/utils/index.js +0 -6
  52. package/src/llm/bedrock/utils/message_inputs.js +0 -463
  53. package/src/llm/bedrock/utils/message_outputs.js +0 -269
  54. package/src/llm/fake.js +0 -92
  55. package/src/llm/google/index.js +0 -215
  56. package/src/llm/google/types.js +0 -12
  57. package/src/llm/google/utils/common.js +0 -670
  58. package/src/llm/google/utils/tools.js +0 -111
  59. package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
  60. package/src/llm/openai/index.js +0 -1033
  61. package/src/llm/openai/types.js +0 -2
  62. package/src/llm/openai/utils/index.js +0 -756
  63. package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
  64. package/src/llm/openrouter/index.js +0 -261
  65. package/src/llm/openrouter/reasoning.test.js +0 -181
  66. package/src/llm/providers.js +0 -36
  67. package/src/llm/text.js +0 -65
  68. package/src/llm/vertexai/index.js +0 -402
  69. package/src/messages/__tests__/tools.test.js +0 -392
  70. package/src/messages/cache.js +0 -404
  71. package/src/messages/cache.test.js +0 -1167
  72. package/src/messages/content.js +0 -48
  73. package/src/messages/content.test.js +0 -314
  74. package/src/messages/core.js +0 -359
  75. package/src/messages/ensureThinkingBlock.test.js +0 -997
  76. package/src/messages/format.js +0 -973
  77. package/src/messages/formatAgentMessages.test.js +0 -2278
  78. package/src/messages/formatAgentMessages.tools.test.js +0 -362
  79. package/src/messages/formatMessage.test.js +0 -608
  80. package/src/messages/ids.js +0 -18
  81. package/src/messages/index.js +0 -9
  82. package/src/messages/labelContentByAgent.test.js +0 -725
  83. package/src/messages/prune.js +0 -438
  84. package/src/messages/reducer.js +0 -60
  85. package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
  86. package/src/messages/summarize.js +0 -146
  87. package/src/messages/summarize.test.js +0 -332
  88. package/src/messages/tools.js +0 -90
  89. package/src/mockStream.js +0 -81
  90. package/src/prompts/collab.js +0 -7
  91. package/src/prompts/index.js +0 -3
  92. package/src/prompts/taskmanager.js +0 -58
  93. package/src/run.js +0 -427
  94. package/src/schemas/index.js +0 -3
  95. package/src/schemas/schema-preparation.test.js +0 -370
  96. package/src/schemas/validate.js +0 -314
  97. package/src/schemas/validate.test.js +0 -264
  98. package/src/scripts/abort.js +0 -127
  99. package/src/scripts/ant_web_search.js +0 -130
  100. package/src/scripts/ant_web_search_edge_case.js +0 -133
  101. package/src/scripts/ant_web_search_error_edge_case.js +0 -119
  102. package/src/scripts/args.js +0 -41
  103. package/src/scripts/bedrock-cache-debug.js +0 -186
  104. package/src/scripts/bedrock-content-aggregation-test.js +0 -195
  105. package/src/scripts/bedrock-merge-test.js +0 -80
  106. package/src/scripts/bedrock-parallel-tools-test.js +0 -150
  107. package/src/scripts/caching.js +0 -106
  108. package/src/scripts/cli.js +0 -152
  109. package/src/scripts/cli2.js +0 -119
  110. package/src/scripts/cli3.js +0 -163
  111. package/src/scripts/cli4.js +0 -165
  112. package/src/scripts/cli5.js +0 -165
  113. package/src/scripts/code_exec.js +0 -171
  114. package/src/scripts/code_exec_files.js +0 -180
  115. package/src/scripts/code_exec_multi_session.js +0 -185
  116. package/src/scripts/code_exec_ptc.js +0 -265
  117. package/src/scripts/code_exec_session.js +0 -217
  118. package/src/scripts/code_exec_simple.js +0 -120
  119. package/src/scripts/content.js +0 -111
  120. package/src/scripts/empty_input.js +0 -125
  121. package/src/scripts/handoff-test.js +0 -96
  122. package/src/scripts/image.js +0 -138
  123. package/src/scripts/memory.js +0 -83
  124. package/src/scripts/multi-agent-chain.js +0 -271
  125. package/src/scripts/multi-agent-conditional.js +0 -185
  126. package/src/scripts/multi-agent-document-review-chain.js +0 -171
  127. package/src/scripts/multi-agent-hybrid-flow.js +0 -264
  128. package/src/scripts/multi-agent-parallel-start.js +0 -214
  129. package/src/scripts/multi-agent-parallel.js +0 -346
  130. package/src/scripts/multi-agent-sequence.js +0 -184
  131. package/src/scripts/multi-agent-supervisor.js +0 -324
  132. package/src/scripts/multi-agent-test.js +0 -147
  133. package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
  134. package/src/scripts/parallel-full-metadata-test.js +0 -176
  135. package/src/scripts/parallel-tools-test.js +0 -256
  136. package/src/scripts/programmatic_exec.js +0 -277
  137. package/src/scripts/programmatic_exec_agent.js +0 -168
  138. package/src/scripts/search.js +0 -118
  139. package/src/scripts/sequential-full-metadata-test.js +0 -143
  140. package/src/scripts/simple.js +0 -174
  141. package/src/scripts/single-agent-metadata-test.js +0 -152
  142. package/src/scripts/stream.js +0 -113
  143. package/src/scripts/test-custom-prompt-key.js +0 -132
  144. package/src/scripts/test-handoff-input.js +0 -143
  145. package/src/scripts/test-handoff-preamble.js +0 -227
  146. package/src/scripts/test-handoff-steering.js +0 -353
  147. package/src/scripts/test-multi-agent-list-handoff.js +0 -318
  148. package/src/scripts/test-parallel-agent-labeling.js +0 -253
  149. package/src/scripts/test-parallel-handoffs.js +0 -229
  150. package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
  151. package/src/scripts/test-thinking-handoff.js +0 -132
  152. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
  153. package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
  154. package/src/scripts/test-tools-before-handoff.js +0 -187
  155. package/src/scripts/test_code_api.js +0 -263
  156. package/src/scripts/thinking-bedrock.js +0 -128
  157. package/src/scripts/thinking-vertexai.js +0 -130
  158. package/src/scripts/thinking.js +0 -134
  159. package/src/scripts/tool_search.js +0 -114
  160. package/src/scripts/tools.js +0 -125
  161. package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
  162. package/src/specs/agent-handoffs.test.js +0 -924
  163. package/src/specs/anthropic.simple.test.js +0 -287
  164. package/src/specs/azure.simple.test.js +0 -381
  165. package/src/specs/cache.simple.test.js +0 -282
  166. package/src/specs/custom-event-await.test.js +0 -148
  167. package/src/specs/deepseek.simple.test.js +0 -189
  168. package/src/specs/emergency-prune.test.js +0 -308
  169. package/src/specs/moonshot.simple.test.js +0 -237
  170. package/src/specs/observability.integration.test.js +0 -1337
  171. package/src/specs/openai.simple.test.js +0 -233
  172. package/src/specs/openrouter.simple.test.js +0 -202
  173. package/src/specs/prune.test.js +0 -733
  174. package/src/specs/reasoning.test.js +0 -144
  175. package/src/specs/spec.utils.js +0 -4
  176. package/src/specs/thinking-handoff.test.js +0 -486
  177. package/src/specs/thinking-prune.test.js +0 -600
  178. package/src/specs/token-distribution-edge-case.test.js +0 -246
  179. package/src/specs/token-memoization.test.js +0 -32
  180. package/src/specs/tokens.test.js +0 -49
  181. package/src/specs/tool-error.test.js +0 -139
  182. package/src/splitStream.js +0 -204
  183. package/src/splitStream.test.js +0 -504
  184. package/src/stream.js +0 -650
  185. package/src/stream.test.js +0 -225
  186. package/src/test/mockTools.js +0 -340
  187. package/src/tools/BrowserTools.js +0 -245
  188. package/src/tools/Calculator.js +0 -38
  189. package/src/tools/Calculator.test.js +0 -225
  190. package/src/tools/CodeExecutor.js +0 -233
  191. package/src/tools/ProgrammaticToolCalling.js +0 -602
  192. package/src/tools/StreamingToolCallBuffer.js +0 -179
  193. package/src/tools/ToolNode.js +0 -930
  194. package/src/tools/ToolSearch.js +0 -904
  195. package/src/tools/__tests__/BrowserTools.test.js +0 -306
  196. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
  197. package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
  198. package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
  199. package/src/tools/__tests__/ToolApproval.test.js +0 -675
  200. package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
  201. package/src/tools/__tests__/ToolNode.session.test.js +0 -319
  202. package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
  203. package/src/tools/__tests__/ToolSearch.test.js +0 -812
  204. package/src/tools/__tests__/handlers.test.js +0 -799
  205. package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
  206. package/src/tools/handlers.js +0 -306
  207. package/src/tools/schema.js +0 -25
  208. package/src/tools/search/anthropic.js +0 -34
  209. package/src/tools/search/content.js +0 -116
  210. package/src/tools/search/content.test.js +0 -133
  211. package/src/tools/search/firecrawl.js +0 -173
  212. package/src/tools/search/format.js +0 -198
  213. package/src/tools/search/highlights.js +0 -241
  214. package/src/tools/search/index.js +0 -3
  215. package/src/tools/search/jina-reranker.test.js +0 -106
  216. package/src/tools/search/rerankers.js +0 -165
  217. package/src/tools/search/schema.js +0 -102
  218. package/src/tools/search/search.js +0 -561
  219. package/src/tools/search/serper-scraper.js +0 -126
  220. package/src/tools/search/test.js +0 -129
  221. package/src/tools/search/tool.js +0 -453
  222. package/src/tools/search/types.js +0 -2
  223. package/src/tools/search/utils.js +0 -59
  224. package/src/types/graph.js +0 -24
  225. package/src/types/graph.test.js +0 -192
  226. package/src/types/index.js +0 -7
  227. package/src/types/llm.js +0 -2
  228. package/src/types/messages.js +0 -2
  229. package/src/types/run.js +0 -2
  230. package/src/types/stream.js +0 -2
  231. package/src/types/tools.js +0 -2
  232. package/src/utils/contextAnalytics.js +0 -79
  233. package/src/utils/contextAnalytics.test.js +0 -166
  234. package/src/utils/events.js +0 -26
  235. package/src/utils/graph.js +0 -11
  236. package/src/utils/handlers.js +0 -65
  237. package/src/utils/index.js +0 -10
  238. package/src/utils/llm.js +0 -21
  239. package/src/utils/llmConfig.js +0 -205
  240. package/src/utils/logging.js +0 -37
  241. package/src/utils/misc.js +0 -51
  242. package/src/utils/run.js +0 -69
  243. package/src/utils/schema.js +0 -21
  244. package/src/utils/title.js +0 -119
  245. package/src/utils/tokens.js +0 -92
  246. package/src/utils/toonFormat.js +0 -379
@@ -1,119 +0,0 @@
1
- /* eslint-disable no-console */
2
- // src/scripts/ant_web_search_error_edge_case.ts
3
- import { config } from 'dotenv';
4
- config();
5
- import { HumanMessage } from '@langchain/core/messages';
6
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
7
- import { ToolEndHandler, ModelEndHandler } from '@/events';
8
- import { getArgs } from '@/scripts/args';
9
- import { Run } from '@/run';
10
- import { GraphEvents, Providers } from '@/common';
11
- import { getLLMConfig } from '@/utils/llmConfig';
12
- const conversationHistory = [];
13
- let _contentParts = [];
14
- async function testStandardStreaming() {
15
- const { userName, location, currentDate } = await getArgs();
16
- const { contentParts, aggregateContent } = createContentAggregator();
17
- _contentParts = contentParts;
18
- const customHandlers = {
19
- [GraphEvents.TOOL_END]: new ToolEndHandler(),
20
- [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
21
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
22
- [GraphEvents.ON_RUN_STEP_COMPLETED]: {
23
- handle: (event, data) => {
24
- console.log('====== ON_RUN_STEP_COMPLETED ======');
25
- // console.dir(data, { depth: null });
26
- aggregateContent({
27
- event,
28
- data: data,
29
- });
30
- },
31
- },
32
- [GraphEvents.ON_RUN_STEP]: {
33
- handle: (event, data) => {
34
- console.log('====== ON_RUN_STEP ======');
35
- console.dir(data, { depth: null });
36
- aggregateContent({ event, data: data });
37
- },
38
- },
39
- [GraphEvents.ON_RUN_STEP_DELTA]: {
40
- handle: (event, data) => {
41
- console.log('====== ON_RUN_STEP_DELTA ======');
42
- console.dir(data, { depth: null });
43
- aggregateContent({ event, data: data });
44
- },
45
- },
46
- [GraphEvents.ON_MESSAGE_DELTA]: {
47
- handle: (event, data) => {
48
- // console.log('====== ON_MESSAGE_DELTA ======');
49
- // console.dir(data, { depth: null });
50
- aggregateContent({ event, data: data });
51
- },
52
- },
53
- [GraphEvents.TOOL_START]: {
54
- handle: (_event, data, metadata) => {
55
- console.log('====== TOOL_START ======');
56
- // console.dir(data, { depth: null });
57
- },
58
- },
59
- };
60
- const llmConfig = getLLMConfig(Providers.ANTHROPIC);
61
- llmConfig.model = 'claude-haiku-4-5';
62
- const run = await Run.create({
63
- runId: 'test-run-id',
64
- graphConfig: {
65
- type: 'standard',
66
- llmConfig,
67
- tools: [
68
- {
69
- type: 'web_search_20250305',
70
- name: 'web_search',
71
- max_uses: 5,
72
- },
73
- ],
74
- instructions: 'You are a helpful AI research assistant.',
75
- },
76
- returnContent: true,
77
- skipCleanup: true,
78
- customHandlers,
79
- });
80
- const config = {
81
- configurable: {
82
- provider: Providers.ANTHROPIC,
83
- thread_id: 'conversation-num-1',
84
- },
85
- streamMode: 'values',
86
- version: 'v2',
87
- };
88
- console.log('Test: Web search with multiple searches (error edge case test)');
89
- // This prompt should trigger multiple web searches which may result in errors
90
- const userMessage = 'Do a deep deep research on CoreWeave. I need you to perform multiple searches before you generate the answer. The basis of our research should be to investigate if this is a solid long term investment.';
91
- conversationHistory.push(new HumanMessage(userMessage));
92
- const inputs = {
93
- messages: conversationHistory,
94
- };
95
- const finalContentParts = await run.processStream(inputs, config);
96
- const finalMessages = run.getRunMessages();
97
- if (finalMessages) {
98
- conversationHistory.push(...finalMessages);
99
- console.dir(conversationHistory, { depth: null });
100
- }
101
- // console.dir(finalContentParts, { depth: null });
102
- console.log('\n\n====================\n\n');
103
- // console.dir(contentParts, { depth: null });
104
- }
105
- process.on('unhandledRejection', (reason, promise) => {
106
- console.error('Unhandled Rejection at:', promise, 'reason:', reason);
107
- console.log('Content Parts:');
108
- console.dir(_contentParts, { depth: null });
109
- process.exit(1);
110
- });
111
- testStandardStreaming().catch((err) => {
112
- console.error(err);
113
- console.log('Conversation history:');
114
- console.dir(conversationHistory, { depth: null });
115
- console.log('Content Parts:');
116
- console.dir(_contentParts, { depth: null });
117
- process.exit(1);
118
- });
119
- //# sourceMappingURL=ant_web_search_error_edge_case.js.map
@@ -1,41 +0,0 @@
1
- // src/scripts/args.ts
2
- import yargs from 'yargs';
3
- import { hideBin } from 'yargs/helpers';
4
- import { llmConfigs } from '@/utils/llmConfig';
5
- import { Providers } from '@/common';
6
- export async function getArgs() {
7
- const argv = yargs(hideBin(process.argv))
8
- .option('name', {
9
- alias: 'n',
10
- type: 'string',
11
- description: 'User name',
12
- default: 'Jo',
13
- })
14
- .option('location', {
15
- alias: 'l',
16
- type: 'string',
17
- description: 'User location',
18
- default: 'New York',
19
- })
20
- .option('provider', {
21
- alias: 'p',
22
- type: 'string',
23
- description: 'LLM provider',
24
- choices: Object.keys(llmConfigs),
25
- default: Providers.OPENAI,
26
- })
27
- .help()
28
- .alias('help', 'h').argv;
29
- const args = await argv;
30
- const userName = args.name;
31
- const location = args.location;
32
- const provider = args.provider;
33
- const currentDate = new Date().toLocaleString();
34
- return {
35
- userName,
36
- location,
37
- provider,
38
- currentDate,
39
- };
40
- }
41
- //# sourceMappingURL=args.js.map
@@ -1,186 +0,0 @@
1
- /**
2
- * Debug script to investigate cache token omission in Bedrock responses.
3
- *
4
- * This script:
5
- * 1. Makes a streaming call to Bedrock and logs the raw metadata event
6
- * 2. Shows exactly what fields the AWS SDK returns in usage (including cache tokens)
7
- * 3. Shows what our handleConverseStreamMetadata produces vs what it should produce
8
- * 4. Makes a multi-turn call to trigger caching and verify cache tokens appear
9
- */
10
- import { config } from 'dotenv';
11
- config();
12
- import { concat } from '@langchain/core/utils/stream';
13
- import { HumanMessage } from '@langchain/core/messages';
14
- import { BedrockRuntimeClient, ConverseStreamCommand, } from '@aws-sdk/client-bedrock-runtime';
15
- import { CustomChatBedrockConverse } from '@/llm/bedrock';
16
- const region = process.env.BEDROCK_AWS_REGION ?? 'us-east-1';
17
- const credentials = {
18
- accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,
19
- secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,
20
- };
21
- const MODEL_ID = 'us.anthropic.claude-sonnet-4-5-20250929-v1:0';
22
- // A long system prompt to increase likelihood of cache usage
23
- // Bedrock requires minimum 1024 tokens for prompt caching to activate
24
- const SYSTEM_PROMPT = `You are an expert assistant. Here is a large context block to help trigger cache behavior:
25
-
26
- ${Array(200).fill('This is padding content to make the prompt large enough to trigger Bedrock prompt caching. The minimum requirement for Anthropic models on Bedrock is 1024 tokens in the cached prefix. We need to ensure this prompt is well above that threshold. ').join('')}
27
-
28
- When answering, be brief and direct.`;
29
- async function rawSdkCall() {
30
- console.log('='.repeat(60));
31
- console.log('TEST 1: Raw AWS SDK call - inspect metadata.usage directly');
32
- console.log('='.repeat(60));
33
- const client = new BedrockRuntimeClient({ region, credentials });
34
- // First call - should create cache
35
- // Use cachePoint block to explicitly enable prompt caching
36
- console.log('\n--- Call 1 (cache write expected) ---');
37
- const command1 = new ConverseStreamCommand({
38
- modelId: MODEL_ID,
39
- system: [{ text: SYSTEM_PROMPT }, { cachePoint: { type: 'default' } }],
40
- messages: [{ role: 'user', content: [{ text: 'What is 2+2?' }] }],
41
- inferenceConfig: { maxTokens: 100 },
42
- });
43
- const response1 = await client.send(command1);
44
- if (response1.stream) {
45
- for await (const event of response1.stream) {
46
- if (event.metadata != null) {
47
- console.log('\nRAW metadata event (Call 1):');
48
- console.dir(event.metadata, { depth: null });
49
- console.log('\nRAW metadata.usage:');
50
- console.dir(event.metadata.usage, { depth: null });
51
- console.log('\nSpecific cache fields:');
52
- console.log(' cacheReadInputTokens:', event.metadata.usage
53
- ?.cacheReadInputTokens);
54
- console.log(' cacheWriteInputTokens:', event.metadata.usage
55
- ?.cacheWriteInputTokens);
56
- }
57
- }
58
- }
59
- // Second call - should read from cache
60
- console.log('\n--- Call 2 (cache read expected) ---');
61
- const command2 = new ConverseStreamCommand({
62
- modelId: MODEL_ID,
63
- system: [{ text: SYSTEM_PROMPT }, { cachePoint: { type: 'default' } }],
64
- messages: [
65
- { role: 'user', content: [{ text: 'What is 2+2?' }] },
66
- { role: 'assistant', content: [{ text: '4' }] },
67
- { role: 'user', content: [{ text: 'And what is 3+3?' }] },
68
- ],
69
- inferenceConfig: { maxTokens: 100 },
70
- });
71
- const response2 = await client.send(command2);
72
- if (response2.stream) {
73
- for await (const event of response2.stream) {
74
- if (event.metadata != null) {
75
- console.log('\nRAW metadata event (Call 2):');
76
- console.dir(event.metadata, { depth: null });
77
- console.log('\nRAW metadata.usage:');
78
- console.dir(event.metadata.usage, { depth: null });
79
- console.log('\nSpecific cache fields:');
80
- console.log(' cacheReadInputTokens:', event.metadata.usage
81
- ?.cacheReadInputTokens);
82
- console.log(' cacheWriteInputTokens:', event.metadata.usage
83
- ?.cacheWriteInputTokens);
84
- }
85
- }
86
- }
87
- }
88
- async function wrapperStreamCallNoCachePoint() {
89
- console.log('\n' + '='.repeat(60));
90
- console.log('TEST 2: CustomChatBedrockConverse stream (NO cachePoint) - check usage_metadata');
91
- console.log('='.repeat(60));
92
- console.log('(Without cachePoint, Bedrock does NOT return cache tokens)');
93
- const model = new CustomChatBedrockConverse({
94
- model: MODEL_ID,
95
- region,
96
- credentials,
97
- maxTokens: 100,
98
- streaming: true,
99
- streamUsage: true,
100
- });
101
- console.log('\n--- Wrapper Call (no cachePoint) ---');
102
- const messages1 = [new HumanMessage(SYSTEM_PROMPT + '\n\nWhat is 2+2?')];
103
- let finalChunk1;
104
- for await (const chunk of await model.stream(messages1)) {
105
- finalChunk1 = finalChunk1 ? concat(finalChunk1, chunk) : chunk;
106
- }
107
- console.log('\nFinal usage_metadata:', JSON.stringify(finalChunk1.usage_metadata));
108
- console.log('(No cache tokens expected since no cachePoint block was sent)');
109
- }
110
- async function wrapperStreamCallWithCachePoint() {
111
- console.log('\n' + '='.repeat(60));
112
- console.log('TEST 3: Raw SDK with cachePoint -> verify handleConverseStreamMetadata extracts cache tokens');
113
- console.log('='.repeat(60));
114
- // We use the raw SDK with cachePoint to trigger caching, then verify
115
- // that our handleConverseStreamMetadata function properly extracts cache fields
116
- const { handleConverseStreamMetadata } = await import('@/llm/bedrock/utils/message_outputs');
117
- const client = new BedrockRuntimeClient({ region, credentials });
118
- // Call 1 - establish cache
119
- console.log('\n--- Call 1 (cache write) ---');
120
- const command1 = new ConverseStreamCommand({
121
- modelId: MODEL_ID,
122
- system: [{ text: SYSTEM_PROMPT }, { cachePoint: { type: 'default' } }],
123
- messages: [{ role: 'user', content: [{ text: 'What is 2+2?' }] }],
124
- inferenceConfig: { maxTokens: 100 },
125
- });
126
- const response1 = await client.send(command1);
127
- if (response1.stream) {
128
- for await (const event of response1.stream) {
129
- if (event.metadata != null) {
130
- console.log('Raw usage:', JSON.stringify(event.metadata.usage));
131
- // Test our handler
132
- const chunk = handleConverseStreamMetadata(event.metadata, {
133
- streamUsage: true,
134
- });
135
- const msg = chunk.message;
136
- console.log('handleConverseStreamMetadata output usage_metadata:', JSON.stringify(msg.usage_metadata));
137
- const hasDetails = msg.usage_metadata?.input_token_details != null;
138
- console.log(`Has input_token_details: ${hasDetails}`, hasDetails
139
- ? JSON.stringify(msg.usage_metadata.input_token_details)
140
- : '(MISSING - BUG!)');
141
- }
142
- }
143
- }
144
- // Call 2 - read from cache
145
- console.log('\n--- Call 2 (cache read) ---');
146
- const command2 = new ConverseStreamCommand({
147
- modelId: MODEL_ID,
148
- system: [{ text: SYSTEM_PROMPT }, { cachePoint: { type: 'default' } }],
149
- messages: [
150
- { role: 'user', content: [{ text: 'What is 2+2?' }] },
151
- { role: 'assistant', content: [{ text: '4' }] },
152
- { role: 'user', content: [{ text: 'What is 3+3?' }] },
153
- ],
154
- inferenceConfig: { maxTokens: 100 },
155
- });
156
- const response2 = await client.send(command2);
157
- if (response2.stream) {
158
- for await (const event of response2.stream) {
159
- if (event.metadata != null) {
160
- console.log('Raw usage:', JSON.stringify(event.metadata.usage));
161
- const chunk = handleConverseStreamMetadata(event.metadata, {
162
- streamUsage: true,
163
- });
164
- const msg = chunk.message;
165
- console.log('handleConverseStreamMetadata output usage_metadata:', JSON.stringify(msg.usage_metadata));
166
- const hasDetails = msg.usage_metadata?.input_token_details != null;
167
- console.log(`Has input_token_details: ${hasDetails}`, hasDetails
168
- ? JSON.stringify(msg.usage_metadata.input_token_details)
169
- : '(MISSING - BUG!)');
170
- }
171
- }
172
- }
173
- }
174
- async function main() {
175
- console.log('Bedrock Cache Token Debug Script');
176
- console.log(`Model: ${MODEL_ID}`);
177
- console.log(`Region: ${region}\n`);
178
- await rawSdkCall();
179
- await wrapperStreamCallNoCachePoint();
180
- await wrapperStreamCallWithCachePoint();
181
- }
182
- main().catch((err) => {
183
- console.error('Fatal error:', err);
184
- process.exit(1);
185
- });
186
- //# sourceMappingURL=bedrock-cache-debug.js.map
@@ -1,195 +0,0 @@
1
- import { config } from 'dotenv';
2
- config();
3
- import { HumanMessage } from '@langchain/core/messages';
4
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
5
- import { createCodeExecutionTool } from '@/tools/CodeExecutor';
6
- import { ToolEndHandler, ModelEndHandler } from '@/events';
7
- import { GraphEvents, ContentTypes, Providers } from '@/common';
8
- import { getLLMConfig } from '@/utils/llmConfig';
9
- import { Run } from '@/run';
10
- const conversationHistory = [];
11
- let _contentParts = [];
12
- const collectedUsage = [];
13
- async function testBedrockContentAggregation() {
14
- const instructions = 'You are a helpful AI assistant with coding capabilities. When answering questions, be thorough in your reasoning.';
15
- const { contentParts, aggregateContent } = createContentAggregator();
16
- _contentParts = contentParts;
17
- const customHandlers = {
18
- [GraphEvents.TOOL_END]: new ToolEndHandler(),
19
- [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
20
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
21
- [GraphEvents.ON_RUN_STEP_COMPLETED]: {
22
- handle: (event, data) => {
23
- const result = data.result;
24
- console.log(`[ON_RUN_STEP_COMPLETED] stepId=${result.id} index=${result.index} type=${result.type} tool=${result.tool_call?.name ?? 'n/a'}`);
25
- aggregateContent({
26
- event,
27
- data: data,
28
- });
29
- },
30
- },
31
- [GraphEvents.ON_RUN_STEP]: {
32
- handle: (event, data) => {
33
- const toolCalls = data.stepDetails.type === 'tool_calls' && data.stepDetails.tool_calls
34
- ? data.stepDetails.tool_calls
35
- .map((tc) => `${tc.name ?? '?'}(${tc.id ?? '?'})`)
36
- .join(', ')
37
- : 'none';
38
- console.log(`[ON_RUN_STEP] stepId=${data.id} index=${data.index} type=${data.type} stepIndex=${data.stepIndex} toolCalls=[${toolCalls}]`);
39
- aggregateContent({ event, data });
40
- },
41
- },
42
- [GraphEvents.ON_RUN_STEP_DELTA]: {
43
- handle: (event, data) => {
44
- const tcNames = data.delta.tool_calls
45
- ?.map((tc) => `${tc.name ?? '?'}(args=${(tc.args ?? '').substring(0, 30)}...)`)
46
- .join(', ') ?? 'none';
47
- console.log(`[ON_RUN_STEP_DELTA] stepId=${data.id} type=${data.delta.type} toolCalls=[${tcNames}]`);
48
- aggregateContent({ event, data });
49
- },
50
- },
51
- [GraphEvents.ON_MESSAGE_DELTA]: {
52
- handle: (event, data) => {
53
- const preview = Array.isArray(data.delta.content)
54
- ? data.delta.content
55
- .map((c) => `${c.type}:"${String(c.text ?? c.think ?? '').substring(0, 40)}"`)
56
- .join(', ')
57
- : String(data.delta.content).substring(0, 40);
58
- console.log(`[ON_MESSAGE_DELTA] stepId=${data.id} content=[${preview}]`);
59
- aggregateContent({ event, data });
60
- },
61
- },
62
- [GraphEvents.ON_REASONING_DELTA]: {
63
- handle: (event, data) => {
64
- const preview = Array.isArray(data.delta.content)
65
- ? data.delta.content
66
- .map((c) => `${c.type}:"${String(c.think ?? '').substring(0, 40)}"`)
67
- .join(', ')
68
- : '?';
69
- console.log(`[ON_REASONING_DELTA] stepId=${data.id} content=[${preview}]`);
70
- aggregateContent({ event, data });
71
- },
72
- },
73
- };
74
- const baseLlmConfig = getLLMConfig(Providers.BEDROCK);
75
- const llmConfig = {
76
- ...baseLlmConfig,
77
- model: 'global.anthropic.claude-opus-4-6-v1',
78
- maxTokens: 16000,
79
- additionalModelRequestFields: {
80
- thinking: { type: 'enabled', budget_tokens: 10000 },
81
- },
82
- };
83
- const run = await Run.create({
84
- runId: 'bedrock-content-aggregation-test',
85
- graphConfig: {
86
- instructions,
87
- type: 'standard',
88
- tools: [createCodeExecutionTool()],
89
- llmConfig,
90
- },
91
- returnContent: true,
92
- skipCleanup: true,
93
- customHandlers: customHandlers,
94
- });
95
- const streamConfig = {
96
- configurable: {
97
- thread_id: 'bedrock-content-aggregation-thread',
98
- },
99
- streamMode: 'values',
100
- version: 'v2',
101
- };
102
- const userMessage = `im testing edge cases with our code interpreter. i know we can persist files, but what happens when we put them in directories?`;
103
- conversationHistory.push(new HumanMessage(userMessage));
104
- console.log('Running Bedrock content aggregation test...\n');
105
- console.log(`Prompt: "${userMessage}"\n`);
106
- const inputs = { messages: [...conversationHistory] };
107
- await run.processStream(inputs, streamConfig);
108
- console.log('\n\n========== CONTENT PARTS ANALYSIS ==========\n');
109
- let hasEmptyToolCall = false;
110
- let hasReasoningOrderIssue = false;
111
- for (let i = 0; i < _contentParts.length; i++) {
112
- const part = _contentParts[i];
113
- if (!part) {
114
- console.log(` [${i}] undefined`);
115
- continue;
116
- }
117
- const partType = part.type;
118
- if (partType === ContentTypes.TOOL_CALL) {
119
- const tc = part.tool_call;
120
- if (!tc || !tc.name) {
121
- hasEmptyToolCall = true;
122
- console.log(` [${i}] TOOL_CALL *** EMPTY (no tool_call data) ***`);
123
- }
124
- else {
125
- const outputPreview = tc.output
126
- ? `output=${tc.output.substring(0, 80)}...`
127
- : 'no output';
128
- console.log(` [${i}] TOOL_CALL name=${tc.name} ${outputPreview}`);
129
- }
130
- }
131
- else if (partType === ContentTypes.THINK) {
132
- const think = part.think ?? '';
133
- console.log(` [${i}] THINK (${think.length} chars): "${think.substring(0, 80)}..."`);
134
- }
135
- else if (partType === ContentTypes.TEXT) {
136
- const text = part.text ?? '';
137
- console.log(` [${i}] TEXT (${text.length} chars): "${text.substring(0, 80)}..."`);
138
- }
139
- else {
140
- console.log(` [${i}] ${partType}`);
141
- }
142
- }
143
- /**
144
- * Check reasoning ordering within a single invocation cycle.
145
- * A tool_call resets the cycle — text before think across different
146
- * invocations (e.g., text from invocation 2, think from invocation 3) is valid.
147
- */
148
- let lastTextInCycle = null;
149
- for (let i = 0; i < _contentParts.length; i++) {
150
- const part = _contentParts[i];
151
- if (!part)
152
- continue;
153
- if (part.type === ContentTypes.TOOL_CALL) {
154
- lastTextInCycle = null;
155
- continue;
156
- }
157
- if (part.type === ContentTypes.TEXT) {
158
- lastTextInCycle = i;
159
- }
160
- else if (part.type === ContentTypes.THINK && lastTextInCycle !== null) {
161
- const prevText = _contentParts[lastTextInCycle];
162
- const thinkContent = part.think ?? '';
163
- if (prevText?.text &&
164
- prevText.text.trim().length > 5 &&
165
- thinkContent.length > 0) {
166
- hasReasoningOrderIssue = true;
167
- console.log(`\n *** ORDERING ISSUE (same invocation): TEXT at [${lastTextInCycle}] appears before THINK at [${i}]`);
168
- console.log(` Text ends with: "...${prevText.text.substring(prevText.text.length - 60)}"`);
169
- console.log(` Think starts with: "${thinkContent.substring(0, 60)}..."`);
170
- }
171
- }
172
- }
173
- console.log('\n========== SUMMARY ==========\n');
174
- console.log(`Total content parts: ${_contentParts.filter(Boolean).length}`);
175
- console.log(`Empty tool_call parts: ${hasEmptyToolCall ? 'YES (BUG)' : 'No'}`);
176
- console.log(`Reasoning order issues: ${hasReasoningOrderIssue ? 'YES (BUG)' : 'No'}`);
177
- console.log('\nFull contentParts dump:');
178
- console.dir(_contentParts, { depth: null });
179
- }
180
- process.on('unhandledRejection', (reason, promise) => {
181
- console.error('Unhandled Rejection at:', promise, 'reason:', reason);
182
- console.log('Content parts:');
183
- console.dir(_contentParts, { depth: null });
184
- process.exit(1);
185
- });
186
- process.on('uncaughtException', (err) => {
187
- console.error('Uncaught Exception:', err);
188
- });
189
- testBedrockContentAggregation().catch((err) => {
190
- console.error(err);
191
- console.log('Content parts:');
192
- console.dir(_contentParts, { depth: null });
193
- process.exit(1);
194
- });
195
- //# sourceMappingURL=bedrock-content-aggregation-test.js.map
@@ -1,80 +0,0 @@
1
- import { config } from 'dotenv';
2
- config();
3
- import { HumanMessage } from '@langchain/core/messages';
4
- import { concat } from '@langchain/core/utils/stream';
5
- import { CustomChatBedrockConverse } from '@/llm/bedrock';
6
- import { modifyDeltaProperties } from '@/messages/core';
7
- import { Providers } from '@/common';
8
/**
 * Debug script: streams a "thinking"-enabled request through Bedrock,
 * merges the streamed chunks with `concat`, and dumps the aggregated
 * content before and after `modifyDeltaProperties` so block ordering /
 * merge issues can be inspected by hand.
 *
 * Requires BEDROCK_AWS_REGION, BEDROCK_AWS_ACCESS_KEY_ID and
 * BEDROCK_AWS_SECRET_ACCESS_KEY in the environment.
 *
 * @returns {Promise<void>}
 * @throws {Error} if required AWS environment variables are missing, or
 *   if the stream completes without emitting any chunks.
 */
async function testBedrockMerge() {
  // Fail fast with a clear message instead of an opaque AWS SDK error
  // when credentials are absent.
  const requiredEnv = [
    'BEDROCK_AWS_REGION',
    'BEDROCK_AWS_ACCESS_KEY_ID',
    'BEDROCK_AWS_SECRET_ACCESS_KEY',
  ];
  const missing = requiredEnv.filter((name) => !process.env[name]);
  if (missing.length > 0) {
    throw new Error(`Missing required environment variables: ${missing.join(', ')}`);
  }
  const model = new CustomChatBedrockConverse({
    model: 'us.anthropic.claude-3-7-sonnet-20250219-v1:0',
    region: process.env.BEDROCK_AWS_REGION,
    credentials: {
      accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,
      secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,
    },
    maxTokens: 4000,
    streaming: true,
    streamUsage: true,
    additionalModelRequestFields: {
      thinking: { type: 'enabled', budget_tokens: 2000 },
    },
  });
  const messages = [new HumanMessage('What is 25 * 37? Think step by step.')];
  console.log('Streaming from Bedrock with thinking enabled...\n');
  const stream = await model.stream(messages);
  let finalChunk;
  let chunkCount = 0;
  let firstTextLogged = false;
  for await (const chunk of stream) {
    chunkCount++;
    const isArr = Array.isArray(chunk.content);
    const isStr = typeof chunk.content === 'string';
    const isTextStr = isStr && chunk.content.length > 0;
    // Log only the first non-empty string chunk so the output stays readable.
    if (!firstTextLogged && isTextStr) {
      console.log(`chunk ${chunkCount} (first text): contentType=string, value="${chunk.content}"`);
      console.log(' response_metadata:', JSON.stringify(chunk.response_metadata));
      firstTextLogged = true;
    }
    if (isArr) {
      const blocks = chunk.content;
      const info = blocks.map((b) => ({
        type: b.type,
        hasIndex: 'index' in b,
        index: b.index,
      }));
      console.log(`chunk ${chunkCount}: array content, blocks:`, info);
    }
    // Fold each delta into the running aggregate exactly as the runtime does.
    finalChunk = finalChunk ? concat(finalChunk, chunk) : chunk;
  }
  console.log(`Total chunks received: ${chunkCount}\n`);
  // Bug fix: previously `finalChunk.content` was dereferenced unguarded and
  // threw a TypeError when the stream yielded zero chunks; fail explicitly.
  if (finalChunk === undefined) {
    throw new Error('Stream completed without emitting any chunks');
  }
  console.log('=== RAW concat result (before modifyDeltaProperties) ===');
  console.log('content type:', typeof finalChunk.content);
  if (Array.isArray(finalChunk.content)) {
    console.log('content array length:', finalChunk.content.length);
    const types = finalChunk.content.map((b) => typeof b === 'object' && 'type' in b ? b.type : typeof b);
    // Histogram of block types (text / think / tool_call / ...).
    const typeCounts = types.reduce((acc, t) => {
      acc[t ?? ''] = (acc[t ?? ''] || 0) + 1;
      return acc;
    }, {});
    console.log('content block type counts:', typeCounts);
  }
  console.log('\ncontent:');
  console.dir(finalChunk.content, { depth: null });
  console.log('\n=== lc_kwargs.content ===');
  if (Array.isArray(finalChunk.lc_kwargs.content)) {
    console.log('lc_kwargs.content length:', finalChunk.lc_kwargs.content.length);
  }
  console.dir(finalChunk.lc_kwargs.content, { depth: null });
  const modified = modifyDeltaProperties(Providers.BEDROCK, finalChunk);
  console.log('\n=== After modifyDeltaProperties ===');
  console.log('content:');
  console.dir(modified.content, { depth: null });
  console.log('\nlc_kwargs.content:');
  console.dir(modified.lc_kwargs.content, { depth: null });
}
76
// Entry point: run the merge test and exit non-zero on any failure.
void (async () => {
  try {
    await testBedrockMerge();
  } catch (err) {
    console.error(err);
    process.exit(1);
  }
})();
//# sourceMappingURL=bedrock-merge-test.js.map