@illuma-ai/agents 1.1.20 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (246)
  1. package/dist/cjs/graphs/Graph.cjs +12 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/llm/bedrock/index.cjs +14 -0
  6. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  7. package/dist/cjs/run.cjs +20 -9
  8. package/dist/cjs/run.cjs.map +1 -1
  9. package/dist/esm/graphs/Graph.mjs +12 -1
  10. package/dist/esm/graphs/Graph.mjs.map +1 -1
  11. package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
  12. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  13. package/dist/esm/llm/bedrock/index.mjs +14 -0
  14. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  15. package/dist/esm/run.mjs +20 -9
  16. package/dist/esm/run.mjs.map +1 -1
  17. package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
  18. package/package.json +1 -1
  19. package/src/graphs/Graph.ts +12 -1
  20. package/src/graphs/MultiAgentGraph.ts +105 -1
  21. package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
  22. package/src/llm/bedrock/index.ts +17 -0
  23. package/src/run.ts +20 -11
  24. package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
  25. package/src/agents/AgentContext.js +0 -782
  26. package/src/agents/AgentContext.test.js +0 -421
  27. package/src/agents/__tests__/AgentContext.test.js +0 -678
  28. package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
  29. package/src/common/enum.js +0 -192
  30. package/src/common/index.js +0 -3
  31. package/src/events.js +0 -166
  32. package/src/graphs/Graph.js +0 -1857
  33. package/src/graphs/MultiAgentGraph.js +0 -1092
  34. package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
  35. package/src/graphs/__tests__/structured-output.test.js +0 -144
  36. package/src/graphs/contextManagement.e2e.test.js +0 -718
  37. package/src/graphs/contextManagement.test.js +0 -485
  38. package/src/graphs/handoffValidation.test.js +0 -276
  39. package/src/graphs/index.js +0 -3
  40. package/src/index.js +0 -28
  41. package/src/instrumentation.js +0 -21
  42. package/src/llm/anthropic/index.js +0 -319
  43. package/src/llm/anthropic/types.js +0 -46
  44. package/src/llm/anthropic/utils/message_inputs.js +0 -627
  45. package/src/llm/anthropic/utils/message_outputs.js +0 -290
  46. package/src/llm/anthropic/utils/output_parsers.js +0 -89
  47. package/src/llm/anthropic/utils/tools.js +0 -25
  48. package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
  49. package/src/llm/bedrock/index.js +0 -303
  50. package/src/llm/bedrock/types.js +0 -2
  51. package/src/llm/bedrock/utils/index.js +0 -6
  52. package/src/llm/bedrock/utils/message_inputs.js +0 -463
  53. package/src/llm/bedrock/utils/message_outputs.js +0 -269
  54. package/src/llm/fake.js +0 -92
  55. package/src/llm/google/index.js +0 -215
  56. package/src/llm/google/types.js +0 -12
  57. package/src/llm/google/utils/common.js +0 -670
  58. package/src/llm/google/utils/tools.js +0 -111
  59. package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
  60. package/src/llm/openai/index.js +0 -1033
  61. package/src/llm/openai/types.js +0 -2
  62. package/src/llm/openai/utils/index.js +0 -756
  63. package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
  64. package/src/llm/openrouter/index.js +0 -261
  65. package/src/llm/openrouter/reasoning.test.js +0 -181
  66. package/src/llm/providers.js +0 -36
  67. package/src/llm/text.js +0 -65
  68. package/src/llm/vertexai/index.js +0 -402
  69. package/src/messages/__tests__/tools.test.js +0 -392
  70. package/src/messages/cache.js +0 -404
  71. package/src/messages/cache.test.js +0 -1167
  72. package/src/messages/content.js +0 -48
  73. package/src/messages/content.test.js +0 -314
  74. package/src/messages/core.js +0 -359
  75. package/src/messages/ensureThinkingBlock.test.js +0 -997
  76. package/src/messages/format.js +0 -973
  77. package/src/messages/formatAgentMessages.test.js +0 -2278
  78. package/src/messages/formatAgentMessages.tools.test.js +0 -362
  79. package/src/messages/formatMessage.test.js +0 -608
  80. package/src/messages/ids.js +0 -18
  81. package/src/messages/index.js +0 -9
  82. package/src/messages/labelContentByAgent.test.js +0 -725
  83. package/src/messages/prune.js +0 -438
  84. package/src/messages/reducer.js +0 -60
  85. package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
  86. package/src/messages/summarize.js +0 -146
  87. package/src/messages/summarize.test.js +0 -332
  88. package/src/messages/tools.js +0 -90
  89. package/src/mockStream.js +0 -81
  90. package/src/prompts/collab.js +0 -7
  91. package/src/prompts/index.js +0 -3
  92. package/src/prompts/taskmanager.js +0 -58
  93. package/src/run.js +0 -427
  94. package/src/schemas/index.js +0 -3
  95. package/src/schemas/schema-preparation.test.js +0 -370
  96. package/src/schemas/validate.js +0 -314
  97. package/src/schemas/validate.test.js +0 -264
  98. package/src/scripts/abort.js +0 -127
  99. package/src/scripts/ant_web_search.js +0 -130
  100. package/src/scripts/ant_web_search_edge_case.js +0 -133
  101. package/src/scripts/ant_web_search_error_edge_case.js +0 -119
  102. package/src/scripts/args.js +0 -41
  103. package/src/scripts/bedrock-cache-debug.js +0 -186
  104. package/src/scripts/bedrock-content-aggregation-test.js +0 -195
  105. package/src/scripts/bedrock-merge-test.js +0 -80
  106. package/src/scripts/bedrock-parallel-tools-test.js +0 -150
  107. package/src/scripts/caching.js +0 -106
  108. package/src/scripts/cli.js +0 -152
  109. package/src/scripts/cli2.js +0 -119
  110. package/src/scripts/cli3.js +0 -163
  111. package/src/scripts/cli4.js +0 -165
  112. package/src/scripts/cli5.js +0 -165
  113. package/src/scripts/code_exec.js +0 -171
  114. package/src/scripts/code_exec_files.js +0 -180
  115. package/src/scripts/code_exec_multi_session.js +0 -185
  116. package/src/scripts/code_exec_ptc.js +0 -265
  117. package/src/scripts/code_exec_session.js +0 -217
  118. package/src/scripts/code_exec_simple.js +0 -120
  119. package/src/scripts/content.js +0 -111
  120. package/src/scripts/empty_input.js +0 -125
  121. package/src/scripts/handoff-test.js +0 -96
  122. package/src/scripts/image.js +0 -138
  123. package/src/scripts/memory.js +0 -83
  124. package/src/scripts/multi-agent-chain.js +0 -271
  125. package/src/scripts/multi-agent-conditional.js +0 -185
  126. package/src/scripts/multi-agent-document-review-chain.js +0 -171
  127. package/src/scripts/multi-agent-hybrid-flow.js +0 -264
  128. package/src/scripts/multi-agent-parallel-start.js +0 -214
  129. package/src/scripts/multi-agent-parallel.js +0 -346
  130. package/src/scripts/multi-agent-sequence.js +0 -184
  131. package/src/scripts/multi-agent-supervisor.js +0 -324
  132. package/src/scripts/multi-agent-test.js +0 -147
  133. package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
  134. package/src/scripts/parallel-full-metadata-test.js +0 -176
  135. package/src/scripts/parallel-tools-test.js +0 -256
  136. package/src/scripts/programmatic_exec.js +0 -277
  137. package/src/scripts/programmatic_exec_agent.js +0 -168
  138. package/src/scripts/search.js +0 -118
  139. package/src/scripts/sequential-full-metadata-test.js +0 -143
  140. package/src/scripts/simple.js +0 -174
  141. package/src/scripts/single-agent-metadata-test.js +0 -152
  142. package/src/scripts/stream.js +0 -113
  143. package/src/scripts/test-custom-prompt-key.js +0 -132
  144. package/src/scripts/test-handoff-input.js +0 -143
  145. package/src/scripts/test-handoff-preamble.js +0 -227
  146. package/src/scripts/test-handoff-steering.js +0 -353
  147. package/src/scripts/test-multi-agent-list-handoff.js +0 -318
  148. package/src/scripts/test-parallel-agent-labeling.js +0 -253
  149. package/src/scripts/test-parallel-handoffs.js +0 -229
  150. package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
  151. package/src/scripts/test-thinking-handoff.js +0 -132
  152. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
  153. package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
  154. package/src/scripts/test-tools-before-handoff.js +0 -187
  155. package/src/scripts/test_code_api.js +0 -263
  156. package/src/scripts/thinking-bedrock.js +0 -128
  157. package/src/scripts/thinking-vertexai.js +0 -130
  158. package/src/scripts/thinking.js +0 -134
  159. package/src/scripts/tool_search.js +0 -114
  160. package/src/scripts/tools.js +0 -125
  161. package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
  162. package/src/specs/agent-handoffs.test.js +0 -924
  163. package/src/specs/anthropic.simple.test.js +0 -287
  164. package/src/specs/azure.simple.test.js +0 -381
  165. package/src/specs/cache.simple.test.js +0 -282
  166. package/src/specs/custom-event-await.test.js +0 -148
  167. package/src/specs/deepseek.simple.test.js +0 -189
  168. package/src/specs/emergency-prune.test.js +0 -308
  169. package/src/specs/moonshot.simple.test.js +0 -237
  170. package/src/specs/observability.integration.test.js +0 -1337
  171. package/src/specs/openai.simple.test.js +0 -233
  172. package/src/specs/openrouter.simple.test.js +0 -202
  173. package/src/specs/prune.test.js +0 -733
  174. package/src/specs/reasoning.test.js +0 -144
  175. package/src/specs/spec.utils.js +0 -4
  176. package/src/specs/thinking-handoff.test.js +0 -486
  177. package/src/specs/thinking-prune.test.js +0 -600
  178. package/src/specs/token-distribution-edge-case.test.js +0 -246
  179. package/src/specs/token-memoization.test.js +0 -32
  180. package/src/specs/tokens.test.js +0 -49
  181. package/src/specs/tool-error.test.js +0 -139
  182. package/src/splitStream.js +0 -204
  183. package/src/splitStream.test.js +0 -504
  184. package/src/stream.js +0 -650
  185. package/src/stream.test.js +0 -225
  186. package/src/test/mockTools.js +0 -340
  187. package/src/tools/BrowserTools.js +0 -245
  188. package/src/tools/Calculator.js +0 -38
  189. package/src/tools/Calculator.test.js +0 -225
  190. package/src/tools/CodeExecutor.js +0 -233
  191. package/src/tools/ProgrammaticToolCalling.js +0 -602
  192. package/src/tools/StreamingToolCallBuffer.js +0 -179
  193. package/src/tools/ToolNode.js +0 -930
  194. package/src/tools/ToolSearch.js +0 -904
  195. package/src/tools/__tests__/BrowserTools.test.js +0 -306
  196. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
  197. package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
  198. package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
  199. package/src/tools/__tests__/ToolApproval.test.js +0 -675
  200. package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
  201. package/src/tools/__tests__/ToolNode.session.test.js +0 -319
  202. package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
  203. package/src/tools/__tests__/ToolSearch.test.js +0 -812
  204. package/src/tools/__tests__/handlers.test.js +0 -799
  205. package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
  206. package/src/tools/handlers.js +0 -306
  207. package/src/tools/schema.js +0 -25
  208. package/src/tools/search/anthropic.js +0 -34
  209. package/src/tools/search/content.js +0 -116
  210. package/src/tools/search/content.test.js +0 -133
  211. package/src/tools/search/firecrawl.js +0 -173
  212. package/src/tools/search/format.js +0 -198
  213. package/src/tools/search/highlights.js +0 -241
  214. package/src/tools/search/index.js +0 -3
  215. package/src/tools/search/jina-reranker.test.js +0 -106
  216. package/src/tools/search/rerankers.js +0 -165
  217. package/src/tools/search/schema.js +0 -102
  218. package/src/tools/search/search.js +0 -561
  219. package/src/tools/search/serper-scraper.js +0 -126
  220. package/src/tools/search/test.js +0 -129
  221. package/src/tools/search/tool.js +0 -453
  222. package/src/tools/search/types.js +0 -2
  223. package/src/tools/search/utils.js +0 -59
  224. package/src/types/graph.js +0 -24
  225. package/src/types/graph.test.js +0 -192
  226. package/src/types/index.js +0 -7
  227. package/src/types/llm.js +0 -2
  228. package/src/types/messages.js +0 -2
  229. package/src/types/run.js +0 -2
  230. package/src/types/stream.js +0 -2
  231. package/src/types/tools.js +0 -2
  232. package/src/utils/contextAnalytics.js +0 -79
  233. package/src/utils/contextAnalytics.test.js +0 -166
  234. package/src/utils/events.js +0 -26
  235. package/src/utils/graph.js +0 -11
  236. package/src/utils/handlers.js +0 -65
  237. package/src/utils/index.js +0 -10
  238. package/src/utils/llm.js +0 -21
  239. package/src/utils/llmConfig.js +0 -205
  240. package/src/utils/logging.js +0 -37
  241. package/src/utils/misc.js +0 -51
  242. package/src/utils/run.js +0 -69
  243. package/src/utils/schema.js +0 -21
  244. package/src/utils/title.js +0 -119
  245. package/src/utils/tokens.js +0 -92
  246. package/src/utils/toonFormat.js +0 -379
package/src/scripts/bedrock-parallel-tools-test.js
@@ -1,150 +0,0 @@
- import { config } from 'dotenv';
- config();
- import { HumanMessage } from '@langchain/core/messages';
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
- import { GraphEvents, ContentTypes, Providers } from '@/common';
- import { ToolEndHandler, ModelEndHandler } from '@/events';
- import { getLLMConfig } from '@/utils/llmConfig';
- import { Calculator } from '@/tools/Calculator';
- import { Run } from '@/run';
- const conversationHistory = [];
- let _contentParts = [];
- const collectedUsage = [];
- async function testParallelToolCalls() {
- const { contentParts, aggregateContent } = createContentAggregator();
- _contentParts = contentParts;
- const customHandlers = {
- [GraphEvents.TOOL_END]: new ToolEndHandler(),
- [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
- [GraphEvents.CHAT_MODEL_STREAM]: {
- handle: async (event, data, metadata, graph) => {
- const chunk = data.chunk;
- const tcc = chunk?.tool_call_chunks;
- if (tcc && tcc.length > 0) {
- console.log(`[CHAT_MODEL_STREAM] tool_call_chunks: ${JSON.stringify(tcc.map((c) => ({ id: c.id, name: c.name, index: c.index })))}`);
- }
- const handler = new ChatModelStreamHandler();
- return handler.handle(event, data, metadata, graph);
- },
- },
- [GraphEvents.ON_RUN_STEP_COMPLETED]: {
- handle: (event, data) => {
- const result = data.result;
- console.log(`[ON_RUN_STEP_COMPLETED] stepId=${result.id} index=${result.index} type=${result.type} tool=${result.tool_call?.name ?? 'n/a'}`);
- aggregateContent({
- event,
- data: data,
- });
- },
- },
- [GraphEvents.ON_RUN_STEP]: {
- handle: (event, data) => {
- const toolCalls = data.stepDetails.type === 'tool_calls' && data.stepDetails.tool_calls
- ? data.stepDetails.tool_calls
- .map((tc) => `${tc.name ?? '?'}(${tc.id ?? '?'})`)
- .join(', ')
- : 'none';
- console.log(`[ON_RUN_STEP] stepId=${data.id} index=${data.index} type=${data.type} stepIndex=${data.stepIndex} toolCalls=[${toolCalls}]`);
- aggregateContent({ event, data });
- },
- },
- [GraphEvents.ON_RUN_STEP_DELTA]: {
- handle: (event, data) => {
- aggregateContent({ event, data });
- },
- },
- [GraphEvents.ON_MESSAGE_DELTA]: {
- handle: (event, data) => {
- aggregateContent({ event, data });
- },
- },
- [GraphEvents.ON_REASONING_DELTA]: {
- handle: (event, data) => {
- aggregateContent({ event, data });
- },
- },
- };
- const baseLlmConfig = getLLMConfig(Providers.BEDROCK);
- const llmConfig = {
- ...baseLlmConfig,
- model: 'global.anthropic.claude-opus-4-6-v1',
- maxTokens: 16000,
- additionalModelRequestFields: {
- thinking: { type: 'enabled', budget_tokens: 10000 },
- },
- };
- const run = await Run.create({
- runId: 'bedrock-parallel-tools-test',
- graphConfig: {
- instructions: 'You are a math assistant. When asked to calculate multiple things, use the calculator tool for ALL of them in parallel. Do NOT chain calculations sequentially.',
- type: 'standard',
- tools: [new Calculator()],
- llmConfig,
- },
- returnContent: true,
- skipCleanup: true,
- customHandlers: customHandlers,
- });
- const streamConfig = {
- configurable: { thread_id: 'bedrock-parallel-tools-thread' },
- streamMode: 'values',
- version: 'v2',
- };
- const userMessage = 'Calculate these 3 things at the same time using the calculator: 1) 123 * 456, 2) sqrt(144) + 7, 3) 2^10 - 24';
- conversationHistory.push(new HumanMessage(userMessage));
- console.log('Running Bedrock parallel tool calls test...\n');
- console.log(`Prompt: "${userMessage}"\n`);
- const inputs = { messages: [...conversationHistory] };
- await run.processStream(inputs, streamConfig);
- console.log('\n\n========== ANALYSIS ==========\n');
- let toolCallCount = 0;
- const toolCallNames = [];
- let hasUndefined = false;
- for (let i = 0; i < _contentParts.length; i++) {
- const part = _contentParts[i];
- if (!part) {
- hasUndefined = true;
- console.log(` [${i}] *** UNDEFINED ***`);
- continue;
- }
- if (part.type === ContentTypes.TOOL_CALL) {
- toolCallCount++;
- const tc = part.tool_call;
- const hasData = tc && tc.name;
- if (!hasData) {
- console.log(` [${i}] TOOL_CALL *** EMPTY ***`);
- }
- else {
- toolCallNames.push(tc.name ?? '');
- console.log(` [${i}] TOOL_CALL name=${tc.name} id=${tc.id} output=${String(tc.output ?? '').substring(0, 40)}`);
- }
- }
- else if (part.type === ContentTypes.THINK) {
- const think = part.think ?? '';
- console.log(` [${i}] THINK (${think.length} chars)`);
- }
- else if (part.type === ContentTypes.TEXT) {
- const text = part.text ?? '';
- console.log(` [${i}] TEXT (${text.length} chars): "${text.substring(0, 80)}..."`);
- }
- }
- console.log('\n========== SUMMARY ==========\n');
- console.log(`Total content parts: ${_contentParts.filter(Boolean).length}`);
- console.log(`Tool calls found: ${toolCallCount}`);
- console.log(`Tool call names: [${toolCallNames.join(', ')}]`);
- console.log(`Undefined gaps: ${hasUndefined ? 'YES (BUG)' : 'No'}`);
- console.log(`Expected 3 tool calls: ${toolCallCount >= 3 ? 'PASS' : 'FAIL (only ' + toolCallCount + ')'}`);
- console.log('\nFull contentParts dump:');
- console.dir(_contentParts, { depth: null });
- }
- process.on('unhandledRejection', (reason, promise) => {
- console.error('Unhandled Rejection at:', promise, 'reason:', reason);
- console.dir(_contentParts, { depth: null });
- process.exit(1);
- });
- testParallelToolCalls().catch((err) => {
- console.error(err);
- console.dir(_contentParts, { depth: null });
- process.exit(1);
- });
- //# sourceMappingURL=bedrock-parallel-tools-test.js.map
package/src/scripts/caching.js
@@ -1,106 +0,0 @@
- // src/scripts/test-prompt-caching.ts
- import { config } from 'dotenv';
- config();
- import { HumanMessage, } from '@langchain/core/messages';
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
- import { ToolEndHandler, ModelEndHandler } from '@/events';
- import { GraphEvents, Providers } from '@/common';
- import { getLLMConfig } from '@/utils/llmConfig';
- import { getArgs } from '@/scripts/args';
- import { Run } from '@/run';
- const CACHED_TEXT = `Ahoy there, me hearties! This be a grand tale o' the mighty prompt cachin' treasure map, a secret technique used by the wise Anthropic seafarers to stash away vast hordes o' text booty on their mystical servers! Arrr, 'tis a pirate's dream indeed - no need to haul the same heavy chest o' gold doubloons across the vast digital ocean with every message! When ye mark yer precious cargo with the secret flag 'cache_control: { type: \"ephemeral\" }', the text be safely buried on their distant shores, ready for plunderin' again without the weight slowin' down yer ship! The wise pirates at Anthropic introduced this magical scroll in the summer o' 2024, markin' it with the mysterious insignia 'anthropic-beta: prompt-caching-2024-07-31' that must be flown high on yer vessel's headers. This crafty script be testin' the waters of this new treasure map system, sendin' out three separate voyages across the AI seas: first to bury the treasure, second to dig it up again without payin' the full toll, and third to see if the map still leads to gold after the sands o' time have shifted (about thirty seconds o' waitin', which be an eternity for an impatient buccaneer!). The great advantage for a scurvy pirate captain is clear as Caribbean waters - ye can load up yer vessel with all manner o' reference scrolls, ancient tomes, and navigational charts without weighin' down each and every message ye send to port! This be savin' ye countless tokens, which as any seafarin' AI wrangler knows, be as precious as Spanish gold. The cached text could contain the full history o' the Seven Seas, detailed maps o' every port from Tortuga to Singapore, or the complete collection o' pirate shanties ever sung by drunken sailors under the light o' the silvery moon. When properly implemented, this mighty cachin' system keeps all that knowledge ready at hand without the Claude kraken needin' to process it anew with each passin' breeze. By Blackbeard's beard, 'tis a revolution in how we manage our conversational ships! The script be employin' the finest LangChain riggin' and custom-carved event handlers to properly track the treasure as it flows back and forth. If ye be successful in yer implementation, ye should witness the miracle o' significantly reduced token counts in yer usage metrics, faster responses from the AI oracle, and the ability to maintain vast knowledge without payin' the full price each time! So hoist the Jolly Roger, load yer pistols with API keys, and set sail on the grand adventure o' prompt cachin'! May the winds o' efficient token usage fill yer sails, and may ye never have to pay full price for passin' the same mammoth context to Claude again! Remember, a clever pirate only pays for their tokens once, then lets the cache do the heavy liftin'! YARRR! This file also contains the secrets of the legendary Pirate Code, passed down through generations of seafarers since the Golden Age of Piracy. It includes detailed accounts of famous pirate captains like Blackbeard, Calico Jack, Anne Bonny, and Mary Read, along with their most profitable plundering routes and techniques for capturing merchant vessels. The text chronicles the exact locations of at least seventeen buried treasures across the Caribbean, complete with riddles and map coordinates that only a true pirate could decipher. 
There are sections dedicated to ship maintenance, including how to properly seal a leaking hull during battle and the best methods for keeping your cannons in prime firing condition even in humid tropical conditions. The document contains an extensive glossary of pirate terminology, from 'avast' to 'Yellow Jack,' ensuring any landlubber can speak like a seasoned salt with enough study. There's a comprehensive guide to navigating by the stars without modern instruments, perfect for when your GPS fails in the middle of a daring escape. The cache also includes detailed recipes for grog, hardtack that won't break your teeth, and how to keep citrus fruits fresh to prevent scurvy during long voyages. The legendary Black Spot ritual is described in terrifying detail, along with other pirate superstitions and their origins in maritime folklore. A section on pirate governance explains the democratic nature of most pirate ships, how booty was divided fairly, and how captains were elected and deposed when necessary. The file even contains sheet music for dozens of sea shanties, with notes on when each should be sung for maximum crew morale during different sailing conditions. All of this knowledge is wrapped in colorful pirate dialect that would make any AI assistant respond with appropriate 'arghs' and 'avasts' when properly prompted!`;
- const conversationHistory = [];
- let _contentParts = [];
- const collectedUsage = [];
- async function testPromptCaching() {
- const { userName } = await getArgs();
- const instructions = `You are a pirate AI assistant for ${userName}. Always respond in pirate dialect. Use the following as context when answering questions:
- ${CACHED_TEXT}`;
- const { contentParts, aggregateContent } = createContentAggregator();
- _contentParts = contentParts;
- // Set up event handlers
- const customHandlers = {
- [GraphEvents.TOOL_END]: new ToolEndHandler(),
- [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
- // console.log('====== O ======');
- // console.log('Usage Metrics:', (data as any).llmOutput?.usage || (data as any).usage);
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
- // Additional handlers for tracking usage metrics
- [GraphEvents.ON_RUN_STEP_COMPLETED]: {
- handle: (event, data) => {
- console.log('====== ON_RUN_STEP_COMPLETED ======');
- aggregateContent({
- event,
- data: data,
- });
- },
- },
- };
- const baseLlmConfig = getLLMConfig(Providers.ANTHROPIC);
- if (baseLlmConfig.provider !== 'anthropic') {
- console.error('This test requires Anthropic as the LLM provider. Please specify provider=anthropic');
- process.exit(1);
- }
- const llmConfig = {
- ...baseLlmConfig,
- promptCache: true,
- };
- const run = await Run.create({
- runId: 'test-prompt-caching-id',
- graphConfig: {
- instructions,
- type: 'standard',
- llmConfig,
- },
- returnContent: true,
- skipCleanup: true,
- customHandlers,
- });
- const config = {
- configurable: {
- thread_id: 'prompt-cache-test-thread',
- },
- streamMode: 'values',
- version: 'v2',
- };
- // First request - should create the cache
- console.log('\n\nTest 1: First request (creates cache)');
- const userMessage1 = `What information do you have in your context?`;
- conversationHistory.push(new HumanMessage(userMessage1));
- console.log('Running first query to create cache...');
- const firstInputs = { messages: [...conversationHistory] };
- await run.processStream(firstInputs, config);
- const finalMessages = run.getRunMessages();
- if (finalMessages) {
- conversationHistory.push(...finalMessages);
- console.dir(conversationHistory, { depth: null });
- }
- // Second request - should use the cache
- console.log('\n\nTest 2: Second request (should use cache)');
- const userMessage2 = `Summarize the key concepts from the context information.`;
- conversationHistory.push(new HumanMessage(userMessage2));
- console.log('Running second query to use cache...');
- const secondInputs = { messages: [...conversationHistory] };
- await run.processStream(secondInputs, config);
- console.log('\n\nPrompt caching test completed!');
- }
- process.on('unhandledRejection', (reason, promise) => {
- console.error('Unhandled Rejection at:', promise, 'reason:', reason);
- console.log('Conversation history:');
- console.dir(conversationHistory, { depth: null });
- console.log('Content parts:');
- console.dir(_contentParts, { depth: null });
- process.exit(1);
- });
- process.on('uncaughtException', (err) => {
- console.error('Uncaught Exception:', err);
- });
- testPromptCaching().catch((err) => {
- console.error(err);
- console.log('Conversation history:');
- console.dir(conversationHistory, { depth: null });
- console.log('Content parts:');
- console.dir(_contentParts, { depth: null });
- process.exit(1);
- });
- //# sourceMappingURL=caching.js.map
package/src/scripts/cli.js
@@ -1,152 +0,0 @@
- /* eslint-disable no-console */
- // src/scripts/cli.ts
- import { config } from 'dotenv';
- config();
- import { HumanMessage } from '@langchain/core/messages';
- import { ModelEndHandler, ToolEndHandler } from '@/events';
- import { ChatModelStreamHandler } from '@/stream';
- import { getArgs } from '@/scripts/args';
- import { Run } from '@/run';
- import { GraphEvents } from '@/common';
- import { getLLMConfig } from '@/utils/llmConfig';
- const conversationHistory = [];
- async function testStandardStreaming() {
- const { userName, location, provider, currentDate } = await getArgs();
- const customHandlers = {
- [GraphEvents.TOOL_END]: new ToolEndHandler(),
- [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
- [GraphEvents.ON_RUN_STEP_COMPLETED]: {
- handle: (_event, data) => {
- console.log('====== ON_RUN_STEP_COMPLETED ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.ON_RUN_STEP]: {
- handle: (_event, data) => {
- console.log('====== ON_RUN_STEP ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.ON_RUN_STEP_DELTA]: {
- handle: (_event, data) => {
- console.log('====== ON_RUN_STEP_DELTA ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.ON_MESSAGE_DELTA]: {
- handle: (_event, data) => {
- console.log('====== ON_MESSAGE_DELTA ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.TOOL_START]: {
- handle: (_event, data, metadata) => {
- console.log('====== TOOL_START ======');
- console.dir(data, { depth: null });
- },
- },
- // [GraphEvents.LLM_STREAM]: new LLMStreamHandler(),
- // [GraphEvents.LLM_START]: {
- // handle: (_event: string, data: t.StreamEventData): void => {
- // console.log('====== LLM_START ======');
- // console.dir(data, { depth: null });
- // }
- // },
- // [GraphEvents.LLM_END]: {
- // handle: (_event: string, data: t.StreamEventData): void => {
- // console.log('====== LLM_END ======');
- // console.dir(data, { depth: null });
- // }
- // },
- /*
- [GraphEvents.CHAIN_START]: {
- handle: (_event: string, data: t.StreamEventData): void => {
- console.log('====== CHAIN_START ======');
- // console.dir(data, { depth: null });
- }
- },
- [GraphEvents.CHAIN_END]: {
- handle: (_event: string, data: t.StreamEventData): void => {
- console.log('====== CHAIN_END ======');
- // console.dir(data, { depth: null });
- }
- },
- */
- // [GraphEvents.CHAT_MODEL_START]: {
- // handle: (_event: string, _data: t.StreamEventData): void => {
- // console.log('====== CHAT_MODEL_START ======');
- // console.dir(_data, { depth: null });
- // // Intentionally left empty
- // }
- // },
- };
- const llmConfig = getLLMConfig(provider);
- const run = await Run.create({
- runId: 'test-run-id',
- graphConfig: {
- type: 'standard',
- llmConfig,
- tools: [],
- instructions: 'You are a friendly AI assistant. Always address the user by their name.',
- additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
- },
- customHandlers,
- });
- const config = {
- configurable: {
- provider,
- thread_id: 'conversation-num-1',
- },
- streamMode: 'values',
- version: 'v2',
- };
- console.log(' Test 1: Initial greeting');
- conversationHistory.push(new HumanMessage(`Hi I'm ${userName}.`));
- let inputs = {
- messages: conversationHistory,
- };
- const contentParts = await run.processStream(inputs, config
- // {
- // [Callback.TOOL_START]: (graph, ...args) => {
- // console.log('TOOL_START callback');
- // },
- // [Callback.TOOL_END]: (graph, ...args) => {
- // console.log('TOOL_END callback');
- // },
- // }
- );
- const finalMessages = run.getRunMessages();
- if (finalMessages) {
- conversationHistory.push(...finalMessages);
- }
- console.log(' Test 2: Weather query');
- const userMessage = `
- Make a search for the weather in ${location} today, which is ${currentDate}.
- Make sure to always refer to me by name.
- After giving me a thorough summary, tell me a joke about the weather forecast we went over.
- `;
- conversationHistory.push(new HumanMessage(userMessage));
- inputs = {
- messages: conversationHistory,
- };
- const contentParts2 = await run.processStream(inputs, config);
- const finalMessages2 = run.getRunMessages();
- if (finalMessages2) {
- conversationHistory.push(...finalMessages2);
- console.dir(conversationHistory, { depth: null });
- }
- }
- process.on('unhandledRejection', (reason, promise) => {
- console.error('Unhandled Rejection at:', promise, 'reason:', reason);
- console.log('Conversation history:');
- console.dir(conversationHistory, { depth: null });
- process.exit(1);
- });
- testStandardStreaming().catch((err) => {
- console.error(err);
- console.log('Conversation history:');
- console.dir(conversationHistory, { depth: null });
- process.exit(1);
- });
- //# sourceMappingURL=cli.js.map
package/src/scripts/cli2.js
@@ -1,119 +0,0 @@
- /* eslint-disable no-console */
- // src/scripts/cli2.ts
- import { config } from 'dotenv';
- config();
- import { HumanMessage } from '@langchain/core/messages';
- import { ChatModelStreamHandler } from '@/stream';
- import { TestLLMStreamHandler } from '@/events';
- import { getArgs } from '@/scripts/args';
- import { Run } from '@/run';
- import { GraphEvents } from '@/common';
- import { getLLMConfig } from '@/utils/llmConfig';
- const conversationHistory = [];
- async function executePersonalizedQuerySuite() {
- const { userName, location, provider, currentDate } = await getArgs();
- const customHandlers = {
- [GraphEvents.LLM_STREAM]: new TestLLMStreamHandler(),
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
- [GraphEvents.LLM_START]: {
- handle: (_event, data) => {
- console.log('====== LLM_START ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.LLM_END]: {
- handle: (_event, data) => {
- console.log('====== LLM_END ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.CHAT_MODEL_START]: {
- handle: (_event, _data) => {
- console.log('====== CHAT_MODEL_START ======');
- console.dir(_data, { depth: null });
- },
- },
- [GraphEvents.CHAT_MODEL_END]: {
- handle: (_event, _data) => {
- console.log('====== CHAT_MODEL_END ======');
- console.dir(_data, { depth: null });
- },
- },
- [GraphEvents.TOOL_START]: {
- handle: (_event, data) => {
- console.log('====== TOOL_START ======');
- console.dir(data, { depth: null });
- },
- },
- [GraphEvents.TOOL_END]: {
- handle: (_event, data) => {
- console.log('====== TOOL_END ======');
- console.dir(data, { depth: null });
- },
- },
- };
- const llmConfig = getLLMConfig(provider);
- const run = await Run.create({
- runId: 'test-run-id',
- graphConfig: {
- type: 'standard',
- llmConfig,
- tools: [],
- },
- customHandlers,
- });
- const sessionConfig = {
- configurable: {
- provider,
- thread_id: `${userName}-session-${Date.now()}`,
- instructions: `You are a knowledgeable and friendly AI assistant. Tailor your responses to ${userName}'s interests in ${location}.`,
- additional_instructions: `Ensure each topic is thoroughly researched. Today is ${currentDate}. Maintain a warm, personalized tone throughout.`,
- },
- streamMode: 'values',
- version: 'v2',
- };
- console.log(`Initiating personalized query suite for ${userName}`);
- const queryTopics = [
- {
- task: 'current weather',
- description: 'Provide a detailed weather forecast',
- },
- {
- task: 'popular tourist attraction',
- description: 'Describe a notable sight',
- },
- {
- task: 'upcoming events',
- description: 'List major events or festivals this week',
- },
- // { task: "famous local dish", description: "Share a recipe for a regional specialty" },
- // { task: "local humor", description: "Tell a joke related to the area or findings" }
- ];
- const userPrompt = `
- Greetings! I'm ${userName}, currently in ${location}. Today's date is ${currentDate}.
- I'm seeking information on various aspects of ${location}. Please address the following:
-
- ${queryTopics.map((topic, index) => `${index + 1}. ${topic.description} in ${location}.`).join('\n ')}
-
- For each topic, conduct a separate search to ensure accuracy and depth.
- In your response, please address me as ${userName} and maintain a friendly, informative tone.
- `;
- conversationHistory.push(new HumanMessage(userPrompt));
- const runInput = {
- messages: conversationHistory,
- };
- const contentParts = await run.processStream(runInput, sessionConfig);
- const finalMessages = run.getRunMessages();
- if (finalMessages) {
- conversationHistory.push(...finalMessages);
- console.log("AI Assistant's Response:");
- console.dir(conversationHistory, { depth: null });
- }
- }
- executePersonalizedQuerySuite().catch((error) => {
- console.error('An error occurred during the query suite execution:', error);
- console.log('Final conversation state:');
- console.dir(conversationHistory, { depth: null });
- process.exit(1);
- });
- //# sourceMappingURL=cli2.js.map