illuma-agents 1.0.36 → 1.0.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. package/dist/cjs/agents/AgentContext.cjs +69 -14
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -1
  3. package/dist/cjs/common/enum.cjs +3 -1
  4. package/dist/cjs/common/enum.cjs.map +1 -1
  5. package/dist/cjs/graphs/Graph.cjs +50 -8
  6. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  7. package/dist/cjs/graphs/MultiAgentGraph.cjs +277 -11
  8. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  9. package/dist/cjs/llm/bedrock/index.cjs +128 -61
  10. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  11. package/dist/cjs/main.cjs +16 -7
  12. package/dist/cjs/main.cjs.map +1 -1
  13. package/dist/cjs/messages/cache.cjs +1 -0
  14. package/dist/cjs/messages/cache.cjs.map +1 -1
  15. package/dist/cjs/messages/core.cjs +1 -1
  16. package/dist/cjs/messages/core.cjs.map +1 -1
  17. package/dist/cjs/messages/tools.cjs +2 -2
  18. package/dist/cjs/messages/tools.cjs.map +1 -1
  19. package/dist/cjs/stream.cjs +4 -2
  20. package/dist/cjs/stream.cjs.map +1 -1
  21. package/dist/cjs/tools/BrowserTools.cjs +27 -3
  22. package/dist/cjs/tools/BrowserTools.cjs.map +1 -1
  23. package/dist/cjs/tools/CodeExecutor.cjs +22 -21
  24. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  25. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +14 -11
  26. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -1
  27. package/dist/cjs/tools/ToolNode.cjs +101 -2
  28. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  29. package/dist/cjs/tools/ToolSearch.cjs +862 -0
  30. package/dist/cjs/tools/ToolSearch.cjs.map +1 -0
  31. package/dist/esm/agents/AgentContext.mjs +69 -14
  32. package/dist/esm/agents/AgentContext.mjs.map +1 -1
  33. package/dist/esm/common/enum.mjs +3 -1
  34. package/dist/esm/common/enum.mjs.map +1 -1
  35. package/dist/esm/graphs/Graph.mjs +51 -9
  36. package/dist/esm/graphs/Graph.mjs.map +1 -1
  37. package/dist/esm/graphs/MultiAgentGraph.mjs +278 -12
  38. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  39. package/dist/esm/llm/bedrock/index.mjs +127 -60
  40. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  41. package/dist/esm/main.mjs +1 -1
  42. package/dist/esm/messages/cache.mjs +1 -0
  43. package/dist/esm/messages/cache.mjs.map +1 -1
  44. package/dist/esm/messages/core.mjs +1 -1
  45. package/dist/esm/messages/core.mjs.map +1 -1
  46. package/dist/esm/messages/tools.mjs +2 -2
  47. package/dist/esm/messages/tools.mjs.map +1 -1
  48. package/dist/esm/stream.mjs +4 -2
  49. package/dist/esm/stream.mjs.map +1 -1
  50. package/dist/esm/tools/BrowserTools.mjs +27 -3
  51. package/dist/esm/tools/BrowserTools.mjs.map +1 -1
  52. package/dist/esm/tools/CodeExecutor.mjs +22 -21
  53. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  54. package/dist/esm/tools/ProgrammaticToolCalling.mjs +14 -11
  55. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -1
  56. package/dist/esm/tools/ToolNode.mjs +102 -3
  57. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  58. package/dist/esm/tools/ToolSearch.mjs +827 -0
  59. package/dist/esm/tools/ToolSearch.mjs.map +1 -0
  60. package/dist/types/agents/AgentContext.d.ts +33 -1
  61. package/dist/types/common/enum.d.ts +4 -2
  62. package/dist/types/graphs/Graph.d.ts +6 -0
  63. package/dist/types/graphs/MultiAgentGraph.d.ts +16 -0
  64. package/dist/types/index.d.ts +1 -1
  65. package/dist/types/llm/bedrock/index.d.ts +89 -11
  66. package/dist/types/llm/bedrock/types.d.ts +27 -0
  67. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  68. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  69. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  70. package/dist/types/tools/BrowserTools.d.ts +2 -0
  71. package/dist/types/tools/CodeExecutor.d.ts +0 -3
  72. package/dist/types/tools/ProgrammaticToolCalling.d.ts +0 -3
  73. package/dist/types/tools/ToolNode.d.ts +3 -1
  74. package/dist/types/tools/ToolSearch.d.ts +148 -0
  75. package/dist/types/types/graph.d.ts +2 -0
  76. package/dist/types/types/llm.d.ts +3 -1
  77. package/dist/types/types/tools.d.ts +42 -2
  78. package/package.json +12 -5
  79. package/src/agents/AgentContext.ts +88 -16
  80. package/src/common/enum.ts +3 -1
  81. package/src/graphs/Graph.ts +64 -13
  82. package/src/graphs/MultiAgentGraph.ts +350 -13
  83. package/src/index.ts +1 -1
  84. package/src/llm/bedrock/index.ts +221 -99
  85. package/src/llm/bedrock/llm.spec.ts +616 -0
  86. package/src/llm/bedrock/types.ts +51 -0
  87. package/src/llm/bedrock/utils/index.ts +18 -0
  88. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  89. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  90. package/src/messages/__tests__/tools.test.ts +21 -21
  91. package/src/messages/cache.test.ts +259 -0
  92. package/src/messages/cache.ts +104 -1
  93. package/src/messages/core.ts +1 -1
  94. package/src/messages/tools.ts +2 -2
  95. package/src/scripts/caching.ts +27 -19
  96. package/src/scripts/code_exec_files.ts +58 -15
  97. package/src/scripts/code_exec_multi_session.ts +241 -0
  98. package/src/scripts/code_exec_session.ts +282 -0
  99. package/src/scripts/multi-agent-conditional.ts +1 -0
  100. package/src/scripts/multi-agent-supervisor.ts +1 -0
  101. package/src/scripts/programmatic_exec_agent.ts +4 -4
  102. package/src/scripts/test-handoff-preamble.ts +277 -0
  103. package/src/scripts/test-parallel-handoffs.ts +291 -0
  104. package/src/scripts/test-tools-before-handoff.ts +8 -4
  105. package/src/scripts/test_code_api.ts +361 -0
  106. package/src/scripts/thinking-bedrock.ts +159 -0
  107. package/src/scripts/thinking.ts +39 -18
  108. package/src/scripts/{tool_search_regex.ts → tool_search.ts} +5 -5
  109. package/src/scripts/tools.ts +7 -3
  110. package/src/stream.ts +4 -2
  111. package/src/tools/BrowserTools.ts +68 -14
  112. package/src/tools/CodeExecutor.ts +26 -23
  113. package/src/tools/ProgrammaticToolCalling.ts +18 -14
  114. package/src/tools/ToolNode.ts +114 -1
  115. package/src/tools/ToolSearch.ts +1041 -0
  116. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +0 -2
  117. package/src/tools/__tests__/{ToolSearchRegex.integration.test.ts → ToolSearch.integration.test.ts} +6 -6
  118. package/src/tools/__tests__/ToolSearch.test.ts +1003 -0
  119. package/src/types/graph.ts +2 -0
  120. package/src/types/llm.ts +3 -1
  121. package/src/types/tools.ts +51 -2
  122. package/dist/cjs/tools/ToolSearchRegex.cjs +0 -455
  123. package/dist/cjs/tools/ToolSearchRegex.cjs.map +0 -1
  124. package/dist/esm/tools/ToolSearchRegex.mjs +0 -448
  125. package/dist/esm/tools/ToolSearchRegex.mjs.map +0 -1
  126. package/dist/types/tools/ToolSearchRegex.d.ts +0 -80
  127. package/src/tools/ToolSearchRegex.ts +0 -535
  128. package/src/tools/__tests__/ToolSearchRegex.test.ts +0 -232
@@ -835,6 +835,265 @@ describe('Multi-agent provider interoperability', () => {
  });
  });

+ describe('Immutability - addCacheControl does not mutate original messages', () => {
+ it('should not mutate original messages when adding cache control to string content', () => {
+ const originalMessages: TestMsg[] = [
+ { role: 'user', content: 'Hello' },
+ { role: 'assistant', content: 'Hi there' },
+ { role: 'user', content: 'How are you?' },
+ ];
+
+ const originalFirstContent = originalMessages[0].content;
+ const originalThirdContent = originalMessages[2].content;
+
+ const result = addCacheControl(originalMessages as never);
+
+ expect(originalMessages[0].content).toBe(originalFirstContent);
+ expect(originalMessages[2].content).toBe(originalThirdContent);
+ expect(typeof originalMessages[0].content).toBe('string');
+ expect(typeof originalMessages[2].content).toBe('string');
+
+ expect(Array.isArray(result[0].content)).toBe(true);
+ expect(Array.isArray(result[2].content)).toBe(true);
+ });
+
+ it('should not mutate original messages when adding cache control to array content', () => {
+ const originalMessages: TestMsg[] = [
+ {
+ role: 'user',
+ content: [{ type: ContentTypes.TEXT, text: 'Hello' }],
+ },
+ { role: 'assistant', content: 'Hi there' },
+ {
+ role: 'user',
+ content: [{ type: ContentTypes.TEXT, text: 'How are you?' }],
+ },
+ ];
+
+ const originalFirstBlock = {
+ ...(originalMessages[0].content as MessageContentComplex[])[0],
+ };
+ const originalThirdBlock = {
+ ...(originalMessages[2].content as MessageContentComplex[])[0],
+ };
+
+ const result = addCacheControl(originalMessages as never);
+
+ const firstContent = originalMessages[0].content as MessageContentComplex[];
+ const thirdContent = originalMessages[2].content as MessageContentComplex[];
+
+ expect('cache_control' in firstContent[0]).toBe(false);
+ expect('cache_control' in thirdContent[0]).toBe(false);
+ expect(firstContent[0]).toEqual(originalFirstBlock);
+ expect(thirdContent[0]).toEqual(originalThirdBlock);
+
+ const resultFirstContent = result[0].content as MessageContentComplex[];
+ const resultThirdContent = result[2].content as MessageContentComplex[];
+ expect('cache_control' in resultFirstContent[0]).toBe(true);
+ expect('cache_control' in resultThirdContent[0]).toBe(true);
+ });
+
+ it('should not mutate original messages when stripping existing cache control', () => {
+ const originalMessages: TestMsg[] = [
+ {
+ role: 'user',
+ content: [
+ {
+ type: ContentTypes.TEXT,
+ text: 'Hello',
+ cache_control: { type: 'ephemeral' },
+ } as MessageContentComplex,
+ ],
+ },
+ { role: 'assistant', content: 'Hi there' },
+ {
+ role: 'user',
+ content: [{ type: ContentTypes.TEXT, text: 'How are you?' }],
+ },
+ ];
+
+ const originalFirstBlock = (
+ originalMessages[0].content as MessageContentComplex[]
+ )[0];
+
+ addCacheControl(originalMessages as never);
+
+ expect('cache_control' in originalFirstBlock).toBe(true);
+ });
+ });
+
+ describe('Immutability - addBedrockCacheControl does not mutate original messages', () => {
+ it('should not mutate original messages when adding cache points to string content', () => {
+ const originalMessages: TestMsg[] = [
+ { role: 'user', content: 'Hello' },
+ { role: 'assistant', content: 'Hi there' },
+ ];
+
+ const originalFirstContent = originalMessages[0].content;
+ const originalSecondContent = originalMessages[1].content;
+
+ const result = addBedrockCacheControl(originalMessages);
+
+ expect(originalMessages[0].content).toBe(originalFirstContent);
+ expect(originalMessages[1].content).toBe(originalSecondContent);
+ expect(typeof originalMessages[0].content).toBe('string');
+ expect(typeof originalMessages[1].content).toBe('string');
+
+ expect(Array.isArray(result[0].content)).toBe(true);
+ expect(Array.isArray(result[1].content)).toBe(true);
+ });
+
+ it('should not mutate original messages when adding cache points to array content', () => {
+ const originalMessages: TestMsg[] = [
+ {
+ role: 'user',
+ content: [{ type: ContentTypes.TEXT, text: 'Hello' }],
+ },
+ {
+ role: 'assistant',
+ content: [{ type: ContentTypes.TEXT, text: 'Hi there' }],
+ },
+ ];
+
+ const originalFirstContentLength = (
+ originalMessages[0].content as MessageContentComplex[]
+ ).length;
+ const originalSecondContentLength = (
+ originalMessages[1].content as MessageContentComplex[]
+ ).length;
+
+ const result = addBedrockCacheControl(originalMessages);
+
+ const firstContent = originalMessages[0].content as MessageContentComplex[];
+ const secondContent = originalMessages[1]
+ .content as MessageContentComplex[];
+
+ expect(firstContent.length).toBe(originalFirstContentLength);
+ expect(secondContent.length).toBe(originalSecondContentLength);
+ expect(firstContent.some((b) => 'cachePoint' in b)).toBe(false);
+ expect(secondContent.some((b) => 'cachePoint' in b)).toBe(false);
+
+ const resultFirstContent = result[0].content as MessageContentComplex[];
+ const resultSecondContent = result[1].content as MessageContentComplex[];
+ expect(resultFirstContent.length).toBe(originalFirstContentLength + 1);
+ expect(resultSecondContent.length).toBe(originalSecondContentLength + 1);
+ expect(resultFirstContent.some((b) => 'cachePoint' in b)).toBe(true);
+ expect(resultSecondContent.some((b) => 'cachePoint' in b)).toBe(true);
+ });
+
+ it('should not mutate original messages when stripping existing cache control', () => {
+ const originalMessages: TestMsg[] = [
+ {
+ role: 'user',
+ content: [
+ {
+ type: ContentTypes.TEXT,
+ text: 'Hello',
+ cache_control: { type: 'ephemeral' },
+ } as MessageContentComplex,
+ ],
+ },
+ {
+ role: 'assistant',
+ content: [
+ { type: ContentTypes.TEXT, text: 'Hi there' },
+ { cachePoint: { type: 'default' } },
+ ],
+ },
+ ];
+
+ const originalFirstBlock = (
+ originalMessages[0].content as MessageContentComplex[]
+ )[0];
+ const originalSecondContentLength = (
+ originalMessages[1].content as MessageContentComplex[]
+ ).length;
+
+ addBedrockCacheControl(originalMessages);
+
+ expect('cache_control' in originalFirstBlock).toBe(true);
+ expect(
+ (originalMessages[1].content as MessageContentComplex[]).length
+ ).toBe(originalSecondContentLength);
+ });
+
+ it('should allow different providers to process same messages without cross-contamination', () => {
+ const sharedMessages: TestMsg[] = [
+ {
+ role: 'user',
+ content: [{ type: ContentTypes.TEXT, text: 'Shared message 1' }],
+ },
+ {
+ role: 'assistant',
+ content: [{ type: ContentTypes.TEXT, text: 'Shared response 1' }],
+ },
+ ];
+
+ const bedrockResult = addBedrockCacheControl(sharedMessages);
+
+ const anthropicResult = addCacheControl(sharedMessages as never);
+
+ const originalFirstContent = sharedMessages[0]
+ .content as MessageContentComplex[];
+ expect(originalFirstContent.some((b) => 'cachePoint' in b)).toBe(false);
+ expect('cache_control' in originalFirstContent[0]).toBe(false);
+
+ const bedrockFirstContent = bedrockResult[0]
+ .content as MessageContentComplex[];
+ expect(bedrockFirstContent.some((b) => 'cachePoint' in b)).toBe(true);
+ expect('cache_control' in bedrockFirstContent[0]).toBe(false);
+
+ const anthropicFirstContent = anthropicResult[0]
+ .content as MessageContentComplex[];
+ expect(anthropicFirstContent.some((b) => 'cachePoint' in b)).toBe(false);
+ expect('cache_control' in anthropicFirstContent[0]).toBe(true);
+ });
+
+ it('should keep lc_kwargs.content in sync with content for LangChain messages', () => {
+ type LangChainLikeMsg = TestMsg & {
+ lc_kwargs?: { content?: MessageContentComplex[] };
+ };
+
+ const messagesWithLcKwargs: LangChainLikeMsg[] = [
+ {
+ role: 'user',
+ content: [{ type: ContentTypes.TEXT, text: 'User message' }],
+ lc_kwargs: {
+ content: [{ type: ContentTypes.TEXT, text: 'User message' }],
+ },
+ },
+ {
+ role: 'assistant',
+ content: [{ type: ContentTypes.TEXT, text: 'Assistant response' }],
+ lc_kwargs: {
+ content: [{ type: ContentTypes.TEXT, text: 'Assistant response' }],
+ },
+ },
+ ];
+
+ const bedrockResult = addBedrockCacheControl(messagesWithLcKwargs);
+
+ const resultFirst = bedrockResult[0] as LangChainLikeMsg;
+ const resultSecond = bedrockResult[1] as LangChainLikeMsg;
+
+ expect(resultFirst.content).toEqual(resultFirst.lc_kwargs?.content);
+ expect(resultSecond.content).toEqual(resultSecond.lc_kwargs?.content);
+
+ const firstContent = resultFirst.content as MessageContentComplex[];
+ const firstLcContent = resultFirst.lc_kwargs
+ ?.content as MessageContentComplex[];
+ expect(firstContent.some((b) => 'cachePoint' in b)).toBe(true);
+ expect(firstLcContent.some((b) => 'cachePoint' in b)).toBe(true);
+
+ const originalFirst = messagesWithLcKwargs[0];
+ const originalContent = originalFirst.content as MessageContentComplex[];
+ const originalLcContent = originalFirst.lc_kwargs
+ ?.content as MessageContentComplex[];
+ expect(originalContent.some((b) => 'cachePoint' in b)).toBe(false);
+ expect(originalLcContent.some((b) => 'cachePoint' in b)).toBe(false);
+ });
+ });
+
  describe('Multi-turn cache cleanup', () => {
  it('strips stale Bedrock cache points from previous turns before applying new ones', () => {
  const messages: TestMsg[] = [
@@ -1,4 +1,11 @@
- import { BaseMessage, MessageContentComplex } from '@langchain/core/messages';
+ import {
+ BaseMessage,
+ MessageContentComplex,
+ AIMessage,
+ HumanMessage,
+ SystemMessage,
+ ToolMessage,
+ } from '@langchain/core/messages';
  import type { AnthropicMessage } from '@/types/messages';
  import type Anthropic from '@anthropic-ai/sdk';
  import { ContentTypes } from '@/common/enum';
@@ -18,6 +25,102 @@ const debugCache = (message: string, data?: unknown): void => {
  }
  };

+ /**
+ * Deep clones a message's content to prevent mutation of the original.
+ */
+ function deepCloneContent<T extends string | MessageContentComplex[]>(
+ content: T
+ ): T {
+ if (typeof content === 'string') {
+ return content;
+ }
+ if (Array.isArray(content)) {
+ return content.map((block) => ({ ...block })) as T;
+ }
+ return content;
+ }
+
+ /**
+ * Simple shallow clone with deep-cloned content.
+ * Used for stripping cache control where we don't need proper LangChain instances.
+ */
+ function _shallowCloneMessage<T extends MessageWithContent>(message: T): T {
+ const cloned = {
+ ...message,
+ content: deepCloneContent(message.content ?? ''),
+ } as T;
+ const lcKwargs = (cloned as Record<string, unknown>).lc_kwargs as
+ | Record<string, unknown>
+ | undefined;
+ if (lcKwargs != null) {
+ (cloned as Record<string, unknown>).lc_kwargs = {
+ ...lcKwargs,
+ content: cloned.content,
+ };
+ }
+ return cloned;
+ }
+
+ /**
+ * Creates a new LangChain message instance with the given content.
+ * Required when adding cache points to ensure proper serialization.
+ */
+ function _createNewMessage<T extends MessageWithContent>(
+ message: T,
+ content: MessageContentComplex[]
+ ): T {
+ if ('getType' in message && typeof message.getType === 'function') {
+ const baseMsg = message as unknown as BaseMessage;
+ const msgType = baseMsg.getType();
+
+ const baseFields = {
+ content,
+ name: baseMsg.name,
+ additional_kwargs: { ...baseMsg.additional_kwargs },
+ response_metadata: { ...baseMsg.response_metadata },
+ id: baseMsg.id,
+ };
+
+ switch (msgType) {
+ case 'human':
+ return new HumanMessage(baseFields) as unknown as T;
+ case 'ai': {
+ const aiMsg = baseMsg as AIMessage;
+ return new AIMessage({
+ ...baseFields,
+ tool_calls: aiMsg.tool_calls ? [...aiMsg.tool_calls] : [],
+ invalid_tool_calls: aiMsg.invalid_tool_calls
+ ? [...aiMsg.invalid_tool_calls]
+ : [],
+ usage_metadata: aiMsg.usage_metadata,
+ }) as unknown as T;
+ }
+ case 'system':
+ return new SystemMessage(baseFields) as unknown as T;
+ case 'tool': {
+ const toolMsg = baseMsg as ToolMessage;
+ return new ToolMessage({
+ ...baseFields,
+ tool_call_id: toolMsg.tool_call_id,
+ status: toolMsg.status,
+ artifact: toolMsg.artifact,
+ }) as unknown as T;
+ }
+ default:
+ break;
+ }
+ }
+
+ const cloned = { ...message, content } as T;
+ const lcKwargs = (cloned as Record<string, unknown>).lc_kwargs as
+ | Record<string, unknown>
+ | undefined;
+ if (lcKwargs != null) {
+ (cloned as Record<string, unknown>).lc_kwargs = { ...lcKwargs, content };
+ }
+ return cloned;
+ }
+
  /**
  * Anthropic API: Adds cache control to the appropriate user messages in the payload.
  * Strips ALL existing cache control (both Anthropic and Bedrock formats) from all messages,
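Note: a minimal usage sketch (not part of the diff) of the immutability contract the new tests above exercise — the cache helpers now return cloned messages instead of mutating their input. The import path and export names here are assumptions.

    // Sketch only: assumes both helpers are exported from the package root.
    import { addCacheControl, addBedrockCacheControl } from 'illuma-agents';

    const messages = [
      { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
      { role: 'assistant', content: [{ type: 'text', text: 'Hi there' }] },
    ];

    // Anthropic format: returns copies with cache_control set on the chosen blocks.
    const anthropicPayload = addCacheControl(messages as never);
    // Bedrock Converse format: returns copies with cachePoint blocks appended.
    const bedrockPayload = addBedrockCacheControl(messages);

    // Per the tests above, the originals stay untouched, so the same array can
    // feed both providers without cross-contamination.
    console.log(JSON.stringify(messages[0].content));         // unchanged, no cache markers
    console.log(JSON.stringify(anthropicPayload[0].content)); // cache_control added
    console.log(JSON.stringify(bedrockPayload[0].content));   // cachePoint block appended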
@@ -41,7 +41,7 @@ User: ${userMessage[1]}
  const _allowedTypes = ['image_url', 'text', 'tool_use', 'tool_result'];
  const allowedTypesByProvider: Record<string, string[]> = {
  default: _allowedTypes,
- [Providers.ANTHROPIC]: [..._allowedTypes, 'thinking'],
+ [Providers.ANTHROPIC]: [..._allowedTypes, 'thinking', 'redacted_thinking'],
  [Providers.BEDROCK]: [..._allowedTypes, 'reasoning_content'],
  [Providers.OPENAI]: _allowedTypes,
  };
@@ -47,7 +47,7 @@ export function extractToolDiscoveries(messages: BaseMessage[]): string[] {
  // Use getType() instead of instanceof to avoid module mismatch issues
  if (msg.getType() !== MessageTypes.TOOL) continue;
  const toolMsg = msg as ToolMessage;
- if (toolMsg.name !== Constants.TOOL_SEARCH_REGEX) continue;
+ if (toolMsg.name !== Constants.TOOL_SEARCH) continue;
  if (!toolCallIds.has(toolMsg.tool_call_id)) continue;

  // This is a tool search result from the current turn
@@ -95,7 +95,7 @@ export function hasToolSearchInCurrentTurn(messages: BaseMessage[]): boolean {
  const msg = messages[i];
  if (
  msg.getType() === MessageTypes.TOOL &&
- (msg as ToolMessage).name === Constants.TOOL_SEARCH_REGEX &&
+ (msg as ToolMessage).name === Constants.TOOL_SEARCH &&
  toolCallIds.has((msg as ToolMessage).tool_call_id)
  ) {
  return true;
@@ -1,7 +1,11 @@
  // src/scripts/test-prompt-caching.ts
  import { config } from 'dotenv';
  config();
- import { HumanMessage, SystemMessage, BaseMessage } from '@langchain/core/messages';
+ import {
+ HumanMessage,
+ SystemMessage,
+ BaseMessage,
+ } from '@langchain/core/messages';
  import type { UsageMetadata } from '@langchain/core/messages';
  import type * as t from '@/types';
  import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
@@ -11,7 +15,7 @@ import { getLLMConfig } from '@/utils/llmConfig';
  import { getArgs } from '@/scripts/args';
  import { Run } from '@/run';

- const CACHED_TEXT = `Ahoy there, me hearties! This be a grand tale o' the mighty prompt cachin' treasure map, a secret technique used by the wise Anthropic seafarers to stash away vast hordes o' text booty on their mystical servers! Arrr, 'tis a pirate's dream indeed - no need to haul the same heavy chest o' gold doubloons across the vast digital ocean with every message! When ye mark yer precious cargo with the secret flag 'cache_control: { type: \"ephemeral\" }', the text be safely buried on their distant shores, ready for plunderin' again without the weight slowin' down yer ship! The wise pirates at Anthropic introduced this magical scroll in the summer o' 2024, markin' it with the mysterious insignia 'anthropic-beta: prompt-caching-2024-07-31' that must be flown high on yer vessel's headers. This crafty script be testin' the waters of this new treasure map system, sendin' out three separate voyages across the AI seas: first to bury the treasure, second to dig it up again without payin' the full toll, and third to see if the map still leads to gold after the sands o' time have shifted (about thirty seconds o' waitin', which be an eternity for an impatient buccaneer!). The great advantage for a scurvy pirate captain is clear as Caribbean waters - ye can load up yer vessel with all manner o' reference scrolls, ancient tomes, and navigational charts without weighin' down each and every message ye send to port! This be savin' ye countless tokens, which as any seafarin' AI wrangler knows, be as precious as Spanish gold. The cached text could contain the full history o' the Seven Seas, detailed maps o' every port from Tortuga to Singapore, or the complete collection o' pirate shanties ever sung by drunken sailors under the light o' the silvery moon. When properly implemented, this mighty cachin' system keeps all that knowledge ready at hand without the Claude kraken needin' to process it anew with each passin' breeze. By Blackbeard's beard, 'tis a revolution in how we manage our conversational ships! The script be employin' the finest LangChain riggin' and custom-carved event handlers to properly track the treasure as it flows back and forth. If ye be successful in yer implementation, ye should witness the miracle o' significantly reduced token counts in yer usage metrics, faster responses from the AI oracle, and the ability to maintain vast knowledge without payin' the full price each time! So hoist the Jolly Roger, load yer pistols with API keys, and set sail on the grand adventure o' prompt cachin'! May the winds o' efficient token usage fill yer sails, and may ye never have to pay full price for passin' the same mammoth context to Claude again! Remember, a clever pirate only pays for their tokens once, then lets the cache do the heavy liftin'! YARRR! This file also contains the secrets of the legendary Pirate Code, passed down through generations of seafarers since the Golden Age of Piracy. It includes detailed accounts of famous pirate captains like Blackbeard, Calico Jack, Anne Bonny, and Mary Read, along with their most profitable plundering routes and techniques for capturing merchant vessels. The text chronicles the exact locations of at least seventeen buried treasures across the Caribbean, complete with riddles and map coordinates that only a true pirate could decipher. 
There are sections dedicated to ship maintenance, including how to properly seal a leaking hull during battle and the best methods for keeping your cannons in prime firing condition even in humid tropical conditions. The document contains an extensive glossary of pirate terminology, from 'avast' to 'Yellow Jack,' ensuring any landlubber can speak like a seasoned salt with enough study. There's a comprehensive guide to navigating by the stars without modern instruments, perfect for when your GPS fails in the middle of a daring escape. The cache also includes detailed recipes for grog, hardtack that won't break your teeth, and how to keep citrus fruits fresh to prevent scurvy during long voyages. The legendary Black Spot ritual is described in terrifying detail, along with other pirate superstitions and their origins in maritime folklore. A section on pirate governance explains the democratic nature of most pirate ships, how booty was divided fairly, and how captains were elected and deposed when necessary. The file even contains sheet music for dozens of sea shanties, with notes on when each should be sung for maximum crew morale during different sailing conditions. All of this knowledge is wrapped in colorful pirate dialect that would make any AI assistant respond with appropriate 'arghs' and 'avasts' when properly prompted!`
+ const CACHED_TEXT = `Ahoy there, me hearties! This be a grand tale o' the mighty prompt cachin' treasure map, a secret technique used by the wise Anthropic seafarers to stash away vast hordes o' text booty on their mystical servers! Arrr, 'tis a pirate's dream indeed - no need to haul the same heavy chest o' gold doubloons across the vast digital ocean with every message! When ye mark yer precious cargo with the secret flag 'cache_control: { type: \"ephemeral\" }', the text be safely buried on their distant shores, ready for plunderin' again without the weight slowin' down yer ship! The wise pirates at Anthropic introduced this magical scroll in the summer o' 2024, markin' it with the mysterious insignia 'anthropic-beta: prompt-caching-2024-07-31' that must be flown high on yer vessel's headers. This crafty script be testin' the waters of this new treasure map system, sendin' out three separate voyages across the AI seas: first to bury the treasure, second to dig it up again without payin' the full toll, and third to see if the map still leads to gold after the sands o' time have shifted (about thirty seconds o' waitin', which be an eternity for an impatient buccaneer!). The great advantage for a scurvy pirate captain is clear as Caribbean waters - ye can load up yer vessel with all manner o' reference scrolls, ancient tomes, and navigational charts without weighin' down each and every message ye send to port! This be savin' ye countless tokens, which as any seafarin' AI wrangler knows, be as precious as Spanish gold. The cached text could contain the full history o' the Seven Seas, detailed maps o' every port from Tortuga to Singapore, or the complete collection o' pirate shanties ever sung by drunken sailors under the light o' the silvery moon. When properly implemented, this mighty cachin' system keeps all that knowledge ready at hand without the Claude kraken needin' to process it anew with each passin' breeze. By Blackbeard's beard, 'tis a revolution in how we manage our conversational ships! The script be employin' the finest LangChain riggin' and custom-carved event handlers to properly track the treasure as it flows back and forth. If ye be successful in yer implementation, ye should witness the miracle o' significantly reduced token counts in yer usage metrics, faster responses from the AI oracle, and the ability to maintain vast knowledge without payin' the full price each time! So hoist the Jolly Roger, load yer pistols with API keys, and set sail on the grand adventure o' prompt cachin'! May the winds o' efficient token usage fill yer sails, and may ye never have to pay full price for passin' the same mammoth context to Claude again! Remember, a clever pirate only pays for their tokens once, then lets the cache do the heavy liftin'! YARRR! This file also contains the secrets of the legendary Pirate Code, passed down through generations of seafarers since the Golden Age of Piracy. It includes detailed accounts of famous pirate captains like Blackbeard, Calico Jack, Anne Bonny, and Mary Read, along with their most profitable plundering routes and techniques for capturing merchant vessels. The text chronicles the exact locations of at least seventeen buried treasures across the Caribbean, complete with riddles and map coordinates that only a true pirate could decipher. 
There are sections dedicated to ship maintenance, including how to properly seal a leaking hull during battle and the best methods for keeping your cannons in prime firing condition even in humid tropical conditions. The document contains an extensive glossary of pirate terminology, from 'avast' to 'Yellow Jack,' ensuring any landlubber can speak like a seasoned salt with enough study. There's a comprehensive guide to navigating by the stars without modern instruments, perfect for when your GPS fails in the middle of a daring escape. The cache also includes detailed recipes for grog, hardtack that won't break your teeth, and how to keep citrus fruits fresh to prevent scurvy during long voyages. The legendary Black Spot ritual is described in terrifying detail, along with other pirate superstitions and their origins in maritime folklore. A section on pirate governance explains the democratic nature of most pirate ships, how booty was divided fairly, and how captains were elected and deposed when necessary. The file even contains sheet music for dozens of sea shanties, with notes on when each should be sung for maximum crew morale during different sailing conditions. All of this knowledge is wrapped in colorful pirate dialect that would make any AI assistant respond with appropriate 'arghs' and 'avasts' when properly prompted!`;

  const conversationHistory: BaseMessage[] = [];
  let _contentParts: t.MessageContentComplex[] = [];
@@ -23,7 +27,7 @@ async function testPromptCaching(): Promise<void> {
  ${CACHED_TEXT}`;
  const { contentParts, aggregateContent } = createContentAggregator();
  _contentParts = contentParts as t.MessageContentComplex[];
-
+
  // Set up event handlers
  const customHandlers = {
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
@@ -33,29 +37,33 @@ ${CACHED_TEXT}`;
  [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
  // Additional handlers for tracking usage metrics
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
- handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
+ handle: (
+ event: GraphEvents.ON_RUN_STEP_COMPLETED,
+ data: t.StreamEventData
+ ): void => {
  console.log('====== ON_RUN_STEP_COMPLETED ======');
- aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
- }
+ aggregateContent({
+ event,
+ data: data as unknown as { result: t.ToolEndEvent },
+ });
+ },
  },
  };

- const baseLlmConfig: t.LLMConfig & t.AnthropicClientOptions = getLLMConfig(Providers.ANTHROPIC);
-
+ const baseLlmConfig: t.LLMConfig & t.AnthropicClientOptions = getLLMConfig(
+ Providers.ANTHROPIC
+ );
+
  if (baseLlmConfig.provider !== 'anthropic') {
- console.error('This test requires Anthropic as the LLM provider. Please specify provider=anthropic');
+ console.error(
+ 'This test requires Anthropic as the LLM provider. Please specify provider=anthropic'
+ );
  process.exit(1);
  }
-
+
  const llmConfig = {
  ...baseLlmConfig,
- clientOptions: {
- ...baseLlmConfig.clientOptions,
- defaultHeaders: {
- ...baseLlmConfig.clientOptions?.defaultHeaders,
- "anthropic-beta": "prompt-caching-2024-07-31",
- }
- }
+ promptCache: true,
  };

  const run = await Run.create<t.IState>({
@@ -94,7 +102,7 @@ ${CACHED_TEXT}`;
  console.log('\n\nTest 2: Second request (should use cache)');
  const userMessage2 = `Summarize the key concepts from the context information.`;
  conversationHistory.push(new HumanMessage(userMessage2));
-
+
  console.log('Running second query to use cache...');
  const secondInputs = { messages: [...conversationHistory] };
  await run.processStream(secondInputs, config);
@@ -121,4 +129,4 @@ testPromptCaching().catch((err) => {
  console.log('Content parts:');
  console.dir(_contentParts, { depth: null });
  process.exit(1);
- });
+ });
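Note: the change above replaces the hand-rolled anthropic-beta header with the library's promptCache flag. A condensed before/after sketch, where baseLlmConfig stands in for the getLLMConfig(Providers.ANTHROPIC) result the script uses; option names not shown in the diff are assumptions.

    // Before: prompt caching enabled by manually setting the beta header.
    const legacyLlmConfig = {
      ...baseLlmConfig,
      clientOptions: {
        ...baseLlmConfig.clientOptions,
        defaultHeaders: {
          ...baseLlmConfig.clientOptions?.defaultHeaders,
          'anthropic-beta': 'prompt-caching-2024-07-31',
        },
      },
    };

    // After: a single flag; the library applies the cache markers itself.
    const llmConfig = {
      ...baseLlmConfig,
      promptCache: true,
    };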
@@ -1,4 +1,11 @@
- // src/scripts/cli.ts
+ // src/scripts/code_exec_files.ts
+ /**
+ * Tests automatic session tracking for code execution file persistence.
+ * Files created in one execution are automatically available in subsequent executions
+ * without the LLM needing to track or pass session_id.
+ *
+ * Run with: npm run code_exec_files
+ */
  import { config } from 'dotenv';
  config();
  import { HumanMessage, BaseMessage } from '@langchain/core/messages';
@@ -12,12 +19,39 @@ import {
  } from '@/events';
  import { getLLMConfig } from '@/utils/llmConfig';
  import { getArgs } from '@/scripts/args';
- import { GraphEvents } from '@/common';
+ import { Constants, GraphEvents } from '@/common';
  import { Run } from '@/run';
  import { createCodeExecutionTool } from '@/tools/CodeExecutor';

  const conversationHistory: BaseMessage[] = [];

+ /**
+ * Prints session context from the graph for debugging
+ */
+ function printSessionContext(run: Run<t.IState>): void {
+ const graph = run.Graph;
+ if (!graph) {
+ console.log('[Session] No graph available');
+ return;
+ }
+
+ const session = graph.sessions.get(Constants.EXECUTE_CODE) as
+ | t.CodeSessionContext
+ | undefined;
+
+ if (!session) {
+ console.log('[Session] No session context stored yet');
+ return;
+ }
+
+ console.log('[Session] Current session context:');
+ console.log(` - session_id: ${session.session_id}`);
+ console.log(` - files: ${JSON.stringify(session.files, null, 2)}`);
+ console.log(
+ ` - lastUpdated: ${new Date(session.lastUpdated).toISOString()}`
+ );
+ }
+
  async function testCodeExecution(): Promise<void> {
  const { userName, location, provider, currentDate } = await getArgs();
  const { contentParts, aggregateContent } = createContentAggregator();
@@ -72,7 +106,7 @@ async function testCodeExecution(): Promise<void> {
  handle: (
  _event: string,
  data: t.StreamEventData,
- metadata?: Record<string, unknown>
+ _metadata?: Record<string, unknown>
  ): void => {
  console.log('====== TOOL_START ======');
  console.dir(data, { depth: null });
@@ -96,7 +130,7 @@ async function testCodeExecution(): Promise<void> {
  customHandlers,
  });

- const config: Partial<RunnableConfig> & {
+ const streamConfig: Partial<RunnableConfig> & {
  version: 'v1' | 'v2';
  run_id?: string;
  streamMode: string;
@@ -107,10 +141,12 @@ async function testCodeExecution(): Promise<void> {
  },
  streamMode: 'values',
  version: 'v2' as const,
- // recursionLimit: 3,
  };

- console.log('Test 1: Create Project Plan');
+ console.log('\n========== Test 1: Create Project Plan ==========\n');
+ console.log(
+ 'Creating initial file - this establishes the session context.\n'
+ );

  const userMessage1 = `
  Hi ${userName} here. We are testing your file capabilities.
@@ -125,36 +161,43 @@ async function testCodeExecution(): Promise<void> {
  let inputs = {
  messages: conversationHistory,
  };
- const finalContentParts1 = await run.processStream(inputs, config);
+ await run.processStream(inputs, streamConfig);
  const finalMessages1 = run.getRunMessages();
  if (finalMessages1) {
  conversationHistory.push(...finalMessages1);
  }
- console.log('\n\n====================\n\n');
+
+ console.log('\n\n========== Session Context After Test 1 ==========\n');
+ printSessionContext(run);
  console.dir(contentParts, { depth: null });

- console.log('Test 2: Edit Project Plan');
+ console.log('\n========== Test 2: Edit Project Plan ==========\n');
+ console.log(
+ 'Editing the file from Test 1 - session_id is automatically injected.\n'
+ );

  const userMessage2 = `
  Thanks for creating the project plan. Now I'd like you to edit the same plan to:

- 1. Add a new section called "Technology Stack" that contains: "The technology stack for this project includes the following technologies" and nothing more.
-
+ 1. Read the existing project_plan.txt file
+ 2. Add a new section called "Technology Stack" that contains: "The technology stack for this project includes the following technologies" and nothing more.
+ 3. Save this as a new file called "project_plan_v2.txt" (remember files are read-only)
+ 4. Print the contents of both files to verify
  `;

- // Make sure to pass the file ID of the previous file you created and explicitly duplicate or rename the file in your code so we can then access it. Also print the contents of the new file to ensure we did what we wanted.`;
-
  conversationHistory.push(new HumanMessage(userMessage2));

  inputs = {
  messages: conversationHistory,
  };
- const finalContentParts2 = await run.processStream(inputs, config);
+ await run.processStream(inputs, streamConfig);
  const finalMessages2 = run.getRunMessages();
  if (finalMessages2) {
  conversationHistory.push(...finalMessages2);
  }
- console.log('\n\n====================\n\n');
+
+ console.log('\n\n========== Session Context After Test 2 ==========\n');
+ printSessionContext(run);
  console.dir(contentParts, { depth: null });

  const { handleLLMEnd, collected } = createMetadataAggregator();
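Note: a minimal sketch of the session lookup this script relies on. The shape mirrors only the fields printSessionContext prints (session_id, files, lastUpdated); anything beyond what the diff shows, including the element type of files, is an assumption.

    // Sketch only: the graph stores the code-execution session under
    // Constants.EXECUTE_CODE, so follow-up executions reuse the same
    // session_id and files without the LLM having to pass them explicitly.
    interface CodeSessionContextSketch {
      session_id: string;
      files: unknown[];
      lastUpdated: number;
    }

    function getCodeSession(
      sessions: Map<string, unknown>,
      key: string // e.g. Constants.EXECUTE_CODE in the script above
    ): CodeSessionContextSketch | undefined {
      return sessions.get(key) as CodeSessionContextSketch | undefined;
    }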