illuma-agents 1.0.8 → 1.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (217)
  1. package/LICENSE +1 -5
  2. package/dist/cjs/common/enum.cjs +1 -2
  3. package/dist/cjs/common/enum.cjs.map +1 -1
  4. package/dist/cjs/instrumentation.cjs.map +1 -1
  5. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  6. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +79 -2
  7. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  8. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -1
  9. package/dist/cjs/llm/bedrock/index.cjs +99 -0
  10. package/dist/cjs/llm/bedrock/index.cjs.map +1 -0
  11. package/dist/cjs/llm/fake.cjs.map +1 -1
  12. package/dist/cjs/llm/providers.cjs +13 -16
  13. package/dist/cjs/llm/providers.cjs.map +1 -1
  14. package/dist/cjs/llm/text.cjs.map +1 -1
  15. package/dist/cjs/messages/core.cjs +14 -14
  16. package/dist/cjs/messages/core.cjs.map +1 -1
  17. package/dist/cjs/messages/ids.cjs.map +1 -1
  18. package/dist/cjs/messages/prune.cjs.map +1 -1
  19. package/dist/cjs/run.cjs +10 -1
  20. package/dist/cjs/run.cjs.map +1 -1
  21. package/dist/cjs/splitStream.cjs.map +1 -1
  22. package/dist/cjs/stream.cjs +4 -1
  23. package/dist/cjs/stream.cjs.map +1 -1
  24. package/dist/cjs/tools/ToolNode.cjs +10 -1
  25. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  26. package/dist/cjs/tools/handlers.cjs +29 -25
  27. package/dist/cjs/tools/handlers.cjs.map +1 -1
  28. package/dist/cjs/tools/search/anthropic.cjs.map +1 -1
  29. package/dist/cjs/tools/search/content.cjs.map +1 -1
  30. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  31. package/dist/cjs/tools/search/format.cjs.map +1 -1
  32. package/dist/cjs/tools/search/highlights.cjs.map +1 -1
  33. package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
  34. package/dist/cjs/tools/search/schema.cjs +25 -25
  35. package/dist/cjs/tools/search/schema.cjs.map +1 -1
  36. package/dist/cjs/tools/search/search.cjs +6 -1
  37. package/dist/cjs/tools/search/search.cjs.map +1 -1
  38. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -1
  39. package/dist/cjs/tools/search/tool.cjs +162 -35
  40. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  41. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  42. package/dist/cjs/utils/graph.cjs.map +1 -1
  43. package/dist/cjs/utils/llm.cjs +0 -1
  44. package/dist/cjs/utils/llm.cjs.map +1 -1
  45. package/dist/cjs/utils/misc.cjs.map +1 -1
  46. package/dist/cjs/utils/run.cjs.map +1 -1
  47. package/dist/cjs/utils/title.cjs +7 -7
  48. package/dist/cjs/utils/title.cjs.map +1 -1
  49. package/dist/esm/common/enum.mjs +1 -2
  50. package/dist/esm/common/enum.mjs.map +1 -1
  51. package/dist/esm/instrumentation.mjs.map +1 -1
  52. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  53. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +79 -2
  54. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  55. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -1
  56. package/dist/esm/llm/bedrock/index.mjs +97 -0
  57. package/dist/esm/llm/bedrock/index.mjs.map +1 -0
  58. package/dist/esm/llm/fake.mjs.map +1 -1
  59. package/dist/esm/llm/providers.mjs +2 -5
  60. package/dist/esm/llm/providers.mjs.map +1 -1
  61. package/dist/esm/llm/text.mjs.map +1 -1
  62. package/dist/esm/messages/core.mjs +14 -14
  63. package/dist/esm/messages/core.mjs.map +1 -1
  64. package/dist/esm/messages/ids.mjs.map +1 -1
  65. package/dist/esm/messages/prune.mjs.map +1 -1
  66. package/dist/esm/run.mjs +10 -1
  67. package/dist/esm/run.mjs.map +1 -1
  68. package/dist/esm/splitStream.mjs.map +1 -1
  69. package/dist/esm/stream.mjs +4 -1
  70. package/dist/esm/stream.mjs.map +1 -1
  71. package/dist/esm/tools/ToolNode.mjs +10 -1
  72. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  73. package/dist/esm/tools/handlers.mjs +30 -26
  74. package/dist/esm/tools/handlers.mjs.map +1 -1
  75. package/dist/esm/tools/search/anthropic.mjs.map +1 -1
  76. package/dist/esm/tools/search/content.mjs.map +1 -1
  77. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  78. package/dist/esm/tools/search/format.mjs.map +1 -1
  79. package/dist/esm/tools/search/highlights.mjs.map +1 -1
  80. package/dist/esm/tools/search/rerankers.mjs.map +1 -1
  81. package/dist/esm/tools/search/schema.mjs +25 -25
  82. package/dist/esm/tools/search/schema.mjs.map +1 -1
  83. package/dist/esm/tools/search/search.mjs +6 -1
  84. package/dist/esm/tools/search/search.mjs.map +1 -1
  85. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -1
  86. package/dist/esm/tools/search/tool.mjs +162 -35
  87. package/dist/esm/tools/search/tool.mjs.map +1 -1
  88. package/dist/esm/tools/search/utils.mjs.map +1 -1
  89. package/dist/esm/utils/graph.mjs.map +1 -1
  90. package/dist/esm/utils/llm.mjs +0 -1
  91. package/dist/esm/utils/llm.mjs.map +1 -1
  92. package/dist/esm/utils/misc.mjs.map +1 -1
  93. package/dist/esm/utils/run.mjs.map +1 -1
  94. package/dist/esm/utils/title.mjs +7 -7
  95. package/dist/esm/utils/title.mjs.map +1 -1
  96. package/dist/types/common/enum.d.ts +1 -2
  97. package/dist/types/llm/bedrock/index.d.ts +36 -0
  98. package/dist/types/tools/search/types.d.ts +2 -0
  99. package/dist/types/types/llm.d.ts +3 -8
  100. package/package.json +15 -11
  101. package/src/common/enum.ts +1 -2
  102. package/src/common/index.ts +1 -1
  103. package/src/instrumentation.ts +22 -22
  104. package/src/llm/anthropic/llm.spec.ts +1442 -1442
  105. package/src/llm/anthropic/types.ts +140 -140
  106. package/src/llm/anthropic/utils/message_inputs.ts +757 -660
  107. package/src/llm/anthropic/utils/output_parsers.ts +133 -133
  108. package/src/llm/anthropic/utils/tools.ts +29 -29
  109. package/src/llm/bedrock/index.ts +128 -0
  110. package/src/llm/fake.ts +133 -133
  111. package/src/llm/google/utils/tools.ts +160 -160
  112. package/src/llm/openai/types.ts +24 -24
  113. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -90
  114. package/src/llm/providers.ts +2 -7
  115. package/src/llm/text.ts +94 -94
  116. package/src/messages/core.ts +463 -463
  117. package/src/messages/formatAgentMessages.tools.test.ts +400 -400
  118. package/src/messages/formatMessage.test.ts +693 -693
  119. package/src/messages/ids.ts +26 -26
  120. package/src/messages/prune.ts +567 -567
  121. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -81
  122. package/src/mockStream.ts +98 -98
  123. package/src/prompts/collab.ts +5 -5
  124. package/src/prompts/index.ts +1 -1
  125. package/src/prompts/taskmanager.ts +61 -61
  126. package/src/run.ts +13 -4
  127. package/src/scripts/ant_web_search_edge_case.ts +162 -0
  128. package/src/scripts/ant_web_search_error_edge_case.ts +148 -0
  129. package/src/scripts/args.ts +48 -48
  130. package/src/scripts/caching.ts +123 -123
  131. package/src/scripts/code_exec_files.ts +193 -193
  132. package/src/scripts/empty_input.ts +137 -137
  133. package/src/scripts/image.ts +178 -178
  134. package/src/scripts/memory.ts +97 -97
  135. package/src/scripts/thinking.ts +149 -149
  136. package/src/specs/anthropic.simple.test.ts +67 -0
  137. package/src/specs/spec.utils.ts +3 -3
  138. package/src/specs/token-distribution-edge-case.test.ts +316 -316
  139. package/src/specs/tool-error.test.ts +193 -193
  140. package/src/splitStream.test.ts +691 -691
  141. package/src/splitStream.ts +234 -234
  142. package/src/stream.test.ts +94 -94
  143. package/src/stream.ts +4 -1
  144. package/src/tools/ToolNode.ts +12 -1
  145. package/src/tools/handlers.ts +32 -28
  146. package/src/tools/search/anthropic.ts +51 -51
  147. package/src/tools/search/content.test.ts +173 -173
  148. package/src/tools/search/content.ts +147 -147
  149. package/src/tools/search/direct-url.test.ts +530 -0
  150. package/src/tools/search/firecrawl.ts +210 -210
  151. package/src/tools/search/format.ts +250 -250
  152. package/src/tools/search/highlights.ts +320 -320
  153. package/src/tools/search/index.ts +2 -2
  154. package/src/tools/search/jina-reranker.test.ts +126 -126
  155. package/src/tools/search/output.md +2775 -2775
  156. package/src/tools/search/rerankers.ts +242 -242
  157. package/src/tools/search/schema.ts +63 -63
  158. package/src/tools/search/search.ts +766 -759
  159. package/src/tools/search/serper-scraper.ts +155 -155
  160. package/src/tools/search/test.html +883 -883
  161. package/src/tools/search/test.md +642 -642
  162. package/src/tools/search/test.ts +159 -159
  163. package/src/tools/search/tool.ts +619 -471
  164. package/src/tools/search/types.ts +689 -687
  165. package/src/tools/search/utils.ts +79 -79
  166. package/src/types/index.ts +6 -6
  167. package/src/types/llm.ts +2 -8
  168. package/src/utils/graph.ts +10 -10
  169. package/src/utils/llm.ts +26 -27
  170. package/src/utils/llmConfig.ts +5 -3
  171. package/src/utils/logging.ts +48 -48
  172. package/src/utils/misc.ts +57 -57
  173. package/src/utils/run.ts +100 -100
  174. package/src/utils/title.ts +165 -165
  175. package/dist/cjs/llm/ollama/index.cjs +0 -70
  176. package/dist/cjs/llm/ollama/index.cjs.map +0 -1
  177. package/dist/cjs/llm/ollama/utils.cjs +0 -158
  178. package/dist/cjs/llm/ollama/utils.cjs.map +0 -1
  179. package/dist/esm/llm/ollama/index.mjs +0 -68
  180. package/dist/esm/llm/ollama/index.mjs.map +0 -1
  181. package/dist/esm/llm/ollama/utils.mjs +0 -155
  182. package/dist/esm/llm/ollama/utils.mjs.map +0 -1
  183. package/dist/types/llm/ollama/index.d.ts +0 -8
  184. package/dist/types/llm/ollama/utils.d.ts +0 -7
  185. package/src/llm/ollama/index.ts +0 -92
  186. package/src/llm/ollama/utils.ts +0 -193
  187. package/src/proto/CollabGraph.ts +0 -269
  188. package/src/proto/TaskManager.ts +0 -243
  189. package/src/proto/collab.ts +0 -200
  190. package/src/proto/collab_design.ts +0 -184
  191. package/src/proto/collab_design_v2.ts +0 -224
  192. package/src/proto/collab_design_v3.ts +0 -255
  193. package/src/proto/collab_design_v4.ts +0 -220
  194. package/src/proto/collab_design_v5.ts +0 -251
  195. package/src/proto/collab_graph.ts +0 -181
  196. package/src/proto/collab_original.ts +0 -123
  197. package/src/proto/example.ts +0 -93
  198. package/src/proto/example_new.ts +0 -68
  199. package/src/proto/example_old.ts +0 -201
  200. package/src/proto/example_test.ts +0 -152
  201. package/src/proto/example_test_anthropic.ts +0 -100
  202. package/src/proto/log_stream.ts +0 -202
  203. package/src/proto/main_collab_community_event.ts +0 -133
  204. package/src/proto/main_collab_design_v2.ts +0 -96
  205. package/src/proto/main_collab_design_v4.ts +0 -100
  206. package/src/proto/main_collab_design_v5.ts +0 -135
  207. package/src/proto/main_collab_global_analysis.ts +0 -122
  208. package/src/proto/main_collab_hackathon_event.ts +0 -153
  209. package/src/proto/main_collab_space_mission.ts +0 -153
  210. package/src/proto/main_philosophy.ts +0 -210
  211. package/src/proto/original_script.ts +0 -126
  212. package/src/proto/standard.ts +0 -100
  213. package/src/proto/stream.ts +0 -56
  214. package/src/proto/tasks.ts +0 -118
  215. package/src/proto/tools/global_analysis_tools.ts +0 -86
  216. package/src/proto/tools/space_mission_tools.ts +0 -60
  217. package/src/proto/vertexai.ts +0 -54
package/src/messages/prune.ts
@@ -1,567 +1,567 @@
import {
  AIMessage,
  BaseMessage,
  UsageMetadata,
} from '@langchain/core/messages';
import type {
  ThinkingContentText,
  MessageContentComplex,
  ReasoningContentText,
} from '@/types/stream';
import type { TokenCounter } from '@/types/run';
import { ContentTypes, Providers } from '@/common';

export type PruneMessagesFactoryParams = {
  provider?: Providers;
  maxTokens: number;
  startIndex: number;
  tokenCounter: TokenCounter;
  indexTokenCountMap: Record<string, number | undefined>;
  thinkingEnabled?: boolean;
};
export type PruneMessagesParams = {
  messages: BaseMessage[];
  usageMetadata?: Partial<UsageMetadata>;
  startType?: ReturnType<BaseMessage['getType']>;
};

function isIndexInContext(
  arrayA: unknown[],
  arrayB: unknown[],
  targetIndex: number
): boolean {
  const startingIndexInA = arrayA.length - arrayB.length;
  return targetIndex >= startingIndexInA;
}

function addThinkingBlock(
  message: AIMessage,
  thinkingBlock: ThinkingContentText | ReasoningContentText
): AIMessage {
  const content: MessageContentComplex[] = Array.isArray(message.content)
    ? (message.content as MessageContentComplex[])
    : [
        {
          type: ContentTypes.TEXT,
          text: message.content,
        },
      ];
  /** Edge case: the message already has the thinking block */
  if (content[0].type === thinkingBlock.type) {
    return message;
  }
  content.unshift(thinkingBlock);
  return new AIMessage({
    ...message,
    content,
  });
}

/**
 * Calculates the total tokens from a single usage object
 *
 * @param usage The usage metadata object containing token information
 * @returns An object containing the total input and output tokens
 */
export function calculateTotalTokens(
  usage: Partial<UsageMetadata>
): UsageMetadata {
  const baseInputTokens = Number(usage.input_tokens) || 0;
  const cacheCreation = Number(usage.input_token_details?.cache_creation) || 0;
  const cacheRead = Number(usage.input_token_details?.cache_read) || 0;

  const totalInputTokens = baseInputTokens + cacheCreation + cacheRead;
  const totalOutputTokens = Number(usage.output_tokens) || 0;

  return {
    input_tokens: totalInputTokens,
    output_tokens: totalOutputTokens,
    total_tokens: totalInputTokens + totalOutputTokens,
  };
}

export type PruningResult = {
  context: BaseMessage[];
  remainingContextTokens: number;
  messagesToRefine: BaseMessage[];
  thinkingStartIndex?: number;
};

/**
 * Processes an array of messages and returns a context of messages that fit within a specified token limit.
 * It iterates over the messages from newest to oldest, adding them to the context until the token limit is reached.
 *
 * @param options Configuration options for processing messages
 * @returns Object containing the message context, remaining tokens, messages not included, and the thinking start index
 */
export function getMessagesWithinTokenLimit({
  messages: _messages,
  maxContextTokens,
  indexTokenCountMap,
  startType: _startType,
  thinkingEnabled,
  tokenCounter,
  thinkingStartIndex: _thinkingStartIndex = -1,
  reasoningType = ContentTypes.THINKING,
}: {
  messages: BaseMessage[];
  maxContextTokens: number;
  indexTokenCountMap: Record<string, number | undefined>;
  startType?: string | string[];
  thinkingEnabled?: boolean;
  tokenCounter: TokenCounter;
  thinkingStartIndex?: number;
  reasoningType?: ContentTypes.THINKING | ContentTypes.REASONING_CONTENT;
}): PruningResult {
  // Every reply is primed with <|start|>assistant<|message|>, so we
  // start with 3 tokens for the label after all messages have been counted.
  let currentTokenCount = 3;
  const instructions =
    _messages[0]?.getType() === 'system' ? _messages[0] : undefined;
  const instructionsTokenCount =
    instructions != null ? (indexTokenCountMap[0] ?? 0) : 0;
  const initialContextTokens = maxContextTokens - instructionsTokenCount;
  let remainingContextTokens = initialContextTokens;
  let startType = _startType;
  const originalLength = _messages.length;
  const messages = [..._messages];
  /**
   * IMPORTANT: this context array gets reversed at the end, since the latest messages get pushed first.
   *
   * This may be confusing to read, but it is done to ensure the context is in the correct order for the model.
   */
  let context: Array<BaseMessage | undefined> = [];

  let thinkingStartIndex = _thinkingStartIndex;
  let thinkingEndIndex = -1;
  let thinkingBlock: ThinkingContentText | ReasoningContentText | undefined;
  const endIndex = instructions != null ? 1 : 0;
  const prunedMemory: BaseMessage[] = [];

  if (_thinkingStartIndex > -1) {
    const thinkingMessageContent = messages[_thinkingStartIndex]?.content;
    if (Array.isArray(thinkingMessageContent)) {
      thinkingBlock = thinkingMessageContent.find(
        (content) => content.type === reasoningType
      ) as ThinkingContentText | undefined;
    }
  }

  if (currentTokenCount < remainingContextTokens) {
    let currentIndex = messages.length;
    while (
      messages.length > 0 &&
      currentTokenCount < remainingContextTokens &&
      currentIndex > endIndex
    ) {
      currentIndex--;
      if (messages.length === 1 && instructions) {
        break;
      }
      const poppedMessage = messages.pop();
      if (!poppedMessage) continue;
      const messageType = poppedMessage.getType();
      if (
        thinkingEnabled === true &&
        thinkingEndIndex === -1 &&
        currentIndex === originalLength - 1 &&
        (messageType === 'ai' || messageType === 'tool')
      ) {
        thinkingEndIndex = currentIndex;
      }
      if (
        thinkingEndIndex > -1 &&
        !thinkingBlock &&
        thinkingStartIndex < 0 &&
        messageType === 'ai' &&
        Array.isArray(poppedMessage.content)
      ) {
        thinkingBlock = poppedMessage.content.find(
          (content) => content.type === reasoningType
        ) as ThinkingContentText | undefined;
        thinkingStartIndex = thinkingBlock != null ? currentIndex : -1;
      }
      /** False start: the latest message was not part of a multi-assistant/tool sequence of messages */
      if (
        thinkingEndIndex > -1 &&
        currentIndex === thinkingEndIndex - 1 &&
        messageType !== 'ai' &&
        messageType !== 'tool'
      ) {
        thinkingEndIndex = -1;
      }

      const tokenCount = indexTokenCountMap[currentIndex] ?? 0;

      if (
        prunedMemory.length === 0 &&
        currentTokenCount + tokenCount <= remainingContextTokens
      ) {
        context.push(poppedMessage);
        currentTokenCount += tokenCount;
      } else {
        prunedMemory.push(poppedMessage);
        if (thinkingEndIndex > -1 && thinkingStartIndex < 0) {
          continue;
        }
        break;
      }
    }

    if (context[context.length - 1]?.getType() === 'tool') {
      startType = ['ai', 'human'];
    }

    if (startType != null && startType.length > 0 && context.length > 0) {
      let requiredTypeIndex = -1;

      let totalTokens = 0;
      for (let i = context.length - 1; i >= 0; i--) {
        const currentType = context[i]?.getType() ?? '';
        if (
          Array.isArray(startType)
            ? startType.includes(currentType)
            : currentType === startType
        ) {
          requiredTypeIndex = i + 1;
          break;
        }
        const originalIndex = originalLength - 1 - i;
        totalTokens += indexTokenCountMap[originalIndex] ?? 0;
      }

      if (requiredTypeIndex > 0) {
        currentTokenCount -= totalTokens;
        context = context.slice(0, requiredTypeIndex);
      }
    }
  }

  if (instructions && originalLength > 0) {
    context.push(_messages[0] as BaseMessage);
    messages.shift();
  }

  remainingContextTokens -= currentTokenCount;
  const result: PruningResult = {
    remainingContextTokens,
    context: [] as BaseMessage[],
    messagesToRefine: prunedMemory,
  };

  if (thinkingStartIndex > -1) {
    result.thinkingStartIndex = thinkingStartIndex;
  }

  if (
    prunedMemory.length === 0 ||
    thinkingEndIndex < 0 ||
    (thinkingStartIndex > -1 &&
      isIndexInContext(_messages, context, thinkingStartIndex))
  ) {
    // we reverse at this step to ensure the context is in the correct order for the model, since it was built newest-first
    result.context = context.reverse() as BaseMessage[];
    return result;
  }

  if (thinkingEndIndex > -1 && thinkingStartIndex < 0) {
    throw new Error(
      'The payload is malformed. There is a thinking sequence but no "AI" messages with thinking blocks.'
    );
  }

  if (!thinkingBlock) {
    throw new Error(
      'The payload is malformed. There is a thinking sequence but no thinking block found.'
    );
  }

  // Since we have a thinking sequence, we need to find the last assistant message
  // in the latest AI/tool sequence to add the thinking block that falls outside of the current context.
  // Latest messages are ordered first.
  let assistantIndex = -1;
  for (let i = 0; i < context.length; i++) {
    const currentMessage = context[i];
    const type = currentMessage?.getType();
    if (type === 'ai') {
      assistantIndex = i;
    }
    if (assistantIndex > -1 && (type === 'human' || type === 'system')) {
      break;
    }
  }

  if (assistantIndex === -1) {
    throw new Error(
      'The payload is malformed. There is a thinking sequence but no "AI" messages to append thinking blocks to.'
    );
  }

  thinkingStartIndex = originalLength - 1 - assistantIndex;
  const thinkingTokenCount = tokenCounter(
    new AIMessage({ content: [thinkingBlock] })
  );
  const newRemainingCount = remainingContextTokens - thinkingTokenCount;
  const newMessage = addThinkingBlock(
    context[assistantIndex] as AIMessage,
    thinkingBlock
  );
  context[assistantIndex] = newMessage;
  if (newRemainingCount > 0) {
    result.context = context.reverse() as BaseMessage[];
    return result;
  }

  const thinkingMessage: AIMessage = context[assistantIndex] as AIMessage;
  // now we need to do an additional round of pruning, this time making the thinking block fit
  const newThinkingMessageTokenCount =
    (indexTokenCountMap[thinkingStartIndex] ?? 0) + thinkingTokenCount;
  remainingContextTokens = initialContextTokens - newThinkingMessageTokenCount;
  currentTokenCount = 3;
  let newContext: BaseMessage[] = [];
  const secondRoundMessages = [..._messages];
  let currentIndex = secondRoundMessages.length;
  while (
    secondRoundMessages.length > 0 &&
    currentTokenCount < remainingContextTokens &&
    currentIndex > thinkingStartIndex
  ) {
    currentIndex--;
    const poppedMessage = secondRoundMessages.pop();
    if (!poppedMessage) continue;
    const tokenCount = indexTokenCountMap[currentIndex] ?? 0;
    if (currentTokenCount + tokenCount <= remainingContextTokens) {
      newContext.push(poppedMessage);
      currentTokenCount += tokenCount;
    } else {
      messages.push(poppedMessage);
      break;
    }
  }

  const firstMessage: AIMessage = newContext[newContext.length - 1];
  const firstMessageType = newContext[newContext.length - 1].getType();
  if (firstMessageType === 'tool') {
    startType = ['ai', 'human'];
  }

  if (startType != null && startType.length > 0 && newContext.length > 0) {
    let requiredTypeIndex = -1;

    let totalTokens = 0;
    for (let i = newContext.length - 1; i >= 0; i--) {
      const currentType = newContext[i]?.getType() ?? '';
      if (
        Array.isArray(startType)
          ? startType.includes(currentType)
          : currentType === startType
      ) {
        requiredTypeIndex = i + 1;
        break;
      }
      const originalIndex = originalLength - 1 - i;
      totalTokens += indexTokenCountMap[originalIndex] ?? 0;
    }

    if (requiredTypeIndex > 0) {
      currentTokenCount -= totalTokens;
      newContext = newContext.slice(0, requiredTypeIndex);
    }
  }

  if (firstMessageType === 'ai') {
    const newMessage = addThinkingBlock(firstMessage, thinkingBlock);
    newContext[newContext.length - 1] = newMessage;
  } else {
    newContext.push(thinkingMessage);
  }

  if (instructions && originalLength > 0) {
    newContext.push(_messages[0] as BaseMessage);
    secondRoundMessages.shift();
  }

  result.context = newContext.reverse();
  return result;
}

export function checkValidNumber(value: unknown): value is number {
  return typeof value === 'number' && !isNaN(value) && value > 0;
}

type ThinkingBlocks = {
  thinking_blocks?: Array<{
    type: 'thinking';
    thinking: string;
    signature: string;
  }>;
};

export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
  const indexTokenCountMap = { ...factoryParams.indexTokenCountMap };
  let lastTurnStartIndex = factoryParams.startIndex;
  let lastCutOffIndex = 0;
  let totalTokens = Object.values(indexTokenCountMap).reduce(
    (a = 0, b = 0) => a + b,
    0
  ) as number;
  let runThinkingStartIndex = -1;
  return function pruneMessages(params: PruneMessagesParams): {
    context: BaseMessage[];
    indexTokenCountMap: Record<string, number | undefined>;
  } {
    if (
      factoryParams.provider === Providers.OPENAI &&
      factoryParams.thinkingEnabled === true
    ) {
      for (let i = lastTurnStartIndex; i < params.messages.length; i++) {
        const m = params.messages[i];
        if (
          m.getType() === 'ai' &&
          typeof m.additional_kwargs.reasoning_content === 'string' &&
          Array.isArray(
            (
              m.additional_kwargs.provider_specific_fields as
                | ThinkingBlocks
                | undefined
            )?.thinking_blocks
          ) &&
          (m as AIMessage).tool_calls &&
          ((m as AIMessage).tool_calls?.length ?? 0) > 0
        ) {
          const message = m as AIMessage;
          const thinkingBlocks = (
            message.additional_kwargs.provider_specific_fields as ThinkingBlocks
          ).thinking_blocks;
          const signature =
            thinkingBlocks?.[thinkingBlocks.length - 1].signature;
          const thinkingBlock: ThinkingContentText = {
            signature,
            type: ContentTypes.THINKING,
            thinking: message.additional_kwargs.reasoning_content as string,
          };

          params.messages[i] = new AIMessage({
            ...message,
            content: [thinkingBlock],
            additional_kwargs: {
              ...message.additional_kwargs,
              reasoning_content: undefined,
            },
          });
        }
      }
    }

    let currentUsage: UsageMetadata | undefined;
    if (
      params.usageMetadata &&
      (checkValidNumber(params.usageMetadata.input_tokens) ||
        (checkValidNumber(params.usageMetadata.input_token_details) &&
          (checkValidNumber(
            params.usageMetadata.input_token_details.cache_creation
          ) ||
            checkValidNumber(
              params.usageMetadata.input_token_details.cache_read
            )))) &&
      checkValidNumber(params.usageMetadata.output_tokens)
    ) {
      currentUsage = calculateTotalTokens(params.usageMetadata);
      totalTokens = currentUsage.total_tokens;
    }

    const newOutputs = new Set<number>();
    for (let i = lastTurnStartIndex; i < params.messages.length; i++) {
      const message = params.messages[i];
      if (
        i === lastTurnStartIndex &&
        indexTokenCountMap[i] === undefined &&
        currentUsage
      ) {
        indexTokenCountMap[i] = currentUsage.output_tokens;
      } else if (indexTokenCountMap[i] === undefined) {
        indexTokenCountMap[i] = factoryParams.tokenCounter(message);
        if (currentUsage) {
          newOutputs.add(i);
        }
        totalTokens += indexTokenCountMap[i] ?? 0;
      }
    }

    // If `currentUsage` is defined, we need to distribute the current total tokens to our `indexTokenCountMap`.
    // We must distribute it in a weighted manner, so that the total token count is equal to `currentUsage.total_tokens`,
    // relative to the manually counted tokens in `indexTokenCountMap`.
    // EDGE CASE: when the resulting context gets pruned, we should not distribute the usage for messages that are not in the context.
    if (currentUsage) {
      let totalIndexTokens = 0;
      if (params.messages[0].getType() === 'system') {
        totalIndexTokens += indexTokenCountMap[0] ?? 0;
      }
      for (let i = lastCutOffIndex; i < params.messages.length; i++) {
        if (i === 0 && params.messages[0].getType() === 'system') {
          continue;
        }
        if (newOutputs.has(i)) {
          continue;
        }
        totalIndexTokens += indexTokenCountMap[i] ?? 0;
      }

      // Calculate ratio based only on messages that remain in the context
      const ratio = currentUsage.total_tokens / totalIndexTokens;
      const isRatioSafe = ratio >= 1 / 3 && ratio <= 2.5;

      // Apply the ratio adjustment only to messages at or after lastCutOffIndex, and only if the ratio is safe
      if (isRatioSafe) {
        if (
          params.messages[0].getType() === 'system' &&
          lastCutOffIndex !== 0
        ) {
          indexTokenCountMap[0] = Math.round(
            (indexTokenCountMap[0] ?? 0) * ratio
          );
        }

        for (let i = lastCutOffIndex; i < params.messages.length; i++) {
          if (newOutputs.has(i)) {
            continue;
          }
          indexTokenCountMap[i] = Math.round(
            (indexTokenCountMap[i] ?? 0) * ratio
          );
        }
      }
    }

    lastTurnStartIndex = params.messages.length;
    if (lastCutOffIndex === 0 && totalTokens <= factoryParams.maxTokens) {
      return { context: params.messages, indexTokenCountMap };
    }

    const { context, thinkingStartIndex } = getMessagesWithinTokenLimit({
      maxContextTokens: factoryParams.maxTokens,
      messages: params.messages,
      indexTokenCountMap,
      startType: params.startType,
      thinkingEnabled: factoryParams.thinkingEnabled,
      tokenCounter: factoryParams.tokenCounter,
      reasoningType:
        factoryParams.provider === Providers.BEDROCK
          ? ContentTypes.REASONING_CONTENT
          : ContentTypes.THINKING,
      thinkingStartIndex:
        factoryParams.thinkingEnabled === true
          ? runThinkingStartIndex
          : undefined,
    });
    runThinkingStartIndex = thinkingStartIndex ?? -1;
    /** The cutoff index is where `context` begins, relative to `params.messages` */
    lastCutOffIndex = Math.max(
      params.messages.length -
        (context.length - (context[0]?.getType() === 'system' ? 1 : 0)),
      0
    );

    return { context, indexTokenCountMap };
  };
}
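
For reference, the usage-normalization step above can be traced with concrete numbers. This is a hedged sketch, not part of the package: the figures are invented and the relative import path is hypothetical.

import { calculateTotalTokens } from './prune'; // hypothetical local path

// Assumed provider-reported usage for a single turn:
const usage = calculateTotalTokens({
  input_tokens: 90,
  input_token_details: { cache_creation: 20, cache_read: 10 },
  output_tokens: 30,
});
// => { input_tokens: 120, output_tokens: 30, total_tokens: 150 }

// If the manually counted entries in `indexTokenCountMap` sum to 100 for the
// same messages, the redistribution ratio is 150 / 100 = 1.5. Since
// 1/3 <= 1.5 <= 2.5, the ratio is "safe" and each stale count is rescaled
// with Math.round(count * 1.5); otherwise the counts are left untouched.
console.log(usage.total_tokens); // 150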
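
And a minimal sketch of how the factory might be driven from the outside. This is an assumption, not code from the package: the chars/4 token counter, the message fixture, and the relative import are all stand-ins, and the counter's shape is inferred from how the module calls `tokenCounter`.

import {
  AIMessage,
  HumanMessage,
  SystemMessage,
  BaseMessage,
} from '@langchain/core/messages';
import { createPruneMessages } from './prune'; // hypothetical local path

// Stand-in counter: a real integration would use the model's tokenizer.
const tokenCounter = (message: BaseMessage): number => {
  const text =
    typeof message.content === 'string'
      ? message.content
      : JSON.stringify(message.content);
  return Math.ceil(text.length / 4);
};

const messages: BaseMessage[] = [
  new SystemMessage('You are a helpful assistant.'),
  new HumanMessage('Summarize our project status.'),
  new AIMessage('Here is the current status...'),
];

const pruneMessages = createPruneMessages({
  maxTokens: 4096,
  startIndex: 0,
  tokenCounter,
  indexTokenCountMap: Object.fromEntries(
    messages.map((m, i) => [i, tokenCounter(m)] as const)
  ),
});

// Each turn, pass the full history (plus usage metadata when the provider
// reports it); the closure returns a context that fits the token budget.
const { context, indexTokenCountMap } = pruneMessages({
  messages,
  usageMetadata: { input_tokens: 120, output_tokens: 45 },
});
console.log(context.length, Object.keys(indexTokenCountMap).length);

The factory closes over `lastTurnStartIndex` and the last cutoff, so the same `pruneMessages` closure should be reused across the turns of one run rather than recreated per call.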