@librechat/agents 2.3.8 → 2.3.91

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -14,8 +14,15 @@ function addThinkingBlock(message, thinkingBlock) {
             type: _enum.ContentTypes.TEXT,
             text: message.content,
         }];
+    /** Edge case, the message already has the thinking block */
+    if (content[0].type === thinkingBlock.type) {
+        return message;
+    }
     content.unshift(thinkingBlock);
-    return content;
+    return new messages.AIMessage({
+        ...message,
+        content
+    });
 }
 /**
  * Calculates the total tokens from a single usage object
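Annotation: this `addThinkingBlock` change underpins the rest of the diff. Instead of mutating the message's `content` array in place and returning it, the helper now short-circuits when the thinking block is already present and otherwise returns a replacement `AIMessage`, which callers swap into the context array. A standalone sketch of the new contract (not the package's private helper itself; the `Part` shape stands in for its ThinkingContentText | ReasoningContentText union):

import { AIMessage } from '@langchain/core/messages';

type Part = { type: string; [key: string]: unknown };

// Mirrors the new compiled logic shown above.
function addThinkingBlockSketch(message: AIMessage, thinkingBlock: Part): AIMessage {
    const content: Part[] = Array.isArray(message.content)
        ? (message.content as Part[])
        : [{ type: 'text', text: message.content }];
    // Edge case: the block already leads the content, return the message as-is.
    if (content[0].type === thinkingBlock.type) {
        return message;
    }
    content.unshift(thinkingBlock);
    // A replacement message is returned; callers now swap the array element
    // instead of reassigning `message.content` on the existing instance.
    return new AIMessage({ ...message, content });
}

const block: Part = { type: 'thinking', thinking: 'hypothetical reasoning trace' };
const first = addThinkingBlockSketch(new AIMessage({ content: 'answer' }), block);
console.log(first.content);                                  // [thinking block, text part]
console.log(addThinkingBlockSketch(first, block) === first); // true: idempotent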
@@ -42,7 +49,7 @@ function calculateTotalTokens(usage) {
  * @param options Configuration options for processing messages
  * @returns Object containing the message context, remaining tokens, messages not included, and summary index
  */
-function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, reasoningType = _enum.ContentTypes.THINKING, }) {
+function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, thinkingStartIndex: _thinkingStartIndex = -1, reasoningType = _enum.ContentTypes.THINKING, }) {
     // Every reply is primed with <|start|>assistant<|message|>, so we
     // start with 3 tokens for the label after all messages have been counted.
     let currentTokenCount = 3;
@@ -59,11 +66,17 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
      * This may be confusing to read, but it is done to ensure the context is in the correct order for the model.
      * */
     let context = [];
-    let thinkingStartIndex = -1;
+    let thinkingStartIndex = _thinkingStartIndex;
     let thinkingEndIndex = -1;
     let thinkingBlock;
     const endIndex = instructions != null ? 1 : 0;
     const prunedMemory = [];
+    if (_thinkingStartIndex > -1) {
+        const thinkingMessageContent = messages$1[_thinkingStartIndex]?.content;
+        if (Array.isArray(thinkingMessageContent)) {
+            thinkingBlock = thinkingMessageContent.find((content) => content.type === reasoningType);
+        }
+    }
     if (currentTokenCount < remainingContextTokens) {
         let currentIndex = messages$1.length;
         while (messages$1.length > 0 && currentTokenCount < remainingContextTokens && currentIndex > endIndex) {
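Annotation: the new `thinkingStartIndex: _thinkingStartIndex = -1` parameter and the lookup added before the pruning loop work together, so a thinking block located on an earlier pass can be re-fetched by index instead of re-discovered while popping messages. A minimal sketch of that lookup, with invented fixtures and `'thinking'` standing in for the resolved `reasoningType`:

import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';

const reasoningType = 'thinking';
const messagesList: BaseMessage[] = [
    new HumanMessage('question'),
    new AIMessage({
        content: [
            { type: 'thinking', thinking: 'hypothetical reasoning trace' },
            { type: 'text', text: 'answer' },
        ],
    }),
];

const _thinkingStartIndex = 1; // remembered from a previous prune pass; -1 disables the lookup
const thinkingMessageContent = messagesList[_thinkingStartIndex]?.content;
const thinkingBlock = Array.isArray(thinkingMessageContent)
    ? thinkingMessageContent.find((part) => part.type === reasoningType)
    : undefined;
console.log(thinkingBlock?.type); // 'thinking'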
@@ -95,19 +108,30 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
             }
             else {
                 prunedMemory.push(poppedMessage);
-                if (thinkingEndIndex > -1) {
+                if (thinkingEndIndex > -1 && thinkingStartIndex < 0) {
                     continue;
                 }
                 break;
             }
         }
-        if (thinkingEndIndex > -1 && context[context.length - 1]?.getType() === 'tool') {
-            startType = 'ai';
+        if (context[context.length - 1]?.getType() === 'tool') {
+            startType = ['ai', 'human'];
         }
-        if (startType != null && startType && context.length > 0) {
-            const requiredTypeIndex = context.findIndex(msg => msg?.getType() === startType);
+        if (startType != null && startType.length > 0 && context.length > 0) {
+            let requiredTypeIndex = -1;
+            let totalTokens = 0;
+            for (let i = context.length - 1; i >= 0; i--) {
+                const currentType = context[i]?.getType() ?? '';
+                if (Array.isArray(startType) ? startType.includes(currentType) : currentType === startType) {
+                    requiredTypeIndex = i + 1;
+                    break;
+                }
+                const originalIndex = originalLength - 1 - i;
+                totalTokens += indexTokenCountMap[originalIndex] ?? 0;
+            }
             if (requiredTypeIndex > 0) {
-                context = context.slice(requiredTypeIndex);
+                currentTokenCount -= totalTokens;
+                context = context.slice(0, requiredTypeIndex);
             }
         }
     }
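Annotation: this is the most consequential change in the hunk. `context` is built newest-first and reversed at the end, so a context that must *start* with a given message type has to be trimmed from the array's tail (the oldest side), not its head. The rewrite scans backwards from the oldest element, accepts a list of start types (so an orphaned tool result can be anchored by either an 'ai' or a 'human' message), and reclaims the token counts of whatever it drops. A worked sketch with invented fixtures:

import { AIMessage, BaseMessage, ToolMessage } from '@langchain/core/messages';

// Source thread: human(0), ai(1), tool(2), ai(3); pruning kept the last two,
// stored newest-first. Token counts are arbitrary.
const originalLength = 4;
let context: BaseMessage[] = [
    new AIMessage({ content: 'final answer' }),                  // original index 3
    new ToolMessage({ content: 'result', tool_call_id: 't1' }),  // original index 2
];
const indexTokenCountMap: Record<number, number> = { 2: 120, 3: 35 };
let currentTokenCount = 3 + 35 + 120;

const startType = ['ai', 'human']; // set because the oldest kept message is a tool result
let requiredTypeIndex = -1;
let totalTokens = 0;
for (let i = context.length - 1; i >= 0; i--) {
    const currentType = context[i]?.getType() ?? '';
    if (startType.includes(currentType)) {
        requiredTypeIndex = i + 1; // keep this message and everything newer
        break;
    }
    // context index i maps back to original index (originalLength - 1 - i)
    totalTokens += indexTokenCountMap[originalLength - 1 - i] ?? 0;
}
if (requiredTypeIndex > 0) {
    currentTokenCount -= totalTokens;              // reclaim the orphaned tool result's 120 tokens
    context = context.slice(0, requiredTypeIndex); // [ai] — reversed later, so the context starts with 'ai'
}
console.log(context.length, currentTokenCount);    // 1 38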
@@ -121,6 +145,9 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
         context: [],
         messagesToRefine: prunedMemory,
     };
+    if (thinkingStartIndex > -1) {
+        result.thinkingStartIndex = thinkingStartIndex;
+    }
     if (prunedMemory.length === 0 || thinkingEndIndex < 0 || (thinkingStartIndex > -1 && isIndexInContext(_messages, context, thinkingStartIndex))) {
         // we reverse at this step to ensure the context is in the correct order for the model, and we need to work backwards
         result.context = context.reverse();
@@ -152,8 +179,8 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
     thinkingStartIndex = originalLength - 1 - assistantIndex;
     const thinkingTokenCount = tokenCounter(new messages.AIMessage({ content: [thinkingBlock] }));
     const newRemainingCount = remainingContextTokens - thinkingTokenCount;
-    const content = addThinkingBlock(context[assistantIndex], thinkingBlock);
-    context[assistantIndex].content = content;
+    const newMessage = addThinkingBlock(context[assistantIndex], thinkingBlock);
+    context[assistantIndex] = newMessage;
     if (newRemainingCount > 0) {
         result.context = context.reverse();
         return result;
@@ -184,17 +211,28 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
     const firstMessage = newContext[newContext.length - 1];
     const firstMessageType = newContext[newContext.length - 1].getType();
     if (firstMessageType === 'tool') {
-        startType = 'ai';
+        startType = ['ai', 'human'];
     }
-    if (startType != null && startType && newContext.length > 0) {
-        const requiredTypeIndex = newContext.findIndex(msg => msg.getType() === startType);
+    if (startType != null && startType.length > 0 && newContext.length > 0) {
+        let requiredTypeIndex = -1;
+        let totalTokens = 0;
+        for (let i = newContext.length - 1; i >= 0; i--) {
+            const currentType = newContext[i]?.getType() ?? '';
+            if (Array.isArray(startType) ? startType.includes(currentType) : currentType === startType) {
+                requiredTypeIndex = i + 1;
+                break;
+            }
+            const originalIndex = originalLength - 1 - i;
+            totalTokens += indexTokenCountMap[originalIndex] ?? 0;
+        }
         if (requiredTypeIndex > 0) {
-            newContext = newContext.slice(requiredTypeIndex);
+            currentTokenCount -= totalTokens;
+            newContext = newContext.slice(0, requiredTypeIndex);
         }
     }
     if (firstMessageType === 'ai') {
-        const content = addThinkingBlock(firstMessage, thinkingBlock);
-        newContext[newContext.length - 1].content = content;
+        const newMessage = addThinkingBlock(firstMessage, thinkingBlock);
+        newContext[newContext.length - 1] = newMessage;
     }
     else {
         newContext.push(thinkingMessage);
@@ -214,6 +252,7 @@ function createPruneMessages(factoryParams) {
     let lastTurnStartIndex = factoryParams.startIndex;
     let lastCutOffIndex = 0;
     let totalTokens = (Object.values(indexTokenCountMap)).reduce((a, b) => a + b, 0);
+    let runThinkingStartIndex = -1;
     return function pruneMessages(params) {
         let currentUsage;
         if (params.usageMetadata && (checkValidNumber(params.usageMetadata.input_tokens)
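Annotation: `runThinkingStartIndex` is new closure state. Each call to the returned `pruneMessages` feeds the last known thinking-block index back into `getMessagesWithinTokenLimit` (see the `thinkingStartIndex` wiring two hunks down). A usage sketch of the factory across agent turns; the import path and the naive token counter are assumptions:

import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages';
// Assumed import path; createPruneMessages is the factory shown in this diff.
import { createPruneMessages } from '@librechat/agents';

const pruneMessages = createPruneMessages({
    maxTokens: 4096,
    startIndex: 0,
    indexTokenCountMap: {},
    thinkingEnabled: true,
    // Crude stand-in counter: ~4 characters per token.
    tokenCounter: (m: BaseMessage) => Math.ceil(JSON.stringify(m.content).length / 4),
});

// Turn 1: runThinkingStartIndex is still -1, so nothing is carried in.
let { context } = pruneMessages({ messages: [new HumanMessage('hi')] });

// Turn 2: same closure. If the first call's pruning located a thinking block,
// its index was stored and is now passed as `thinkingStartIndex` automatically.
({ context } = pruneMessages({
    messages: [new HumanMessage('hi'), new AIMessage('…'), new HumanMessage('next')],
}));
console.log(context.length);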
@@ -270,7 +309,7 @@ function createPruneMessages(factoryParams) {
         if (totalTokens <= factoryParams.maxTokens) {
             return { context: params.messages, indexTokenCountMap };
         }
-        const { context } = getMessagesWithinTokenLimit({
+        const { context, thinkingStartIndex } = getMessagesWithinTokenLimit({
             maxContextTokens: factoryParams.maxTokens,
             messages: params.messages,
             indexTokenCountMap,
@@ -278,8 +317,11 @@ function createPruneMessages(factoryParams) {
             thinkingEnabled: factoryParams.thinkingEnabled,
             tokenCounter: factoryParams.tokenCounter,
             reasoningType: factoryParams.provider === _enum.Providers.BEDROCK ? _enum.ContentTypes.REASONING_CONTENT : _enum.ContentTypes.THINKING,
+            thinkingStartIndex: factoryParams.thinkingEnabled === true ? runThinkingStartIndex : undefined,
         });
-        lastCutOffIndex = Math.max(params.messages.length - context.length, 0);
+        runThinkingStartIndex = thinkingStartIndex ?? -1;
+        /** The index is the first value of `context`, index relative to `params.messages` */
+        lastCutOffIndex = Math.max(params.messages.length - (context.length - (context[0]?.getType() === 'system' ? 1 : 0)), 0);
         return { context, indexTokenCountMap };
     };
 }
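Annotation: the corrected `lastCutOffIndex` accounts for the system message. When pruning keeps the system prompt, it sits at the front of the returned `context` even though it was never part of the surviving tail of `params.messages`, so counting it would shift the cut-off by one. Worked numbers (invented):

// 10 messages where index 0 is the system prompt; suppose pruning returned 5
// messages with the system prompt re-attached at the front.
const messagesLength = 10;
const contextLength = 5;
const startsWithSystem = true; // i.e. context[0]?.getType() === 'system'

const tailSurvivors = contextLength - (startsWithSystem ? 1 : 0);    // 4
const lastCutOffIndex = Math.max(messagesLength - tailSurvivors, 0); // 6 → indices 6..9 survived
// The old formula, messagesLength - contextLength, would give 5 and wrongly
// treat message 5 as still in context when distributing usage tokens.
console.log(lastCutOffIndex);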
@@ -1 +1 @@
[prune.cjs.map: regenerated sourcemap (inline sources and base64-VLQ mappings) omitted. The hunks below repeat the same changes in the package's second bundled build of prune, which references AIMessage and ContentTypes directly.]
package/dist/esm/messages/prune.mjs CHANGED
@@ -12,8 +12,15 @@ function addThinkingBlock(message, thinkingBlock) {
12
12
  type: ContentTypes.TEXT,
13
13
  text: message.content,
14
14
  }];
15
+ /** Edge case, the message already has the thinking block */
16
+ if (content[0].type === thinkingBlock.type) {
17
+ return message;
18
+ }
15
19
  content.unshift(thinkingBlock);
16
- return content;
20
+ return new AIMessage({
21
+ ...message,
22
+ content
23
+ });
17
24
  }
18
25
  /**
19
26
  * Calculates the total tokens from a single usage object
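
The hunk above brings the ESM build in line with the same CJS fix: addThinkingBlock no longer hands back a bare content array for the caller to assign. It returns the message untouched when a thinking block is already in front, and otherwise a fresh AIMessage with the block prepended. A minimal sketch of the new contract, assuming ContentTypes.THINKING resolves to the string 'thinking' and treating the module-private addThinkingBlock as callable for illustration:

    import { AIMessage } from '@langchain/core/messages';

    // Hypothetical thinking block, for illustration only.
    const thinkingBlock = { type: 'thinking', thinking: 'reasoning trace...' };
    const original = new AIMessage({ content: 'final answer' });

    const withBlock = addThinkingBlock(original, thinkingBlock);
    console.assert(original.content === 'final answer');  // input left unmutated
    console.assert(Array.isArray(withBlock.content)
      && withBlock.content[0].type === 'thinking');       // copy leads with the block
    console.assert(addThinkingBlock(withBlock, thinkingBlock) === withBlock); // repeat call is a no-op
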
@@ -40,7 +47,7 @@ function calculateTotalTokens(usage) {
40
47
  * @param options Configuration options for processing messages
41
48
  * @returns Object containing the message context, remaining tokens, messages not included, and summary index
42
49
  */
43
- function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, reasoningType = ContentTypes.THINKING, }) {
50
+ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, thinkingStartIndex: _thinkingStartIndex = -1, reasoningType = ContentTypes.THINKING, }) {
44
51
  // Every reply is primed with <|start|>assistant<|message|>, so we
45
52
  // start with 3 tokens for the label after all messages have been counted.
46
53
  let currentTokenCount = 3;
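
getMessagesWithinTokenLimit now takes an optional thinkingStartIndex, defaulting to -1 as the "not located yet" sentinel, so a caller that already knows where the thinking block lives can pass it back in rather than have the pruner rescan for it. A hedged call sketch with made-up token numbers; messages and tokenCounter are assumed to exist in scope:

    const result = getMessagesWithinTokenLimit({
      messages,
      maxContextTokens: 8192,
      indexTokenCountMap: { 0: 12, 1: 340, 2: 95 }, // per-message counts, illustrative
      tokenCounter,
      thinkingEnabled: true,
      thinkingStartIndex: 1, // index of the AI message carrying the thinking block
    });
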
@@ -57,11 +64,17 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
57
64
  * This may be confusing to read, but it is done to ensure the context is in the correct order for the model.
58
65
  * */
59
66
  let context = [];
60
- let thinkingStartIndex = -1;
67
+ let thinkingStartIndex = _thinkingStartIndex;
61
68
  let thinkingEndIndex = -1;
62
69
  let thinkingBlock;
63
70
  const endIndex = instructions != null ? 1 : 0;
64
71
  const prunedMemory = [];
72
+ if (_thinkingStartIndex > -1) {
73
+ const thinkingMessageContent = messages[_thinkingStartIndex]?.content;
74
+ if (Array.isArray(thinkingMessageContent)) {
75
+ thinkingBlock = thinkingMessageContent.find((content) => content.type === reasoningType);
76
+ }
77
+ }
65
78
  if (currentTokenCount < remainingContextTokens) {
66
79
  let currentIndex = messages.length;
67
80
  while (messages.length > 0 && currentTokenCount < remainingContextTokens && currentIndex > endIndex) {
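
When a valid index is passed in, the block is resolved once, up front, by scanning that message's content parts for the configured reasoningType; and the loop's continue-condition later in this function gains thinkingStartIndex < 0, so pruned messages keep being scanned for a block only while its location is still unknown. The new lookup amounts to this standalone sketch (types simplified; findThinkingBlock is a hypothetical name, not a package export):

    import type { BaseMessage } from '@langchain/core/messages';

    type ContentPart = { type: string; [key: string]: unknown }; // simplified stand-in

    function findThinkingBlock(
      messages: BaseMessage[],
      thinkingStartIndex: number,
      reasoningType: string, // 'thinking', or 'reasoning_content' on Bedrock
    ): ContentPart | undefined {
      if (thinkingStartIndex < 0) return undefined;          // sentinel: nothing cached
      const content = messages[thinkingStartIndex]?.content; // may be a plain string
      if (!Array.isArray(content)) return undefined;         // only complex content holds blocks
      return (content as ContentPart[]).find((part) => part.type === reasoningType);
    }
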
@@ -93,19 +106,30 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
93
106
  }
94
107
  else {
95
108
  prunedMemory.push(poppedMessage);
96
- if (thinkingEndIndex > -1) {
109
+ if (thinkingEndIndex > -1 && thinkingStartIndex < 0) {
97
110
  continue;
98
111
  }
99
112
  break;
100
113
  }
101
114
  }
102
- if (thinkingEndIndex > -1 && context[context.length - 1]?.getType() === 'tool') {
103
- startType = 'ai';
115
+ if (context[context.length - 1]?.getType() === 'tool') {
116
+ startType = ['ai', 'human'];
104
117
  }
105
- if (startType != null && startType && context.length > 0) {
106
- const requiredTypeIndex = context.findIndex(msg => msg?.getType() === startType);
118
+ if (startType != null && startType.length > 0 && context.length > 0) {
119
+ let requiredTypeIndex = -1;
120
+ let totalTokens = 0;
121
+ for (let i = context.length - 1; i >= 0; i--) {
122
+ const currentType = context[i]?.getType() ?? '';
123
+ if (Array.isArray(startType) ? startType.includes(currentType) : currentType === startType) {
124
+ requiredTypeIndex = i + 1;
125
+ break;
126
+ }
127
+ const originalIndex = originalLength - 1 - i;
128
+ totalTokens += indexTokenCountMap[originalIndex] ?? 0;
129
+ }
107
130
  if (requiredTypeIndex > 0) {
108
- context = context.slice(requiredTypeIndex);
131
+ currentTokenCount -= totalTokens;
132
+ context = context.slice(0, requiredTypeIndex);
109
133
  }
110
134
  }
111
135
  }
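
Two behavior changes land in this hunk. First, whenever the newest retained message is a tool result (no longer only inside a thinking sequence), startType widens to the set ['ai', 'human']. Second, recalling that context is built newest-first and reversed on return, the rewritten scan walks from the oldest retained message toward the newest, keeps everything from the first matching message onward, and credits the dropped older messages' token counts back to currentTokenCount; the old findIndex plus slice(requiredTypeIndex) appears to have trimmed from the opposite end of the array. Distilled into a hypothetical helper (trimToStartType is not a package export):

    import type { BaseMessage } from '@langchain/core/messages';

    // `context` is ordered newest-first here, exactly as in the function above.
    function trimToStartType(
      context: BaseMessage[],
      startType: string | string[],
      indexTokenCountMap: Record<string, number | undefined>,
      originalLength: number,
    ): { context: BaseMessage[]; reclaimedTokens: number } {
      const matches = (type: string): boolean =>
        Array.isArray(startType) ? startType.includes(type) : type === startType;
      let reclaimedTokens = 0;
      // Walk from the oldest retained message toward the newest.
      for (let i = context.length - 1; i >= 0; i--) {
        if (matches(context[i].getType())) {
          // Keep the match and everything newer; drop the older stragglers.
          return { context: context.slice(0, i + 1), reclaimedTokens };
        }
        // Credit back the tokens of each message about to be dropped.
        const originalIndex = originalLength - 1 - i;
        reclaimedTokens += indexTokenCountMap[originalIndex] ?? 0;
      }
      return { context, reclaimedTokens: 0 }; // no match: leave the context alone
    }
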
@@ -119,6 +143,9 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
119
143
  context: [],
120
144
  messagesToRefine: prunedMemory,
121
145
  };
146
+ if (thinkingStartIndex > -1) {
147
+ result.thinkingStartIndex = thinkingStartIndex;
148
+ }
122
149
  if (prunedMemory.length === 0 || thinkingEndIndex < 0 || (thinkingStartIndex > -1 && isIndexInContext(_messages, context, thinkingStartIndex))) {
123
150
  // we reverse at this step to ensure the context is in the correct order for the model, and we need to work backwards
124
151
  result.context = context.reverse();
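
The result object now reports thinkingStartIndex when one was found, and the early return above keeps its meaning: the reversed context goes back as-is if nothing was pruned, if no thinking sequence was detected, or if the message holding the thinking block survived the cut. A hypothetical predicate naming that condition:

    function canReturnContextDirectly(
      prunedCount: number,        // prunedMemory.length
      thinkingEndIndex: number,   // -1 when no thinking sequence was detected
      thinkingStartIndex: number, // -1 when the block was never located
      blockSurvived: boolean,     // isIndexInContext(_messages, context, thinkingStartIndex)
    ): boolean {
      return prunedCount === 0
        || thinkingEndIndex < 0
        || (thinkingStartIndex > -1 && blockSurvived);
    }
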
@@ -150,8 +177,8 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
150
177
  thinkingStartIndex = originalLength - 1 - assistantIndex;
151
178
  const thinkingTokenCount = tokenCounter(new AIMessage({ content: [thinkingBlock] }));
152
179
  const newRemainingCount = remainingContextTokens - thinkingTokenCount;
153
- const content = addThinkingBlock(context[assistantIndex], thinkingBlock);
154
- context[assistantIndex].content = content;
180
+ const newMessage = addThinkingBlock(context[assistantIndex], thinkingBlock);
181
+ context[assistantIndex] = newMessage;
155
182
  if (newRemainingCount > 0) {
156
183
  result.context = context.reverse();
157
184
  return result;
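
Here 2.3.8 assigned the augmented content array back onto the shared message object; 2.3.91 swaps a fresh AIMessage into the context slot instead, so the caller's original message is never mutated. The cost check around it is also worth spelling out: the block is priced by wrapping it alone in a synthetic AIMessage and handing that to the configured tokenCounter. A sketch under those assumptions, with the surrounding function's names (context, assistantIndex, thinkingBlock, remainingContextTokens, tokenCounter) assumed in scope:

    import { AIMessage } from '@langchain/core/messages';

    // Price the block in isolation via a synthetic single-part message.
    const thinkingTokenCount = tokenCounter(new AIMessage({ content: [thinkingBlock] }));
    if (remainingContextTokens - thinkingTokenCount > 0) {
      // It fits: swap an immutable copy into the slot and return the repaired context.
      context[assistantIndex] = addThinkingBlock(context[assistantIndex], thinkingBlock);
    }
    // Otherwise a second pruning round runs with the block's cost budgeted in.
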
@@ -182,17 +209,28 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
182
209
  const firstMessage = newContext[newContext.length - 1];
183
210
  const firstMessageType = newContext[newContext.length - 1].getType();
184
211
  if (firstMessageType === 'tool') {
185
- startType = 'ai';
212
+ startType = ['ai', 'human'];
186
213
  }
187
- if (startType != null && startType && newContext.length > 0) {
188
- const requiredTypeIndex = newContext.findIndex(msg => msg.getType() === startType);
214
+ if (startType != null && startType.length > 0 && newContext.length > 0) {
215
+ let requiredTypeIndex = -1;
216
+ let totalTokens = 0;
217
+ for (let i = newContext.length - 1; i >= 0; i--) {
218
+ const currentType = newContext[i]?.getType() ?? '';
219
+ if (Array.isArray(startType) ? startType.includes(currentType) : currentType === startType) {
220
+ requiredTypeIndex = i + 1;
221
+ break;
222
+ }
223
+ const originalIndex = originalLength - 1 - i;
224
+ totalTokens += indexTokenCountMap[originalIndex] ?? 0;
225
+ }
189
226
  if (requiredTypeIndex > 0) {
190
- newContext = newContext.slice(requiredTypeIndex);
227
+ currentTokenCount -= totalTokens;
228
+ newContext = newContext.slice(0, requiredTypeIndex);
191
229
  }
192
230
  }
193
231
  if (firstMessageType === 'ai') {
194
- const content = addThinkingBlock(firstMessage, thinkingBlock);
195
- newContext[newContext.length - 1].content = content;
232
+ const newMessage = addThinkingBlock(firstMessage, thinkingBlock);
233
+ newContext[newContext.length - 1] = newMessage;
196
234
  }
197
235
  else {
198
236
  newContext.push(thinkingMessage);
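
The second pruning round, entered only when the re-attached thinking block pushes the first context over budget, receives the same pair of fixes: ['ai', 'human'] after a leading tool result, and the oldest-to-newest trim with token reclamation; the oldest retained AI message is likewise replaced rather than mutated. Why the trim matters at all, as a toy illustration over message type names only:

    // Provider chat APIs generally reject a context that opens with an orphaned
    // tool result, since the assistant tool call it answers was pruned away.
    const retained = ['tool', 'ai', 'human', 'ai'];  // oldest to newest: invalid start
    const firstValid = retained.findIndex((t) => t === 'ai' || t === 'human');
    const repaired = retained.slice(firstValid);     // ['ai', 'human', 'ai']
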
@@ -212,6 +250,7 @@ function createPruneMessages(factoryParams) {
212
250
  let lastTurnStartIndex = factoryParams.startIndex;
213
251
  let lastCutOffIndex = 0;
214
252
  let totalTokens = (Object.values(indexTokenCountMap)).reduce((a, b) => a + b, 0);
253
+ let runThinkingStartIndex = -1;
215
254
  return function pruneMessages(params) {
216
255
  let currentUsage;
217
256
  if (params.usageMetadata && (checkValidNumber(params.usageMetadata.input_tokens)
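
createPruneMessages gains one more piece of closure state, runThinkingStartIndex, which caches where the thinking block was located on the previous pruneMessages call so later prunes in the same run can skip the rescan. Gathered into a record purely for readability (a hypothetical shape, not the package's API):

    interface PruneRunState {
      indexTokenCountMap: Record<string, number>; // per-message token counts, by original index
      lastTurnStartIndex: number;                 // where counting resumes on the next call
      lastCutOffIndex: number;                    // first index retained by the last prune
      totalTokens: number;                        // running total across the map
      runThinkingStartIndex: number;              // -1 until a thinking block is located
    }
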
@@ -268,7 +307,7 @@ function createPruneMessages(factoryParams) {
268
307
  if (totalTokens <= factoryParams.maxTokens) {
269
308
  return { context: params.messages, indexTokenCountMap };
270
309
  }
271
- const { context } = getMessagesWithinTokenLimit({
310
+ const { context, thinkingStartIndex } = getMessagesWithinTokenLimit({
272
311
  maxContextTokens: factoryParams.maxTokens,
273
312
  messages: params.messages,
274
313
  indexTokenCountMap,
@@ -276,8 +315,11 @@ function createPruneMessages(factoryParams) {
276
315
  thinkingEnabled: factoryParams.thinkingEnabled,
277
316
  tokenCounter: factoryParams.tokenCounter,
278
317
  reasoningType: factoryParams.provider === Providers.BEDROCK ? ContentTypes.REASONING_CONTENT : ContentTypes.THINKING,
318
+ thinkingStartIndex: factoryParams.thinkingEnabled === true ? runThinkingStartIndex : undefined,
279
319
  });
280
- lastCutOffIndex = Math.max(params.messages.length - context.length, 0);
320
+ runThinkingStartIndex = thinkingStartIndex ?? -1;
321
+ /** The index is the first value of `context`, index relative to `params.messages` */
322
+ lastCutOffIndex = Math.max(params.messages.length - (context.length - (context[0]?.getType() === 'system' ? 1 : 0)), 0);
281
323
  return { context, indexTokenCountMap };
282
324
  };
283
325
  }
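
At the call site, the cached index is forwarded only while thinking is enabled (undefined otherwise, letting the pruner's -1 default apply), it is refreshed from the result after every prune, and the lastCutOffIndex formula stops counting a leading system message as part of the retained window. A worked example with made-up sizes: with 10 incoming messages and a returned context of 4 whose first entry is the system prompt, the old formula marked the cutoff at 10 - 4 = 6, while the new one yields 10 - (4 - 1) = 7, which is right, since the system message sits at index 0 and the retained window really covers original indices 7 through 9.

    // Made-up sizes exercising the new cutoff formula.
    const incoming = 10;       // params.messages.length
    const kept = 4;            // context.length, system prompt included
    const leadingSystem = 1;   // context[0]?.getType() === 'system' ? 1 : 0
    const lastCutOffIndex = Math.max(incoming - (kept - leadingSystem), 0);
    console.assert(lastCutOffIndex === 7); // was 6 under the 2.3.8 formula
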
package/dist/esm/messages/prune.mjs.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"prune.mjs","sources":["../../../src/messages/prune.ts"], ...} [old sourcemap JSON omitted: its embedded sourcesContent and VLQ mappings only duplicate the prune.mjs and prune.ts changes shown elsewhere in this diff]
1
+ {"version":3,"file":"prune.mjs","sources":["../../../src/messages/prune.ts"], ...} [regenerated sourcemap JSON omitted for the same reason]
@@ -21,6 +21,12 @@ export type PruneMessagesParams = {
  * @returns An object containing the total input and output tokens
  */
 export declare function calculateTotalTokens(usage: Partial<UsageMetadata>): UsageMetadata;
+export type PruningResult = {
+    context: BaseMessage[];
+    remainingContextTokens: number;
+    messagesToRefine: BaseMessage[];
+    thinkingStartIndex?: number;
+};
 /**
  * Processes an array of messages and returns a context of messages that fit within a specified token limit.
  * It iterates over the messages from newest to oldest, adding them to the context until the token limit is reached.
@@ -28,19 +34,16 @@ export declare function calculateTotalTokens(usage: Partial<UsageMetadata>): Usa
  * @param options Configuration options for processing messages
  * @returns Object containing the message context, remaining tokens, messages not included, and summary index
  */
-export declare function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, reasoningType, }: {
+export declare function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, thinkingStartIndex: _thinkingStartIndex, reasoningType, }: {
     messages: BaseMessage[];
     maxContextTokens: number;
     indexTokenCountMap: Record<string, number | undefined>;
-    tokenCounter: TokenCounter;
-    startType?: string;
+    startType?: string | string[];
     thinkingEnabled?: boolean;
+    tokenCounter: TokenCounter;
+    thinkingStartIndex?: number;
     reasoningType?: ContentTypes.THINKING | ContentTypes.REASONING_CONTENT;
-}): {
-    context: BaseMessage[];
-    remainingContextTokens: number;
-    messagesToRefine: BaseMessage[];
-};
+}): PruningResult;
 export declare function checkValidNumber(value: unknown): value is number;
 export declare function createPruneMessages(factoryParams: PruneMessagesFactoryParams): (params: PruneMessagesParams) => {
     context: BaseMessage[];
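To make the shape change concrete, here is a minimal sketch of a caller against the new declaration. It assumes `getMessagesWithinTokenLimit` is re-exported from the package root and that `TokenCounter` is a function from a `BaseMessage` to a token count; the character-based counter and the token figures are purely illustrative.

```ts
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import type { BaseMessage } from '@langchain/core/messages';
import { getMessagesWithinTokenLimit } from '@librechat/agents'; // assumed export path

// Illustrative stand-in for TokenCounter: roughly 1 token per 4 characters.
const tokenCounter = (msg: BaseMessage): number =>
  Math.ceil(JSON.stringify(msg.content).length / 4);

const messages: BaseMessage[] = [
  new HumanMessage('What is the weather in Paris?'),
  new AIMessage('Let me check that for you.'),
];

const result = getMessagesWithinTokenLimit({
  messages,
  maxContextTokens: 4096,
  indexTokenCountMap: { 0: 9, 1: 8 },
  startType: ['human', 'ai'], // startType now also accepts an array of types
  tokenCounter,
});

// The widened PruningResult surfaces where a thinking block was found (if any),
// so a later prune pass can be seeded with it instead of rescanning.
console.log(result.remainingContextTokens, result.thinkingStartIndex);
```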
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/agents",
-  "version": "2.3.8",
+  "version": "2.3.91",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -22,15 +22,22 @@ function isIndexInContext(arrayA: unknown[], arrayB: unknown[], targetIndex: num
   return targetIndex >= startingIndexInA;
 }
 
-function addThinkingBlock(message: AIMessage, thinkingBlock: ThinkingContentText | ReasoningContentText): MessageContentComplex[] {
+function addThinkingBlock(message: AIMessage, thinkingBlock: ThinkingContentText | ReasoningContentText): AIMessage {
   const content: MessageContentComplex[] = Array.isArray(message.content)
     ? message.content as MessageContentComplex[]
     : [{
       type: ContentTypes.TEXT,
       text: message.content,
     }];
+  /** Edge case, the message already has the thinking block */
+  if (content[0].type === thinkingBlock.type) {
+    return message;
+  }
   content.unshift(thinkingBlock);
-  return content;
+  return new AIMessage({
+    ...message,
+    content
+  });
 }
 
 /**
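The behavioral point of this hunk is that `addThinkingBlock` no longer hands back a content array for the caller to splice in; it returns a fresh `AIMessage` (or the original message untouched, when a thinking block is already in front). A minimal sketch of the same pattern, with the reasoning content type simplified to a hypothetical stand-in:

```ts
import { AIMessage } from '@langchain/core/messages';
import type { MessageContentComplex } from '@langchain/core/messages';

// Hypothetical stand-in for ThinkingContentText | ReasoningContentText.
type ThinkingLike = { type: string; thinking?: string };

function withThinkingBlock(message: AIMessage, block: ThinkingLike): AIMessage {
  const content: MessageContentComplex[] = Array.isArray(message.content)
    ? (message.content as MessageContentComplex[])
    : [{ type: 'text', text: message.content }];
  // Edge case handled by the patch: the block is already at the front.
  if (content[0]?.type === block.type) {
    return message;
  }
  // Returning a new message avoids aliasing bugs when the same AIMessage
  // object is referenced from several context arrays at once.
  return new AIMessage({ ...message, content: [block as MessageContentComplex, ...content] });
}
```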
@@ -54,6 +61,13 @@ export function calculateTotalTokens(usage: Partial<UsageMetadata>): UsageMetada
   };
 }
 
+export type PruningResult = {
+  context: BaseMessage[];
+  remainingContextTokens: number;
+  messagesToRefine: BaseMessage[];
+  thinkingStartIndex?: number;
+};
+
 /**
  * Processes an array of messages and returns a context of messages that fit within a specified token limit.
  * It iterates over the messages from newest to oldest, adding them to the context until the token limit is reached.
@@ -68,20 +82,18 @@ export function getMessagesWithinTokenLimit({
   startType: _startType,
   thinkingEnabled,
   tokenCounter,
+  thinkingStartIndex: _thinkingStartIndex = -1,
   reasoningType = ContentTypes.THINKING,
 }: {
   messages: BaseMessage[];
   maxContextTokens: number;
   indexTokenCountMap: Record<string, number | undefined>;
-  tokenCounter: TokenCounter;
-  startType?: string;
+  startType?: string | string[];
   thinkingEnabled?: boolean;
+  tokenCounter: TokenCounter;
+  thinkingStartIndex?: number;
   reasoningType?: ContentTypes.THINKING | ContentTypes.REASONING_CONTENT;
-}): {
-  context: BaseMessage[];
-  remainingContextTokens: number;
-  messagesToRefine: BaseMessage[];
-} {
+}): PruningResult {
   // Every reply is primed with <|start|>assistant<|message|>, so we
   // start with 3 tokens for the label after all messages have been counted.
   let currentTokenCount = 3;
@@ -99,12 +111,19 @@ export function getMessagesWithinTokenLimit({
   * */
   let context: Array<BaseMessage | undefined> = [];
 
-  let thinkingStartIndex = -1;
+  let thinkingStartIndex = _thinkingStartIndex;
   let thinkingEndIndex = -1;
   let thinkingBlock: ThinkingContentText | ReasoningContentText | undefined;
   const endIndex = instructions != null ? 1 : 0;
   const prunedMemory: BaseMessage[] = [];
 
+  if (_thinkingStartIndex > -1) {
+    const thinkingMessageContent = messages[_thinkingStartIndex]?.content;
+    if (Array.isArray(thinkingMessageContent)) {
+      thinkingBlock = thinkingMessageContent.find((content) => content.type === reasoningType) as ThinkingContentText | undefined;
+    }
+  }
+
   if (currentTokenCount < remainingContextTokens) {
     let currentIndex = messages.length;
     while (messages.length > 0 && currentTokenCount < remainingContextTokens && currentIndex > endIndex) {
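In plain terms: when a previous prune pass already reported where the reasoning block lives, this pass is seeded with that index and lifts the block straight out of that message's content array instead of rediscovering it during the pruning loop. A standalone sketch of just the recovery step (function name hypothetical, `reasoningType` value illustrative):

```ts
import type { BaseMessage, MessageContentComplex } from '@langchain/core/messages';

function recoverThinkingBlock(
  messages: BaseMessage[],
  thinkingStartIndex: number,
  reasoningType: string, // e.g. the value behind ContentTypes.THINKING
): MessageContentComplex | undefined {
  if (thinkingStartIndex < 0) return undefined;
  const content = messages[thinkingStartIndex]?.content;
  if (!Array.isArray(content)) return undefined;
  // Same lookup the patch performs against `reasoningType`.
  return (content as MessageContentComplex[]).find((c) => c.type === reasoningType);
}
```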
@@ -138,22 +157,34 @@ export function getMessagesWithinTokenLimit({
         currentTokenCount += tokenCount;
       } else {
         prunedMemory.push(poppedMessage);
-        if (thinkingEndIndex > -1) {
+        if (thinkingEndIndex > -1 && thinkingStartIndex < 0) {
           continue;
         }
         break;
       }
     }
 
-    if (thinkingEndIndex > -1 && context[context.length - 1]?.getType() === 'tool') {
-      startType = 'ai';
+    if (context[context.length - 1]?.getType() === 'tool') {
+      startType = ['ai', 'human'];
     }
 
-    if (startType != null && startType && context.length > 0) {
-      const requiredTypeIndex = context.findIndex(msg => msg?.getType() === startType);
+    if (startType != null && startType.length > 0 && context.length > 0) {
+      let requiredTypeIndex = -1;
+
+      let totalTokens = 0;
+      for (let i = context.length - 1; i >= 0; i--) {
+        const currentType = context[i]?.getType() ?? '';
+        if (Array.isArray(startType) ? startType.includes(currentType) : currentType === startType) {
+          requiredTypeIndex = i + 1;
+          break;
+        }
+        const originalIndex = originalLength - 1 - i;
+        totalTokens += indexTokenCountMap[originalIndex] ?? 0;
+      }
 
       if (requiredTypeIndex > 0) {
-        context = context.slice(requiredTypeIndex);
+        currentTokenCount -= totalTokens;
+        context = context.slice(0, requiredTypeIndex);
      }
    }
  }
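The replaced `findIndex` could only match a single type string; the new backward scan accepts either form of `startType` and, crucially, credits the token counts of any trimmed messages back to the budget. Note that `context` is built newest-first here, so `slice(0, i + 1)` keeps everything from the newest message down to, and including, the first match. A hypothetical extraction of the step:

```ts
import type { BaseMessage } from '@langchain/core/messages';

// `context` is newest-first, so index context.length - 1 is the oldest kept message.
function trimToStartType(
  context: Array<BaseMessage | undefined>,
  startType: string | string[],
  indexTokenCountMap: Record<string, number | undefined>,
  originalLength: number,
): { context: Array<BaseMessage | undefined>; reclaimedTokens: number } {
  let requiredTypeIndex = -1;
  let reclaimedTokens = 0;
  for (let i = context.length - 1; i >= 0; i--) {
    const currentType = context[i]?.getType() ?? '';
    const matches = Array.isArray(startType)
      ? startType.includes(currentType)
      : currentType === startType;
    if (matches) {
      requiredTypeIndex = i + 1; // keep everything newer than, and including, this message
      break;
    }
    // This message will fall out of the context; credit its tokens back.
    reclaimedTokens += indexTokenCountMap[originalLength - 1 - i] ?? 0;
  }
  return requiredTypeIndex > 0
    ? { context: context.slice(0, requiredTypeIndex), reclaimedTokens }
    : { context, reclaimedTokens: 0 };
}
```

The same scan is applied again further down, where a dangling thinking block forces the context to be rebuilt.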
@@ -164,12 +195,16 @@ export function getMessagesWithinTokenLimit({
   }
 
   remainingContextTokens -= currentTokenCount;
-  const result = {
+  const result: PruningResult = {
     remainingContextTokens,
     context: [] as BaseMessage[],
     messagesToRefine: prunedMemory,
   };
 
+  if (thinkingStartIndex > -1) {
+    result.thinkingStartIndex = thinkingStartIndex;
+  }
+
   if (prunedMemory.length === 0 || thinkingEndIndex < 0 || (thinkingStartIndex > -1 && isIndexInContext(_messages, context, thinkingStartIndex))) {
     // we reverse at this step to ensure the context is in the correct order for the model, and we need to work backwards
     result.context = context.reverse() as BaseMessage[];
@@ -206,8 +241,8 @@ export function getMessagesWithinTokenLimit({
       thinkingStartIndex = originalLength - 1 - assistantIndex;
       const thinkingTokenCount = tokenCounter(new AIMessage({ content: [thinkingBlock] }));
       const newRemainingCount = remainingContextTokens - thinkingTokenCount;
-      const content: MessageContentComplex[] = addThinkingBlock(context[assistantIndex] as AIMessage, thinkingBlock);
-      (context[assistantIndex] as AIMessage).content = content;
+      const newMessage = addThinkingBlock(context[assistantIndex] as AIMessage, thinkingBlock);
+      context[assistantIndex] = newMessage;
       if (newRemainingCount > 0) {
         result.context = context.reverse() as BaseMessage[];
         return result;
@@ -238,19 +273,32 @@ export function getMessagesWithinTokenLimit({
     const firstMessage: AIMessage = newContext[newContext.length - 1];
     const firstMessageType = newContext[newContext.length - 1].getType();
     if (firstMessageType === 'tool') {
-      startType = 'ai';
+      startType = ['ai', 'human'];
     }
 
-    if (startType != null && startType && newContext.length > 0) {
-      const requiredTypeIndex = newContext.findIndex(msg => msg.getType() === startType);
+    if (startType != null && startType.length > 0 && newContext.length > 0) {
+      let requiredTypeIndex = -1;
+
+      let totalTokens = 0;
+      for (let i = newContext.length - 1; i >= 0; i--) {
+        const currentType = newContext[i]?.getType() ?? '';
+        if (Array.isArray(startType) ? startType.includes(currentType) : currentType === startType) {
+          requiredTypeIndex = i + 1;
+          break;
+        }
+        const originalIndex = originalLength - 1 - i;
+        totalTokens += indexTokenCountMap[originalIndex] ?? 0;
+      }
+
       if (requiredTypeIndex > 0) {
-        newContext = newContext.slice(requiredTypeIndex);
+        currentTokenCount -= totalTokens;
+        newContext = newContext.slice(0, requiredTypeIndex);
       }
     }
 
     if (firstMessageType === 'ai') {
-      const content = addThinkingBlock(firstMessage, thinkingBlock);
-      newContext[newContext.length - 1].content = content;
+      const newMessage = addThinkingBlock(firstMessage, thinkingBlock);
+      newContext[newContext.length - 1] = newMessage;
     } else {
       newContext.push(thinkingMessage);
     }
@@ -273,6 +321,7 @@ export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
   let lastTurnStartIndex = factoryParams.startIndex;
   let lastCutOffIndex = 0;
   let totalTokens = (Object.values(indexTokenCountMap)).reduce((a, b) => a + b, 0);
+  let runThinkingStartIndex = -1;
   return function pruneMessages(params: PruneMessagesParams): {
     context: BaseMessage[];
     indexTokenCountMap: Record<string, number>;
@@ -342,7 +391,7 @@ export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
       return { context: params.messages, indexTokenCountMap };
     }
 
-    const { context } = getMessagesWithinTokenLimit({
+    const { context, thinkingStartIndex } = getMessagesWithinTokenLimit({
       maxContextTokens: factoryParams.maxTokens,
       messages: params.messages,
       indexTokenCountMap,
@@ -350,8 +399,11 @@ export function createPruneMessages(factoryParams: PruneMessagesFactoryParams) {
       thinkingEnabled: factoryParams.thinkingEnabled,
       tokenCounter: factoryParams.tokenCounter,
       reasoningType: factoryParams.provider === Providers.BEDROCK ? ContentTypes.REASONING_CONTENT : ContentTypes.THINKING,
+      thinkingStartIndex: factoryParams.thinkingEnabled === true ? runThinkingStartIndex : undefined,
     });
-    lastCutOffIndex = Math.max(params.messages.length - context.length, 0);
+    runThinkingStartIndex = thinkingStartIndex ?? -1;
+    /** The index is the first value of `context`, index relative to `params.messages` */
+    lastCutOffIndex = Math.max(params.messages.length - (context.length - (context[0]?.getType() === 'system' ? 1 : 0)), 0);
 
     return { context, indexTokenCountMap };
   };
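Putting the factory changes together: `runThinkingStartIndex` now survives across invocations of the returned `pruneMessages` closure, and `lastCutOffIndex` discounts a leading system message when measuring how much was cut. A usage sketch, assuming `createPruneMessages` is exported from the package root and using an illustrative character-based counter:

```ts
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import type { BaseMessage } from '@langchain/core/messages';
import { createPruneMessages } from '@librechat/agents'; // assumed export path

const pruneMessages = createPruneMessages({
  maxTokens: 8192,
  startIndex: 0,
  thinkingEnabled: true, // opts in to carrying thinkingStartIndex between calls
  tokenCounter: (msg: BaseMessage) => Math.ceil(JSON.stringify(msg.content).length / 4),
  indexTokenCountMap: { 0: 8, 1: 10 },
});

// First turn: prunes and (internally) remembers where the thinking block landed.
const first = pruneMessages({
  messages: [new HumanMessage('Hi there'), new AIMessage('Hello! How can I help?')],
});

// Later turns reuse that remembered index, so the reasoning block is
// re-attached without rescanning the whole history.
console.log(first.context.length, first.indexTokenCountMap);
```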
@@ -1,7 +1,7 @@
 // src/specs/prune.test.ts
 import { config } from 'dotenv';
 config();
-import { HumanMessage, AIMessage, SystemMessage, BaseMessage } from '@langchain/core/messages';
+import { HumanMessage, AIMessage, SystemMessage, BaseMessage, ToolMessage } from '@langchain/core/messages';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type { UsageMetadata } from '@langchain/core/messages';
 import type * as t from '@/types';
@@ -512,6 +512,187 @@ describe('Prune Messages Tests', () => {
     });
   });
 
+  describe('Tool Message Handling', () => {
+    it('should ensure context does not start with a tool message by finding an AI message', () => {
+      const tokenCounter = createTestTokenCounter();
+      const messages = [
+        new SystemMessage('System instruction'),
+        new AIMessage('AI message 1'),
+        new ToolMessage({ content: 'Tool result 1', tool_call_id: 'tool1' }),
+        new AIMessage('AI message 2'),
+        new ToolMessage({ content: 'Tool result 2', tool_call_id: 'tool2' })
+      ];
+
+      const indexTokenCountMap = {
+        0: 17, // System instruction
+        1: 12, // AI message 1
+        2: 13, // Tool result 1
+        3: 12, // AI message 2
+        4: 13 // Tool result 2
+      };
+
+      // Create a pruneMessages function with a token limit that will only include the last few messages
+      const pruneMessages = createPruneMessages({
+        maxTokens: 58, // Only enough for system + last 3 messages + 3, but should not include a parent-less tool message
+        startIndex: 0,
+        tokenCounter,
+        indexTokenCountMap: { ...indexTokenCountMap }
+      });
+
+      const result = pruneMessages({ messages });
+
+      // The context should include the system message, AI message 2, and Tool result 2
+      // It should NOT start with Tool result 2 alone
+      expect(result.context.length).toBe(3);
+      expect(result.context[0]).toBe(messages[0]); // System message
+      expect(result.context[1]).toBe(messages[3]); // AI message 2
+      expect(result.context[2]).toBe(messages[4]); // Tool result 2
+    });
+
+    it('should ensure context does not start with a tool message by finding a human message', () => {
+      const tokenCounter = createTestTokenCounter();
+      const messages = [
+        new SystemMessage('System instruction'),
+        new HumanMessage('Human message 1'),
+        new AIMessage('AI message 1'),
+        new ToolMessage({ content: 'Tool result 1', tool_call_id: 'tool1' }),
+        new HumanMessage('Human message 2'),
+        new ToolMessage({ content: 'Tool result 2', tool_call_id: 'tool2' })
+      ];
+
+      const indexTokenCountMap = {
+        0: 17, // System instruction
+        1: 15, // Human message 1
+        2: 12, // AI message 1
+        3: 13, // Tool result 1
+        4: 15, // Human message 2
+        5: 13 // Tool result 2
+      };
+
+      // Create a pruneMessages function with a token limit that will only include the last few messages
+      const pruneMessages = createPruneMessages({
+        maxTokens: 48, // Only enough for system + last 2 messages
+        startIndex: 0,
+        tokenCounter,
+        indexTokenCountMap: { ...indexTokenCountMap }
+      });
+
+      const result = pruneMessages({ messages });
+
+      // The context should include the system message, Human message 2, and Tool result 2
+      // It should NOT start with Tool result 2 alone
+      expect(result.context.length).toBe(3);
+      expect(result.context[0]).toBe(messages[0]); // System message
+      expect(result.context[1]).toBe(messages[4]); // Human message 2
+      expect(result.context[2]).toBe(messages[5]); // Tool result 2
+    });
+
+    it('should handle the case where a tool message is followed by an AI message', () => {
+      const tokenCounter = createTestTokenCounter();
+      const messages = [
+        new SystemMessage('System instruction'),
+        new HumanMessage('Human message'),
+        new AIMessage('AI message with tool use'),
+        new ToolMessage({ content: 'Tool result', tool_call_id: 'tool1' }),
+        new AIMessage('AI message after tool')
+      ];
+
+      const indexTokenCountMap = {
+        0: 17, // System instruction
+        1: 13, // Human message
+        2: 22, // AI message with tool use
+        3: 11, // Tool result
+        4: 19 // AI message after tool
+      };
+
+      const pruneMessages = createPruneMessages({
+        maxTokens: 50,
+        startIndex: 0,
+        tokenCounter,
+        indexTokenCountMap: { ...indexTokenCountMap }
+      });
+
+      const result = pruneMessages({ messages });
+
+      expect(result.context.length).toBe(2);
+      expect(result.context[0]).toBe(messages[0]); // System message
+      expect(result.context[1]).toBe(messages[4]); // AI message after tool
+    });
+
+    it('should handle the case where a tool message is followed by a human message', () => {
+      const tokenCounter = createTestTokenCounter();
+      const messages = [
+        new SystemMessage('System instruction'),
+        new HumanMessage('Human message 1'),
+        new AIMessage('AI message with tool use'),
+        new ToolMessage({ content: 'Tool result', tool_call_id: 'tool1' }),
+        new HumanMessage('Human message 2')
+      ];
+
+      const indexTokenCountMap = {
+        0: 17, // System instruction
+        1: 15, // Human message 1
+        2: 22, // AI message with tool use
+        3: 11, // Tool result
+        4: 15 // Human message 2
+      };
+
+      const pruneMessages = createPruneMessages({
+        maxTokens: 46,
+        startIndex: 0,
+        tokenCounter,
+        indexTokenCountMap: { ...indexTokenCountMap }
+      });
+
+      const result = pruneMessages({ messages });
+
+      expect(result.context.length).toBe(2);
+      expect(result.context[0]).toBe(messages[0]); // System message
+      expect(result.context[1]).toBe(messages[4]); // Human message 2
+    });
+
+    it('should handle complex sequence with multiple tool messages', () => {
+      const tokenCounter = createTestTokenCounter();
+      const messages = [
+        new SystemMessage('System instruction'),
+        new HumanMessage('Human message 1'),
+        new AIMessage('AI message 1 with tool use'),
+        new ToolMessage({ content: 'Tool result 1', tool_call_id: 'tool1' }),
+        new AIMessage('AI message 2 with tool use'),
+        new ToolMessage({ content: 'Tool result 2', tool_call_id: 'tool2' }),
+        new AIMessage('AI message 3 with tool use'),
+        new ToolMessage({ content: 'Tool result 3', tool_call_id: 'tool3' })
+      ];
+
+      const indexTokenCountMap = {
+        0: 17, // System instruction
+        1: 15, // Human message 1
+        2: 26, // AI message 1 with tool use
+        3: 13, // Tool result 1
+        4: 26, // AI message 2 with tool use
+        5: 13, // Tool result 2
+        6: 26, // AI message 3 with tool use
+        7: 13 // Tool result 3
+      };
+
+      const pruneMessages = createPruneMessages({
+        maxTokens: 111,
+        startIndex: 0,
+        tokenCounter,
+        indexTokenCountMap: { ...indexTokenCountMap }
+      });
+
+      const result = pruneMessages({ messages });
+
+      expect(result.context.length).toBe(5);
+      expect(result.context[0]).toBe(messages[0]); // System message
+      expect(result.context[1]).toBe(messages[4]); // AI message 2 with tool use
+      expect(result.context[2]).toBe(messages[5]); // Tool result 2
+      expect(result.context[3]).toBe(messages[6]); // AI message 3 with tool use
+      expect(result.context[4]).toBe(messages[7]); // Tool result 3
+    });
+  });
+
   describe('Integration with Run', () => {
     it('should initialize Run with custom token counter and process messages', async () => {
       const provider = Providers.OPENAI;
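The invariant these five tests pin down is the same throughout: after pruning, the context may never open with an orphaned tool result; the pruner must walk back to the nearest `ai` or `human` message instead. A hypothetical helper expressing the assertion the tests repeat (`createTestTokenCounter` is presumably defined earlier in the spec and is not shown in this hunk):

```ts
import type { BaseMessage } from '@langchain/core/messages';

// True when the pruned context does not begin with a dangling tool message
// (a leading system message is allowed and skipped over).
function startsSafely(context: BaseMessage[]): boolean {
  const first = context[0]?.getType() === 'system' ? context[1] : context[0];
  return first?.getType() !== 'tool';
}
```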
@@ -235,9 +235,9 @@ describe('Token Distribution Edge Case Tests', () => {
     });
 
     // Add two more messages
+    messages.push(new HumanMessage('Message 4'));
     const extendedMessages = [
       ...messages,
-      new HumanMessage('Message 4'),
       new AIMessage('Response 4')
     ];
 
@@ -257,6 +257,7 @@ describe('Token Distribution Edge Case Tests', () => {
     // The context should include the system message and some of the latest messages
     expect(thirdResult.context.length).toBeGreaterThan(0);
     expect(thirdResult.context[0].content).toBe('System instruction');
+    expect(thirdResult.context[1].content).toBe('Response 4');
 
     // Find which messages are in the final context
     const contextMessageIndices = thirdResult.context.map(msg => {
@@ -282,14 +283,12 @@ describe('Token Distribution Edge Case Tests', () => {
     // Verify that messages not in the context have their original token counts or previously adjusted values
     for (let i = 0; i < extendedMessages.length; i++) {
       if (!contextMessageIndices.includes(i)) {
-        // This message is not in the context, so its token count should not have been adjusted in the last operation
         const expectedValue = i < messages.length
           ? (secondResult.indexTokenCountMap[i] || indexTokenCountMap[i])
-          : (indexTokenCountMap as Record<string, number | undefined>)[i] ?? indexTokenCountMap[i - 1];
+          : (indexTokenCountMap as Record<string, number | undefined>)[i] ?? 0;
 
-        // For defined values, we can check that they're close to what we expect
         const difference = Math.abs((thirdResult.indexTokenCountMap[i] || 0) - expectedValue);
-        expect(difference).toBeLessThan(20); // Allow for some implementation differences
+        expect(difference).toBe(0);
       }
     }
   });