@librechat/agents 3.1.57 → 3.1.61
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/agents/AgentContext.cjs +326 -62
- package/dist/cjs/agents/AgentContext.cjs.map +1 -1
- package/dist/cjs/common/enum.cjs +13 -0
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/events.cjs +7 -27
- package/dist/cjs/events.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +303 -222
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +4 -4
- package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +6 -2
- package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
- package/dist/cjs/llm/init.cjs +60 -0
- package/dist/cjs/llm/init.cjs.map +1 -0
- package/dist/cjs/llm/invoke.cjs +90 -0
- package/dist/cjs/llm/invoke.cjs.map +1 -0
- package/dist/cjs/llm/openai/index.cjs +2 -0
- package/dist/cjs/llm/openai/index.cjs.map +1 -1
- package/dist/cjs/llm/request.cjs +41 -0
- package/dist/cjs/llm/request.cjs.map +1 -0
- package/dist/cjs/main.cjs +40 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/messages/cache.cjs +76 -89
- package/dist/cjs/messages/cache.cjs.map +1 -1
- package/dist/cjs/messages/contextPruning.cjs +156 -0
- package/dist/cjs/messages/contextPruning.cjs.map +1 -0
- package/dist/cjs/messages/contextPruningSettings.cjs +53 -0
- package/dist/cjs/messages/contextPruningSettings.cjs.map +1 -0
- package/dist/cjs/messages/core.cjs +23 -37
- package/dist/cjs/messages/core.cjs.map +1 -1
- package/dist/cjs/messages/format.cjs +156 -11
- package/dist/cjs/messages/format.cjs.map +1 -1
- package/dist/cjs/messages/prune.cjs +1161 -49
- package/dist/cjs/messages/prune.cjs.map +1 -1
- package/dist/cjs/messages/reducer.cjs +87 -0
- package/dist/cjs/messages/reducer.cjs.map +1 -0
- package/dist/cjs/run.cjs +81 -42
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/stream.cjs +54 -7
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/summarization/index.cjs +75 -0
- package/dist/cjs/summarization/index.cjs.map +1 -0
- package/dist/cjs/summarization/node.cjs +663 -0
- package/dist/cjs/summarization/node.cjs.map +1 -0
- package/dist/cjs/tools/ToolNode.cjs +16 -8
- package/dist/cjs/tools/ToolNode.cjs.map +1 -1
- package/dist/cjs/tools/handlers.cjs +2 -0
- package/dist/cjs/tools/handlers.cjs.map +1 -1
- package/dist/cjs/utils/errors.cjs +115 -0
- package/dist/cjs/utils/errors.cjs.map +1 -0
- package/dist/cjs/utils/events.cjs +17 -0
- package/dist/cjs/utils/events.cjs.map +1 -1
- package/dist/cjs/utils/handlers.cjs +16 -0
- package/dist/cjs/utils/handlers.cjs.map +1 -1
- package/dist/cjs/utils/llm.cjs +10 -0
- package/dist/cjs/utils/llm.cjs.map +1 -1
- package/dist/cjs/utils/tokens.cjs +247 -14
- package/dist/cjs/utils/tokens.cjs.map +1 -1
- package/dist/cjs/utils/truncation.cjs +107 -0
- package/dist/cjs/utils/truncation.cjs.map +1 -0
- package/dist/esm/agents/AgentContext.mjs +325 -61
- package/dist/esm/agents/AgentContext.mjs.map +1 -1
- package/dist/esm/common/enum.mjs +13 -0
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/events.mjs +8 -28
- package/dist/esm/events.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +307 -226
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs +4 -4
- package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/bedrock/utils/message_inputs.mjs +6 -2
- package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
- package/dist/esm/llm/init.mjs +58 -0
- package/dist/esm/llm/init.mjs.map +1 -0
- package/dist/esm/llm/invoke.mjs +87 -0
- package/dist/esm/llm/invoke.mjs.map +1 -0
- package/dist/esm/llm/openai/index.mjs +2 -0
- package/dist/esm/llm/openai/index.mjs.map +1 -1
- package/dist/esm/llm/request.mjs +38 -0
- package/dist/esm/llm/request.mjs.map +1 -0
- package/dist/esm/main.mjs +13 -3
- package/dist/esm/main.mjs.map +1 -1
- package/dist/esm/messages/cache.mjs +76 -89
- package/dist/esm/messages/cache.mjs.map +1 -1
- package/dist/esm/messages/contextPruning.mjs +154 -0
- package/dist/esm/messages/contextPruning.mjs.map +1 -0
- package/dist/esm/messages/contextPruningSettings.mjs +50 -0
- package/dist/esm/messages/contextPruningSettings.mjs.map +1 -0
- package/dist/esm/messages/core.mjs +23 -37
- package/dist/esm/messages/core.mjs.map +1 -1
- package/dist/esm/messages/format.mjs +156 -11
- package/dist/esm/messages/format.mjs.map +1 -1
- package/dist/esm/messages/prune.mjs +1158 -52
- package/dist/esm/messages/prune.mjs.map +1 -1
- package/dist/esm/messages/reducer.mjs +83 -0
- package/dist/esm/messages/reducer.mjs.map +1 -0
- package/dist/esm/run.mjs +82 -43
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/stream.mjs +54 -7
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/summarization/index.mjs +73 -0
- package/dist/esm/summarization/index.mjs.map +1 -0
- package/dist/esm/summarization/node.mjs +659 -0
- package/dist/esm/summarization/node.mjs.map +1 -0
- package/dist/esm/tools/ToolNode.mjs +16 -8
- package/dist/esm/tools/ToolNode.mjs.map +1 -1
- package/dist/esm/tools/handlers.mjs +2 -0
- package/dist/esm/tools/handlers.mjs.map +1 -1
- package/dist/esm/utils/errors.mjs +111 -0
- package/dist/esm/utils/errors.mjs.map +1 -0
- package/dist/esm/utils/events.mjs +17 -1
- package/dist/esm/utils/events.mjs.map +1 -1
- package/dist/esm/utils/handlers.mjs +16 -0
- package/dist/esm/utils/handlers.mjs.map +1 -1
- package/dist/esm/utils/llm.mjs +10 -1
- package/dist/esm/utils/llm.mjs.map +1 -1
- package/dist/esm/utils/tokens.mjs +245 -15
- package/dist/esm/utils/tokens.mjs.map +1 -1
- package/dist/esm/utils/truncation.mjs +102 -0
- package/dist/esm/utils/truncation.mjs.map +1 -0
- package/dist/types/agents/AgentContext.d.ts +124 -6
- package/dist/types/common/enum.d.ts +14 -1
- package/dist/types/graphs/Graph.d.ts +22 -27
- package/dist/types/index.d.ts +5 -0
- package/dist/types/llm/init.d.ts +18 -0
- package/dist/types/llm/invoke.d.ts +48 -0
- package/dist/types/llm/request.d.ts +14 -0
- package/dist/types/messages/contextPruning.d.ts +42 -0
- package/dist/types/messages/contextPruningSettings.d.ts +44 -0
- package/dist/types/messages/core.d.ts +1 -1
- package/dist/types/messages/format.d.ts +17 -1
- package/dist/types/messages/index.d.ts +3 -0
- package/dist/types/messages/prune.d.ts +162 -1
- package/dist/types/messages/reducer.d.ts +18 -0
- package/dist/types/run.d.ts +12 -1
- package/dist/types/summarization/index.d.ts +20 -0
- package/dist/types/summarization/node.d.ts +29 -0
- package/dist/types/tools/ToolNode.d.ts +3 -1
- package/dist/types/types/graph.d.ts +44 -6
- package/dist/types/types/index.d.ts +1 -0
- package/dist/types/types/run.d.ts +30 -0
- package/dist/types/types/stream.d.ts +31 -4
- package/dist/types/types/summarize.d.ts +47 -0
- package/dist/types/types/tools.d.ts +7 -0
- package/dist/types/utils/errors.d.ts +28 -0
- package/dist/types/utils/events.d.ts +13 -0
- package/dist/types/utils/index.d.ts +2 -0
- package/dist/types/utils/llm.d.ts +4 -0
- package/dist/types/utils/tokens.d.ts +14 -1
- package/dist/types/utils/truncation.d.ts +49 -0
- package/package.json +3 -3
- package/src/agents/AgentContext.ts +388 -58
- package/src/agents/__tests__/AgentContext.test.ts +265 -5
- package/src/common/enum.ts +13 -0
- package/src/events.ts +9 -39
- package/src/graphs/Graph.ts +468 -331
- package/src/index.ts +7 -0
- package/src/llm/anthropic/llm.spec.ts +3 -3
- package/src/llm/anthropic/utils/message_inputs.ts +6 -4
- package/src/llm/bedrock/llm.spec.ts +1 -1
- package/src/llm/bedrock/utils/message_inputs.ts +6 -2
- package/src/llm/init.ts +63 -0
- package/src/llm/invoke.ts +144 -0
- package/src/llm/request.ts +55 -0
- package/src/messages/__tests__/observationMasking.test.ts +221 -0
- package/src/messages/cache.ts +77 -102
- package/src/messages/contextPruning.ts +191 -0
- package/src/messages/contextPruningSettings.ts +90 -0
- package/src/messages/core.ts +32 -53
- package/src/messages/ensureThinkingBlock.test.ts +39 -39
- package/src/messages/format.ts +227 -15
- package/src/messages/formatAgentMessages.test.ts +511 -1
- package/src/messages/index.ts +3 -0
- package/src/messages/prune.ts +1548 -62
- package/src/messages/reducer.ts +22 -0
- package/src/run.ts +104 -51
- package/src/scripts/bedrock-merge-test.ts +1 -1
- package/src/scripts/test-thinking-handoff-bedrock.ts +1 -1
- package/src/scripts/test-thinking-handoff.ts +1 -1
- package/src/scripts/thinking-bedrock.ts +1 -1
- package/src/scripts/thinking.ts +1 -1
- package/src/specs/anthropic.simple.test.ts +1 -1
- package/src/specs/multi-agent-summarization.test.ts +396 -0
- package/src/specs/prune.test.ts +1196 -23
- package/src/specs/summarization-unit.test.ts +868 -0
- package/src/specs/summarization.test.ts +3827 -0
- package/src/specs/summarize-prune.test.ts +376 -0
- package/src/specs/thinking-handoff.test.ts +10 -10
- package/src/specs/thinking-prune.test.ts +7 -4
- package/src/specs/token-accounting-e2e.test.ts +1034 -0
- package/src/specs/token-accounting-pipeline.test.ts +882 -0
- package/src/specs/token-distribution-edge-case.test.ts +25 -26
- package/src/splitStream.test.ts +42 -33
- package/src/stream.ts +64 -11
- package/src/summarization/__tests__/aggregator.test.ts +153 -0
- package/src/summarization/__tests__/node.test.ts +708 -0
- package/src/summarization/__tests__/trigger.test.ts +50 -0
- package/src/summarization/index.ts +102 -0
- package/src/summarization/node.ts +982 -0
- package/src/tools/ToolNode.ts +25 -3
- package/src/types/graph.ts +62 -7
- package/src/types/index.ts +1 -0
- package/src/types/run.ts +32 -0
- package/src/types/stream.ts +45 -5
- package/src/types/summarize.ts +58 -0
- package/src/types/tools.ts +7 -0
- package/src/utils/errors.ts +117 -0
- package/src/utils/events.ts +31 -0
- package/src/utils/handlers.ts +18 -0
- package/src/utils/index.ts +2 -0
- package/src/utils/llm.ts +12 -0
- package/src/utils/tokens.ts +336 -18
- package/src/utils/truncation.ts +124 -0
- package/src/scripts/image.ts +0 -180
|
@@ -1,6 +1,329 @@
|
|
|
1
|
-
import { AIMessage } from '@langchain/core/messages';
|
|
2
|
-
import {
|
|
1
|
+
import { AIMessage, ToolMessage } from '@langchain/core/messages';
|
|
2
|
+
import { truncateToolResultContent, truncateToolInput, calculateMaxToolResultChars } from '../utils/truncation.mjs';
|
|
3
|
+
import { resolveContextPruningSettings } from './contextPruningSettings.mjs';
|
|
4
|
+
import { Providers, ContentTypes, Constants } from '../common/enum.mjs';
|
|
5
|
+
import { applyContextPruning } from './contextPruning.mjs';
|
|
3
6
|
|
|
7
|
+
function sumTokenCounts(tokenMap, count) {
|
|
8
|
+
let total = 0;
|
|
9
|
+
for (let i = 0; i < count; i++) {
|
|
10
|
+
total += tokenMap[i] ?? 0;
|
|
11
|
+
}
|
|
12
|
+
return total;
|
|
13
|
+
}
|
|
14
|
+
/** Default fraction of the token budget reserved as headroom (5 %). */
|
|
15
|
+
const DEFAULT_RESERVE_RATIO = 0.05;
|
|
16
|
+
/** Context pressure at which observation masking and context fading activate. */
|
|
17
|
+
const PRESSURE_THRESHOLD_MASKING = 0.8;
|
|
18
|
+
/** Pressure band thresholds paired with budget factors for progressive context fading. */
|
|
19
|
+
const PRESSURE_BANDS = [
|
|
20
|
+
[0.99, 0.05],
|
|
21
|
+
[0.9, 0.2],
|
|
22
|
+
[0.85, 0.5],
|
|
23
|
+
[0.8, 1.0],
|
|
24
|
+
];
|
|
25
|
+
/** Maximum character length for masked (consumed) tool results. */
|
|
26
|
+
const MASKED_RESULT_MAX_CHARS = 300;
|
|
27
|
+
/** Hard cap for the originalToolContent store (~2 MB estimated from char length). */
|
|
28
|
+
const ORIGINAL_CONTENT_MAX_CHARS = 2_000_000;
|
|
29
|
+
/** Minimum cumulative calibration ratio — provider can't count fewer tokens
|
|
30
|
+
* than our raw estimate (within reason). Prevents divide-by-zero edge cases. */
|
|
31
|
+
const CALIBRATION_RATIO_MIN = 0.5;
|
|
32
|
+
/** Maximum cumulative calibration ratio — sanity cap for the running ratio. */
|
|
33
|
+
const CALIBRATION_RATIO_MAX = 5;
|
|
34
|
+
function getToolCallIds(message) {
|
|
35
|
+
if (message.getType() !== 'ai') {
|
|
36
|
+
return new Set();
|
|
37
|
+
}
|
|
38
|
+
const ids = new Set();
|
|
39
|
+
const aiMessage = message;
|
|
40
|
+
for (const toolCall of aiMessage.tool_calls ?? []) {
|
|
41
|
+
if (typeof toolCall.id === 'string' && toolCall.id.length > 0) {
|
|
42
|
+
ids.add(toolCall.id);
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
if (Array.isArray(aiMessage.content)) {
|
|
46
|
+
for (const part of aiMessage.content) {
|
|
47
|
+
if (typeof part !== 'object') {
|
|
48
|
+
continue;
|
|
49
|
+
}
|
|
50
|
+
const record = part;
|
|
51
|
+
if ((record.type === 'tool_use' || record.type === 'tool_call') &&
|
|
52
|
+
typeof record.id === 'string' &&
|
|
53
|
+
record.id.length > 0) {
|
|
54
|
+
ids.add(record.id);
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
return ids;
|
|
59
|
+
}
|
|
60
|
+
function getToolResultId(message) {
|
|
61
|
+
if (message.getType() !== 'tool') {
|
|
62
|
+
return null;
|
|
63
|
+
}
|
|
64
|
+
const toolMessage = message;
|
|
65
|
+
if (typeof toolMessage.tool_call_id === 'string' &&
|
|
66
|
+
toolMessage.tool_call_id.length > 0) {
|
|
67
|
+
return toolMessage.tool_call_id;
|
|
68
|
+
}
|
|
69
|
+
if (typeof toolMessage.toolCallId === 'string' &&
|
|
70
|
+
toolMessage.toolCallId.length > 0) {
|
|
71
|
+
return toolMessage.toolCallId;
|
|
72
|
+
}
|
|
73
|
+
return null;
|
|
74
|
+
}
|
|
75
|
+
function resolveTokenCountForMessage({ message, messageIndexMap, tokenCounter, indexTokenCountMap, }) {
|
|
76
|
+
const originalIndex = messageIndexMap.get(message) ?? -1;
|
|
77
|
+
if (originalIndex > -1 && indexTokenCountMap[originalIndex] != null) {
|
|
78
|
+
return indexTokenCountMap[originalIndex];
|
|
79
|
+
}
|
|
80
|
+
return tokenCounter(message);
|
|
81
|
+
}
|
|
82
|
+
function repairOrphanedToolMessages({ context, allMessages, tokenCounter, indexTokenCountMap, }) {
|
|
83
|
+
const messageIndexMap = new Map();
|
|
84
|
+
for (let i = 0; i < allMessages.length; i++) {
|
|
85
|
+
messageIndexMap.set(allMessages[i], i);
|
|
86
|
+
}
|
|
87
|
+
const validToolCallIds = new Set();
|
|
88
|
+
const presentToolResultIds = new Set();
|
|
89
|
+
for (const message of context) {
|
|
90
|
+
for (const id of getToolCallIds(message)) {
|
|
91
|
+
validToolCallIds.add(id);
|
|
92
|
+
}
|
|
93
|
+
const resultId = getToolResultId(message);
|
|
94
|
+
if (resultId != null) {
|
|
95
|
+
presentToolResultIds.add(resultId);
|
|
96
|
+
}
|
|
97
|
+
}
|
|
98
|
+
let reclaimedTokens = 0;
|
|
99
|
+
let droppedOrphanCount = 0;
|
|
100
|
+
const repairedContext = [];
|
|
101
|
+
const droppedMessages = [];
|
|
102
|
+
for (const message of context) {
|
|
103
|
+
if (message.getType() === 'tool') {
|
|
104
|
+
const toolResultId = getToolResultId(message);
|
|
105
|
+
if (toolResultId == null || !validToolCallIds.has(toolResultId)) {
|
|
106
|
+
droppedOrphanCount += 1;
|
|
107
|
+
reclaimedTokens += resolveTokenCountForMessage({
|
|
108
|
+
message,
|
|
109
|
+
tokenCounter,
|
|
110
|
+
messageIndexMap,
|
|
111
|
+
indexTokenCountMap,
|
|
112
|
+
});
|
|
113
|
+
droppedMessages.push(message);
|
|
114
|
+
continue;
|
|
115
|
+
}
|
|
116
|
+
repairedContext.push(message);
|
|
117
|
+
continue;
|
|
118
|
+
}
|
|
119
|
+
if (message.getType() === 'ai' && message instanceof AIMessage) {
|
|
120
|
+
const toolCallIds = getToolCallIds(message);
|
|
121
|
+
if (toolCallIds.size > 0) {
|
|
122
|
+
let hasOrphanToolCalls = false;
|
|
123
|
+
for (const id of toolCallIds) {
|
|
124
|
+
if (!presentToolResultIds.has(id)) {
|
|
125
|
+
hasOrphanToolCalls = true;
|
|
126
|
+
break;
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
if (hasOrphanToolCalls) {
|
|
130
|
+
const originalTokens = resolveTokenCountForMessage({
|
|
131
|
+
message,
|
|
132
|
+
messageIndexMap,
|
|
133
|
+
tokenCounter,
|
|
134
|
+
indexTokenCountMap,
|
|
135
|
+
});
|
|
136
|
+
const stripped = stripOrphanToolUseBlocks(message, presentToolResultIds);
|
|
137
|
+
if (stripped != null) {
|
|
138
|
+
const strippedTokens = tokenCounter(stripped);
|
|
139
|
+
reclaimedTokens += originalTokens - strippedTokens;
|
|
140
|
+
repairedContext.push(stripped);
|
|
141
|
+
}
|
|
142
|
+
else {
|
|
143
|
+
droppedOrphanCount += 1;
|
|
144
|
+
reclaimedTokens += originalTokens;
|
|
145
|
+
droppedMessages.push(message);
|
|
146
|
+
}
|
|
147
|
+
continue;
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
repairedContext.push(message);
|
|
152
|
+
}
|
|
153
|
+
return {
|
|
154
|
+
context: repairedContext,
|
|
155
|
+
reclaimedTokens,
|
|
156
|
+
droppedOrphanCount,
|
|
157
|
+
droppedMessages,
|
|
158
|
+
};
|
|
159
|
+
}
|
|
160
|
+
/**
|
|
161
|
+
* Strips tool_use content blocks and tool_calls entries from an AI message
|
|
162
|
+
* when their corresponding ToolMessages are not in the context.
|
|
163
|
+
* Returns null if the message has no content left after stripping.
|
|
164
|
+
*/
|
|
165
|
+
function stripOrphanToolUseBlocks(message, presentToolResultIds) {
|
|
166
|
+
const keptToolCalls = (message.tool_calls ?? []).filter((tc) => typeof tc.id === 'string' && presentToolResultIds.has(tc.id));
|
|
167
|
+
let keptContent;
|
|
168
|
+
if (Array.isArray(message.content)) {
|
|
169
|
+
const filtered = message.content.filter((block) => {
|
|
170
|
+
if (typeof block !== 'object') {
|
|
171
|
+
return true;
|
|
172
|
+
}
|
|
173
|
+
const record = block;
|
|
174
|
+
if ((record.type === 'tool_use' || record.type === 'tool_call') &&
|
|
175
|
+
typeof record.id === 'string') {
|
|
176
|
+
return presentToolResultIds.has(record.id);
|
|
177
|
+
}
|
|
178
|
+
return true;
|
|
179
|
+
});
|
|
180
|
+
if (filtered.length === 0) {
|
|
181
|
+
return null;
|
|
182
|
+
}
|
|
183
|
+
keptContent = filtered;
|
|
184
|
+
}
|
|
185
|
+
else {
|
|
186
|
+
keptContent = message.content;
|
|
187
|
+
}
|
|
188
|
+
return new AIMessage({
|
|
189
|
+
...message,
|
|
190
|
+
content: keptContent,
|
|
191
|
+
tool_calls: keptToolCalls.length > 0 ? keptToolCalls : undefined,
|
|
192
|
+
});
|
|
193
|
+
}
|
|
194
|
+
/**
|
|
195
|
+
* Lightweight structural cleanup: strips orphan tool_use blocks from AI messages
|
|
196
|
+
* and drops orphan ToolMessages whose AI counterpart is missing.
|
|
197
|
+
*
|
|
198
|
+
* Unlike `repairOrphanedToolMessages`, this does NOT track tokens — it is
|
|
199
|
+
* intended as a final safety net in Graph.ts right before model invocation
|
|
200
|
+
* to prevent Anthropic/Bedrock structural validation errors.
|
|
201
|
+
*
|
|
202
|
+
* Uses duck-typing instead of `getType()` because messages at this stage
|
|
203
|
+
* may be plain objects (from LangGraph state serialization) rather than
|
|
204
|
+
* proper BaseMessage class instances.
|
|
205
|
+
*
|
|
206
|
+
* Includes a fast-path: if every tool_call has a matching tool_result and
|
|
207
|
+
* vice-versa, the original array is returned immediately with zero allocation.
|
|
208
|
+
*/
|
|
209
|
+
function sanitizeOrphanToolBlocks(messages) {
|
|
210
|
+
const allToolCallIds = new Set();
|
|
211
|
+
const allToolResultIds = new Set();
|
|
212
|
+
for (const msg of messages) {
|
|
213
|
+
const msgAny = msg;
|
|
214
|
+
const toolCalls = msgAny.tool_calls;
|
|
215
|
+
if (Array.isArray(toolCalls)) {
|
|
216
|
+
for (const tc of toolCalls) {
|
|
217
|
+
if (typeof tc.id === 'string' &&
|
|
218
|
+
tc.id.length > 0 &&
|
|
219
|
+
!tc.id.startsWith(Constants.ANTHROPIC_SERVER_TOOL_PREFIX)) {
|
|
220
|
+
allToolCallIds.add(tc.id);
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
}
|
|
224
|
+
if (Array.isArray(msgAny.content)) {
|
|
225
|
+
for (const block of msgAny.content) {
|
|
226
|
+
if (typeof block === 'object' &&
|
|
227
|
+
(block.type === 'tool_use' || block.type === 'tool_call') &&
|
|
228
|
+
typeof block.id === 'string' &&
|
|
229
|
+
!block.id.startsWith(Constants.ANTHROPIC_SERVER_TOOL_PREFIX)) {
|
|
230
|
+
allToolCallIds.add(block.id);
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
}
|
|
234
|
+
const toolCallId = msgAny.tool_call_id;
|
|
235
|
+
if (typeof toolCallId === 'string' && toolCallId.length > 0) {
|
|
236
|
+
allToolResultIds.add(toolCallId);
|
|
237
|
+
}
|
|
238
|
+
}
|
|
239
|
+
let hasOrphans = false;
|
|
240
|
+
for (const id of allToolCallIds) {
|
|
241
|
+
if (!allToolResultIds.has(id)) {
|
|
242
|
+
hasOrphans = true;
|
|
243
|
+
break;
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
if (!hasOrphans) {
|
|
247
|
+
for (const id of allToolResultIds) {
|
|
248
|
+
if (!allToolCallIds.has(id)) {
|
|
249
|
+
hasOrphans = true;
|
|
250
|
+
break;
|
|
251
|
+
}
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
if (!hasOrphans) {
|
|
255
|
+
return messages;
|
|
256
|
+
}
|
|
257
|
+
const result = [];
|
|
258
|
+
const strippedAiIndices = new Set();
|
|
259
|
+
for (const msg of messages) {
|
|
260
|
+
const msgAny = msg;
|
|
261
|
+
const msgType = typeof msg.getType === 'function'
|
|
262
|
+
? msg.getType()
|
|
263
|
+
: (msgAny.role ??
|
|
264
|
+
msgAny._type);
|
|
265
|
+
const toolCallId = msgAny.tool_call_id;
|
|
266
|
+
if ((msgType === 'tool' || msg instanceof ToolMessage) &&
|
|
267
|
+
typeof toolCallId === 'string' &&
|
|
268
|
+
!allToolCallIds.has(toolCallId)) {
|
|
269
|
+
continue;
|
|
270
|
+
}
|
|
271
|
+
const toolCalls = msgAny.tool_calls;
|
|
272
|
+
if ((msgType === 'ai' ||
|
|
273
|
+
msgType === 'assistant' ||
|
|
274
|
+
msg instanceof AIMessage) &&
|
|
275
|
+
Array.isArray(toolCalls) &&
|
|
276
|
+
toolCalls.length > 0) {
|
|
277
|
+
const hasOrphanCalls = toolCalls.some((tc) => typeof tc.id === 'string' && !allToolResultIds.has(tc.id));
|
|
278
|
+
if (hasOrphanCalls) {
|
|
279
|
+
if (msg instanceof AIMessage) {
|
|
280
|
+
const stripped = stripOrphanToolUseBlocks(msg, allToolResultIds);
|
|
281
|
+
if (stripped != null) {
|
|
282
|
+
strippedAiIndices.add(result.length);
|
|
283
|
+
result.push(stripped);
|
|
284
|
+
}
|
|
285
|
+
continue;
|
|
286
|
+
}
|
|
287
|
+
const keptToolCalls = toolCalls.filter((tc) => typeof tc.id === 'string' && allToolResultIds.has(tc.id));
|
|
288
|
+
const keptContent = Array.isArray(msgAny.content)
|
|
289
|
+
? msgAny.content.filter((block) => {
|
|
290
|
+
if (typeof block !== 'object')
|
|
291
|
+
return true;
|
|
292
|
+
if ((block.type === 'tool_use' || block.type === 'tool_call') &&
|
|
293
|
+
typeof block.id === 'string') {
|
|
294
|
+
return allToolResultIds.has(block.id);
|
|
295
|
+
}
|
|
296
|
+
return true;
|
|
297
|
+
})
|
|
298
|
+
: msgAny.content;
|
|
299
|
+
if (keptToolCalls.length === 0 &&
|
|
300
|
+
Array.isArray(keptContent) &&
|
|
301
|
+
keptContent.length === 0) {
|
|
302
|
+
continue;
|
|
303
|
+
}
|
|
304
|
+
strippedAiIndices.add(result.length);
|
|
305
|
+
const patched = Object.create(Object.getPrototypeOf(msg), Object.getOwnPropertyDescriptors(msg));
|
|
306
|
+
patched.tool_calls = keptToolCalls.length > 0 ? keptToolCalls : [];
|
|
307
|
+
patched.content = keptContent;
|
|
308
|
+
result.push(patched);
|
|
309
|
+
continue;
|
|
310
|
+
}
|
|
311
|
+
}
|
|
312
|
+
result.push(msg);
|
|
313
|
+
}
|
|
314
|
+
// Bedrock/Anthropic require the conversation to end with a user message;
|
|
315
|
+
// a stripped AI message (tool_use removed) represents a dead-end exchange.
|
|
316
|
+
while (result.length > 0 && strippedAiIndices.has(result.length - 1)) {
|
|
317
|
+
result.pop();
|
|
318
|
+
}
|
|
319
|
+
return result;
|
|
320
|
+
}
|
|
321
|
+
/**
|
|
322
|
+
* Truncates an oversized tool_use `input` field using head+tail, preserving
|
|
323
|
+
* it as a valid JSON object. Head gets ~70%, tail gets ~30% so the model
|
|
324
|
+
* sees both the beginning (what was called) and end (closing structure/values).
|
|
325
|
+
* Falls back to head-only when the budget is too small for a meaningful tail.
|
|
326
|
+
*/
|
|
4
327
|
function isIndexInContext(arrayA, arrayB, targetIndex) {
|
|
5
328
|
const startingIndexInA = arrayA.length - arrayB.length;
|
|
6
329
|
return targetIndex >= startingIndexInA;
|
|
@@ -34,8 +357,14 @@ function calculateTotalTokens(usage) {
|
|
|
34
357
|
const baseInputTokens = Number(usage.input_tokens) || 0;
|
|
35
358
|
const cacheCreation = Number(usage.input_token_details?.cache_creation) || 0;
|
|
36
359
|
const cacheRead = Number(usage.input_token_details?.cache_read) || 0;
|
|
37
|
-
const totalInputTokens = baseInputTokens + cacheCreation + cacheRead;
|
|
38
360
|
const totalOutputTokens = Number(usage.output_tokens) || 0;
|
|
361
|
+
const cacheSum = cacheCreation + cacheRead;
|
|
362
|
+
// Anthropic: input_tokens excludes cache, cache_read can be much larger than input_tokens.
|
|
363
|
+
// OpenAI: input_tokens includes cache, cache_read is always <= input_tokens.
|
|
364
|
+
const cacheIsAdditive = cacheSum > 0 && cacheSum > baseInputTokens;
|
|
365
|
+
const totalInputTokens = cacheIsAdditive
|
|
366
|
+
? baseInputTokens + cacheSum
|
|
367
|
+
: baseInputTokens;
|
|
39
368
|
return {
|
|
40
369
|
input_tokens: totalInputTokens,
|
|
41
370
|
output_tokens: totalOutputTokens,
|
|
@@ -49,12 +378,12 @@ function calculateTotalTokens(usage) {
|
|
|
49
378
|
* @param options Configuration options for processing messages
|
|
50
379
|
* @returns Object containing the message context, remaining tokens, messages not included, and summary index
|
|
51
380
|
*/
|
|
52
|
-
function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, thinkingStartIndex: _thinkingStartIndex = -1, reasoningType = ContentTypes.THINKING, }) {
|
|
381
|
+
function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, indexTokenCountMap, startType: _startType, thinkingEnabled, tokenCounter, thinkingStartIndex: _thinkingStartIndex = -1, reasoningType = ContentTypes.THINKING, instructionTokens: _instructionTokens = 0, }) {
|
|
53
382
|
// Every reply is primed with <|start|>assistant<|message|>, so we
|
|
54
383
|
// start with 3 tokens for the label after all messages have been counted.
|
|
55
384
|
let currentTokenCount = 3;
|
|
56
385
|
const instructions = _messages[0]?.getType() === 'system' ? _messages[0] : undefined;
|
|
57
|
-
const instructionsTokenCount = instructions != null ? (indexTokenCountMap[0] ?? 0) :
|
|
386
|
+
const instructionsTokenCount = instructions != null ? (indexTokenCountMap[0] ?? 0) : _instructionTokens;
|
|
58
387
|
const initialContextTokens = maxContextTokens - instructionsTokenCount;
|
|
59
388
|
let remainingContextTokens = initialContextTokens;
|
|
60
389
|
let startType = _startType;
|
|
@@ -152,6 +481,16 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
|
|
|
152
481
|
context.push(_messages[0]);
|
|
153
482
|
messages.shift();
|
|
154
483
|
}
|
|
484
|
+
// The backward iteration pushed messages in reverse chronological order
|
|
485
|
+
// (newest first). Restore correct chronological order before prepending
|
|
486
|
+
// the remaining (older) messages so that messagesToRefine is always
|
|
487
|
+
// ordered oldest → newest. Without this, callers that rely on
|
|
488
|
+
// messagesToRefine order (e.g. the summarization node extracting the
|
|
489
|
+
// latest turn) would see tool_use/tool_result pairs in the wrong order.
|
|
490
|
+
prunedMemory.reverse();
|
|
491
|
+
if (messages.length > 0) {
|
|
492
|
+
prunedMemory.unshift(...messages);
|
|
493
|
+
}
|
|
155
494
|
remainingContextTokens -= currentTokenCount;
|
|
156
495
|
const result = {
|
|
157
496
|
remainingContextTokens,
|
|
@@ -165,7 +504,6 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
|
|
|
165
504
|
thinkingEndIndex < 0 ||
|
|
166
505
|
(thinkingStartIndex > -1 &&
|
|
167
506
|
isIndexInContext(_messages, context, thinkingStartIndex))) {
|
|
168
|
-
// we reverse at this step to ensure the context is in the correct order for the model, and we need to work backwards
|
|
169
507
|
result.context = context.reverse();
|
|
170
508
|
return result;
|
|
171
509
|
}
|
|
@@ -175,9 +513,6 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
|
|
|
175
513
|
if (!thinkingBlock) {
|
|
176
514
|
throw new Error('The payload is malformed. There is a thinking sequence but no thinking block found.');
|
|
177
515
|
}
|
|
178
|
-
// Since we have a thinking sequence, we need to find the last assistant message
|
|
179
|
-
// in the latest AI/tool sequence to add the thinking block that falls outside of the current context
|
|
180
|
-
// Latest messages are ordered first.
|
|
181
516
|
let assistantIndex = -1;
|
|
182
517
|
for (let i = 0; i < context.length; i++) {
|
|
183
518
|
const currentMessage = context[i];
|
|
@@ -190,7 +525,10 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
|
|
|
190
525
|
}
|
|
191
526
|
}
|
|
192
527
|
if (assistantIndex === -1) {
|
|
193
|
-
|
|
528
|
+
// No AI messages survived pruning — skip thinking block reattachment.
|
|
529
|
+
// The caller handles empty/insufficient context via overflow recovery.
|
|
530
|
+
result.context = context.reverse();
|
|
531
|
+
return result;
|
|
194
532
|
}
|
|
195
533
|
thinkingStartIndex = originalLength - 1 - assistantIndex;
|
|
196
534
|
const thinkingTokenCount = tokenCounter(new AIMessage({ content: [thinkingBlock] }));
|
|
@@ -202,7 +540,6 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
|
|
|
202
540
|
return result;
|
|
203
541
|
}
|
|
204
542
|
const thinkingMessage = context[assistantIndex];
|
|
205
|
-
// now we need to an additional round of pruning but making the thinking block fit
|
|
206
543
|
const newThinkingMessageTokenCount = (indexTokenCountMap[thinkingStartIndex] ?? 0) + thinkingTokenCount;
|
|
207
544
|
remainingContextTokens = initialContextTokens - newThinkingMessageTokenCount;
|
|
208
545
|
currentTokenCount = 3;
|
|
@@ -267,13 +604,271 @@ function getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, in
|
|
|
267
604
|
function checkValidNumber(value) {
|
|
268
605
|
return typeof value === 'number' && !isNaN(value) && value > 0;
|
|
269
606
|
}
|
|
607
|
+
/**
 * Observation masking: replaces consumed ToolMessage content with tight
 * head+tail truncations that serve as informative placeholders.
 *
 * A ToolMessage is "consumed" when a subsequent AI message exists that is NOT
 * purely tool calls — meaning the model has already read and acted on the
 * result. Unconsumed results (the latest tool outputs the model hasn't
 * responded to yet) are left intact so the model can still use them.
 *
 * AI messages are never masked — they contain the model's own reasoning and
 * conclusions, which is what prevents the model from repeating work after
 * its tool results are masked.
 *
 * @param {Object} params
 * @param {Array} params.messages - Live message array; masked entries are replaced in place with clones.
 * @param {Object} params.indexTokenCountMap - Map of message index → token count; recounted in place for masked entries.
 * @param {Function} params.tokenCounter - Counts tokens for a single message; used to recount masked clones.
 * @param {number} [params.availableRawBudget] - Raw token budget; when positive it is converted to a character budget (~4 chars/token) shared across masked results with recency weighting.
 * @param {Map} [params.originalContentStore] - Optional store of pre-masking content keyed by message index (first write wins).
 * @param {Function} [params.onContentStored] - Invoked with the stored content's character length whenever an original is snapshotted.
 * @returns The number of tool messages that were masked.
 */
function maskConsumedToolResults(params) {
    const { messages, indexTokenCountMap, tokenCounter } = params;
    let maskedCount = 0;
    // Pass 1 (backward): identify consumed tool message indices.
    // A ToolMessage is "consumed" once we've seen a subsequent AI message with
    // substantive text content (not just tool calls).
    // Collected in forward order (oldest first) for recency weighting.
    let seenNonToolCallAI = false;
    const consumedIndices = [];
    for (let i = messages.length - 1; i >= 0; i--) {
        const msg = messages[i];
        const type = msg.getType();
        if (type === 'ai') {
            // An AI message "has text" when its string content is non-blank,
            // or when any block of its array content is a non-blank text block.
            const hasText = typeof msg.content === 'string'
                ? msg.content.trim().length > 0
                : Array.isArray(msg.content) &&
                    msg.content.some((b) => typeof b === 'object' &&
                        b.type === 'text' &&
                        typeof b.text === 'string' &&
                        b.text.trim().length > 0);
            if (hasText) {
                seenNonToolCallAI = true;
            }
        }
        else if (type === 'tool' && seenNonToolCallAI) {
            consumedIndices.push(i);
        }
    }
    if (consumedIndices.length === 0) {
        return 0;
    }
    // The backward scan pushed newest-first; reverse to oldest-first so the
    // position-based weight below grows with recency.
    consumedIndices.reverse();
    // ~4 chars/token heuristic converts the raw token budget to characters.
    const totalBudgetChars = params.availableRawBudget != null && params.availableRawBudget > 0
        ? params.availableRawBudget * 4
        : 0;
    const count = consumedIndices.length;
    // Pass 2 (forward): mask each consumed result, giving newer results a
    // larger share of the character budget than older ones.
    for (let c = 0; c < count; c++) {
        const i = consumedIndices[c];
        const message = messages[i];
        const content = message.content;
        if (typeof content !== 'string') {
            continue;
        }
        let maxChars;
        if (totalBudgetChars > 0) {
            // Linear recency weight in [0.2, 1.0]; the weights average 0.6, so
            // 0.6 * count normalizes the shares. MASKED_RESULT_MAX_CHARS is
            // the per-message floor regardless of share.
            const position = count > 1 ? c / (count - 1) : 1;
            const weight = 0.2 + 0.8 * position;
            const totalWeight = count > 1 ? 0.6 * count : 1;
            const share = (weight / totalWeight) * totalBudgetChars;
            maxChars = Math.max(MASKED_RESULT_MAX_CHARS, Math.floor(share));
        }
        else {
            maxChars = MASKED_RESULT_MAX_CHARS;
        }
        if (content.length <= maxChars) {
            continue;
        }
        // Snapshot the full content once (first mask wins) so a later
        // summarizer can still read the original tool output.
        if (params.originalContentStore && !params.originalContentStore.has(i)) {
            params.originalContentStore.set(i, content);
            if (params.onContentStored) {
                params.onContentStored(content.length);
            }
        }
        // Clone rather than mutate so earlier holders of the old message
        // object are unaffected; identity/metadata fields are carried over.
        const cloned = new ToolMessage({
            content: truncateToolResultContent(content, maxChars),
            tool_call_id: message.tool_call_id,
            name: message.name,
            id: message.id,
            additional_kwargs: message.additional_kwargs,
            response_metadata: message.response_metadata,
        });
        messages[i] = cloned;
        indexTokenCountMap[i] = tokenCounter(cloned);
        maskedCount++;
    }
    return maskedCount;
}
|
|
700
|
+
/**
 * Pre-flight truncation of oversized ToolMessage outputs, applied before the
 * main backward-iteration pruning pass runs. Unlike the ingestion guard
 * (which caps content at tool-execution time), this applies per turn against
 * the current context budget, which may have shrunk as the conversation grew.
 *
 * Older results receive a smaller character cap than newer ones — the recency
 * factor ramps linearly from 0.2 (oldest) to 1.0 (newest) — with an absolute
 * floor of 200 characters. Truncated messages are re-counted via
 * `tokenCounter` and `indexTokenCountMap` is updated in place so subsequent
 * pruning works with accurate counts.
 *
 * @param {Object} params
 * @param {Array} params.messages - Live message array; truncated entries are replaced in place with clones.
 * @param {number} params.maxContextTokens - Budget used to derive the base character cap.
 * @param {Object} params.indexTokenCountMap - Map of message index → token count, updated in place.
 * @param {Function} params.tokenCounter - Counts tokens for a single message.
 * @returns The number of tool messages that were truncated.
 */
function preFlightTruncateToolResults(params) {
    const { messages, maxContextTokens, indexTokenCountMap, tokenCounter } = params;
    const baseMaxChars = calculateMaxToolResultChars(maxContextTokens);
    // Positions of every tool message, oldest first.
    const toolIndices = messages
        .map((msg, idx) => (msg.getType() === 'tool' ? idx : -1))
        .filter((idx) => idx !== -1);
    const lastSlot = toolIndices.length - 1;
    let truncatedCount = 0;
    toolIndices.forEach((msgIndex, slot) => {
        const original = messages[msgIndex];
        const body = original.content;
        // Non-string content (e.g. structured blocks) is left untouched.
        if (typeof body !== 'string') {
            return;
        }
        // Recency factor: 0.2 for the oldest result, 1.0 for the newest;
        // a lone result counts as newest. 200-char floor regardless.
        const position = lastSlot > 0 ? slot / lastSlot : 1;
        const recencyFactor = 0.2 + 0.8 * position;
        const maxChars = Math.max(200, Math.floor(baseMaxChars * recencyFactor));
        if (body.length <= maxChars) {
            return;
        }
        // Replace with a clone so other holders of the original message
        // object are unaffected; identity/metadata fields carry over as-is.
        const replacement = new ToolMessage({
            content: truncateToolResultContent(body, maxChars),
            tool_call_id: original.tool_call_id,
            name: original.name,
            id: original.id,
            additional_kwargs: original.additional_kwargs,
            response_metadata: original.response_metadata,
        });
        messages[msgIndex] = replacement;
        indexTokenCountMap[msgIndex] = tokenCounter(replacement);
        truncatedCount++;
    });
    return truncatedCount;
}
|
|
749
|
+
/**
 * Pre-flight truncation: truncates oversized `tool_use` input fields in AI messages.
 *
 * Tool call inputs (arguments) can be very large — e.g., code evaluation payloads from
 * MCP tools like chrome-devtools. Since these tool calls have already been executed,
 * the model only needs a summary of what was called, not the full arguments. Truncating
 * them before pruning can prevent entire messages from being dropped.
 *
 * Uses 15% of the context window (in estimated characters, ~4 chars/token) as the
 * per-input cap, capped at 200K chars.
 *
 * @param {Object} params
 * @param {Array} params.messages - Live message array; truncated AI messages are replaced in place with rebuilt clones.
 * @param {number} params.maxContextTokens - Context window used to derive the per-input character cap.
 * @param {Object} params.indexTokenCountMap - Map of message index → token count; recounted in place for truncated entries.
 * @param {Function} params.tokenCounter - Counts tokens for a single message.
 * @returns The number of AI messages that had tool_use inputs truncated.
 */
function preFlightTruncateToolCallInputs(params) {
    const { messages, maxContextTokens, indexTokenCountMap, tokenCounter } = params;
    // 15% of the window, ~4 chars/token, hard-capped at 200K characters.
    const maxInputChars = Math.min(Math.floor(maxContextTokens * 0.15) * 4, 200_000);
    let truncatedCount = 0;
    for (let i = 0; i < messages.length; i++) {
        const message = messages[i];
        if (message.getType() !== 'ai') {
            continue;
        }
        // Only array-form content can carry tool_use/tool_call blocks.
        if (!Array.isArray(message.content)) {
            continue;
        }
        const originalContent = message.content;
        // Object wrapper lets the map callback flag a change without
        // reassigning an outer binding from inside the closure.
        const state = { changed: false };
        const newContent = originalContent.map((block) => {
            if (typeof block !== 'object') {
                return block;
            }
            const record = block;
            if (record.type !== 'tool_use' && record.type !== 'tool_call') {
                return block;
            }
            const input = record.input;
            if (input == null) {
                return block;
            }
            // Inputs may be pre-serialized strings or structured objects.
            const serialized = typeof input === 'string' ? input : JSON.stringify(input);
            if (serialized.length <= maxInputChars) {
                return block;
            }
            state.changed = true;
            // Replaces original input with { _truncated, _originalChars } —
            // safe because the tool call already executed in a prior turn.
            return {
                ...record,
                input: truncateToolInput(serialized, maxInputChars),
            };
        });
        if (!state.changed) {
            continue;
        }
        const aiMsg = message;
        // Mirror the truncation in the structured tool_calls field so both
        // representations of the call arguments stay consistent.
        const newToolCalls = (aiMsg.tool_calls ?? []).map((tc) => {
            const serializedArgs = JSON.stringify(tc.args);
            if (serializedArgs.length <= maxInputChars) {
                return tc;
            }
            // Replaces original args with { _truncated, _originalChars } —
            // safe because the tool call already executed in a prior turn.
            return {
                ...tc,
                args: truncateToolInput(serializedArgs, maxInputChars),
            };
        });
        // Rebuild as a fresh AIMessage rather than mutating; the spread copies
        // the old message's own enumerable fields before the overrides.
        messages[i] = new AIMessage({
            ...aiMsg,
            content: newContent,
            tool_calls: newToolCalls.length > 0 ? newToolCalls : undefined,
        });
        indexTokenCountMap[i] = tokenCounter(messages[i]);
        truncatedCount++;
    }
    return truncatedCount;
}
|
|
270
826
|
function createPruneMessages(factoryParams) {
|
|
271
827
|
const indexTokenCountMap = { ...factoryParams.indexTokenCountMap };
|
|
272
828
|
let lastTurnStartIndex = factoryParams.startIndex;
|
|
273
829
|
let lastCutOffIndex = 0;
|
|
274
|
-
let totalTokens =
|
|
830
|
+
let totalTokens = 0;
|
|
831
|
+
for (const key in indexTokenCountMap) {
|
|
832
|
+
totalTokens += indexTokenCountMap[key] ?? 0;
|
|
833
|
+
}
|
|
275
834
|
let runThinkingStartIndex = -1;
|
|
835
|
+
/** Cumulative raw tiktoken tokens we've sent to the provider (messages only,
|
|
836
|
+
* excludes instruction overhead and new outputs not yet seen by provider). */
|
|
837
|
+
let cumulativeRawSent = 0;
|
|
838
|
+
/** Cumulative provider-reported message tokens (providerInput - instructionOverhead). */
|
|
839
|
+
let cumulativeProviderReported = 0;
|
|
840
|
+
/** Stable calibration ratio = cumulativeProviderReported / cumulativeRawSent.
|
|
841
|
+
* Converges monotonically as data accumulates. Falls back to seeded value. */
|
|
842
|
+
let calibrationRatio = factoryParams.calibrationRatio != null && factoryParams.calibrationRatio > 0
|
|
843
|
+
? factoryParams.calibrationRatio
|
|
844
|
+
: 1;
|
|
845
|
+
/** Best observed instruction overhead from a near-zero variance turn.
|
|
846
|
+
* Self-seeds from provider observations within the run. */
|
|
847
|
+
let bestInstructionOverhead;
|
|
848
|
+
let bestVarianceAbs = Infinity;
|
|
849
|
+
/** Local estimate at the time bestInstructionOverhead was observed.
|
|
850
|
+
* Used to invalidate the cached overhead when instructions change
|
|
851
|
+
* mid-run (e.g. tool discovery adds tools to the bound set). */
|
|
852
|
+
let bestInstructionEstimate;
|
|
853
|
+
/** Original (pre-masking) tool result content keyed by message index.
|
|
854
|
+
* Allows the summarizer to see full tool outputs even after masking
|
|
855
|
+
* has truncated them in the live message array. Cleared when the
|
|
856
|
+
* pruner is recreated after summarization. */
|
|
857
|
+
const originalToolContent = new Map();
|
|
858
|
+
let originalToolContentSize = 0;
|
|
859
|
+
const contextPruningSettings = resolveContextPruningSettings(factoryParams.contextPruningConfig);
|
|
276
860
|
return function pruneMessages(params) {
|
|
861
|
+
if (params.messages.length === 0) {
|
|
862
|
+
return {
|
|
863
|
+
context: [],
|
|
864
|
+
indexTokenCountMap,
|
|
865
|
+
messagesToRefine: [],
|
|
866
|
+
prePruneContextTokens: 0,
|
|
867
|
+
remainingContextTokens: factoryParams.maxTokens,
|
|
868
|
+
calibrationRatio,
|
|
869
|
+
resolvedInstructionOverhead: bestInstructionOverhead,
|
|
870
|
+
};
|
|
871
|
+
}
|
|
277
872
|
if (factoryParams.provider === Providers.OPENAI &&
|
|
278
873
|
factoryParams.thinkingEnabled === true) {
|
|
279
874
|
for (let i = lastTurnStartIndex; i < params.messages.length; i++) {
|
|
@@ -310,70 +905,309 @@ function createPruneMessages(factoryParams) {
|
|
|
310
905
|
checkValidNumber(params.usageMetadata.input_token_details.cache_read)))) &&
|
|
311
906
|
checkValidNumber(params.usageMetadata.output_tokens)) {
|
|
312
907
|
currentUsage = calculateTotalTokens(params.usageMetadata);
|
|
313
|
-
totalTokens = currentUsage.total_tokens;
|
|
314
908
|
}
|
|
315
909
|
const newOutputs = new Set();
|
|
910
|
+
let outputTokensAssigned = false;
|
|
316
911
|
for (let i = lastTurnStartIndex; i < params.messages.length; i++) {
|
|
317
912
|
const message = params.messages[i];
|
|
318
|
-
if (i
|
|
319
|
-
|
|
320
|
-
|
|
913
|
+
if (indexTokenCountMap[i] !== undefined) {
|
|
914
|
+
continue;
|
|
915
|
+
}
|
|
916
|
+
// Assign output_tokens to the first uncounted AI message — this is the
|
|
917
|
+
// model's response. Previous code blindly targeted lastTurnStartIndex
|
|
918
|
+
// which could hit a pre-counted HumanMessage or miss the AI entirely.
|
|
919
|
+
if (!outputTokensAssigned && currentUsage && message.getType() === 'ai') {
|
|
321
920
|
indexTokenCountMap[i] = currentUsage.output_tokens;
|
|
921
|
+
newOutputs.add(i);
|
|
922
|
+
outputTokensAssigned = true;
|
|
322
923
|
}
|
|
323
|
-
else
|
|
924
|
+
else {
|
|
925
|
+
// Always store raw tiktoken count — the map stays in raw space.
|
|
926
|
+
// Budget decisions multiply by calibrationRatio on the fly.
|
|
324
927
|
indexTokenCountMap[i] = factoryParams.tokenCounter(message);
|
|
325
928
|
if (currentUsage) {
|
|
326
929
|
newOutputs.add(i);
|
|
327
930
|
}
|
|
328
|
-
totalTokens += indexTokenCountMap[i] ?? 0;
|
|
329
931
|
}
|
|
932
|
+
totalTokens += indexTokenCountMap[i] ?? 0;
|
|
330
933
|
}
|
|
331
|
-
//
|
|
332
|
-
//
|
|
333
|
-
//
|
|
334
|
-
//
|
|
335
|
-
if (currentUsage) {
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
934
|
+
// Cumulative calibration: accumulate raw tiktoken tokens and provider-
|
|
935
|
+
// reported tokens across turns. The ratio of the two running totals
|
|
936
|
+
// converges monotonically to the true provider multiplier — no EMA,
|
|
937
|
+
// no per-turn oscillation, no map mutation.
|
|
938
|
+
if (currentUsage && params.totalTokensFresh !== false) {
|
|
939
|
+
const instructionOverhead = factoryParams.getInstructionTokens?.() ?? 0;
|
|
940
|
+
const providerInputTokens = params.lastCallUsage?.inputTokens ?? currentUsage.input_tokens;
|
|
941
|
+
// Sum raw tiktoken counts for messages the provider saw (excludes
|
|
942
|
+
// new outputs from this turn — the provider hasn't seen them yet).
|
|
943
|
+
let rawSentThisTurn = 0;
|
|
944
|
+
const firstIsSystem = params.messages.length > 0 && params.messages[0].getType() === 'system';
|
|
945
|
+
if (firstIsSystem) {
|
|
946
|
+
rawSentThisTurn += indexTokenCountMap[0] ?? 0;
|
|
339
947
|
}
|
|
340
948
|
for (let i = lastCutOffIndex; i < params.messages.length; i++) {
|
|
341
|
-
if (i === 0 &&
|
|
949
|
+
if ((i === 0 && firstIsSystem) || newOutputs.has(i)) {
|
|
342
950
|
continue;
|
|
343
951
|
}
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
952
|
+
rawSentThisTurn += indexTokenCountMap[i] ?? 0;
|
|
953
|
+
}
|
|
954
|
+
const providerMessageTokens = Math.max(0, providerInputTokens - instructionOverhead);
|
|
955
|
+
if (rawSentThisTurn > 0 && providerMessageTokens > 0) {
|
|
956
|
+
cumulativeRawSent += rawSentThisTurn;
|
|
957
|
+
cumulativeProviderReported += providerMessageTokens;
|
|
958
|
+
const newRatio = cumulativeProviderReported / cumulativeRawSent;
|
|
959
|
+
calibrationRatio = Math.max(CALIBRATION_RATIO_MIN, Math.min(CALIBRATION_RATIO_MAX, newRatio));
|
|
960
|
+
}
|
|
961
|
+
const calibratedOurTotal = instructionOverhead + rawSentThisTurn * calibrationRatio;
|
|
962
|
+
const overallRatio = calibratedOurTotal > 0 ? providerInputTokens / calibratedOurTotal : 0;
|
|
963
|
+
const variancePct = Math.round((overallRatio - 1) * 100);
|
|
964
|
+
const absVariance = Math.abs(overallRatio - 1);
|
|
965
|
+
if (absVariance < bestVarianceAbs && rawSentThisTurn > 0) {
|
|
966
|
+
bestVarianceAbs = absVariance;
|
|
967
|
+
bestInstructionOverhead = Math.max(0, Math.round(providerInputTokens - rawSentThisTurn * calibrationRatio));
|
|
968
|
+
bestInstructionEstimate = factoryParams.getInstructionTokens?.() ?? 0;
|
|
969
|
+
}
|
|
970
|
+
factoryParams.log?.('debug', 'Calibration observed', {
|
|
971
|
+
providerInputTokens,
|
|
972
|
+
calibratedEstimate: Math.round(calibratedOurTotal),
|
|
973
|
+
variance: `${variancePct > 0 ? '+' : ''}${variancePct}%`,
|
|
974
|
+
calibrationRatio: Math.round(calibrationRatio * 100) / 100,
|
|
975
|
+
instructionOverhead,
|
|
976
|
+
cumulativeRawSent,
|
|
977
|
+
cumulativeProviderReported,
|
|
978
|
+
});
|
|
979
|
+
}
|
|
980
|
+
// Computed BEFORE pre-flight truncation so the effective budget can drive
|
|
981
|
+
// truncation thresholds — without this, thresholds based on maxTokens are
|
|
982
|
+
// too generous and leave individual messages larger than the actual budget.
|
|
983
|
+
const estimatedInstructionTokens = factoryParams.getInstructionTokens?.() ?? 0;
|
|
984
|
+
const estimateStable = bestInstructionEstimate != null &&
|
|
985
|
+
bestInstructionEstimate > 0 &&
|
|
986
|
+
Math.abs(estimatedInstructionTokens - bestInstructionEstimate) /
|
|
987
|
+
bestInstructionEstimate <
|
|
988
|
+
0.1;
|
|
989
|
+
const currentInstructionTokens = bestInstructionOverhead != null &&
|
|
990
|
+
bestInstructionOverhead <= estimatedInstructionTokens &&
|
|
991
|
+
estimateStable
|
|
992
|
+
? bestInstructionOverhead
|
|
993
|
+
: estimatedInstructionTokens;
|
|
994
|
+
const reserveRatio = factoryParams.reserveRatio ?? DEFAULT_RESERVE_RATIO;
|
|
995
|
+
const reserveTokens = reserveRatio > 0 && reserveRatio < 1
|
|
996
|
+
? Math.round(factoryParams.maxTokens * reserveRatio)
|
|
997
|
+
: 0;
|
|
998
|
+
const pruningBudget = factoryParams.maxTokens - reserveTokens;
|
|
999
|
+
const effectiveMaxTokens = Math.max(0, pruningBudget - currentInstructionTokens);
|
|
1000
|
+
let calibratedTotalTokens = Math.round(totalTokens * calibrationRatio);
|
|
1001
|
+
factoryParams.log?.('debug', 'Budget', {
|
|
1002
|
+
maxTokens: factoryParams.maxTokens,
|
|
1003
|
+
pruningBudget,
|
|
1004
|
+
effectiveMax: effectiveMaxTokens,
|
|
1005
|
+
instructionTokens: currentInstructionTokens,
|
|
1006
|
+
messageCount: params.messages.length,
|
|
1007
|
+
calibratedTotalTokens,
|
|
1008
|
+
calibrationRatio: Math.round(calibrationRatio * 100) / 100,
|
|
1009
|
+
});
|
|
1010
|
+
// When instructions alone consume the entire budget, no message can
|
|
1011
|
+
// fit regardless of truncation. Short-circuit: yield all messages for
|
|
1012
|
+
// summarization and return an empty context so the Graph can route to
|
|
1013
|
+
// the summarize node immediately instead of falling through to the
|
|
1014
|
+
// emergency path that would reach the same outcome more expensively.
|
|
1015
|
+
if (effectiveMaxTokens === 0 &&
|
|
1016
|
+
factoryParams.summarizationEnabled === true &&
|
|
1017
|
+
params.messages.length > 0) {
|
|
1018
|
+
factoryParams.log?.('warn', 'Instructions consume entire budget — yielding all messages for summarization', {
|
|
1019
|
+
instructionTokens: currentInstructionTokens,
|
|
1020
|
+
pruningBudget,
|
|
1021
|
+
messageCount: params.messages.length,
|
|
1022
|
+
});
|
|
1023
|
+
lastTurnStartIndex = params.messages.length;
|
|
1024
|
+
return {
|
|
1025
|
+
context: [],
|
|
1026
|
+
indexTokenCountMap,
|
|
1027
|
+
messagesToRefine: [...params.messages],
|
|
1028
|
+
prePruneContextTokens: calibratedTotalTokens,
|
|
1029
|
+
remainingContextTokens: 0,
|
|
1030
|
+
contextPressure: pruningBudget > 0 ? calibratedTotalTokens / pruningBudget : 0,
|
|
1031
|
+
calibrationRatio,
|
|
1032
|
+
resolvedInstructionOverhead: bestInstructionOverhead,
|
|
1033
|
+
};
|
|
1034
|
+
}
|
|
1035
|
+
// ---------------------------------------------------------------------------
|
|
1036
|
+
// Progressive context fading — inspired by Claude Code's staged compaction.
|
|
1037
|
+
// Below 80%: no modifications, tool results retain full size.
|
|
1038
|
+
// Above 80%: graduated truncation with increasing aggression per pressure band.
|
|
1039
|
+
// Recency weighting ensures older results fade first, newer results last.
|
|
1040
|
+
//
|
|
1041
|
+
// At the gentlest level, truncation preserves most content (head+tail).
|
|
1042
|
+
// At the most aggressive level, the result is effectively a one-line placeholder.
|
|
1043
|
+
//
|
|
1044
|
+
// 80%: gentle — budget factor 1.0, oldest get light truncation
|
|
1045
|
+
// 85%: moderate — budget factor 0.50, older results shrink significantly
|
|
1046
|
+
// 90%: aggressive — budget factor 0.20, most results heavily truncated
|
|
1047
|
+
// 99%: emergency — budget factor 0.05, effectively placeholders for old results
|
|
1048
|
+
// ---------------------------------------------------------------------------
|
|
1049
|
+
totalTokens = sumTokenCounts(indexTokenCountMap, params.messages.length);
|
|
1050
|
+
calibratedTotalTokens = Math.round(totalTokens * calibrationRatio);
|
|
1051
|
+
const contextPressure = pruningBudget > 0 ? calibratedTotalTokens / pruningBudget : 0;
|
|
1052
|
+
let preFlightResultCount = 0;
|
|
1053
|
+
let preFlightInputCount = 0;
|
|
1054
|
+
// -----------------------------------------------------------------------
|
|
1055
|
+
// Observation masking (80%+ pressure, both paths):
|
|
1056
|
+
// Replace consumed ToolMessage content with tight head+tail placeholders.
|
|
1057
|
+
// AI messages stay intact so the model can read its own prior reasoning
|
|
1058
|
+
// and won't repeat work. Unconsumed results (latest tool outputs the
|
|
1059
|
+
// model hasn't acted on yet) stay full.
|
|
1060
|
+
//
|
|
1061
|
+
// When summarization is enabled, snapshot messages first so the
|
|
1062
|
+
// summarizer can see the full originals when compaction fires.
|
|
1063
|
+
// -----------------------------------------------------------------------
|
|
1064
|
+
let observationsMasked = 0;
|
|
1065
|
+
if (contextPressure >= PRESSURE_THRESHOLD_MASKING) {
|
|
1066
|
+
const rawMessageBudget = calibrationRatio > 0
|
|
1067
|
+
? Math.floor(effectiveMaxTokens / calibrationRatio)
|
|
1068
|
+
: effectiveMaxTokens;
|
|
1069
|
+
// When summarization is enabled, use half the reserve ratio as extra
|
|
1070
|
+
// masking headroom — the LLM keeps more context while the summarizer
|
|
1071
|
+
// gets full content from originalToolContent regardless. The remaining
|
|
1072
|
+
// half of the reserve covers estimation errors.
|
|
1073
|
+
const reserveHeadroom = factoryParams.summarizationEnabled === true
|
|
1074
|
+
? Math.floor(rawMessageBudget *
|
|
1075
|
+
(factoryParams.reserveRatio ?? DEFAULT_RESERVE_RATIO) *
|
|
1076
|
+
0.5)
|
|
1077
|
+
: 0;
|
|
1078
|
+
observationsMasked = maskConsumedToolResults({
|
|
1079
|
+
messages: params.messages,
|
|
1080
|
+
indexTokenCountMap,
|
|
1081
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1082
|
+
availableRawBudget: rawMessageBudget + reserveHeadroom,
|
|
1083
|
+
originalContentStore: factoryParams.summarizationEnabled === true
|
|
1084
|
+
? originalToolContent
|
|
1085
|
+
: undefined,
|
|
1086
|
+
onContentStored: factoryParams.summarizationEnabled === true
|
|
1087
|
+
? (charLen) => {
|
|
1088
|
+
originalToolContentSize += charLen;
|
|
1089
|
+
while (originalToolContentSize > ORIGINAL_CONTENT_MAX_CHARS &&
|
|
1090
|
+
originalToolContent.size > 0) {
|
|
1091
|
+
const oldest = originalToolContent.keys().next();
|
|
1092
|
+
if (oldest.done === true) {
|
|
1093
|
+
break;
|
|
1094
|
+
}
|
|
1095
|
+
const removed = originalToolContent.get(oldest.value);
|
|
1096
|
+
if (removed != null) {
|
|
1097
|
+
originalToolContentSize -= removed.length;
|
|
1098
|
+
}
|
|
1099
|
+
originalToolContent.delete(oldest.value);
|
|
1100
|
+
}
|
|
361
1101
|
}
|
|
362
|
-
|
|
363
|
-
|
|
1102
|
+
: undefined,
|
|
1103
|
+
});
|
|
1104
|
+
if (observationsMasked > 0) {
|
|
1105
|
+
cumulativeRawSent = 0;
|
|
1106
|
+
cumulativeProviderReported = 0;
|
|
364
1107
|
}
|
|
365
1108
|
}
|
|
1109
|
+
if (contextPressure >= PRESSURE_THRESHOLD_MASKING &&
|
|
1110
|
+
factoryParams.summarizationEnabled !== true) {
|
|
1111
|
+
const budgetFactor = PRESSURE_BANDS.find(([threshold]) => contextPressure >= threshold)?.[1] ?? 1.0;
|
|
1112
|
+
const baseBudget = Math.max(1024, Math.floor(effectiveMaxTokens * budgetFactor));
|
|
1113
|
+
preFlightResultCount = preFlightTruncateToolResults({
|
|
1114
|
+
messages: params.messages,
|
|
1115
|
+
maxContextTokens: baseBudget,
|
|
1116
|
+
indexTokenCountMap,
|
|
1117
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1118
|
+
});
|
|
1119
|
+
preFlightInputCount = preFlightTruncateToolCallInputs({
|
|
1120
|
+
messages: params.messages,
|
|
1121
|
+
maxContextTokens: baseBudget,
|
|
1122
|
+
indexTokenCountMap,
|
|
1123
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1124
|
+
});
|
|
1125
|
+
}
|
|
1126
|
+
if (factoryParams.contextPruningConfig?.enabled === true &&
|
|
1127
|
+
factoryParams.summarizationEnabled !== true) {
|
|
1128
|
+
applyContextPruning({
|
|
1129
|
+
messages: params.messages,
|
|
1130
|
+
indexTokenCountMap,
|
|
1131
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1132
|
+
resolvedSettings: contextPruningSettings,
|
|
1133
|
+
});
|
|
1134
|
+
}
|
|
1135
|
+
// Fit-to-budget: when summarization is enabled and individual messages
|
|
1136
|
+
// exceed the effective budget, truncate them so every message can fit in
|
|
1137
|
+
// a single context slot. Without this, oversized tool results (e.g.
|
|
1138
|
+
// take_snapshot at 9K chars) cause empty context → emergency truncation
|
|
1139
|
+
// → immediate re-summarization after just one tool call.
|
|
1140
|
+
//
|
|
1141
|
+
// This is NOT the lossy position-based fading above — it only targets
|
|
1142
|
+
// messages that individually exceed the budget, using the full effective
|
|
1143
|
+
// budget as the cap (not a pressure-scaled fraction).
|
|
1144
|
+
// Fit-to-budget caps are in raw space (divide by ratio) so that after
|
|
1145
|
+
// calibration the truncated results actually fit within the budget.
|
|
1146
|
+
const rawSpaceEffectiveMax = calibrationRatio > 0
|
|
1147
|
+
? Math.round(effectiveMaxTokens / calibrationRatio)
|
|
1148
|
+
: effectiveMaxTokens;
|
|
1149
|
+
if (factoryParams.summarizationEnabled === true &&
|
|
1150
|
+
rawSpaceEffectiveMax > 0) {
|
|
1151
|
+
preFlightResultCount = preFlightTruncateToolResults({
|
|
1152
|
+
messages: params.messages,
|
|
1153
|
+
maxContextTokens: rawSpaceEffectiveMax,
|
|
1154
|
+
indexTokenCountMap,
|
|
1155
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1156
|
+
});
|
|
1157
|
+
preFlightInputCount = preFlightTruncateToolCallInputs({
|
|
1158
|
+
messages: params.messages,
|
|
1159
|
+
maxContextTokens: rawSpaceEffectiveMax,
|
|
1160
|
+
indexTokenCountMap,
|
|
1161
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1162
|
+
});
|
|
1163
|
+
}
|
|
1164
|
+
const preTruncationTotalTokens = totalTokens;
|
|
1165
|
+
totalTokens = sumTokenCounts(indexTokenCountMap, params.messages.length);
|
|
1166
|
+
calibratedTotalTokens = Math.round(totalTokens * calibrationRatio);
|
|
1167
|
+
const anyAdjustment = observationsMasked > 0 ||
|
|
1168
|
+
preFlightResultCount > 0 ||
|
|
1169
|
+
preFlightInputCount > 0 ||
|
|
1170
|
+
totalTokens !== preTruncationTotalTokens;
|
|
1171
|
+
if (anyAdjustment) {
|
|
1172
|
+
factoryParams.log?.('debug', 'Context adjusted', {
|
|
1173
|
+
contextPressure: Math.round(contextPressure * 100),
|
|
1174
|
+
observationsMasked,
|
|
1175
|
+
toolOutputsTruncated: preFlightResultCount,
|
|
1176
|
+
toolInputsTruncated: preFlightInputCount,
|
|
1177
|
+
tokensBefore: preTruncationTotalTokens,
|
|
1178
|
+
tokensAfter: totalTokens,
|
|
1179
|
+
tokensSaved: preTruncationTotalTokens - totalTokens,
|
|
1180
|
+
});
|
|
1181
|
+
}
|
|
366
1182
|
lastTurnStartIndex = params.messages.length;
|
|
367
|
-
if (lastCutOffIndex === 0 &&
|
|
368
|
-
|
|
1183
|
+
if (lastCutOffIndex === 0 &&
|
|
1184
|
+
calibratedTotalTokens + currentInstructionTokens <= pruningBudget) {
|
|
1185
|
+
return {
|
|
1186
|
+
context: params.messages,
|
|
1187
|
+
indexTokenCountMap,
|
|
1188
|
+
messagesToRefine: [],
|
|
1189
|
+
prePruneContextTokens: calibratedTotalTokens,
|
|
1190
|
+
remainingContextTokens: pruningBudget - calibratedTotalTokens - currentInstructionTokens,
|
|
1191
|
+
contextPressure,
|
|
1192
|
+
originalToolContent: originalToolContent.size > 0 ? originalToolContent : undefined,
|
|
1193
|
+
calibrationRatio,
|
|
1194
|
+
resolvedInstructionOverhead: bestInstructionOverhead,
|
|
1195
|
+
};
|
|
369
1196
|
}
|
|
370
|
-
const
|
|
371
|
-
|
|
1197
|
+
const rawSpaceBudget = calibrationRatio > 0
|
|
1198
|
+
? Math.round(pruningBudget / calibrationRatio)
|
|
1199
|
+
: pruningBudget;
|
|
1200
|
+
const rawSpaceInstructionTokens = calibrationRatio > 0
|
|
1201
|
+
? Math.round(currentInstructionTokens / calibrationRatio)
|
|
1202
|
+
: currentInstructionTokens;
|
|
1203
|
+
const { context: initialContext, thinkingStartIndex, messagesToRefine, remainingContextTokens: initialRemainingContextTokens, } = getMessagesWithinTokenLimit({
|
|
1204
|
+
maxContextTokens: rawSpaceBudget,
|
|
372
1205
|
messages: params.messages,
|
|
373
1206
|
indexTokenCountMap,
|
|
374
1207
|
startType: params.startType,
|
|
375
1208
|
thinkingEnabled: factoryParams.thinkingEnabled,
|
|
376
1209
|
tokenCounter: factoryParams.tokenCounter,
|
|
1210
|
+
instructionTokens: rawSpaceInstructionTokens,
|
|
377
1211
|
reasoningType: factoryParams.provider === Providers.BEDROCK
|
|
378
1212
|
? ContentTypes.REASONING_CONTENT
|
|
379
1213
|
: ContentTypes.THINKING,
|
|
@@ -381,13 +1215,285 @@ function createPruneMessages(factoryParams) {
|
|
|
381
1215
|
? runThinkingStartIndex
|
|
382
1216
|
: undefined,
|
|
383
1217
|
});
|
|
1218
|
+
const { context: repairedContext, reclaimedTokens: initialReclaimedTokens, droppedMessages, } = repairOrphanedToolMessages({
|
|
1219
|
+
context: initialContext,
|
|
1220
|
+
allMessages: params.messages,
|
|
1221
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1222
|
+
indexTokenCountMap,
|
|
1223
|
+
});
|
|
1224
|
+
const contextBreakdown = repairedContext.map((msg) => {
|
|
1225
|
+
const type = msg.getType();
|
|
1226
|
+
const name = type === 'tool' ? (msg.name ?? 'unknown') : '';
|
|
1227
|
+
return name !== '' ? `${type}(${name})` : type;
|
|
1228
|
+
});
|
|
1229
|
+
factoryParams.log?.('debug', 'Pruning complete', {
|
|
1230
|
+
contextLength: repairedContext.length,
|
|
1231
|
+
contextTypes: contextBreakdown.join(', '),
|
|
1232
|
+
messagesToRefineCount: messagesToRefine.length,
|
|
1233
|
+
droppedOrphans: droppedMessages.length,
|
|
1234
|
+
remainingTokens: initialRemainingContextTokens,
|
|
1235
|
+
});
|
|
1236
|
+
let context = repairedContext;
|
|
1237
|
+
let reclaimedTokens = initialReclaimedTokens;
|
|
1238
|
+
// Orphan repair may drop ToolMessages whose parent AI was pruned.
|
|
1239
|
+
// Append them to messagesToRefine so summarization can still see the
|
|
1240
|
+
// tool results (otherwise the summary says "in progress" for a tool
|
|
1241
|
+
// call that already completed, causing the model to repeat it).
|
|
1242
|
+
if (droppedMessages.length > 0) {
|
|
1243
|
+
messagesToRefine.push(...droppedMessages);
|
|
1244
|
+
}
|
|
1245
|
+
// ---------------------------------------------------------------
|
|
1246
|
+
// Fallback fading: when summarization skipped fading earlier and
|
|
1247
|
+
// pruning still produced an empty context, apply lossy pressure-band
|
|
1248
|
+
// fading and retry. This is a last resort before emergency truncation
|
|
1249
|
+
// — the summarizer already saw the full messages, so fading the
|
|
1250
|
+
// surviving context for the LLM is acceptable.
|
|
1251
|
+
// ---------------------------------------------------------------
|
|
1252
|
+
if (context.length === 0 &&
|
|
1253
|
+
params.messages.length > 0 &&
|
|
1254
|
+
effectiveMaxTokens > 0 &&
|
|
1255
|
+
factoryParams.summarizationEnabled === true) {
|
|
1256
|
+
const fadingBudget = Math.max(1024, effectiveMaxTokens);
|
|
1257
|
+
factoryParams.log?.('debug', 'Fallback fading — empty context with summarization', {
|
|
1258
|
+
messageCount: params.messages.length,
|
|
1259
|
+
effectiveMaxTokens,
|
|
1260
|
+
fadingBudget,
|
|
1261
|
+
});
|
|
1262
|
+
const fadedMessages = [...params.messages];
|
|
1263
|
+
const preFadingTokenCounts = {};
|
|
1264
|
+
for (let i = 0; i < params.messages.length; i++) {
|
|
1265
|
+
preFadingTokenCounts[i] = indexTokenCountMap[i];
|
|
1266
|
+
}
|
|
1267
|
+
preFlightTruncateToolResults({
|
|
1268
|
+
messages: fadedMessages,
|
|
1269
|
+
maxContextTokens: fadingBudget,
|
|
1270
|
+
indexTokenCountMap,
|
|
1271
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1272
|
+
});
|
|
1273
|
+
preFlightTruncateToolCallInputs({
|
|
1274
|
+
messages: fadedMessages,
|
|
1275
|
+
maxContextTokens: fadingBudget,
|
|
1276
|
+
indexTokenCountMap,
|
|
1277
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1278
|
+
});
|
|
1279
|
+
const fadingRetry = getMessagesWithinTokenLimit({
|
|
1280
|
+
maxContextTokens: pruningBudget,
|
|
1281
|
+
messages: fadedMessages,
|
|
1282
|
+
indexTokenCountMap,
|
|
1283
|
+
startType: params.startType,
|
|
1284
|
+
thinkingEnabled: factoryParams.thinkingEnabled,
|
|
1285
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1286
|
+
instructionTokens: currentInstructionTokens,
|
|
1287
|
+
reasoningType: factoryParams.provider === Providers.BEDROCK
|
|
1288
|
+
? ContentTypes.REASONING_CONTENT
|
|
1289
|
+
: ContentTypes.THINKING,
|
|
1290
|
+
thinkingStartIndex: factoryParams.thinkingEnabled === true
|
|
1291
|
+
? runThinkingStartIndex
|
|
1292
|
+
: undefined,
|
|
1293
|
+
});
|
|
1294
|
+
const fadingRepaired = repairOrphanedToolMessages({
|
|
1295
|
+
context: fadingRetry.context,
|
|
1296
|
+
allMessages: fadedMessages,
|
|
1297
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1298
|
+
indexTokenCountMap,
|
|
1299
|
+
});
|
|
1300
|
+
if (fadingRepaired.context.length > 0) {
|
|
1301
|
+
context = fadingRepaired.context;
|
|
1302
|
+
reclaimedTokens = fadingRepaired.reclaimedTokens;
|
|
1303
|
+
messagesToRefine.push(...fadingRetry.messagesToRefine);
|
|
1304
|
+
if (fadingRepaired.droppedMessages.length > 0) {
|
|
1305
|
+
messagesToRefine.push(...fadingRepaired.droppedMessages);
|
|
1306
|
+
}
|
|
1307
|
+
factoryParams.log?.('debug', 'Fallback fading recovered context', {
|
|
1308
|
+
contextLength: context.length,
|
|
1309
|
+
messagesToRefineCount: messagesToRefine.length,
|
|
1310
|
+
remainingTokens: fadingRetry.remainingContextTokens,
|
|
1311
|
+
});
|
|
1312
|
+
for (const [key, value] of Object.entries(preFadingTokenCounts)) {
|
|
1313
|
+
indexTokenCountMap[key] = value;
|
|
1314
|
+
}
|
|
1315
|
+
}
|
|
1316
|
+
else {
|
|
1317
|
+
for (const [key, value] of Object.entries(preFadingTokenCounts)) {
|
|
1318
|
+
indexTokenCountMap[key] = value;
|
|
1319
|
+
}
|
|
1320
|
+
}
|
|
1321
|
+
}
|
|
1322
|
+
// ---------------------------------------------------------------
|
|
1323
|
+
// Emergency truncation: if pruning produced an empty context but
|
|
1324
|
+
// messages exist, aggressively truncate all tool_call inputs and
|
|
1325
|
+
// tool results, then retry. Budget is proportional to the
|
|
1326
|
+
// effective token limit (~4 chars/token, spread across messages)
|
|
1327
|
+
// with a floor of 200 chars so content is never completely blank.
|
|
1328
|
+
// Uses head+tail so the model sees both what was called and the
|
|
1329
|
+
// final outcome (e.g., return value at the end of a script eval).
|
|
1330
|
+
// ---------------------------------------------------------------
|
|
1331
|
+
if (context.length === 0 &&
|
|
1332
|
+
params.messages.length > 0 &&
|
|
1333
|
+
effectiveMaxTokens > 0) {
|
|
1334
|
+
const perMessageTokenBudget = Math.floor(effectiveMaxTokens / Math.max(1, params.messages.length));
|
|
1335
|
+
const emergencyMaxChars = Math.max(200, perMessageTokenBudget * 4);
|
|
1336
|
+
factoryParams.log?.('warn', 'Empty context, entering emergency truncation', {
|
|
1337
|
+
messageCount: params.messages.length,
|
|
1338
|
+
effectiveMax: effectiveMaxTokens,
|
|
1339
|
+
emergencyMaxChars,
|
|
1340
|
+
});
|
|
1341
|
+
// Clone the messages array so emergency truncation doesn't permanently
|
|
1342
|
+
// mutate graph state. The originals remain intact for future turns
|
|
1343
|
+
// where more budget may be available. Also snapshot indexTokenCountMap
|
|
1344
|
+
// entries so the closure doesn't retain stale (too-small) counts for
|
|
1345
|
+
// the original un-truncated messages on the next turn.
|
|
1346
|
+
const emergencyMessages = [...params.messages];
|
|
1347
|
+
const preEmergencyTokenCounts = {};
|
|
1348
|
+
for (let i = 0; i < params.messages.length; i++) {
|
|
1349
|
+
preEmergencyTokenCounts[i] = indexTokenCountMap[i];
|
|
1350
|
+
}
|
|
1351
|
+
try {
|
|
1352
|
+
let emergencyTruncatedCount = 0;
|
|
1353
|
+
for (let i = 0; i < emergencyMessages.length; i++) {
|
|
1354
|
+
const message = emergencyMessages[i];
|
|
1355
|
+
if (message.getType() === 'tool') {
|
|
1356
|
+
const content = message.content;
|
|
1357
|
+
if (typeof content === 'string' &&
|
|
1358
|
+
content.length > emergencyMaxChars) {
|
|
1359
|
+
const cloned = new ToolMessage({
|
|
1360
|
+
content: truncateToolResultContent(content, emergencyMaxChars),
|
|
1361
|
+
tool_call_id: message.tool_call_id,
|
|
1362
|
+
name: message.name,
|
|
1363
|
+
id: message.id,
|
|
1364
|
+
additional_kwargs: message.additional_kwargs,
|
|
1365
|
+
response_metadata: message.response_metadata,
|
|
1366
|
+
});
|
|
1367
|
+
emergencyMessages[i] = cloned;
|
|
1368
|
+
indexTokenCountMap[i] = factoryParams.tokenCounter(cloned);
|
|
1369
|
+
emergencyTruncatedCount++;
|
|
1370
|
+
}
|
|
1371
|
+
}
|
|
1372
|
+
if (message.getType() === 'ai' && Array.isArray(message.content)) {
|
|
1373
|
+
const aiMsg = message;
|
|
1374
|
+
const contentBlocks = aiMsg.content;
|
|
1375
|
+
const needsTruncation = contentBlocks.some((block) => {
|
|
1376
|
+
if (typeof block !== 'object')
|
|
1377
|
+
return false;
|
|
1378
|
+
const record = block;
|
|
1379
|
+
if ((record.type === 'tool_use' || record.type === 'tool_call') &&
|
|
1380
|
+
record.input != null) {
|
|
1381
|
+
const serialized = typeof record.input === 'string'
|
|
1382
|
+
? record.input
|
|
1383
|
+
: JSON.stringify(record.input);
|
|
1384
|
+
return serialized.length > emergencyMaxChars;
|
|
1385
|
+
}
|
|
1386
|
+
return false;
|
|
1387
|
+
});
|
|
1388
|
+
if (needsTruncation) {
|
|
1389
|
+
const newContent = contentBlocks.map((block) => {
|
|
1390
|
+
if (typeof block !== 'object')
|
|
1391
|
+
return block;
|
|
1392
|
+
const record = block;
|
|
1393
|
+
if ((record.type === 'tool_use' || record.type === 'tool_call') &&
|
|
1394
|
+
record.input != null) {
|
|
1395
|
+
const serialized = typeof record.input === 'string'
|
|
1396
|
+
? record.input
|
|
1397
|
+
: JSON.stringify(record.input);
|
|
1398
|
+
if (serialized.length > emergencyMaxChars) {
|
|
1399
|
+
// Replaces original input with { _truncated, _originalChars } —
|
|
1400
|
+
// safe because the tool call already executed in a prior turn.
|
|
1401
|
+
return {
|
|
1402
|
+
...record,
|
|
1403
|
+
input: truncateToolInput(serialized, emergencyMaxChars),
|
|
1404
|
+
};
|
|
1405
|
+
}
|
|
1406
|
+
}
|
|
1407
|
+
return block;
|
|
1408
|
+
});
|
|
1409
|
+
const newToolCalls = (aiMsg.tool_calls ?? []).map((tc) => {
|
|
1410
|
+
const serializedArgs = JSON.stringify(tc.args);
|
|
1411
|
+
if (serializedArgs.length > emergencyMaxChars) {
|
|
1412
|
+
// Replaces original args with { _truncated, _originalChars } —
|
|
1413
|
+
// safe because the tool call already executed in a prior turn.
|
|
1414
|
+
return {
|
|
1415
|
+
...tc,
|
|
1416
|
+
args: truncateToolInput(serializedArgs, emergencyMaxChars),
|
|
1417
|
+
};
|
|
1418
|
+
}
|
|
1419
|
+
return tc;
|
|
1420
|
+
});
|
|
1421
|
+
emergencyMessages[i] = new AIMessage({
|
|
1422
|
+
...aiMsg,
|
|
1423
|
+
content: newContent,
|
|
1424
|
+
tool_calls: newToolCalls.length > 0 ? newToolCalls : undefined,
|
|
1425
|
+
});
|
|
1426
|
+
indexTokenCountMap[i] = factoryParams.tokenCounter(emergencyMessages[i]);
|
|
1427
|
+
emergencyTruncatedCount++;
|
|
1428
|
+
}
|
|
1429
|
+
}
|
|
1430
|
+
}
|
|
1431
|
+
factoryParams.log?.('info', 'Emergency truncation complete');
|
|
1432
|
+
factoryParams.log?.('debug', 'Emergency truncation details', {
|
|
1433
|
+
truncatedCount: emergencyTruncatedCount,
|
|
1434
|
+
emergencyMaxChars,
|
|
1435
|
+
});
|
|
1436
|
+
const retryResult = getMessagesWithinTokenLimit({
|
|
1437
|
+
maxContextTokens: pruningBudget,
|
|
1438
|
+
messages: emergencyMessages,
|
|
1439
|
+
indexTokenCountMap,
|
|
1440
|
+
startType: params.startType,
|
|
1441
|
+
thinkingEnabled: factoryParams.thinkingEnabled,
|
|
1442
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1443
|
+
instructionTokens: currentInstructionTokens,
|
|
1444
|
+
reasoningType: factoryParams.provider === Providers.BEDROCK
|
|
1445
|
+
? ContentTypes.REASONING_CONTENT
|
|
1446
|
+
: ContentTypes.THINKING,
|
|
1447
|
+
thinkingStartIndex: factoryParams.thinkingEnabled === true
|
|
1448
|
+
? runThinkingStartIndex
|
|
1449
|
+
: undefined,
|
|
1450
|
+
});
|
|
1451
|
+
const repaired = repairOrphanedToolMessages({
|
|
1452
|
+
context: retryResult.context,
|
|
1453
|
+
allMessages: emergencyMessages,
|
|
1454
|
+
tokenCounter: factoryParams.tokenCounter,
|
|
1455
|
+
indexTokenCountMap,
|
|
1456
|
+
});
|
|
1457
|
+
context = repaired.context;
|
|
1458
|
+
reclaimedTokens = repaired.reclaimedTokens;
|
|
1459
|
+
messagesToRefine.push(...retryResult.messagesToRefine);
|
|
1460
|
+
if (repaired.droppedMessages.length > 0) {
|
|
1461
|
+
messagesToRefine.push(...repaired.droppedMessages);
|
|
1462
|
+
}
|
|
1463
|
+
factoryParams.log?.('debug', 'Emergency truncation retry result', {
|
|
1464
|
+
contextLength: context.length,
|
|
1465
|
+
messagesToRefineCount: messagesToRefine.length,
|
|
1466
|
+
remainingTokens: retryResult.remainingContextTokens,
|
|
1467
|
+
});
|
|
1468
|
+
}
|
|
1469
|
+
finally {
|
|
1470
|
+
// Restore the closure's indexTokenCountMap to pre-emergency values so the
|
|
1471
|
+
// next turn counts old messages at their original (un-truncated) size.
|
|
1472
|
+
// The emergency-truncated counts were only needed for this turn's
|
|
1473
|
+
// getMessagesWithinTokenLimit retry.
|
|
1474
|
+
for (const [key, value] of Object.entries(preEmergencyTokenCounts)) {
|
|
1475
|
+
indexTokenCountMap[key] = value;
|
|
1476
|
+
}
|
|
1477
|
+
}
|
|
1478
|
+
}
|
|
1479
|
+
const remainingContextTokens = Math.max(0, Math.min(pruningBudget, initialRemainingContextTokens + reclaimedTokens));
|
|
384
1480
|
runThinkingStartIndex = thinkingStartIndex ?? -1;
|
|
385
1481
|
/** The index is the first value of `context`, index relative to `params.messages` */
|
|
386
1482
|
lastCutOffIndex = Math.max(params.messages.length -
|
|
387
1483
|
(context.length - (context[0]?.getType() === 'system' ? 1 : 0)), 0);
|
|
388
|
-
return {
|
|
1484
|
+
return {
|
|
1485
|
+
context,
|
|
1486
|
+
indexTokenCountMap,
|
|
1487
|
+
messagesToRefine,
|
|
1488
|
+
prePruneContextTokens: calibratedTotalTokens,
|
|
1489
|
+
remainingContextTokens,
|
|
1490
|
+
contextPressure,
|
|
1491
|
+
originalToolContent: originalToolContent.size > 0 ? originalToolContent : undefined,
|
|
1492
|
+
calibrationRatio,
|
|
1493
|
+
resolvedInstructionOverhead: bestInstructionOverhead,
|
|
1494
|
+
};
|
|
389
1495
|
};
|
|
390
1496
|
}
|
|
391
1497
|
|
|
392
|
-
export { calculateTotalTokens, checkValidNumber, createPruneMessages, getMessagesWithinTokenLimit };
|
|
1498
|
+
export { DEFAULT_RESERVE_RATIO, calculateTotalTokens, checkValidNumber, createPruneMessages, getMessagesWithinTokenLimit, maskConsumedToolResults, preFlightTruncateToolCallInputs, preFlightTruncateToolResults, repairOrphanedToolMessages, sanitizeOrphanToolBlocks };
|
|
393
1499
|
//# sourceMappingURL=prune.mjs.map
|