langchain 1.0.4 → 1.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/agents/ReactAgent.cjs +15 -17
- package/dist/agents/ReactAgent.cjs.map +1 -1
- package/dist/agents/ReactAgent.d.cts +3 -3
- package/dist/agents/ReactAgent.d.cts.map +1 -1
- package/dist/agents/ReactAgent.d.ts +3 -3
- package/dist/agents/ReactAgent.d.ts.map +1 -1
- package/dist/agents/ReactAgent.js +15 -17
- package/dist/agents/ReactAgent.js.map +1 -1
- package/dist/agents/middleware/contextEditing.cjs +302 -33
- package/dist/agents/middleware/contextEditing.cjs.map +1 -1
- package/dist/agents/middleware/contextEditing.d.cts +125 -41
- package/dist/agents/middleware/contextEditing.d.cts.map +1 -1
- package/dist/agents/middleware/contextEditing.d.ts +125 -41
- package/dist/agents/middleware/contextEditing.d.ts.map +1 -1
- package/dist/agents/middleware/contextEditing.js +302 -33
- package/dist/agents/middleware/contextEditing.js.map +1 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.cts.map +1 -1
- package/dist/agents/middleware/dynamicSystemPrompt.d.ts.map +1 -1
- package/dist/agents/middleware/hitl.d.cts.map +1 -1
- package/dist/agents/middleware/hitl.d.ts.map +1 -1
- package/dist/agents/middleware/index.cjs +2 -0
- package/dist/agents/middleware/index.js +2 -0
- package/dist/agents/middleware/llmToolSelector.d.cts +4 -4
- package/dist/agents/middleware/llmToolSelector.d.cts.map +1 -1
- package/dist/agents/middleware/modelCallLimit.cjs +6 -2
- package/dist/agents/middleware/modelCallLimit.cjs.map +1 -1
- package/dist/agents/middleware/modelCallLimit.d.cts +8 -8
- package/dist/agents/middleware/modelCallLimit.d.cts.map +1 -1
- package/dist/agents/middleware/modelCallLimit.d.ts +8 -8
- package/dist/agents/middleware/modelCallLimit.d.ts.map +1 -1
- package/dist/agents/middleware/modelCallLimit.js +6 -2
- package/dist/agents/middleware/modelCallLimit.js.map +1 -1
- package/dist/agents/middleware/modelFallback.cjs +2 -2
- package/dist/agents/middleware/modelFallback.cjs.map +1 -1
- package/dist/agents/middleware/modelFallback.d.cts +2 -2
- package/dist/agents/middleware/modelFallback.d.cts.map +1 -1
- package/dist/agents/middleware/modelFallback.d.ts +2 -2
- package/dist/agents/middleware/modelFallback.d.ts.map +1 -1
- package/dist/agents/middleware/modelFallback.js +2 -2
- package/dist/agents/middleware/modelFallback.js.map +1 -1
- package/dist/agents/middleware/pii.cjs +445 -0
- package/dist/agents/middleware/pii.cjs.map +1 -0
- package/dist/agents/middleware/pii.d.cts +216 -0
- package/dist/agents/middleware/pii.d.cts.map +1 -0
- package/dist/agents/middleware/pii.d.ts +216 -0
- package/dist/agents/middleware/pii.d.ts.map +1 -0
- package/dist/agents/middleware/pii.js +436 -0
- package/dist/agents/middleware/pii.js.map +1 -0
- package/dist/agents/middleware/piiRedaction.cjs +2 -1
- package/dist/agents/middleware/piiRedaction.cjs.map +1 -1
- package/dist/agents/middleware/piiRedaction.d.cts +4 -1
- package/dist/agents/middleware/piiRedaction.d.cts.map +1 -1
- package/dist/agents/middleware/piiRedaction.d.ts +4 -1
- package/dist/agents/middleware/piiRedaction.d.ts.map +1 -1
- package/dist/agents/middleware/piiRedaction.js +2 -1
- package/dist/agents/middleware/piiRedaction.js.map +1 -1
- package/dist/agents/middleware/promptCaching.d.cts.map +1 -1
- package/dist/agents/middleware/promptCaching.d.ts.map +1 -1
- package/dist/agents/middleware/summarization.cjs +15 -24
- package/dist/agents/middleware/summarization.cjs.map +1 -1
- package/dist/agents/middleware/summarization.d.cts +72 -9
- package/dist/agents/middleware/summarization.d.cts.map +1 -1
- package/dist/agents/middleware/summarization.d.ts +65 -2
- package/dist/agents/middleware/summarization.d.ts.map +1 -1
- package/dist/agents/middleware/summarization.js +13 -25
- package/dist/agents/middleware/summarization.js.map +1 -1
- package/dist/agents/middleware/todoListMiddleware.d.cts.map +1 -1
- package/dist/agents/middleware/todoListMiddleware.d.ts.map +1 -1
- package/dist/agents/middleware/toolCallLimit.d.cts.map +1 -1
- package/dist/agents/middleware/toolCallLimit.d.ts.map +1 -1
- package/dist/agents/middleware/toolEmulator.cjs +118 -0
- package/dist/agents/middleware/toolEmulator.cjs.map +1 -0
- package/dist/agents/middleware/toolEmulator.d.cts +76 -0
- package/dist/agents/middleware/toolEmulator.d.cts.map +1 -0
- package/dist/agents/middleware/toolEmulator.d.ts +76 -0
- package/dist/agents/middleware/toolEmulator.d.ts.map +1 -0
- package/dist/agents/middleware/toolEmulator.js +117 -0
- package/dist/agents/middleware/toolEmulator.js.map +1 -0
- package/dist/agents/middleware/types.d.cts.map +1 -1
- package/dist/agents/middleware/types.d.ts.map +1 -1
- package/dist/agents/middleware/utils.cjs +4 -0
- package/dist/agents/middleware/utils.cjs.map +1 -1
- package/dist/agents/middleware/utils.d.cts.map +1 -1
- package/dist/agents/middleware/utils.d.ts.map +1 -1
- package/dist/agents/middleware/utils.js +4 -0
- package/dist/agents/middleware/utils.js.map +1 -1
- package/dist/agents/nodes/AgentNode.cjs +1 -1
- package/dist/agents/nodes/AgentNode.cjs.map +1 -1
- package/dist/agents/nodes/AgentNode.js +1 -1
- package/dist/agents/nodes/AgentNode.js.map +1 -1
- package/dist/agents/runtime.d.cts +5 -5
- package/dist/agents/runtime.d.cts.map +1 -1
- package/dist/agents/runtime.d.ts +5 -5
- package/dist/agents/runtime.d.ts.map +1 -1
- package/dist/index.cjs +22 -0
- package/dist/index.d.cts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +13 -1
- package/package.json +9 -8
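
The largest change in this release is the rework of `contextEditingMiddleware` / `ClearToolUsesEdit` shown in the hunks below: the numeric `triggerTokens`, `keepMessages`, and `clearAtLeast` options are deprecated in favor of structured `trigger` and `keep` objects. A minimal migration sketch, based only on the deprecation warnings and defaults visible in this diff (the numbers are illustrative):

```ts
import { ClearToolUsesEdit, contextEditingMiddleware } from "langchain";

// 1.0.4-style numeric options still work, but now log console.warn deprecation notices.
const middleware = contextEditingMiddleware({
  edits: [
    new ClearToolUsesEdit({
      trigger: { tokens: 100_000 }, // was: triggerTokens: 100000
      keep: { messages: 3 },        // was: keepMessages: 3 (clearAtLeast is deprecated outright)
    }),
  ],
});
```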
package/dist/agents/middleware/contextEditing.cjs

@@ -1,10 +1,13 @@
 const require_rolldown_runtime = require('../../_virtual/rolldown_runtime.cjs');
 const require_utils = require('./utils.cjs');
 const require_middleware = require('../middleware.cjs');
+const require_summarization = require('./summarization.cjs');
 const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));

 //#region src/agents/middleware/contextEditing.ts
 const DEFAULT_TOOL_PLACEHOLDER = "[cleared]";
+const DEFAULT_TRIGGER_TOKENS = 1e5;
+const DEFAULT_KEEP = 3;
 /**
  * Strategy for clearing tool outputs when token limits are exceeded.
  *
@@ -18,33 +21,99 @@ const DEFAULT_TOOL_PLACEHOLDER = "[cleared]";
  * import { ClearToolUsesEdit } from "langchain";
  *
  * const edit = new ClearToolUsesEdit({
- *   triggerTokens: 100000, // Start clearing at 100K tokens
- *   clearAtLeast: 0, // Clear as much as needed
- *   keep: 3, // Always keep 3 most recent results
- *   excludeTools: ["important"], // Never clear "important" tool
- *   clearToolInputs: false, // Keep tool call arguments
- *   placeholder: "[cleared]", // Replacement text
+ *   trigger: { tokens: 100000 }, // Start clearing at 100K tokens
+ *   keep: { messages: 3 }, // Keep 3 most recent tool results
+ *   excludeTools: ["important"], // Never clear "important" tool
+ *   clearToolInputs: false, // Keep tool call arguments
+ *   placeholder: "[cleared]", // Replacement text
+ * });
+ *
+ * // Multiple trigger conditions
+ * const edit2 = new ClearToolUsesEdit({
+ *   trigger: [
+ *     { tokens: 100000, messages: 50 },
+ *     { tokens: 50000, messages: 100 }
+ *   ],
+ *   keep: { messages: 3 },
+ * });
+ *
+ * // Fractional trigger with model profile
+ * const edit3 = new ClearToolUsesEdit({
+ *   trigger: { fraction: 0.8 }, // Trigger at 80% of model's max tokens
+ *   keep: { fraction: 0.3 }, // Keep 30% of model's max tokens
  * });
  * ```
  */
 var ClearToolUsesEdit = class {
-  triggerTokens;
-  clearAtLeast;
+  #triggerConditions;
+  trigger;
   keep;
   clearToolInputs;
   excludeTools;
   placeholder;
+  model;
+  clearAtLeast;
   constructor(config = {}) {
-    this.triggerTokens = config.triggerTokens ?? 1e5;
+    let trigger = config.trigger;
+    if (config.triggerTokens !== void 0) {
+      console.warn("triggerTokens is deprecated. Use `trigger: { tokens: value }` instead.");
+      if (trigger === void 0) trigger = { tokens: config.triggerTokens };
+    }
+    let keep = config.keep;
+    if (config.keepMessages !== void 0) {
+      console.warn("keepMessages is deprecated. Use `keep: { messages: value }` instead.");
+      if (keep === void 0) keep = { messages: config.keepMessages };
+    }
+    if (trigger === void 0) trigger = { tokens: DEFAULT_TRIGGER_TOKENS };
+    if (keep === void 0) keep = { messages: DEFAULT_KEEP };
+    if (Array.isArray(trigger)) {
+      this.#triggerConditions = trigger.map((t) => require_summarization.contextSizeSchema.parse(t));
+      this.trigger = this.#triggerConditions;
+    } else {
+      const validated = require_summarization.contextSizeSchema.parse(trigger);
+      this.#triggerConditions = [validated];
+      this.trigger = validated;
+    }
+    const validatedKeep = require_summarization.keepSchema.parse(keep);
+    this.keep = validatedKeep;
+    if (config.clearAtLeast !== void 0) console.warn("clearAtLeast is deprecated and will be removed in a future version. It conflicts with the `keep` property. Use `keep: { tokens: value }` or `keep: { messages: value }` instead to control retention.");
     this.clearAtLeast = config.clearAtLeast ?? 0;
-    this.keep = config.keep ?? 3;
     this.clearToolInputs = config.clearToolInputs ?? false;
     this.excludeTools = new Set(config.excludeTools ?? []);
     this.placeholder = config.placeholder ?? DEFAULT_TOOL_PLACEHOLDER;
   }
   async apply(params) {
-    const { tokens, messages, countTokens } = params;
-    if (tokens <= this.triggerTokens) return tokens;
+    const { messages, model, countTokens } = params;
+    const tokens = await countTokens(messages);
+    /**
+    * Always remove orphaned tool messages (those without corresponding AI messages)
+    * regardless of whether editing is triggered
+    */
+    const orphanedIndices = [];
+    for (let i = 0; i < messages.length; i++) {
+      const msg = messages[i];
+      if (__langchain_core_messages.ToolMessage.isInstance(msg)) {
+        const aiMessage = this.#findAIMessageForToolCall(messages.slice(0, i), msg.tool_call_id);
+        if (!aiMessage) orphanedIndices.push(i);
+        else {
+          const toolCall = aiMessage.tool_calls?.find((call) => call.id === msg.tool_call_id);
+          if (!toolCall) orphanedIndices.push(i);
+        }
+      }
+    }
+    /**
+    * Remove orphaned tool messages in reverse order to maintain indices
+    */
+    for (let i = orphanedIndices.length - 1; i >= 0; i--) messages.splice(orphanedIndices[i], 1);
+    /**
+    * Recalculate tokens after removing orphaned messages
+    */
+    let currentTokens = tokens;
+    if (orphanedIndices.length > 0) currentTokens = await countTokens(messages);
+    /**
+    * Check if editing should be triggered
+    */
+    if (!this.#shouldEdit(messages, currentTokens, model)) return;
     /**
     * Find all tool message candidates with their actual indices in the messages array
     */
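The `apply` contract for edits changes in the hunk above: instead of receiving a precomputed `tokens` count and returning a new count, an edit now receives `{ messages, model, countTokens }`, counts tokens itself, and mutates `messages` in place. A sketch of a custom `ContextEdit` under the new contract; the class name and thresholds are hypothetical, and it assumes the `ContextEdit` and `TokenCounter` types remain exported from `langchain` as the JSDoc in this file suggests:

```ts
import type { BaseMessage } from "@langchain/core/messages";
import type { ContextEdit, TokenCounter } from "langchain";

// Hypothetical strategy: once the conversation passes ~50k tokens,
// keep only the 20 most recent messages, mutating the array in place
// and returning nothing, per the 1.0.5 apply({ messages, countTokens, model? }) shape.
class KeepRecentMessagesEdit implements ContextEdit {
  async apply({ messages, countTokens }: {
    messages: BaseMessage[];
    countTokens: TokenCounter;
  }): Promise<void> {
    const tokens = await countTokens(messages);
    if (tokens <= 50_000) return;
    messages.splice(0, Math.max(0, messages.length - 20));
  }
}
```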
@@ -56,16 +125,22 @@
         msg
       });
     }
+    if (candidates.length === 0) return;
+    /**
+    * Determine how many tool results to keep based on keep policy
+    */
+    const keepCount = await this.#determineKeepCount(candidates, countTokens, model);
     /**
-    * Keep the most recent tool messages
+    * Keep the most recent tool messages based on keep policy
+    */
+    const candidatesToClear = keepCount >= candidates.length ? [] : keepCount > 0 ? candidates.slice(0, -keepCount) : candidates;
+    /**
+    * If clearAtLeast is set, we may need to clear more messages to meet the token requirement
+    * This is a deprecated feature that conflicts with keep, but we support it for backwards compatibility
     */
-    const candidatesToClear = this.keep >= candidates.length ? [] : this.keep > 0 ? candidates.slice(0, -this.keep) : candidates;
     let clearedTokens = 0;
-    for (const { idx, msg: toolMessage } of candidatesToClear) {
-      /**
-      * Stop if we've cleared enough tokens
-      */
-      if (this.clearAtLeast > 0 && clearedTokens >= this.clearAtLeast) break;
+    const initialCandidatesToClear = [...candidatesToClear];
+    for (const { idx, msg: toolMessage } of initialCandidatesToClear) {
       /**
       * Skip if already cleared
       */
@@ -113,9 +188,168 @@
       * Recalculate tokens
       */
       const newTokenCount = await countTokens(messages);
-      clearedTokens = Math.max(0, tokens - newTokenCount);
+      clearedTokens = Math.max(0, currentTokens - newTokenCount);
+    }
+    /**
+    * If clearAtLeast is set and we haven't cleared enough tokens,
+    * continue clearing more messages (going backwards from keepCount)
+    * This is deprecated behavior but maintained for backwards compatibility
+    */
+    if (this.clearAtLeast > 0 && clearedTokens < this.clearAtLeast) {
+      /**
+      * Find remaining candidates that weren't cleared yet (those that were kept)
+      */
+      const remainingCandidates = keepCount > 0 && keepCount < candidates.length ? candidates.slice(-keepCount) : [];
+      /**
+      * Clear additional messages until we've cleared at least clearAtLeast tokens
+      * Go backwards through the kept messages
+      */
+      for (let i = remainingCandidates.length - 1; i >= 0; i--) {
+        if (clearedTokens >= this.clearAtLeast) break;
+        const { idx, msg: toolMessage } = remainingCandidates[i];
+        /**
+        * Skip if already cleared
+        */
+        const contextEditing = toolMessage.response_metadata?.context_editing;
+        if (contextEditing?.cleared) continue;
+        /**
+        * Find the corresponding AI message
+        */
+        const aiMessage = this.#findAIMessageForToolCall(messages.slice(0, idx), toolMessage.tool_call_id);
+        if (!aiMessage) continue;
+        /**
+        * Find the corresponding tool call
+        */
+        const toolCall = aiMessage.tool_calls?.find((call) => call.id === toolMessage.tool_call_id);
+        if (!toolCall) continue;
+        /**
+        * Skip if tool is excluded
+        */
+        const toolName = toolMessage.name || toolCall.name;
+        if (this.excludeTools.has(toolName)) continue;
+        /**
+        * Clear the tool message
+        */
+        messages[idx] = new __langchain_core_messages.ToolMessage({
+          tool_call_id: toolMessage.tool_call_id,
+          content: this.placeholder,
+          name: toolMessage.name,
+          artifact: void 0,
+          response_metadata: {
+            ...toolMessage.response_metadata,
+            context_editing: {
+              cleared: true,
+              strategy: "clear_tool_uses"
+            }
+          }
+        });
+        /**
+        * Optionally clear the tool inputs
+        */
+        if (this.clearToolInputs) {
+          const aiMsgIdx = messages.indexOf(aiMessage);
+          if (aiMsgIdx >= 0) messages[aiMsgIdx] = this.#buildClearedToolInputMessage(aiMessage, toolMessage.tool_call_id);
+        }
+        /**
+        * Recalculate tokens
+        */
+        const newTokenCount = await countTokens(messages);
+        clearedTokens = Math.max(0, currentTokens - newTokenCount);
+      }
+    }
+  }
+  /**
+  * Determine whether editing should run for the current token usage
+  */
+  #shouldEdit(messages, totalTokens, model) {
+    /**
+    * Check each condition (OR logic between conditions)
+    */
+    for (const trigger of this.#triggerConditions) {
+      /**
+      * Within a single condition, all specified properties must be satisfied (AND logic)
+      */
+      let conditionMet = true;
+      let hasAnyProperty = false;
+      if (trigger.messages !== void 0) {
+        hasAnyProperty = true;
+        if (messages.length < trigger.messages) conditionMet = false;
+      }
+      if (trigger.tokens !== void 0) {
+        hasAnyProperty = true;
+        if (totalTokens < trigger.tokens) conditionMet = false;
+      }
+      if (trigger.fraction !== void 0) {
+        hasAnyProperty = true;
+        if (!model) continue;
+        const maxInputTokens = require_summarization.getProfileLimits(model);
+        if (typeof maxInputTokens === "number") {
+          const threshold = Math.floor(maxInputTokens * trigger.fraction);
+          if (threshold <= 0) continue;
+          if (totalTokens < threshold) conditionMet = false;
+        } else
+        /**
+        * If fraction is specified but we can't get model limits, skip this condition
+        */
+        continue;
+      }
+      /**
+      * If condition has at least one property and all properties are satisfied, trigger editing
+      */
+      if (hasAnyProperty && conditionMet) return true;
+    }
+    return false;
+  }
+  /**
+  * Determine how many tool results to keep based on keep policy
+  */
+  async #determineKeepCount(candidates, countTokens, model) {
+    if ("messages" in this.keep && this.keep.messages !== void 0) return this.keep.messages;
+    if ("tokens" in this.keep && this.keep.tokens !== void 0) {
+      /**
+      * For token-based keep, count backwards from the end until we exceed the token limit
+      * This is a simplified implementation - keeping N most recent tool messages
+      * A more sophisticated implementation would count actual tokens
+      */
+      const targetTokens = this.keep.tokens;
+      let tokenCount = 0;
+      let keepCount = 0;
+      for (let i = candidates.length - 1; i >= 0; i--) {
+        const candidate = candidates[i];
+        /**
+        * Estimate tokens for this tool message (simplified - could be improved)
+        */
+        const msgTokens = await countTokens([candidate.msg]);
+        if (tokenCount + msgTokens <= targetTokens) {
+          tokenCount += msgTokens;
+          keepCount++;
+        } else break;
+      }
+      return keepCount;
+    }
+    if ("fraction" in this.keep && this.keep.fraction !== void 0) {
+      if (!model) return DEFAULT_KEEP;
+      const maxInputTokens = require_summarization.getProfileLimits(model);
+      if (typeof maxInputTokens === "number") {
+        const targetTokens = Math.floor(maxInputTokens * this.keep.fraction);
+        if (targetTokens <= 0) return DEFAULT_KEEP;
+        /**
+        * Use token-based logic with fractional target
+        */
+        let tokenCount = 0;
+        let keepCount = 0;
+        for (let i = candidates.length - 1; i >= 0; i--) {
+          const candidate = candidates[i];
+          const msgTokens = await countTokens([candidate.msg]);
+          if (tokenCount + msgTokens <= targetTokens) {
+            tokenCount += msgTokens;
+            keepCount++;
+          } else break;
+        }
+        return keepCount;
+      }
     }
-    return tokens - clearedTokens;
+    return DEFAULT_KEEP;
   }
   #findAIMessageForToolCall(previousMessages, toolCallId) {
     for (let i = previousMessages.length - 1; i >= 0; i--) {
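The new `#determineKeepCount` helper above resolves the `keep` policy in three forms. A quick sketch of the corresponding configurations (values are illustrative; per the code above, the fractional form falls back to the default of 3 kept results when no model profile is available):

```ts
import { ClearToolUsesEdit } from "langchain";

// Keep the 5 newest tool results.
const byCount = new ClearToolUsesEdit({ keep: { messages: 5 } });

// Keep the newest tool results that fit within roughly 2,000 tokens.
const byTokens = new ClearToolUsesEdit({ keep: { tokens: 2_000 } });

// Keep tool results up to 30% of the model's max input tokens
// (requires a model profile; otherwise DEFAULT_KEEP = 3 applies).
const byFraction = new ClearToolUsesEdit({ keep: { fraction: 0.3 } });
```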
@@ -169,7 +403,7 @@
  * import { createAgent } from "langchain";
  *
  * const agent = createAgent({
- *   model: "anthropic:claude-3-5-sonnet",
+ *   model: "anthropic:claude-sonnet-4-5",
  *   tools: [searchTool, calculatorTool],
  *   middleware: [
  *     contextEditingMiddleware(),
@@ -191,21 +425,57 @@
  * ```ts
  * import { contextEditingMiddleware, ClearToolUsesEdit } from "langchain";
  *
- * const agent = createAgent({
- *   model: "anthropic:claude-3-5-sonnet",
+ * // Single condition: trigger if tokens >= 50000 AND messages >= 20
+ * const agent1 = createAgent({
+ *   model: "anthropic:claude-sonnet-4-5",
+ *   tools: [searchTool, calculatorTool],
+ *   middleware: [
+ *     contextEditingMiddleware({
+ *       edits: [
+ *         new ClearToolUsesEdit({
+ *           trigger: { tokens: 50000, messages: 20 },
+ *           keep: { messages: 5 },
+ *           excludeTools: ["search"],
+ *           clearToolInputs: true,
+ *         }),
+ *       ],
+ *       tokenCountMethod: "approx",
+ *     }),
+ *   ],
+ * });
+ *
+ * // Multiple conditions: trigger if (tokens >= 50000 AND messages >= 20) OR (tokens >= 30000 AND messages >= 50)
+ * const agent2 = createAgent({
+ *   model: "anthropic:claude-sonnet-4-5",
+ *   tools: [searchTool, calculatorTool],
+ *   middleware: [
+ *     contextEditingMiddleware({
+ *       edits: [
+ *         new ClearToolUsesEdit({
+ *           trigger: [
+ *             { tokens: 50000, messages: 20 },
+ *             { tokens: 30000, messages: 50 },
+ *           ],
+ *           keep: { messages: 5 },
+ *         }),
+ *       ],
+ *     }),
+ *   ],
+ * });
+ *
+ * // Fractional trigger with model profile
+ * const agent3 = createAgent({
+ *   model: chatModel,
  *   tools: [searchTool, calculatorTool],
  *   middleware: [
  *     contextEditingMiddleware({
  *       edits: [
  *         new ClearToolUsesEdit({
- *           triggerTokens: 50000, // Clear when exceeding 50K tokens
- *           clearAtLeast: 1000, // Reclaim at least 1K tokens
- *           keep: 5, // Keep 5 most recent tool results
- *           excludeTools: ["search"], // Never clear search results
- *           clearToolInputs: true, // Also clear tool call arguments
+ *           trigger: { fraction: 0.8 }, // Trigger at 80% of model's max tokens
+ *           keep: { fraction: 0.3 }, // Keep 30% of model's max tokens
+ *           model: chatModel,
  *         }),
  *       ],
- *       tokenCountMethod: "approx", // Use approximate counting (or "model")
  *     }),
  *   ],
  * });
@@ -258,13 +528,12 @@ function contextEditingMiddleware(config = {}) {
       if ("getNumTokensFromMessages" in request.model) return request.model.getNumTokensFromMessages(allMessages).then(({ totalCount }) => totalCount);
       throw new Error(`Model "${request.model.getName()}" does not support token counting`);
     };
-    let tokens = await countTokens(request.messages);
     /**
     * Apply each edit in sequence
     */
-    for (const edit of edits) tokens = await edit.apply({
-      tokens,
+    for (const edit of edits) await edit.apply({
       messages: request.messages,
+      model: request.model,
       countTokens
     });
     return handler(request);
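In this last hunk the middleware no longer threads a running `tokens` value through the edits; each edit recounts as needed via the shared `countTokens` function, whose behavior is selected by `tokenCountMethod`. A short sketch of the two modes as documented in this file (only models that implement `getNumTokensFromMessages`, currently OpenAI chat models, support exact counting, and the middleware throws otherwise):

```ts
import { contextEditingMiddleware } from "langchain";

// Default: fast approximate counting via countTokensApproximately.
const approximate = contextEditingMiddleware({ tokenCountMethod: "approx" });

// Exact counting delegated to the chat model; throws at runtime if the
// model does not expose getNumTokensFromMessages.
const exact = contextEditingMiddleware({ tokenCountMethod: "model" });
```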
package/dist/agents/middleware/contextEditing.cjs.map

@@ -1 +1 @@
- [minified source map for the 1.0.4 build of contextEditing.cjs; contents omitted]
+ [minified source map for the 1.0.5 build of contextEditing.cjs; contents omitted]
implements ContextEdit {\n * async apply(params: {\n * tokens: number;\n * messages: BaseMessage[];\n * countTokens: TokenCounter;\n * }): Promise<number> {\n * // Implement your custom editing logic here\n * // and apply it to the messages array, then\n * // return the new token count after edits\n * return countTokens(messages);\n * }\n * }\n * ```\n *\n * @param config - Configuration options for the middleware\n * @returns A middleware instance that can be used with `createAgent`\n */\nexport function contextEditingMiddleware(\n config: ContextEditingMiddlewareConfig = {}\n) {\n const edits = config.edits ?? [new ClearToolUsesEdit()];\n const tokenCountMethod = config.tokenCountMethod ?? \"approx\";\n\n return createMiddleware({\n name: \"ContextEditingMiddleware\",\n wrapModelCall: async (request, handler) => {\n if (!request.messages || request.messages.length === 0) {\n return handler(request);\n }\n\n /**\n * Use model's token counting method\n */\n const systemMsg = request.systemPrompt\n ? [new SystemMessage(request.systemPrompt)]\n : [];\n\n const countTokens: TokenCounter =\n tokenCountMethod === \"approx\"\n ? countTokensApproximately\n : async (messages: BaseMessage[]): Promise<number> => {\n const allMessages = [...systemMsg, ...messages];\n\n /**\n * Check if model has getNumTokensFromMessages method\n * currently only OpenAI models have this method\n */\n if (\"getNumTokensFromMessages\" in request.model) {\n return (\n request.model as BaseLanguageModel & {\n getNumTokensFromMessages: (\n messages: BaseMessage[]\n ) => Promise<{\n totalCount: number;\n countPerMessage: number[];\n }>;\n }\n )\n .getNumTokensFromMessages(allMessages)\n .then(({ totalCount }) => totalCount);\n }\n\n throw new Error(\n `Model \"${request.model.getName()}\" does not support token counting`\n );\n };\n\n /**\n * Apply each edit in sequence\n */\n for (const edit of edits) {\n await edit.apply({\n messages: request.messages,\n model: request.model as BaseLanguageModel,\n countTokens,\n });\n }\n\n return handler(request);\n },\n 
});\n}\n"],"mappings":";;;;;;;AA4BA,MAAM,2BAA2B;AACjC,MAAM,yBAAyB;AAC/B,MAAM,eAAe;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuKrB,IAAa,oBAAb,MAAsD;CACpD;CAEA;CACA;CACA;CACA;CACA;CACA;CACA;CAEA,YAAYA,SAAkC,CAAE,GAAE;EAEhD,IAAIC,UAAmD,OAAO;AAC9D,MAAI,OAAO,kBAAkB,QAAW;GACtC,QAAQ,KACN,yEACD;AACD,OAAI,YAAY,QACd,UAAU,EAAE,QAAQ,OAAO,cAAe;EAE7C;EAED,IAAIC,OAA6B,OAAO;AACxC,MAAI,OAAO,iBAAiB,QAAW;GACrC,QAAQ,KACN,uEACD;AACD,OAAI,SAAS,QACX,OAAO,EAAE,UAAU,OAAO,aAAc;EAE3C;AAGD,MAAI,YAAY,QACd,UAAU,EAAE,QAAQ,uBAAwB;AAE9C,MAAI,SAAS,QACX,OAAO,EAAE,UAAU,aAAc;AAInC,MAAI,MAAM,QAAQ,QAAQ,EAAE;GAC1B,KAAKC,qBAAqB,QAAQ,IAAI,CAAC,MAAMC,wCAAkB,MAAM,EAAE,CAAC;GACxE,KAAK,UAAU,KAAKD;EACrB,OAAM;GACL,MAAM,YAAYC,wCAAkB,MAAM,QAAQ;GAClD,KAAKD,qBAAqB,CAAC,SAAU;GACrC,KAAK,UAAU;EAChB;EAGD,MAAM,gBAAgBE,iCAAW,MAAM,KAAK;EAC5C,KAAK,OAAO;AAGZ,MAAI,OAAO,iBAAiB,QAC1B,QAAQ,KACN,wMAGD;EAEH,KAAK,eAAe,OAAO,gBAAgB;EAE3C,KAAK,kBAAkB,OAAO,mBAAmB;EACjD,KAAK,eAAe,IAAI,IAAI,OAAO,gBAAgB,CAAE;EACrD,KAAK,cAAc,OAAO,eAAe;CAC1C;CAED,MAAM,MAAMC,QAIM;EAChB,MAAM,EAAE,UAAU,OAAO,aAAa,GAAG;EACzC,MAAM,SAAS,MAAM,YAAY,SAAS;;;;;EAM1C,MAAMC,kBAA4B,CAAE;AACpC,OAAK,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;GACxC,MAAM,MAAM,SAAS;AACrB,OAAIC,sCAAY,WAAW,IAAI,EAAE;IAE/B,MAAM,YAAY,KAAKC,0BACrB,SAAS,MAAM,GAAG,EAAE,EACpB,IAAI,aACL;AAED,QAAI,CAAC,WAEH,gBAAgB,KAAK,EAAE;SAClB;KAEL,MAAM,WAAW,UAAU,YAAY,KACrC,CAAC,SAAS,KAAK,OAAO,IAAI,aAC3B;AACD,SAAI,CAAC,UAEH,gBAAgB,KAAK,EAAE;IAE1B;GACF;EACF;;;;AAKD,OAAK,IAAI,IAAI,gBAAgB,SAAS,GAAG,KAAK,GAAG,KAC/C,SAAS,OAAO,gBAAgB,IAAK,EAAE;;;;EAMzC,IAAI,gBAAgB;AACpB,MAAI,gBAAgB,SAAS,GAC3B,gBAAgB,MAAM,YAAY,SAAS;;;;AAM7C,MAAI,CAAC,KAAKC,YAAY,UAAU,eAAe,MAAM,CACnD;;;;EAMF,MAAMC,aAAkD,CAAE;AAC1D,OAAK,IAAI,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;GACxC,MAAM,MAAM,SAAS;AACrB,OAAIH,sCAAY,WAAW,IAAI,EAC7B,WAAW,KAAK;IAAE,KAAK;IAAG;GAAK,EAAC;EAEnC;AAED,MAAI,WAAW,WAAW,EACxB;;;;EAMF,MAAM,YAAY,MAAM,KAAKI,oBAC3B,YACA,aACA,MACD;;;;EAKD,MAAM,oBACJ,aAAa,WAAW,SACpB,CAAE,IACF,YAAY,IACZ,WAAW,MAAM,GAAG,CAAC,UAAU,GAC/B;;;;;EAMN,IAAI,gBAAgB;EACpB,MAAM,2BAA2B,CAAC,GAAG,iBAAkB;AAEvD,OAAK,MAAM,EAAE,KAAK,KAAK,aAAa,IAAI,0BAA0B;;;;GAIhE,MAAM,iBAAiB,YAAY,mBAAmB;AAGtD,OAAI,gBAAgB,QAClB;;;;GAMF,MAAM,YAAY,KAAKH,0BACrB,SAAS,MAAM,GAAG,IAAI,EACtB,YAAY,aACb;AAED,OAAI,CAAC,UACH;;;;GAMF,MAAM,WAAW,UAAU,YAAY,KACrC,CAAC,SAAS,KAAK,OAAO,YAAY,aACnC;AAED,OAAI,CAAC,SACH;;;;GAMF,MAAM,WAAW,YAAY,QAAQ,SAAS;AAC9C,OAAI,KAAK,aAAa,IAAI,SAAS,CACjC;;;;GAMF,SAAS,OAAO,IAAID,sCAAY;IAC9B,cAAc,YAAY;IAC1B,SAAS,KAAK;IACd,MAAM,YAAY;IAClB,UAAU;IACV,mBAAmB;KACjB,GAAG,YAAY;KACf,iBAAiB;MACf,SAAS;MACT,UAAU;KACX;IACF;GACF;;;;AAKD,OAAI,KAAK,iBAAiB;IACxB,MAAM,WAAW,SAAS,QAAQ,UAAU;AAC5C,QAAI,YAAY,GACd,SAAS,YAAY,KAAKK,8BACxB,WACA,YAAY,aACb;GAEJ;;;;GAKD,MAAM,gBAAgB,MAAM,YAAY,SAAS;GACjD,gBAAgB,KAAK,IAAI,GAAG,gBAAgB,cAAc;EAC3D;;;;;;AAOD,MAAI,KAAK,eAAe,KAAK,gBAAgB,KAAK,cAAc;;;;GAI9D,MAAM,sBACJ,YAAY,KAAK,YAAY,WAAW,SACpC,WAAW,MAAM,CAAC,UAAU,GAC5B,CAAE;;;;;AAMR,QAAK,IAAI,IAAI,oBAAoB,SAAS,GAAG,KAAK,GAAG,KAAK;AACxD,QAAI,iBAAiB,KAAK,aACxB;IAGF,MAAM,EAAE,KAAK,KAAK,aAAa,GAAG,oBAAoB;;;;IAKtD,MAAM,iBAAiB,YAAY,mBAC/B;AACJ,QAAI,gBAAgB,QAClB;;;;IAMF,MAAM,YAAY,KAAKJ,0BACrB,SAAS,MAAM,GAAG,IAAI,EACtB,YAAY,aACb;AAED,QAAI,CAAC,UACH;;;;IAMF,MAAM,WAAW,UAAU,YAAY,KACrC,CAAC,SAAS,KAAK,OAAO,YAAY,aACnC;AAED,QAAI,CAAC,SACH;;;;IAMF,MAAM,WAAW,YAAY,QAAQ,SAAS;AAC9C,QAAI,KAAK,aAAa,IAAI,SAAS,CACjC;;;;IAMF,SAAS,OAAO,IAAID,sCAAY;KAC9B,cAAc,YAAY;KAC1B,SAAS,KAAK;KACd,MAAM,YAAY;KAClB,UAAU;KACV,mBAAmB;MACjB,GAAG,YAAY;MACf,iBAAiB;OACf,SAAS;OACT,UAAU;MACX;KACF;IACF;;;;AAKD,QAAI,KAAK,iBAAiB;KACxB,MAAM,WAAW,SAAS,QAAQ,UAAU;AAC5C,SAAI,YAAY,GACd,SAAS,YAAY,KAAKK,8BACxB,WACA,YAAY,aACb;IAEJ;;;;IA
KD,MAAM,gBAAgB,MAAM,YAAY,SAAS;IACjD,gBAAgB,KAAK,IAAI,GAAG,gBAAgB,cAAc;GAC3D;EACF;CACF;;;;CAKD,YACEC,UACAC,aACAC,OACS;;;;AAIT,OAAK,MAAM,WAAW,KAAKb,oBAAoB;;;;GAI7C,IAAI,eAAe;GACnB,IAAI,iBAAiB;AAErB,OAAI,QAAQ,aAAa,QAAW;IAClC,iBAAiB;AACjB,QAAI,SAAS,SAAS,QAAQ,UAC5B,eAAe;GAElB;AAED,OAAI,QAAQ,WAAW,QAAW;IAChC,iBAAiB;AACjB,QAAI,cAAc,QAAQ,QACxB,eAAe;GAElB;AAED,OAAI,QAAQ,aAAa,QAAW;IAClC,iBAAiB;AACjB,QAAI,CAAC,MACH;IAEF,MAAM,iBAAiBc,uCAAiB,MAAM;AAC9C,QAAI,OAAO,mBAAmB,UAAU;KACtC,MAAM,YAAY,KAAK,MAAM,iBAAiB,QAAQ,SAAS;AAC/D,SAAI,aAAa,EACf;AAEF,SAAI,cAAc,WAChB,eAAe;IAElB;;;;AAIC;GAEH;;;;AAKD,OAAI,kBAAkB,aACpB,QAAO;EAEV;AAED,SAAO;CACR;;;;CAKD,MAAML,oBACJM,YACAC,aACAH,OACiB;AACjB,MAAI,cAAc,KAAK,QAAQ,KAAK,KAAK,aAAa,OACpD,QAAO,KAAK,KAAK;AAGnB,MAAI,YAAY,KAAK,QAAQ,KAAK,KAAK,WAAW,QAAW;;;;;;GAM3D,MAAM,eAAe,KAAK,KAAK;GAC/B,IAAI,aAAa;GACjB,IAAI,YAAY;AAEhB,QAAK,IAAI,IAAI,WAAW,SAAS,GAAG,KAAK,GAAG,KAAK;IAC/C,MAAM,YAAY,WAAW;;;;IAI7B,MAAM,YAAY,MAAM,YAAY,CAAC,UAAU,GAAI,EAAC;AACpD,QAAI,aAAa,aAAa,cAAc;KAC1C,cAAc;KACd;IACD,MACC;GAEH;AAED,UAAO;EACR;AAED,MAAI,cAAc,KAAK,QAAQ,KAAK,KAAK,aAAa,QAAW;AAC/D,OAAI,CAAC,MACH,QAAO;GAET,MAAM,iBAAiBC,uCAAiB,MAAM;AAC9C,OAAI,OAAO,mBAAmB,UAAU;IACtC,MAAM,eAAe,KAAK,MAAM,iBAAiB,KAAK,KAAK,SAAS;AACpE,QAAI,gBAAgB,EAClB,QAAO;;;;IAKT,IAAI,aAAa;IACjB,IAAI,YAAY;AAEhB,SAAK,IAAI,IAAI,WAAW,SAAS,GAAG,KAAK,GAAG,KAAK;KAC/C,MAAM,YAAY,WAAW;KAC7B,MAAM,YAAY,MAAM,YAAY,CAAC,UAAU,GAAI,EAAC;AACpD,SAAI,aAAa,aAAa,cAAc;MAC1C,cAAc;MACd;KACD,MACC;IAEH;AAED,WAAO;GACR;EACF;AAED,SAAO;CACR;CAED,0BACEG,kBACAC,YACkB;AAElB,OAAK,IAAI,IAAI,iBAAiB,SAAS,GAAG,KAAK,GAAG,KAAK;GACrD,MAAM,MAAM,iBAAiB;AAC7B,OAAIC,oCAAU,WAAW,IAAI,EAAE;IAC7B,MAAM,cAAc,IAAI,YAAY,KAClC,CAAC,SAAS,KAAK,OAAO,WACvB;AACD,QAAI,YACF,QAAO;GAEV;EACF;AACD,SAAO;CACR;CAED,8BACEC,SACAF,YACW;EACX,MAAM,mBAAmB,QAAQ,YAAY,IAAI,CAAC,aAAa;AAC7D,OAAI,SAAS,OAAO,WAClB,QAAO;IAAE,GAAG;IAAU,MAAM,CAAE;GAAE;AAElC,UAAO;EACR,EAAC;EAEF,MAAM,WAAW,EAAE,GAAG,QAAQ,kBAAmB;EACjD,MAAM,eAAe,EACnB,GAAI,SAAS,gBACd;EAED,MAAM,aAAa,IAAI,IACrB,aAAa;EAEf,WAAW,IAAI,WAAW;EAC1B,aAAa,sBAAsB,MAAM,KAAK,WAAW,CAAC,MAAM;EAChE,SAAS,kBAAkB;AAE3B,SAAO,IAAIC,oCAAU;GACnB,SAAS,QAAQ;GACjB,YAAY;GACZ,mBAAmB;GACnB,IAAI,QAAQ;GACZ,MAAM,QAAQ;GACd,mBAAmB,QAAQ;EAC5B;CACF;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgJD,SAAgB,yBACdE,SAAyC,CAAE,GAC3C;CACA,MAAM,QAAQ,OAAO,SAAS,CAAC,IAAI,mBAAoB;CACvD,MAAM,mBAAmB,OAAO,oBAAoB;AAEpD,QAAOC,oCAAiB;EACtB,MAAM;EACN,eAAe,OAAO,SAAS,YAAY;AACzC,OAAI,CAAC,QAAQ,YAAY,QAAQ,SAAS,WAAW,EACnD,QAAO,QAAQ,QAAQ;;;;GAMzB,MAAM,YAAY,QAAQ,eACtB,CAAC,IAAIC,wCAAc,QAAQ,aAAc,IACzC,CAAE;GAEN,MAAMP,cACJ,qBAAqB,WACjBQ,yCACA,OAAOb,aAA6C;IAClD,MAAM,cAAc,CAAC,GAAG,WAAW,GAAG,QAAS;;;;;AAM/C,QAAI,8BAA8B,QAAQ,MACxC,QACE,QAAQ,MASP,yBAAyB,YAAY,CACrC,KAAK,CAAC,EAAE,YAAY,KAAK,WAAW;AAGzC,UAAM,IAAI,MACR,CAAC,OAAO,EAAE,QAAQ,MAAM,SAAS,CAAC,iCAAiC,CAAC;GAEvE;;;;AAKP,QAAK,MAAM,QAAQ,OACjB,MAAM,KAAK,MAAM;IACf,UAAU,QAAQ;IAClB,OAAO,QAAQ;IACf;GACD,EAAC;AAGJ,UAAO,QAAQ,QAAQ;EACxB;CACF,EAAC;AACH"}
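A minimal sketch of a custom editing strategy for the new `contextEditingMiddleware`, shaped after the `CustomEdit` example embedded in the source above. The class name, the 48 000-token threshold, and the `"[cleared]"` placeholder are illustrative assumptions, not part of the package; unlike the inline example, the sketch destructures `params` before using `countTokens` and `messages`.

```ts
import { ToolMessage, type BaseMessage } from "@langchain/core/messages";
import {
  contextEditingMiddleware,
  type ContextEdit,
  type TokenCounter,
} from "langchain";

/**
 * Illustrative strategy: once the conversation exceeds ~48k tokens, replace
 * every tool result except the most recent one with a short placeholder,
 * mirroring how the built-in ClearToolUsesEdit rewrites ToolMessage entries
 * in place.
 */
class RedactOldToolResultsEdit implements ContextEdit {
  async apply(params: {
    messages: BaseMessage[];
    countTokens: TokenCounter;
  }): Promise<number> {
    const { messages, countTokens } = params;

    const before = await countTokens(messages);
    if (before < 48_000) {
      return before;
    }

    // Indexes of all tool results except the most recent one.
    // ToolMessage.isInstance mirrors the AIMessage.isInstance check used in the source.
    const staleToolIdxs = messages
      .map((msg, idx) => (ToolMessage.isInstance(msg) ? idx : -1))
      .filter((idx) => idx >= 0)
      .slice(0, -1);

    for (const idx of staleToolIdxs) {
      const original = messages[idx] as ToolMessage;
      // Rewrite the entry in place, as the built-in edit does.
      messages[idx] = new ToolMessage({
        tool_call_id: original.tool_call_id,
        name: original.name,
        content: "[cleared]",
      });
    }

    return countTokens(messages);
  }
}

const middleware = contextEditingMiddleware({
  edits: [new RedactOldToolResultsEdit()],
});
```

The resulting `middleware` object is passed to `createAgent`'s `middleware` array exactly as in the examples embedded in the source above.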