@blockrun/runcode 2.3.0 → 2.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/loop.js +8 -1
- package/dist/agent/reduce.d.ts +38 -0
- package/dist/agent/reduce.js +231 -0
- package/package.json +1 -1
package/dist/agent/loop.js
CHANGED
|
@@ -7,6 +7,7 @@ import { ModelClient } from './llm.js';
|
|
|
7
7
|
import { autoCompactIfNeeded, microCompact } from './compact.js';
|
|
8
8
|
import { estimateHistoryTokens, updateActualTokens, resetTokenAnchor } from './tokens.js';
|
|
9
9
|
import { handleSlashCommand } from './commands.js';
|
|
10
|
+
import { reduceTokens } from './reduce.js';
|
|
10
11
|
import { PermissionManager } from './permissions.js';
|
|
11
12
|
import { StreamingExecutor } from './streaming-executor.js';
|
|
12
13
|
import { optimizeHistory, CAPPED_MAX_TOKENS, ESCALATED_MAX_TOKENS, getMaxOutputTokens } from './optimize.js';
|
|
@@ -253,7 +254,13 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
|
|
|
253
254
|
history.length = 0;
|
|
254
255
|
history.push(...optimized);
|
|
255
256
|
}
|
|
256
|
-
// 2.
|
|
257
|
+
// 2. Token reduction: age old results, normalize whitespace, trim verbose messages
|
|
258
|
+
const reduced = reduceTokens(history, config.debug);
|
|
259
|
+
if (reduced !== history) {
|
|
260
|
+
history.length = 0;
|
|
261
|
+
history.push(...reduced);
|
|
262
|
+
}
|
|
263
|
+
// 3. Microcompact: only when history has >15 messages (skip for short conversations)
|
|
257
264
|
if (history.length > 15) {
|
|
258
265
|
const microCompacted = microCompact(history, 8);
|
|
259
266
|
if (microCompacted !== history) {
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Token Reduction for runcode.
|
|
3
|
+
* Original implementation — reduces context size through intelligent pruning.
|
|
4
|
+
*
|
|
5
|
+
* Strategy: instead of compression/encoding, we PRUNE redundant content.
|
|
6
|
+
* The model doesn't need verbose tool outputs from 20 turns ago.
|
|
7
|
+
*
|
|
8
|
+
* Three reduction passes:
|
|
9
|
+
* 1. Tool result aging — progressively shorten old tool results
|
|
10
|
+
* 2. Whitespace normalization — remove excessive blank lines and indentation
|
|
11
|
+
* 3. Stale context removal — drop system info that's been superseded
|
|
12
|
+
*/
|
|
13
|
+
import type { Dialogue } from './types.js';
|
|
14
|
+
/**
|
|
15
|
+
* Progressively shorten tool results based on age.
|
|
16
|
+
* Recent results: keep full. Older results: keep summary. Very old: keep one line.
|
|
17
|
+
*
|
|
18
|
+
* This is the biggest token saver — a 10KB bash output from 20 turns ago
|
|
19
|
+
* can be reduced to "✓ Bash: ran npm test (exit 0)" saving ~2500 tokens.
|
|
20
|
+
*/
|
|
21
|
+
export declare function ageToolResults(history: Dialogue[]): Dialogue[];
|
|
22
|
+
/**
|
|
23
|
+
* Normalize whitespace in text messages.
|
|
24
|
+
* - Collapse 3+ blank lines to 2
|
|
25
|
+
* - Remove trailing spaces
|
|
26
|
+
* - Reduce indentation beyond 8 spaces to 8
|
|
27
|
+
*/
|
|
28
|
+
export declare function normalizeWhitespace(history: Dialogue[]): Dialogue[];
|
|
29
|
+
/**
|
|
30
|
+
* Trim very long assistant text messages from old turns.
|
|
31
|
+
* Recent messages: keep full. Old long messages: keep first 1500 chars.
|
|
32
|
+
*/
|
|
33
|
+
export declare function trimOldAssistantMessages(history: Dialogue[]): Dialogue[];
|
|
34
|
+
/**
|
|
35
|
+
* Run all token reduction passes on conversation history.
|
|
36
|
+
* Returns same reference if nothing changed (cheap identity check).
|
|
37
|
+
*/
|
|
38
|
+
export declare function reduceTokens(history: Dialogue[], debug?: boolean): Dialogue[];
|
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Token Reduction for runcode.
|
|
3
|
+
* Original implementation — reduces context size through intelligent pruning.
|
|
4
|
+
*
|
|
5
|
+
* Strategy: instead of compression/encoding, we PRUNE redundant content.
|
|
6
|
+
* The model doesn't need verbose tool outputs from 20 turns ago.
|
|
7
|
+
*
|
|
8
|
+
* Three reduction passes:
|
|
9
|
+
* 1. Tool result aging — progressively shorten old tool results
|
|
10
|
+
* 2. Whitespace normalization — remove excessive blank lines and indentation
|
|
11
|
+
* 3. Stale context removal — drop system info that's been superseded
|
|
12
|
+
*/
|
|
13
|
+
// ─── 1. Tool Result Aging ─────────────────────────────────────────────────
/**
 * Progressively shorten tool results based on age.
 * Recent results: keep full. Older results: keep summary. Very old: keep one line.
 *
 * Aging tiers (age 1 = most recent tool result):
 *   age 1-3  : untouched
 *   age 4-8  : first 500 chars (cut at a newline when one falls in the back half)
 *   age 9-15 : first line, max 150 chars
 *   age 16+  : one-line "[Result: …]" / "[Error: …]" summary
 *
 * This is the biggest token saver — a 10KB bash output from 20 turns ago
 * can be reduced to a single summary line, saving thousands of tokens.
 * Returns the same array reference when nothing needed aging.
 */
export function ageToolResults(history) {
    // Collect indices of user messages that carry tool_result parts.
    const toolPositions = [];
    for (let i = 0; i < history.length; i++) {
        const msg = history[i];
        if (msg.role === 'user' &&
            Array.isArray(msg.content) &&
            msg.content.some(p => p.type === 'tool_result')) {
            toolPositions.push(i);
        }
    }
    if (toolPositions.length <= 3)
        return history; // Nothing to age — the newest 3 are always kept full
    const result = [...history];
    const totalResults = toolPositions.length;
    for (let idx = 0; idx < toolPositions.length; idx++) {
        const pos = toolPositions[idx];
        const age = totalResults - idx; // Higher = older
        const msg = result[pos];
        if (!Array.isArray(msg.content))
            continue;
        const parts = msg.content;
        let modified = false;
        const aged = parts.map(part => {
            if (part.type !== 'tool_result')
                return part;
            // Non-string tool results are measured (and summarized) via JSON.
            const content = typeof part.content === 'string'
                ? part.content
                : JSON.stringify(part.content);
            const charLen = content.length;
            // Recent 3 results: keep full
            if (age <= 3)
                return part;
            // Age 4-8: keep first 500 chars
            if (age <= 8 && charLen > 500) {
                modified = true;
                const truncated = content.slice(0, 500);
                const lastNl = truncated.lastIndexOf('\n');
                const clean = lastNl > 250 ? truncated.slice(0, lastNl) : truncated;
                return {
                    ...part,
                    content: `${clean}\n... (${charLen - clean.length} chars omitted, ${age} turns ago)`,
                };
            }
            // Age 9-15: keep first 200 chars.
            // The explicit `age > 8` lower bound keeps this tier from also
            // trimming age 4-8 results that are between 200 and 500 chars —
            // those belong to the tier above and must stay full.
            if (age > 8 && age <= 15 && charLen > 200) {
                modified = true;
                const firstLine = content.split('\n')[0].slice(0, 150);
                return {
                    ...part,
                    content: `${firstLine}\n... (${charLen} chars, ${age} turns ago)`,
                };
            }
            // Age 16+: one line summary
            if (age > 15 && charLen > 80) {
                modified = true;
                const summary = content.split('\n')[0].slice(0, 60);
                return {
                    ...part,
                    content: part.is_error
                        ? `[Error: ${summary}...]`
                        : `[Result: ${summary}...]`,
                };
            }
            return part;
        });
        if (modified) {
            // Spread preserves any extra fields on the message besides content.
            result[pos] = { ...msg, content: aged };
        }
    }
    return result;
}
|
|
93
|
+
// ─── 2. Whitespace Normalization ──────────────────────────────────────────
/**
 * Normalize whitespace in text messages.
 * - Collapse 3+ blank lines to 2
 * - Remove trailing spaces
 * - Reduce indentation beyond 8 spaces to 8
 *
 * Returns the same array reference when nothing changed.
 */
export function normalizeWhitespace(history) {
    let modified = false;
    const result = history.map(msg => {
        // Only plain string contents are normalized; part arrays pass through.
        if (typeof msg.content !== 'string')
            return msg;
        const original = msg.content;
        const cleaned = original
            .replace(/[ \t]+$/gm, '') // Trailing spaces/tabs per line
            .replace(/\n{4,}/g, '\n\n\n') // Max 3 consecutive newlines (= 2 blank lines)
            .replace(/^( {9,})/gm, '        '); // Cap indentation at 8 spaces
        if (cleaned !== original) {
            modified = true;
            return { ...msg, content: cleaned };
        }
        return msg;
    });
    return modified ? result : history;
}
|
|
118
|
+
// ─── 3. Verbose Assistant Message Trimming ────────────────────────────────
/**
 * Trim very long assistant text messages from old turns.
 * The last 4 assistant messages are kept full; older string messages longer
 * than 1500 chars are cut to 1500 (at a newline when possible), and older
 * part arrays with >1500 total text chars have each long text part capped
 * at 500 chars. Returns the same array reference when nothing changed.
 */
export function trimOldAssistantMessages(history) {
    const MAX_OLD_ASSISTANT_CHARS = 1500;
    const KEEP_RECENT = 4; // Keep last 4 assistant messages full
    const assistantCount = history.reduce((n, m) => (m.role === 'assistant' ? n + 1 : n), 0);
    if (assistantCount <= KEEP_RECENT)
        return history;
    let seen = 0;
    let changed = false;
    const result = history.map(msg => {
        if (msg.role !== 'assistant')
            return msg;
        seen += 1;
        // fromEnd 0 = most recent assistant message.
        const fromEnd = assistantCount - seen;
        if (fromEnd < KEEP_RECENT)
            return msg; // Recent → untouched
        const body = msg.content;
        if (typeof body === 'string' && body.length > MAX_OLD_ASSISTANT_CHARS) {
            changed = true;
            const head = body.slice(0, MAX_OLD_ASSISTANT_CHARS);
            const nl = head.lastIndexOf('\n');
            // Prefer cutting at a newline if one lands past the halfway point.
            const kept = nl > MAX_OLD_ASSISTANT_CHARS / 2 ? head.slice(0, nl) : head;
            return { ...msg, content: kept + '\n... (response truncated)' };
        }
        // Content arrays: trim individual long text parts when the total is big.
        if (Array.isArray(body)) {
            let textChars = 0;
            for (const part of body) {
                if (part.type === 'text')
                    textChars += part.text.length;
            }
            if (textChars > MAX_OLD_ASSISTANT_CHARS) {
                changed = true;
                const trimmed = body.map(part => (part.type === 'text' && part.text.length > 500)
                    ? { ...part, text: part.text.slice(0, 500) + '\n... (trimmed)' }
                    : part);
                return { ...msg, content: trimmed };
            }
        }
        return msg;
    });
    return changed ? result : history;
}
|
|
171
|
+
// ─── Pipeline ─────────────────────────────────────────────────────────────
/**
 * Run all token reduction passes on conversation history.
 * Passes run in order: tool-result aging, whitespace normalization, then
 * old-assistant-message trimming. Returns the same reference if nothing
 * changed (cheap identity check for callers).
 */
export function reduceTokens(history, debug) {
    // Short conversations don't have enough redundancy to be worth pruning.
    if (history.length < 8)
        return history;
    const passes = [ageToolResults, normalizeWhitespace, trimOldAssistantMessages];
    let current = history;
    let savedChars = 0;
    for (const pass of passes) {
        const next = pass(current);
        if (next !== current) {
            // Each pass signals change by returning a new array reference.
            savedChars += estimateChars(current) - estimateChars(next);
            current = next;
        }
    }
    if (debug && savedChars > 500) {
        // Rough heuristic: ~4 chars per token.
        const tokensSaved = Math.round(savedChars / 4);
        console.error(`[runcode] Token reduction: ~${tokensSaved} tokens saved`);
    }
    return current;
}
|
|
209
|
+
/**
 * Approximate the character footprint of a conversation history.
 * Counts plain string contents, text parts, tool_result payloads
 * (stringified via JSON when not already strings), and tool_use inputs.
 * Used only for the debug savings report in reduceTokens.
 */
function estimateChars(history) {
    let total = 0;
    for (const msg of history) {
        const body = msg.content;
        if (typeof body === 'string') {
            total += body.length;
            continue;
        }
        if (!Array.isArray(body))
            continue;
        for (const part of body) {
            // Skip malformed parts without a type tag.
            if (!('type' in part))
                continue;
            switch (part.type) {
                case 'text':
                    total += part.text.length;
                    break;
                case 'tool_result':
                    total += typeof part.content === 'string'
                        ? part.content.length
                        : JSON.stringify(part.content).length;
                    break;
                case 'tool_use':
                    total += JSON.stringify(part.input).length;
                    break;
            }
        }
    }
    return total;
}
|