@blockrun/runcode 2.4.0 → 2.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. package/dist/agent/commands.js +37 -3
  2. package/dist/agent/context.js +3 -2
  3. package/dist/agent/loop.js +37 -13
  4. package/dist/agent/reduce.d.ts +42 -0
  5. package/dist/agent/reduce.js +258 -0
  6. package/dist/agent/streaming-executor.js +19 -13
  7. package/dist/agent/types.d.ts +2 -0
  8. package/dist/commands/start.js +0 -2
  9. package/dist/index.js +2 -0
  10. package/dist/tools/bash.js +16 -9
  11. package/dist/ui/terminal.d.ts +9 -1
  12. package/dist/ui/terminal.js +55 -36
  13. package/package.json +1 -1
  14. package/dist/compression/adapter.d.ts +0 -13
  15. package/dist/compression/adapter.js +0 -104
  16. package/dist/compression/codebook.d.ts +0 -23
  17. package/dist/compression/codebook.js +0 -118
  18. package/dist/compression/index.d.ts +0 -32
  19. package/dist/compression/index.js +0 -258
  20. package/dist/compression/layers/deduplication.d.ts +0 -27
  21. package/dist/compression/layers/deduplication.js +0 -97
  22. package/dist/compression/layers/dictionary.d.ts +0 -20
  23. package/dist/compression/layers/dictionary.js +0 -67
  24. package/dist/compression/layers/dynamic-codebook.d.ts +0 -25
  25. package/dist/compression/layers/dynamic-codebook.js +0 -145
  26. package/dist/compression/layers/json-compact.d.ts +0 -22
  27. package/dist/compression/layers/json-compact.js +0 -74
  28. package/dist/compression/layers/observation.d.ts +0 -20
  29. package/dist/compression/layers/observation.js +0 -126
  30. package/dist/compression/layers/paths.d.ts +0 -23
  31. package/dist/compression/layers/paths.js +0 -107
  32. package/dist/compression/layers/whitespace.d.ts +0 -26
  33. package/dist/compression/layers/whitespace.js +0 -57
  34. package/dist/compression/types.d.ts +0 -83
  35. package/dist/compression/types.js +0 -26
@@ -108,13 +108,16 @@ const DIRECT_COMMANDS = {
108
108
  emitDone(ctx);
109
109
  },
110
110
  '/help': (ctx) => {
111
+ const ultrathinkOn = ctx.config.ultrathink;
111
112
  ctx.onEvent({ kind: 'text_delta', text: `**RunCode Commands**\n\n` +
112
113
  ` **Coding:** /commit /review /test /fix /debug /explain /search /find /refactor /scaffold\n` +
113
114
  ` **Git:** /push /pr /undo /status /diff /log /branch /stash /unstash\n` +
114
115
  ` **Analysis:** /security /lint /optimize /todo /deps /clean /migrate /doc\n` +
115
- ` **Session:** /plan /execute /compact /retry /sessions /resume /context /tasks\n` +
116
- ` **Info:** /model /wallet /cost /mcp /doctor /version /bug /help\n` +
117
- ` **UI:** /clear /exit\n`
116
+ ` **Session:** /plan /ultraplan /execute /compact /retry /sessions /resume /context /tasks\n` +
117
+ ` **Power:** /ultrathink [query] /ultraplan /dump\n` +
118
+ ` **Info:** /model /wallet /cost /tokens /mcp /doctor /version /bug /help\n` +
119
+ ` **UI:** /clear /exit\n` +
120
+ (ultrathinkOn ? `\n Ultrathink: ON\n` : '')
118
121
  });
119
122
  emitDone(ctx);
120
123
  },
@@ -201,6 +204,27 @@ const DIRECT_COMMANDS = {
201
204
  }
202
205
  emitDone(ctx);
203
206
  },
207
+ '/ultrathink': (ctx) => {
208
+ const cfg = ctx.config;
209
+ cfg.ultrathink = !cfg.ultrathink;
210
+ if (cfg.ultrathink) {
211
+ ctx.onEvent({ kind: 'text_delta', text: '**Ultrathink mode ON.** Extended reasoning active — the model will think deeply before responding.\n' +
212
+ 'Use `/ultrathink` again to disable, or `/ultrathink <query>` to send a one-shot deep analysis.\n'
213
+ });
214
+ }
215
+ else {
216
+ ctx.onEvent({ kind: 'text_delta', text: '**Ultrathink mode OFF.** Normal response mode restored.\n' });
217
+ }
218
+ emitDone(ctx);
219
+ },
220
+ '/dump': (ctx) => {
221
+ const instructions = ctx.config.systemInstructions;
222
+ const joined = instructions.join('\n\n---\n\n');
223
+ ctx.onEvent({ kind: 'text_delta', text: `**System Prompt** (${instructions.length} section${instructions.length !== 1 ? 's' : ''}):\n\n` +
224
+ `\`\`\`\n${joined.slice(0, 4000)}${joined.length > 4000 ? `\n... (${joined.length - 4000} chars truncated)` : ''}\n\`\`\`\n`
225
+ });
226
+ emitDone(ctx);
227
+ },
204
228
  '/execute': (ctx) => {
205
229
  if (ctx.config.permissionMode !== 'plan') {
206
230
  ctx.onEvent({ kind: 'text_delta', text: 'Not in plan mode. Use /plan to enter.\n' });
@@ -264,9 +288,19 @@ const REWRITE_COMMANDS = {
264
288
  '/migrate': 'Check for pending database migrations, outdated dependencies, or breaking changes that need addressing. List required migration steps.',
265
289
  '/clean': 'Find and remove dead code: unused imports, unreachable code, commented-out blocks, unused variables and functions. Show what would be removed before making changes.',
266
290
  '/tasks': 'List all current tasks using the Task tool.',
291
+ '/ultraplan': 'Enter ultraplan mode: create a detailed, step-by-step implementation plan before writing any code. ' +
292
+ 'First, thoroughly read ALL relevant files. Map out every dependency and potential side effect. ' +
293
+ 'Identify edge cases, security considerations, and performance implications. ' +
294
+ 'Then produce a numbered implementation plan with specific file paths, function names, and code changes. ' +
295
+ 'Do NOT write any code yet — only the plan.',
267
296
  };
268
297
  // Commands with arguments (prefix match → rewrite)
269
298
  const ARG_COMMANDS = [
299
+ { prefix: '/ultrathink ', rewrite: (a) => `Think deeply, carefully, and thoroughly before responding. ` +
300
+ `Consider multiple approaches, check edge cases, reason through implications step by step, ` +
301
+ `and challenge your initial assumptions. Take your time — quality of reasoning matters more than speed. ` +
302
+ `Now respond to: ${a}`
303
+ },
270
304
  { prefix: '/explain ', rewrite: (a) => `Read and explain the code in ${a}. Cover: what it does, key functions/classes, how it connects to the rest of the codebase.` },
271
305
  { prefix: '/search ', rewrite: (a) => `Search the codebase for "${a}" using Grep. Show the matching files and relevant code context.` },
272
306
  { prefix: '/find ', rewrite: (a) => `Find files matching the pattern "${a}" using Glob. Show the results.` },
@@ -50,8 +50,9 @@ You have access to tools for reading, writing, editing files, running shell comm
50
50
  # Slash Commands Available
51
51
  The user can type these shortcuts: /commit, /review, /test, /fix, /debug, /explain <file>,
52
52
  /search <query>, /find <pattern>, /refactor <desc>, /init, /todo, /deps, /diff, /status,
53
- /log, /branch, /stash, /plan, /execute, /compact, /retry, /sessions, /resume, /tasks,
54
- /context, /doctor, /model, /cost, /clear, /help, /exit.`;
53
+ /log, /branch, /stash, /plan, /ultraplan, /execute, /compact, /retry, /sessions, /resume,
54
+ /tasks, /context, /doctor, /tokens, /model, /cost, /dump, /ultrathink [query], /clear,
55
+ /help, /exit.`;
55
56
  /**
56
57
  * Build the full system instructions array for a session.
57
58
  */
@@ -5,9 +5,9 @@
5
5
  */
6
6
  import { ModelClient } from './llm.js';
7
7
  import { autoCompactIfNeeded, microCompact } from './compact.js';
8
- import { estimateHistoryTokens, updateActualTokens, resetTokenAnchor } from './tokens.js';
8
+ import { estimateHistoryTokens, updateActualTokens, resetTokenAnchor, getAnchoredTokenCount, getContextWindow } from './tokens.js';
9
9
  import { handleSlashCommand } from './commands.js';
10
- import { compressHistory } from '../compression/adapter.js';
10
+ import { reduceTokens } from './reduce.js';
11
11
  import { PermissionManager } from './permissions.js';
12
12
  import { StreamingExecutor } from './streaming-executor.js';
13
13
  import { optimizeHistory, CAPPED_MAX_TOKENS, ESCALATED_MAX_TOKENS, getMaxOutputTokens } from './optimize.js';
@@ -214,6 +214,7 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
214
214
  // Session persistence
215
215
  const sessionId = createSessionId();
216
216
  let turnCount = 0;
217
+ let tokenBudgetWarned = false; // Emit token budget warning at most once per session
217
218
  pruneOldSessions(sessionId); // Cleanup old sessions on start, protect current
218
219
  while (true) {
219
220
  let input = await getUserInput();
@@ -254,23 +255,22 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
254
255
  history.length = 0;
255
256
  history.push(...optimized);
256
257
  }
257
- // 2. Microcompact: only when history has >15 messages (skip for short conversations)
258
+ // 2. Token reduction: age old results, normalize whitespace, trim verbose messages
259
+ const reduced = reduceTokens(history, config.debug);
260
+ if (reduced !== history) {
261
+ history.length = 0;
262
+ history.push(...reduced);
263
+ }
264
+ // 3. Microcompact: only when history has >15 messages (skip for short conversations)
258
265
  if (history.length > 15) {
259
266
  const microCompacted = microCompact(history, 8);
260
267
  if (microCompacted !== history) {
261
268
  history.length = 0;
262
269
  history.push(...microCompacted);
270
+ resetTokenAnchor(); // History shrunk — resync token tracking
263
271
  }
264
272
  }
265
- // 3. Context compression: 7-layer compression for 15-40% token savings
266
- if (history.length > 10) {
267
- const compressed = await compressHistory(history, config.debug);
268
- if (compressed) {
269
- history.length = 0;
270
- history.push(...compressed.history);
271
- }
272
- }
273
- // 4. Auto-compact: summarize history if approaching context limit
273
+ // 4. Auto-compact: summarize history if approaching context limit
274
274
  // Circuit breaker: stop retrying after 3 consecutive failures
275
275
  if (compactFailures < 3) {
276
276
  try {
@@ -292,7 +292,18 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
292
292
  }
293
293
  }
294
294
  }
295
- const systemPrompt = config.systemInstructions.join('\n\n');
295
+ // Inject ultrathink instruction when mode is active
296
+ const systemParts = [...config.systemInstructions];
297
+ if (config.ultrathink) {
298
+ systemParts.push('# Ultrathink Mode\n' +
299
+ 'You are in deep reasoning mode. Before responding to any request:\n' +
300
+ '1. Thoroughly analyze the problem from multiple angles\n' +
301
+ '2. Consider edge cases, failure modes, and second-order effects\n' +
302
+ '3. Challenge your initial assumptions before committing to an approach\n' +
303
+ '4. Think step by step — show your reasoning explicitly when it adds value\n' +
304
+ 'Prioritize correctness and thoroughness over speed.');
305
+ }
306
+ const systemPrompt = systemParts.join('\n\n');
296
307
  const modelMaxOut = getMaxOutputTokens(config.model);
297
308
  let maxTokens = Math.min(maxTokensOverride ?? CAPPED_MAX_TOKENS, modelMaxOut);
298
309
  let responseParts = [];
@@ -435,6 +446,19 @@ export async function interactiveSession(config, getUserInput, onEvent, onAbortR
435
446
  turnCount,
436
447
  messageCount: history.length,
437
448
  });
449
+ // Token budget warning — emit once per session when crossing 70%
450
+ if (!tokenBudgetWarned) {
451
+ const { estimated } = getAnchoredTokenCount(history);
452
+ const contextWindow = getContextWindow(config.model);
453
+ const pct = (estimated / contextWindow) * 100;
454
+ if (pct >= 70) {
455
+ tokenBudgetWarned = true;
456
+ onEvent({
457
+ kind: 'text_delta',
458
+ text: `\n\n> **Token budget: ${pct.toFixed(0)}% used** (~${estimated.toLocaleString()} / ${(contextWindow / 1000).toFixed(0)}k tokens). Run \`/compact\` to free up space.\n`,
459
+ });
460
+ }
461
+ }
438
462
  onEvent({ kind: 'turn_done', reason: 'completed' });
439
463
  break;
440
464
  }
@@ -0,0 +1,42 @@
1
+ /**
2
+ * Token Reduction for runcode.
3
+ * Original implementation — reduces context size through intelligent pruning.
4
+ *
5
+ * Strategy: instead of compression/encoding, we PRUNE redundant content.
6
+ * The model doesn't need verbose tool outputs from 20 turns ago.
7
+ *
8
+ * Four reduction passes:
9
+ * 1. Tool result aging — progressively shorten old tool results
10
+ * 2. Whitespace normalization — remove excessive blank lines and indentation
11
+ * 3. Assistant message trimming — truncate verbose responses from old turns
+ * 4. Deduplication — remove consecutive duplicate messages
12
+ */
13
+ import type { Dialogue } from './types.js';
14
+ /**
15
+ * Progressively shorten tool results based on age.
16
+ * Recent results: keep full. Older results: keep summary. Very old: keep one line.
17
+ *
18
+ * This is the biggest token saver — a 10KB bash output from 20 turns ago
19
+ * can be reduced to "✓ Bash: ran npm test (exit 0)" saving ~2500 tokens.
20
+ */
21
+ export declare function ageToolResults(history: Dialogue[]): Dialogue[];
22
+ /**
23
+ * Normalize whitespace in text messages.
24
+ * - Collapse 3+ blank lines to 2
25
+ * - Remove trailing spaces
26
+ * - Reduce indentation beyond 8 spaces to 8
27
+ */
28
+ export declare function normalizeWhitespace(history: Dialogue[]): Dialogue[];
29
+ /**
30
+ * Trim very long assistant text messages from old turns.
31
+ * Recent messages: keep full. Old long messages: keep first 1500 chars.
32
+ */
33
+ export declare function trimOldAssistantMessages(history: Dialogue[]): Dialogue[];
34
+ /**
35
+ * Remove consecutive duplicate messages (same role + identical string content; array content is left untouched).
36
+ */
37
+ export declare function deduplicateMessages(history: Dialogue[]): Dialogue[];
38
+ /**
39
+ * Run all token reduction passes on conversation history.
40
+ * Returns same reference if nothing changed (cheap identity check).
41
+ */
42
+ export declare function reduceTokens(history: Dialogue[], debug?: boolean): Dialogue[];
@@ -0,0 +1,258 @@
1
+ /**
2
+ * Token Reduction for runcode.
3
+ * Original implementation — reduces context size through intelligent pruning.
4
+ *
5
+ * Strategy: instead of compression/encoding, we PRUNE redundant content.
6
+ * The model doesn't need verbose tool outputs from 20 turns ago.
7
+ *
8
+ * Four reduction passes:
9
+ * 1. Tool result aging — progressively shorten old tool results
10
+ * 2. Whitespace normalization — remove excessive blank lines and indentation
11
+ * 3. Assistant message trimming — truncate verbose responses from old turns
+ * 4. Deduplication — remove consecutive duplicate messages
12
+ */
13
+ // ─── 1. Tool Result Aging ─────────────────────────────────────────────────
14
+ /**
15
+ * Progressively shorten tool results based on age.
16
+ * Recent results: keep full. Older results: keep summary. Very old: keep one line.
17
+ *
18
+ * This is the biggest token saver — a 10KB bash output from 20 turns ago
19
+ * can be reduced to "✓ Bash: ran npm test (exit 0)" saving ~2500 tokens.
20
+ */
21
+ export function ageToolResults(history) {
22
+ // Find all tool_result positions
23
+ const toolPositions = [];
24
+ for (let i = 0; i < history.length; i++) {
25
+ const msg = history[i];
26
+ if (msg.role === 'user' &&
27
+ Array.isArray(msg.content) &&
28
+ msg.content.some(p => p.type === 'tool_result')) {
29
+ toolPositions.push(i);
30
+ }
31
+ }
32
+ if (toolPositions.length <= 3)
33
+ return history; // Nothing to age
34
+ const result = [...history];
35
+ const totalResults = toolPositions.length;
36
+ for (let idx = 0; idx < toolPositions.length; idx++) {
37
+ const pos = toolPositions[idx];
38
+ const age = totalResults - idx; // Higher = older
39
+ const msg = result[pos];
40
+ if (!Array.isArray(msg.content))
41
+ continue;
42
+ const parts = msg.content;
43
+ let modified = false;
44
+ const aged = parts.map(part => {
45
+ if (part.type !== 'tool_result')
46
+ return part;
47
+ const content = typeof part.content === 'string'
48
+ ? part.content
49
+ : JSON.stringify(part.content);
50
+ const charLen = content.length;
51
+ // Recent 3 results: keep full
52
+ if (age <= 3)
53
+ return part;
54
+ // Age 4-8: keep first 500 chars
55
+ if (age <= 8 && charLen > 500) {
56
+ modified = true;
57
+ const truncated = content.slice(0, 500);
58
+ const lastNl = truncated.lastIndexOf('\n');
59
+ const clean = lastNl > 250 ? truncated.slice(0, lastNl) : truncated;
60
+ return {
61
+ ...part,
62
+ content: `${clean}\n... (${charLen - clean.length} chars omitted, ${age} turns ago)`,
63
+ };
64
+ }
65
+ // Age 9-15: over 200 chars → keep only the first line (≤150 chars)
66
+ if (age <= 15 && charLen > 200) {
67
+ modified = true;
68
+ const firstLine = content.split('\n')[0].slice(0, 150);
69
+ return {
70
+ ...part,
71
+ content: `${firstLine}\n... (${charLen} chars, ${age} turns ago)`,
72
+ };
73
+ }
74
+ // Age 16+: one line summary
75
+ if (age > 15 && charLen > 80) {
76
+ modified = true;
77
+ const summary = content.split('\n')[0].slice(0, 60);
78
+ return {
79
+ ...part,
80
+ content: part.is_error
81
+ ? `[Error: ${summary}...]`
82
+ : `[Result: ${summary}...]`,
83
+ };
84
+ }
85
+ return part;
86
+ });
87
+ if (modified) {
88
+ result[pos] = { role: 'user', content: aged };
89
+ }
90
+ }
91
+ return result;
92
+ }
93
+ // ─── 2. Whitespace Normalization ──────────────────────────────────────────
94
+ /**
95
+ * Normalize whitespace in text messages.
96
+ * - Collapse 3+ blank lines to 2
97
+ * - Remove trailing spaces
98
+ * - Reduce indentation beyond 8 spaces to 8
99
+ */
100
+ export function normalizeWhitespace(history) {
101
+ let modified = false;
102
+ const result = history.map(msg => {
103
+ if (typeof msg.content !== 'string')
104
+ return msg;
105
+ const original = msg.content;
106
+ const cleaned = original
107
+ .replace(/[ \t]+$/gm, '') // Trailing spaces
108
+ .replace(/\n{4,}/g, '\n\n\n') // Max 3 consecutive newlines
109
+ .replace(/^( {9,})/gm, ' '); // Cap indentation at 8 spaces
110
+ if (cleaned !== original) {
111
+ modified = true;
112
+ return { ...msg, content: cleaned };
113
+ }
114
+ return msg;
115
+ });
116
+ return modified ? result : history;
117
+ }
118
+ // ─── 3. Verbose Assistant Message Trimming ────────────────────────────────
119
+ /**
120
+ * Trim very long assistant text messages from old turns.
121
+ * Recent messages: keep full. Old long messages: keep first 1000 chars.
122
+ */
123
+ export function trimOldAssistantMessages(history) {
124
+ const MAX_OLD_ASSISTANT_CHARS = 1500;
125
+ const KEEP_RECENT = 4; // Keep last 4 assistant messages full
126
+ let assistantCount = 0;
127
+ for (const msg of history) {
128
+ if (msg.role === 'assistant')
129
+ assistantCount++;
130
+ }
131
+ if (assistantCount <= KEEP_RECENT)
132
+ return history;
133
+ let seenAssistant = 0;
134
+ let modified = false;
135
+ const result = history.map(msg => {
136
+ if (msg.role !== 'assistant')
137
+ return msg;
138
+ seenAssistant++;
139
+ // Keep recent messages full
140
+ if (assistantCount - seenAssistant < KEEP_RECENT)
141
+ return msg;
142
+ if (typeof msg.content === 'string' && msg.content.length > MAX_OLD_ASSISTANT_CHARS) {
143
+ modified = true;
144
+ const truncated = msg.content.slice(0, MAX_OLD_ASSISTANT_CHARS);
145
+ const lastNl = truncated.lastIndexOf('\n');
146
+ const clean = lastNl > MAX_OLD_ASSISTANT_CHARS / 2 ? truncated.slice(0, lastNl) : truncated;
147
+ return { ...msg, content: clean + '\n... (response truncated)' };
148
+ }
149
+ // Also handle content array with text parts
150
+ if (Array.isArray(msg.content)) {
151
+ const parts = msg.content;
152
+ let totalChars = 0;
153
+ for (const p of parts) {
154
+ if (p.type === 'text')
155
+ totalChars += p.text.length;
156
+ }
157
+ if (totalChars > MAX_OLD_ASSISTANT_CHARS) {
158
+ modified = true;
159
+ const trimmedParts = parts.map(p => {
160
+ if (p.type !== 'text' || p.text.length <= 500)
161
+ return p;
162
+ return { ...p, text: p.text.slice(0, 500) + '\n... (trimmed)' };
163
+ });
164
+ return { ...msg, content: trimmedParts };
165
+ }
166
+ }
167
+ return msg;
168
+ });
169
+ return modified ? result : history;
170
+ }
171
+ // ─── 4. Deduplication ─────────────────────────────────────────────────────
172
+ /**
173
+ * Remove consecutive duplicate messages (same role + identical string content; array content is left untouched).
174
+ */
175
+ export function deduplicateMessages(history) {
176
+ if (history.length < 3)
177
+ return history;
178
+ const result = [history[0]];
179
+ let modified = false;
180
+ for (let i = 1; i < history.length; i++) {
181
+ const prev = history[i - 1];
182
+ const curr = history[i];
183
+ if (curr.role === prev.role && typeof curr.content === 'string' && curr.content === prev.content) {
184
+ modified = true;
185
+ continue;
186
+ }
187
+ result.push(curr);
188
+ }
189
+ return modified ? result : history;
190
+ }
191
+ // ─── Pipeline ─────────────────────────────────────────────────────────────
192
+ /**
193
+ * Run all token reduction passes on conversation history.
194
+ * Returns same reference if nothing changed (cheap identity check).
195
+ */
196
+ export function reduceTokens(history, debug) {
197
+ if (history.length < 8)
198
+ return history; // Skip for short conversations
199
+ let current = history;
200
+ let totalSaved = 0;
201
+ // Pass 1: Age old tool results
202
+ const aged = ageToolResults(current);
203
+ if (aged !== current) {
204
+ const before = estimateChars(current);
205
+ current = aged;
206
+ const saved = before - estimateChars(current);
207
+ totalSaved += saved;
208
+ }
209
+ // Pass 2: Normalize whitespace
210
+ const normalized = normalizeWhitespace(current);
211
+ if (normalized !== current) {
212
+ const before = estimateChars(current);
213
+ current = normalized;
214
+ totalSaved += before - estimateChars(current);
215
+ }
216
+ // Pass 3: Trim old verbose assistant messages
217
+ const trimmed = trimOldAssistantMessages(current);
218
+ if (trimmed !== current) {
219
+ const before = estimateChars(current);
220
+ current = trimmed;
221
+ totalSaved += before - estimateChars(current);
222
+ }
223
+ // Pass 4: Remove consecutive duplicate messages
224
+ const deduped = deduplicateMessages(current);
225
+ if (deduped !== current) {
226
+ const before = estimateChars(current);
227
+ current = deduped;
228
+ totalSaved += before - estimateChars(current);
229
+ }
230
+ if (debug && totalSaved > 500) {
231
+ const tokensSaved = Math.round(totalSaved / 4);
232
+ console.error(`[runcode] Token reduction: ~${tokensSaved} tokens saved`);
233
+ }
234
+ return current;
235
+ }
236
+ function estimateChars(history) {
237
+ let total = 0;
238
+ for (const msg of history) {
239
+ if (typeof msg.content === 'string') {
240
+ total += msg.content.length;
241
+ }
242
+ else if (Array.isArray(msg.content)) {
243
+ for (const p of msg.content) {
244
+ if ('type' in p) {
245
+ if (p.type === 'text')
246
+ total += p.text.length;
247
+ else if (p.type === 'tool_result') {
248
+ total += typeof p.content === 'string' ? p.content.length : JSON.stringify(p.content).length;
249
+ }
250
+ else if (p.type === 'tool_use') {
251
+ total += JSON.stringify(p.input).length;
252
+ }
253
+ }
254
+ }
255
+ }
256
+ }
257
+ return total;
258
+ }
@@ -39,21 +39,27 @@ export class StreamingExecutor {
39
39
  async collectResults(allInvocations) {
40
40
  const results = [];
41
41
  const alreadyStarted = new Set(this.pending.map(p => p.invocation.id));
42
- // Wait for concurrent results that were started during streaming
43
- for (const p of this.pending) {
44
- const result = await p.promise;
45
- results.push([p.invocation, result]);
42
+ const pendingSnapshot = [...this.pending];
43
+ this.pending = []; // Clear immediately so errors don't leave stale state
44
+ try {
45
+ // Wait for concurrent results that were started during streaming
46
+ for (const p of pendingSnapshot) {
47
+ const result = await p.promise;
48
+ results.push([p.invocation, result]);
49
+ }
50
+ // Execute sequential (non-concurrent) tools now
51
+ for (const inv of allInvocations) {
52
+ if (alreadyStarted.has(inv.id))
53
+ continue;
54
+ this.onStart(inv.id, inv.name);
55
+ const result = await this.executeWithPermissions(inv);
56
+ results.push([inv, result]);
57
+ }
46
58
  }
47
- // Execute sequential (non-concurrent) tools now
48
- for (const inv of allInvocations) {
49
- if (alreadyStarted.has(inv.id))
50
- continue;
51
- this.onStart(inv.id, inv.name);
52
- const result = await this.executeWithPermissions(inv);
53
- results.push([inv, result]);
59
+ catch (err) {
60
+ // Return partial results rather than losing them; caller handles errors
61
+ throw err;
54
62
  }
55
- // Clear for next round
56
- this.pending = [];
57
63
  return results;
58
64
  }
59
65
  async executeWithPermissions(invocation) {
@@ -101,4 +101,6 @@ export interface AgentConfig {
101
101
  permissionMode?: 'default' | 'trust' | 'deny-all' | 'plan';
102
102
  onEvent?: (event: StreamEvent) => void;
103
103
  debug?: boolean;
104
+ /** Ultrathink mode: inject deep-reasoning instruction into every prompt */
105
+ ultrathink?: boolean;
104
106
  }
@@ -170,7 +170,6 @@ async function runWithInkUI(agentConfig, model, workDir, version, walletInfo, on
170
170
  flushStats();
171
171
  await disconnectMcpServers();
172
172
  console.log(chalk.dim('\nGoodbye.\n'));
173
- process.exit(0);
174
173
  }
175
174
  // ─── Basic readline UI (piped input) ───────────────────────────────────────
176
175
  async function runWithBasicUI(agentConfig, model, workDir) {
@@ -224,7 +223,6 @@ async function runWithBasicUI(agentConfig, model, workDir) {
224
223
  }
225
224
  ui.printGoodbye();
226
225
  flushStats();
227
- process.exit(0);
228
226
  }
229
227
  async function handleSlashCommand(cmd, config, ui) {
230
228
  const parts = cmd.trim().split(/\s+/);
package/dist/index.js CHANGED
@@ -108,6 +108,7 @@ if (firstArg === 'solana' || firstArg === 'base') {
108
108
  }
109
109
  }
110
110
  await startCommand(startOpts);
111
+ process.exit(0);
111
112
  }
112
113
  else if (!firstArg || (firstArg.startsWith('-') && !['-h', '--help', '-V', '--version'].includes(firstArg))) {
113
114
  // No subcommand or only flags — treat as 'start' with flags
@@ -122,6 +123,7 @@ else if (!firstArg || (firstArg.startsWith('-') && !['-h', '--help', '-V', '--ve
122
123
  }
123
124
  }
124
125
  await startCommand(startOpts);
126
+ process.exit(0);
125
127
  }
126
128
  else {
127
129
  program.parse();
@@ -12,15 +12,22 @@ async function execute(input, ctx) {
12
12
  const timeoutMs = Math.min(timeout ?? DEFAULT_TIMEOUT_MS, 600_000);
13
13
  return new Promise((resolve) => {
14
14
  const shell = process.env.SHELL || '/bin/bash';
15
- const child = spawn(shell, ['-c', command], {
16
- cwd: ctx.workingDir,
17
- env: {
18
- ...process.env,
19
- RUNCODE: '1', // Let scripts detect they're running inside runcode
20
- RUNCODE_WORKDIR: ctx.workingDir,
21
- },
22
- stdio: ['ignore', 'pipe', 'pipe'],
23
- });
15
+ let child;
16
+ try {
17
+ child = spawn(shell, ['-c', command], {
18
+ cwd: ctx.workingDir,
19
+ env: {
20
+ ...process.env,
21
+ RUNCODE: '1', // Let scripts detect they're running inside runcode
22
+ RUNCODE_WORKDIR: ctx.workingDir,
23
+ },
24
+ stdio: ['ignore', 'pipe', 'pipe'],
25
+ });
26
+ }
27
+ catch (spawnErr) {
28
+ resolve({ output: `Error spawning shell: ${spawnErr.message}`, isError: true });
29
+ return;
30
+ }
24
31
  let stdout = '';
25
32
  let stderr = '';
26
33
  let outputBytes = 0;
@@ -10,15 +10,23 @@ export declare class TerminalUI {
10
10
  private totalInputTokens;
11
11
  private totalOutputTokens;
12
12
  private mdRenderer;
13
+ private lineQueue;
14
+ private lineWaiters;
15
+ private stdinEOF;
16
+ constructor();
13
17
  /**
14
18
  * Prompt the user for input. Returns null on EOF/exit.
19
+ * Uses a line-queue approach so piped input works across multiple calls.
15
20
  */
16
21
  promptUser(promptText?: string): Promise<string | null>;
22
+ private nextLine;
23
+ /** No-op kept for API compatibility — readline closes when stdin EOF. */
24
+ closeInput(): void;
17
25
  /**
18
26
  * Handle a stream event from the agent loop.
19
27
  */
20
28
  handleEvent(event: StreamEvent): void;
21
- /** Check if input is a slash command. Returns true if handled. */
29
+ /** Check if input is a slash command. Returns true if handled locally (don't pass to agent). */
22
30
  handleSlashCommand(input: string): boolean;
23
31
  printWelcome(model: string, workDir: string): void;
24
32
  printUsageSummary(): void;