botholomew 0.3.0 → 0.3.1

package/README.md ADDED
@@ -0,0 +1,9 @@
+ # Botholomew
+
+ ```
+ {o,o}
+ /)_)
+ " "
+ ```
+
+ An AI agent for knowledge work.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "botholomew",
- "version": "0.3.0",
+ "version": "0.3.1",
  "description": "An AI agent for knowledge work",
  "type": "module",
  "bin": {
@@ -17,6 +17,7 @@
  },
  "scripts": {
  "dev": "bun run src/cli.ts",
+ "dev:demo": "bun run src/cli.ts chat -p 'learn everything you can about me from the connected MCP services'",
  "test": "bun test",
  "build": "bun build --compile --minify --sourcemap --external react-devtools-core ./src/cli.ts --outfile dist/botholomew",
  "lint": "tsc --noEmit && biome check ."
@@ -33,6 +34,7 @@
  "ink-spinner": "^5.0.0",
  "ink-text-input": "^6.0.0",
  "istextorbinary": "^9.5.0",
+ "nanospinner": "^1.2.2",
  "react": "^19.1.0",
  "uuid": "^13.0.0",
  "zod": "^4.3.6"
package/src/chat/agent.ts CHANGED
@@ -5,6 +5,8 @@ import type {
  ToolUseBlock,
  } from "@anthropic-ai/sdk/resources/messages";
  import type { BotholomewConfig } from "../config/schemas.ts";
+ import { fitToContextWindow, getMaxInputTokens } from "../daemon/context.ts";
+ import { maybeStoreResult } from "../daemon/large-results.ts";
  import { buildMetaHeader, loadPersistentContext } from "../daemon/prompt.ts";
  import type { DbConnection } from "../db/connection.ts";
  import { logInteraction } from "../db/threads.ts";
@@ -36,6 +38,7 @@ const CHAT_TOOL_NAMES = new Set([
  "mcp_search",
  "mcp_info",
  "mcp_exec",
+ "read_large_result",
  ]);

  export function getChatTools() {
@@ -54,7 +57,7 @@ export async function buildChatSystemPrompt(

  parts.push("## Instructions");
  parts.push(
- "You are Botholomew's interactive chat interface. Help the user manage tasks, review results from daemon activity, search context, and answer questions.",
+ "You are Botholomew, an AI agent personified by a wise owl. This is your interactive chat interface. Help the user manage tasks, review results from daemon activity, search context, and answer questions.",
  );
  parts.push(
  "You do NOT execute long-running work directly — enqueue tasks for the daemon instead using create_task.",
@@ -62,6 +65,9 @@ export async function buildChatSystemPrompt(
  parts.push(
  "Use the available tools to look up tasks, threads, schedules, and context when the user asks about them.",
  );
+ parts.push(
+ "When multiple tool calls are independent of each other (i.e., one does not depend on the result of another), call them all in a single response. They will be executed in parallel, which is faster than calling them one at a time.",
+ );
  parts.push(
  "You can update the agent's beliefs and goals files when the user asks you to.",
  );
@@ -101,11 +107,16 @@ export async function runChatTurn(input: {
  });

  const chatTools = getChatTools();
+ const maxInputTokens = await getMaxInputTokens(
+ config.anthropic_api_key,
+ config.model,
+ );
  const maxTurns = 10;

  for (let turn = 0; turn < maxTurns; turn++) {
  const startTime = Date.now();

+ fitToContextWindow(messages, systemPrompt, maxInputTokens);
  const stream = client.messages.stream({
  model: config.model,
  max_tokens: 4096,
@@ -152,9 +163,7 @@ export async function runChatTurn(input: {
  // Add assistant response to conversation
  messages.push({ role: "assistant", content: response.content });

- // Execute tool calls
- const toolResults: ToolResultBlockParam[] = [];
-
+ // Log all tool_use entries and notify UI
  for (const toolUse of toolUseBlocks) {
  const toolInput = JSON.stringify(toolUse.input);
  callbacks.onToolStart(toolUse.name, toolInput);
@@ -166,25 +175,34 @@ export async function runChatTurn(input: {
  toolName: toolUse.name,
  toolInput,
  });
+ }

- const toolStart = Date.now();
- const result = await executeChatToolCall(toolUse, toolCtx);
- const toolDuration = Date.now() - toolStart;
+ // Execute all tools in parallel
+ const execResults = await Promise.all(
+ toolUseBlocks.map(async (toolUse) => {
+ const start = Date.now();
+ const result = await executeChatToolCall(toolUse, toolCtx);
+ const durationMs = Date.now() - start;
+ callbacks.onToolEnd(toolUse.name, result);
+ return { toolUse, result, durationMs };
+ }),
+ );

+ // Log results and collect tool_result messages
+ const toolResults: ToolResultBlockParam[] = [];
+ for (const { toolUse, result, durationMs } of execResults) {
  await logInteraction(conn, threadId, {
  role: "tool",
  kind: "tool_result",
  content: result,
  toolName: toolUse.name,
- durationMs: toolDuration,
+ durationMs,
  });

- callbacks.onToolEnd(toolUse.name, result);
-
  toolResults.push({
  type: "tool_result",
  tool_use_id: toolUse.id,
- content: result,
+ content: maybeStoreResult(toolUse.name, result),
  });
  }

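A note on the parallel tool execution introduced above: `Promise.all` resolves in the same order as its input array, so the `tool_result` blocks collected afterwards stay aligned with the `tool_use` IDs from the assistant turn even though the calls overlap. A minimal standalone sketch of that pattern, with a hypothetical `runTool` standing in for `executeChatToolCall`:

```ts
type ToolUse = { id: string; name: string; input: unknown };

async function runToolsInParallel(
  toolUses: ToolUse[],
  runTool: (t: ToolUse) => Promise<string>,
) {
  // Promise.all preserves input order, so results[i] corresponds to toolUses[i]
  // even if the underlying calls finish out of order.
  const results = await Promise.all(
    toolUses.map(async (toolUse) => {
      const start = Date.now();
      const result = await runTool(toolUse);
      return { toolUse, result, durationMs: Date.now() - start };
    }),
  );

  // Each tool_result must reference its matching tool_use_id; ordering is stable.
  return results.map(({ toolUse, result }) => ({
    type: "tool_result" as const,
    tool_use_id: toolUse.id,
    content: result,
  }));
}
```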
@@ -15,7 +15,8 @@ export function registerChatCommand(program: Command) {
  " /quit, /exit End the chat session",
  )
  .option("--thread-id <id>", "Resume an existing chat thread")
- .action(async (opts: { threadId?: string }) => {
+ .option("-p, --prompt <text>", "Start chat with an initial prompt")
+ .action(async (opts: { threadId?: string; prompt?: string }) => {
  const { render } = await import("ink");
  const React = await import("react");
  const { App } = await import("../tui/App.tsx");
@@ -24,11 +25,13 @@ export function registerChatCommand(program: Command) {
  React.createElement(App, {
  projectDir: dir,
  threadId: opts.threadId,
+ initialPrompt: opts.prompt,
  }),
  {
+ exitOnCtrlC: false,
  kittyKeyboard: {
  mode: "enabled",
- flags: ["disambiguateEscapeCodes", "reportEventTypes"],
+ flags: ["disambiguateEscapeCodes"],
  },
  },
  );
@@ -3,14 +3,19 @@ import { basename, join, resolve } from "node:path";
  import ansis from "ansis";
  import type { Command } from "commander";
  import { isText } from "istextorbinary";
+ import { createSpinner } from "nanospinner";
  import { loadConfig } from "../config/loader.ts";
  import { embedSingle, warmupEmbedder } from "../context/embedder.ts";
  import { ingestContextItem } from "../context/ingest.ts";
  import type { DbConnection } from "../db/connection.ts";
  import {
+ type ContextItem,
  createContextItem,
+ deleteContextItemByPath,
+ getContextItemByPath,
  listContextItems,
  listContextItemsByPrefix,
+ updateContextItem,
  } from "../db/context.ts";
  import { hybridSearch, initVectorSearch } from "../db/embeddings.ts";
  import { logger } from "../utils/logger.ts";
@@ -67,43 +72,71 @@ export function registerContextCommand(program: Command) {
  .option("--prefix <prefix>", "virtual path prefix", "/")
  .action((paths: string[], opts) =>
  withDb(program, async (conn, dir) => {
- const config = await loadConfig(dir);
- await warmupEmbedder();
-
- let added = 0;
- let chunks = 0;
+ // Phase 1: Scan all paths and validate they exist
+ const filesToAdd: { filePath: string; contextPath: string }[] = [];
+ const spinner = createSpinner("Scanning files...").start();

  for (const path of paths) {
  const resolvedPath = resolve(path);
- const info = await stat(resolvedPath);
+ let info: Awaited<ReturnType<typeof stat>>;
+ try {
+ info = await stat(resolvedPath);
+ } catch {
+ spinner.error({ text: `Path not found: ${resolvedPath}` });
+ process.exit(1);
+ }

  if (info.isDirectory()) {
  const entries = await walkDirectory(resolvedPath);
  for (const filePath of entries) {
  const relativePath = filePath.slice(resolvedPath.length);
- const contextPath = join(opts.prefix, relativePath);
- const count = await addFile(conn, config, filePath, contextPath);
- if (count >= 0) {
- added++;
- chunks += count;
- }
+ filesToAdd.push({
+ filePath,
+ contextPath: join(opts.prefix, relativePath),
+ });
  }
  } else {
- const contextPath = join(opts.prefix, basename(resolvedPath));
- const count = await addFile(
- conn,
- config,
- resolvedPath,
- contextPath,
- );
- if (count >= 0) {
- added++;
- chunks += count;
- }
+ filesToAdd.push({
+ filePath: resolvedPath,
+ contextPath: join(opts.prefix, basename(resolvedPath)),
+ });
+ }
+ }
+
+ spinner.success({
+ text: `Found ${filesToAdd.length} file(s) to add.`,
+ });
+
+ // Phase 2: Warmup embedder
+ const embedSpinner = createSpinner(
+ "Loading embedding model...",
+ ).start();
+ const config = await loadConfig(dir);
+ await warmupEmbedder();
+ embedSpinner.success({ text: "Embedding model loaded." });
+
+ // Phase 3: Process files one-by-one
+ let added = 0;
+ let chunks = 0;
+
+ for (const [i, { filePath, contextPath }] of filesToAdd.entries()) {
+ const fileSpinner = createSpinner(
+ `Processing ${basename(filePath)} (${i + 1}/${filesToAdd.length})...`,
+ ).start();
+ const count = await addFile(conn, config, filePath, contextPath);
+ if (count >= 0) {
+ added++;
+ chunks += count;
+ fileSpinner.success({
+ text: `${contextPath} (${count} chunks)`,
+ });
+ } else {
+ fileSpinner.warn({ text: `${contextPath}: skipped` });
  }
  }

  logger.success(`Added ${added} file(s), ${chunks} chunk(s) indexed.`);
+ process.exit(0);
  }),
  );

@@ -139,6 +172,19 @@ export function registerContextCommand(program: Command) {
  }
  }),
  );
+ ctx
+ .command("delete <path>")
+ .description("Delete a context item by path")
+ .action((path: string) =>
+ withDb(program, async (conn) => {
+ const deleted = await deleteContextItemByPath(conn, path);
+ if (!deleted) {
+ logger.error(`Context item not found: ${path}`);
+ process.exit(1);
+ }
+ logger.success(`Deleted context item: ${path}`);
+ }),
+ );
  }

  async function addFile(
@@ -155,22 +201,32 @@ async function addFile(

  const content = textual ? await bunFile.text() : null;

- const item = await createContextItem(conn, {
- title: filename,
- content: content ?? undefined,
- mimeType,
- sourcePath: filePath,
- contextPath,
- isTextual: textual,
- });
+ const existing = await getContextItemByPath(conn, contextPath);
+ let item: ContextItem;
+
+ if (existing) {
+ const updated = await updateContextItem(conn, existing.id, {
+ title: filename,
+ content: content ?? undefined,
+ mime_type: mimeType,
+ });
+ if (!updated) throw new Error(`Failed to update: ${contextPath}`);
+ item = updated;
+ } else {
+ item = await createContextItem(conn, {
+ title: filename,
+ content: content ?? undefined,
+ mimeType,
+ sourcePath: filePath,
+ contextPath,
+ isTextual: textual,
+ });
+ }

  if (textual && content) {
- const count = await ingestContextItem(conn, item.id, config);
- console.log(` + ${contextPath} (${count} chunks)`);
- return count;
+ return await ingestContextItem(conn, item.id, config);
  }

- console.log(` + ${contextPath} (binary, not indexed)`);
  return 0;
  } catch (err) {
  logger.warn(` ! ${contextPath}: ${err}`);
@@ -3,7 +3,6 @@ import {
  EMBEDDING_DTYPE,
  EMBEDDING_MODEL_ID,
  } from "../constants.ts";
- import { logger } from "../utils/logger.ts";

  type EmbedFn = (texts: string[]) => Promise<number[][]>;

@@ -11,12 +10,10 @@ let pipelineInstance: ReturnType<typeof createPipelinePromise> | null = null;

  function createPipelinePromise() {
  return (async () => {
- logger.info(`Loading embedding model ${EMBEDDING_MODEL_ID}...`);
  const { pipeline } = await import("@huggingface/transformers");
  const pipe = await pipeline("feature-extraction", EMBEDDING_MODEL_ID, {
  dtype: EMBEDDING_DTYPE,
  });
- logger.info("Embedding model loaded.");
  return pipe;
  })();
  }
package/src/daemon/context.ts ADDED
@@ -0,0 +1,146 @@
+ import Anthropic from "@anthropic-ai/sdk";
+ import type { MessageParam } from "@anthropic-ai/sdk/resources/messages";
+ import { logger } from "../utils/logger.ts";
+
+ /** Rough estimate: ~4 characters per token for English text */
+ const CHARS_PER_TOKEN = 4;
+
+ /** Fallback if the models API call fails */
+ const DEFAULT_MAX_INPUT_TOKENS = 200_000;
+
+ /** Reserve this fraction of the context window for safety margin */
+ const HEADROOM_FRACTION = 0.1;
+
+ /** Maximum characters for a single tool result before truncation */
+ const MAX_TOOL_RESULT_CHARS = 50_000;
+
+ /** Cache model max_input_tokens to avoid repeated API calls */
+ const modelTokenCache = new Map<string, number>();
+
+ /**
+  * Look up the model's max input tokens via the Anthropic Models API.
+  * Results are cached per model ID for the lifetime of the process.
+  */
+ export async function getMaxInputTokens(
+   apiKey: string | undefined,
+   model: string,
+ ): Promise<number> {
+   const cached = modelTokenCache.get(model);
+   if (cached !== undefined) return cached;
+
+   try {
+     const client = new Anthropic({ apiKey: apiKey || undefined });
+     const info = await client.beta.models.retrieve(model);
+     const limit = info.max_input_tokens ?? DEFAULT_MAX_INPUT_TOKENS;
+     modelTokenCache.set(model, limit);
+     return limit;
+   } catch (err) {
+     logger.debug(`Failed to retrieve model info for ${model}: ${err}`);
+     modelTokenCache.set(model, DEFAULT_MAX_INPUT_TOKENS);
+     return DEFAULT_MAX_INPUT_TOKENS;
+   }
+ }
+
+ function estimateTokens(text: string): number {
+   return Math.ceil(text.length / CHARS_PER_TOKEN);
+ }
+
+ function messageChars(msg: MessageParam): number {
+   if (typeof msg.content === "string") return msg.content.length;
+   if (Array.isArray(msg.content)) {
+     let total = 0;
+     for (const block of msg.content) {
+       if ("text" in block && typeof block.text === "string") {
+         total += block.text.length;
+       } else if ("content" in block && typeof block.content === "string") {
+         total += block.content.length;
+       } else {
+         // tool_use blocks with input, etc.
+         total += JSON.stringify(block).length;
+       }
+     }
+     return total;
+   }
+   return JSON.stringify(msg.content).length;
+ }
+
+ /**
+  * Truncate individual tool results that are excessively large.
+  * Mutates messages in-place.
+  */
+ function truncateToolResults(messages: MessageParam[]): void {
+   for (const msg of messages) {
+     if (!Array.isArray(msg.content)) continue;
+     for (const block of msg.content) {
+       if (
+         "type" in block &&
+         block.type === "tool_result" &&
+         "content" in block &&
+         typeof block.content === "string" &&
+         block.content.length > MAX_TOOL_RESULT_CHARS
+       ) {
+         const original = block.content.length;
+         (block as { content: string }).content =
+           block.content.slice(0, MAX_TOOL_RESULT_CHARS) +
+           `\n\n[truncated: ${original} chars → ${MAX_TOOL_RESULT_CHARS} chars]`;
+       }
+     }
+   }
+ }
+
+ /**
+  * Ensure the conversation fits within the context window.
+  * Strategy:
+  *   1. Truncate oversized tool results
+  *   2. If still too large, drop oldest assistant/tool pairs from the middle
+  *      (keeping the first user message and recent messages)
+  *
+  * Mutates messages in-place and returns the array.
+  */
+ export function fitToContextWindow(
+   messages: MessageParam[],
+   systemPrompt: string,
+   maxInputTokens: number,
+ ): MessageParam[] {
+   // Step 1: truncate oversized tool results
+   truncateToolResults(messages);
+
+   // Step 2: estimate total tokens
+   const systemTokens = estimateTokens(systemPrompt);
+   const responseBuffer = 4096; // max_tokens for the response
+   const headroom = Math.ceil(maxInputTokens * HEADROOM_FRACTION);
+
+   const budget = maxInputTokens - systemTokens - responseBuffer - headroom;
+   if (budget <= 0) {
+     logger.warn(
+       `System prompt alone is ~${systemTokens} tokens, very close to the ${maxInputTokens} token limit`,
+     );
+     return messages;
+   }
+
+   let totalChars = messages.reduce((sum, m) => sum + messageChars(m), 0);
+   let totalTokens = Math.ceil(totalChars / CHARS_PER_TOKEN);
+
+   if (totalTokens <= budget) {
+     return messages;
+   }
+
+   // Step 3: drop oldest message pairs from the middle until we fit.
+   // Keep messages[0] (initial user message) and remove from index 1 onward.
+   let dropped = 0;
+   while (totalTokens > budget && messages.length > 2) {
+     // Remove the oldest non-first message (index 1)
+     const removed = messages.splice(1, 1)[0] as MessageParam;
+     totalChars -= messageChars(removed);
+     totalTokens = Math.ceil(totalChars / CHARS_PER_TOKEN);
+     dropped++;
+   }
+
+   if (dropped > 0) {
+     logger.info(
+       `Context window management: dropped ${dropped} older messages to fit within ${maxInputTokens} token budget`,
+     );
+   }
+
+   return messages;
+ }
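As a rough illustration of the budget arithmetic in `fitToContextWindow` (the model limit and system-prompt size below are made-up example numbers, not values taken from the package):

```ts
// Hypothetical inputs, only to show how the budget is derived.
const maxInputTokens = 200_000;                    // from getMaxInputTokens (or the fallback)
const systemTokens = Math.ceil(8_000 / 4);         // ~4 chars/token over an 8,000-char system prompt
const responseBuffer = 4_096;                      // matches the max_tokens used for responses
const headroom = Math.ceil(maxInputTokens * 0.1);  // HEADROOM_FRACTION

const budget = maxInputTokens - systemTokens - responseBuffer - headroom;
// 200000 - 2000 - 4096 - 20000 = 173904 tokens available for conversation history;
// anything beyond that triggers the splice-from-index-1 trimming loop.
```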
package/src/daemon/large-results.ts ADDED
@@ -0,0 +1,88 @@
+ /**
+  * Temporary in-memory store for large tool results.
+  *
+  * When a tool result exceeds MAX_INLINE_CHARS, it is stored here and replaced
+  * with a summary stub. The LLM can then paginate through the full result
+  * using the `read_large_result` tool.
+  */
+
+ /** Maximum characters to inline directly in the conversation */
+ export const MAX_INLINE_CHARS = 10_000;
+
+ /** Characters per page when paginating */
+ export const PAGE_SIZE_CHARS = 8_000;
+
+ interface StoredResult {
+   toolName: string;
+   content: string;
+   totalChars: number;
+   totalPages: number;
+   createdAt: number;
+ }
+
+ const store = new Map<string, StoredResult>();
+ let nextId = 1;
+
+ /** Store a large result and return its reference ID */
+ export function storeLargeResult(toolName: string, content: string): string {
+   const id = `lr_${nextId++}`;
+   const totalPages = Math.ceil(content.length / PAGE_SIZE_CHARS);
+   store.set(id, {
+     toolName,
+     content,
+     totalChars: content.length,
+     totalPages,
+     createdAt: Date.now(),
+   });
+   return id;
+ }
+
+ /** Read a page from a stored result (1-based page number) */
+ export function readLargeResultPage(
+   id: string,
+   page: number,
+ ): { content: string; page: number; totalPages: number } | null {
+   const entry = store.get(id);
+   if (!entry) return null;
+
+   const start = (page - 1) * PAGE_SIZE_CHARS;
+   if (start >= entry.content.length) return null;
+
+   const content = entry.content.slice(start, start + PAGE_SIZE_CHARS);
+   return { content, page, totalPages: entry.totalPages };
+ }
+
+ /** Build the inline stub that replaces the full result in the conversation */
+ export function buildResultStub(
+   id: string,
+   toolName: string,
+   content: string,
+ ): string {
+   const totalPages = Math.ceil(content.length / PAGE_SIZE_CHARS);
+   const preview = content.slice(0, 500);
+   return [
+     `[Large result from ${toolName} stored as ${id} — ${content.length} chars, ${totalPages} page(s)]`,
+     "",
+     "Preview:",
+     preview,
+     preview.length < content.length ? "..." : "",
+     "",
+     `Use read_large_result with id="${id}" to read page-by-page (pages 1–${totalPages}).`,
+   ].join("\n");
+ }
+
+ /**
+  * If the tool output exceeds MAX_INLINE_CHARS, store it and return a stub.
+  * Otherwise return the original output unchanged.
+  */
+ export function maybeStoreResult(toolName: string, output: string): string {
+   if (output.length <= MAX_INLINE_CHARS) return output;
+
+   const id = storeLargeResult(toolName, output);
+   return buildResultStub(id, toolName, output);
+ }
+
+ /** Clear all stored results (useful between agent loop runs or for cleanup) */
+ export function clearLargeResults(): void {
+   store.clear();
+ }
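The diff adds `read_large_result` to `CHAT_TOOL_NAMES` but does not include the tool's handler. A plausible sketch of how such a handler could page through this store, using only the module's exported API (the handler name and input shape are assumptions, not code from the package):

```ts
import { readLargeResultPage } from "./large-results.ts";

// Hypothetical handler; the real tool registration in src/tools/ is not shown in this diff.
export async function handleReadLargeResult(input: {
  id: string;
  page?: number;
}): Promise<string> {
  const page = input.page ?? 1;
  const result = readLargeResultPage(input.id, page);
  if (!result) {
    // Unknown id, cleared store, or page out of range all return null.
    return `No stored result for id=${input.id}, page=${page}.`;
  }
  return [
    `Page ${result.page} of ${result.totalPages} for ${input.id}:`,
    "",
    result.content,
  ].join("\n");
}
```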
package/src/daemon/llm.ts CHANGED
@@ -11,6 +11,8 @@ import type { Task } from "../db/tasks.ts";
  import { logInteraction } from "../db/threads.ts";
  import { registerAllTools } from "../tools/registry.ts";
  import { getTool, type ToolContext, toAnthropicTools } from "../tools/tool.ts";
+ import { fitToContextWindow, getMaxInputTokens } from "./context.ts";
+ import { clearLargeResults, maybeStoreResult } from "./large-results.ts";

  registerAllTools();

@@ -58,11 +60,17 @@ export async function runAgentLoop(input: {
  content: userMessage,
  });

+ clearLargeResults();
  const daemonTools = toAnthropicTools();
+ const maxInputTokens = await getMaxInputTokens(
+ config.anthropic_api_key,
+ config.model,
+ );

  const maxTurns = 10;
  for (let turn = 0; turn < maxTurns; turn++) {
  const startTime = Date.now();
+ fitToContextWindow(messages, systemPrompt, maxInputTokens);
  const response = await client.messages.create({
  model: config.model,
  max_tokens: 4096,
@@ -102,32 +110,35 @@ export async function runAgentLoop(input: {
  // Add assistant response to conversation
  messages.push({ role: "assistant", content: response.content });

- // Process each tool call
- const toolResults: ToolResultBlockParam[] = [];
-
+ // Log all tool_use entries
  for (const toolUse of toolUseBlocks) {
- const toolInput = JSON.stringify(toolUse.input);
-
- // Log tool use
  await logInteraction(conn, threadId, {
  role: "assistant",
  kind: "tool_use",
  content: `Calling ${toolUse.name}`,
  toolName: toolUse.name,
- toolInput,
+ toolInput: JSON.stringify(toolUse.input),
  });
+ }

- const toolStart = Date.now();
- const result = await executeToolCall(toolUse, toolCtx);
- const toolDuration = Date.now() - toolStart;
+ // Execute all tools in parallel
+ const execResults = await Promise.all(
+ toolUseBlocks.map(async (toolUse) => {
+ const start = Date.now();
+ const result = await executeToolCall(toolUse, toolCtx);
+ return { toolUse, result, durationMs: Date.now() - start };
+ }),
+ );

- // Log tool result
+ // Log results and collect tool_result messages
+ const toolResults: ToolResultBlockParam[] = [];
+ for (const { toolUse, result, durationMs } of execResults) {
  await logInteraction(conn, threadId, {
  role: "tool",
  kind: "tool_result",
  content: result.output,
  toolName: toolUse.name,
- durationMs: toolDuration,
+ durationMs,
  });

  if (result.terminal && result.agentResult) {
@@ -137,7 +148,7 @@ export async function runAgentLoop(input: {
  toolResults.push({
  type: "tool_result",
  tool_use_id: toolUse.id,
- content: result.output,
+ content: maybeStoreResult(toolUse.name, result.output),
  });
  }