@townco/agent 0.1.19 → 0.1.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/acp-server/adapter.js +77 -73
- package/dist/acp-server/http.js +412 -173
- package/dist/definition/index.d.ts +3 -0
- package/dist/definition/index.js +11 -1
- package/dist/dev-agent/index.d.ts +2 -0
- package/dist/dev-agent/index.js +18 -0
- package/dist/index.js +14 -14
- package/dist/runner/agent-runner.d.ts +20 -3
- package/dist/runner/agent-runner.js +4 -4
- package/dist/runner/langchain/index.d.ts +6 -5
- package/dist/runner/langchain/index.js +58 -17
- package/dist/runner/langchain/tools/filesystem.d.ts +66 -0
- package/dist/runner/langchain/tools/filesystem.js +261 -0
- package/dist/runner/tools.d.ts +5 -2
- package/dist/runner/tools.js +11 -1
- package/dist/templates/index.d.ts +3 -0
- package/dist/templates/index.js +11 -2
- package/dist/test-script.js +12 -12
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/dist/utils/index.d.ts +1 -0
- package/dist/utils/index.js +1 -0
- package/dist/utils/logger.d.ts +39 -0
- package/dist/utils/logger.js +175 -0
- package/index.ts +7 -6
- package/package.json +10 -5
- package/templates/index.ts +14 -3
package/dist/dev-agent/index.js
ADDED

@@ -0,0 +1,18 @@
+#!/usr/bin/env bun
+import { readFileSync } from "node:fs";
+import { join } from "node:path";
+import { makeHttpTransport, makeStdioTransport } from "../acp-server/index";
+// Load agent definition from JSON file
+const configPath = join(import.meta.dir, "agent.json");
+const agent = JSON.parse(readFileSync(configPath, "utf-8"));
+const transport = process.argv[2] || "stdio";
+if (transport === "http") {
+    makeHttpTransport(agent);
+}
+else if (transport === "stdio") {
+    makeStdioTransport(agent);
+}
+else {
+    console.error(`Invalid transport: ${transport}`);
+    process.exit(1);
+}
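For context, the dev-agent entry point above expects an agent.json definition next to the compiled script. A minimal sketch of such a definition, written as a TypeScript literal that mirrors the zAgentRunnerParams shape shown later in this diff (the model id and working directory are illustrative placeholders, not values shipped by the package):

// Sketch only: mirrors what agent.json would contain, per zAgentRunnerParams.
const exampleDefinition = {
  systemPrompt: "You are a helpful assistant.",
  model: "<model-id>", // placeholder; supply a real model identifier
  tools: [
    "todo_write",
    "web_search",
    // New in this release: sandboxed filesystem tools, optionally scoped to a directory.
    { type: "filesystem", working_directory: "/tmp/agent-workdir" }, // placeholder path
  ],
  mcps: [],
};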
package/dist/index.js
CHANGED

@@ -1,19 +1,19 @@
+import { readFileSync } from "node:fs";
+import { join } from "node:path";
 import { makeHttpTransport, makeStdioTransport } from "./acp-server";
-
-const
-
-    systemPrompt: "You are a helpful assistant.",
-    tools: ["todo_write", "get_weather", "web_search"],
-    mcps: [],
-};
+// Load agent definition from shared JSON file at repo root
+const configPath = join(import.meta.dir, "../../agent.json");
+const exampleAgent = JSON.parse(readFileSync(configPath, "utf-8"));
 // Parse transport type from command line argument
 const transport = process.argv[2] || "stdio";
 if (transport === "http") {
-
-}
-
-
-
-
-
+    makeHttpTransport(exampleAgent);
+}
+else if (transport === "stdio") {
+    makeStdioTransport(exampleAgent);
+}
+else {
+    console.error(`Invalid transport: ${transport}`);
+    console.error("Usage: bun run index.ts [stdio|http]");
+    process.exit(1);
 }
package/dist/runner/agent-runner.d.ts
CHANGED

@@ -3,9 +3,12 @@ import { z } from "zod";
 export declare const zAgentRunnerParams: z.ZodObject<{
     systemPrompt: z.ZodNullable<z.ZodString>;
     model: z.ZodString;
-    tools: z.ZodOptional<z.ZodArray<z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"todo_write">, z.ZodLiteral<"get_weather">, z.ZodLiteral<"web_search">]>, z.ZodObject<{
+    tools: z.ZodOptional<z.ZodArray<z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"todo_write">, z.ZodLiteral<"get_weather">, z.ZodLiteral<"web_search">, z.ZodLiteral<"filesystem">]>, z.ZodObject<{
         type: z.ZodLiteral<"custom">;
         modulePath: z.ZodString;
+    }, z.core.$strip>, z.ZodObject<{
+        type: z.ZodLiteral<"filesystem">;
+        working_directory: z.ZodOptional<z.ZodString>;
     }, z.core.$strip>]>>>;
     mcps: z.ZodOptional<z.ZodArray<z.ZodUnion<readonly [z.ZodObject<{
         name: z.ZodString;

@@ -20,9 +23,23 @@ export declare const zAgentRunnerParams: z.ZodObject<{
     }, z.core.$strip>]>>>;
 }, z.core.$strip>;
 export type CreateAgentRunnerParams = z.infer<typeof zAgentRunnerParams>;
-export type InvokeRequest = Omit<PromptRequest, "_meta"
+export type InvokeRequest = Omit<PromptRequest, "_meta"> & {
+    messageId: string;
+};
+export type ExtendedSessionUpdate = SessionNotification["update"] | {
+    sessionUpdate: "tool_output";
+    toolCallId: string;
+    content?: Array<{
+        type: string;
+        [key: string]: unknown;
+    }>;
+    rawOutput?: Record<string, unknown>;
+    _meta?: {
+        messageId?: string;
+    };
+};
 /** Describes an object that can run an agent definition */
 export interface AgentRunner {
     definition: CreateAgentRunnerParams;
-    invoke(req: InvokeRequest): AsyncGenerator<
+    invoke(req: InvokeRequest): AsyncGenerator<ExtendedSessionUpdate, PromptResponse, undefined>;
 }
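The ExtendedSessionUpdate union above adds a package-specific "tool_output" variant next to the standard ACP session updates, and invoke() now yields it together with an optional _meta.messageId. A rough sketch of how a caller might drain the generator, assuming the types are imported from the package's runner/agent-runner module and PromptResponse from @agentclientprotocol/sdk (the import specifiers are illustrative, not documented entry points):

import type { PromptResponse } from "@agentclientprotocol/sdk";
// Assumed relative path, matching the dist/ layout shown in this diff:
import type { AgentRunner, InvokeRequest } from "./runner/agent-runner";

async function drain(runner: AgentRunner, req: InvokeRequest): Promise<PromptResponse> {
  const updates = runner.invoke(req);
  while (true) {
    const step = await updates.next();
    if (step.done) return step.value; // final PromptResponse
    if (step.value.sessionUpdate === "tool_output") {
      // New variant: raw tool output, correlated via toolCallId and _meta.messageId
      console.log("tool output", step.value.toolCallId, step.value._meta?.messageId);
    } else {
      // Standard ACP update (tool_call, tool_call_update, message chunks, ...)
      console.log("session update", step.value.sessionUpdate);
    }
  }
}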
package/dist/runner/agent-runner.js
CHANGED

@@ -2,8 +2,8 @@ import { z } from "zod";
 import { McpConfigSchema } from "../definition";
 import { zToolType } from "./tools";
 export const zAgentRunnerParams = z.object({
-
-
-
-
+    systemPrompt: z.string().nullable(),
+    model: z.string(),
+    tools: z.array(zToolType).optional(),
+    mcps: z.array(McpConfigSchema).optional(),
 });
package/dist/runner/langchain/index.d.ts
CHANGED

@@ -1,15 +1,16 @@
-import type { PromptResponse
+import type { PromptResponse } from "@agentclientprotocol/sdk";
 import { type DynamicStructuredTool, type Tool } from "langchain";
-import type { AgentRunner, CreateAgentRunnerParams, InvokeRequest } from "../agent-runner";
-import type { BuiltInToolType } from "../tools";
+import type { AgentRunner, CreateAgentRunnerParams, ExtendedSessionUpdate, InvokeRequest } from "../agent-runner";
+import type { BuiltInToolType } from "../tools.js";
 type LangchainTool = DynamicStructuredTool | Tool;
 /** Lazily-loaded langchain tools */
 type LazyLangchainTool = MakeLazy<LangchainTool>;
+type LazyLangchainTools = () => readonly LangchainTool[];
 type MakeLazy<T> = T extends LangchainTool ? () => T : never;
-export declare const TOOL_REGISTRY: Record<BuiltInToolType, LangchainTool | LazyLangchainTool>;
+export declare const TOOL_REGISTRY: Record<BuiltInToolType, LangchainTool | LazyLangchainTool | LazyLangchainTools>;
 export declare class LangchainAgent implements AgentRunner {
     definition: CreateAgentRunnerParams;
     constructor(params: CreateAgentRunnerParams);
-    invoke(req: InvokeRequest): AsyncGenerator<
+    invoke(req: InvokeRequest): AsyncGenerator<ExtendedSessionUpdate, PromptResponse, undefined>;
 }
 export {};
package/dist/runner/langchain/index.js
CHANGED

@@ -1,9 +1,12 @@
 import { MultiServerMCPClient } from "@langchain/mcp-adapters";
 import { AIMessageChunk, createAgent, ToolMessage, tool, } from "langchain";
 import { z } from "zod";
-import {
+import { createLogger } from "../../utils/logger.js";
+import { loadCustomToolModule, } from "../tool-loader.js";
+import { makeFilesystemTools } from "./tools/filesystem.js";
 import { todoItemSchema, todoWrite } from "./tools/todo";
 import { makeWebSearchTool } from "./tools/web_search";
+const logger = createLogger("agent-runner");
 const getWeather = tool(({ city }) => `It's always sunny in ${city}!`, {
     name: "get_weather",
     description: "Get the weather for a given city",

@@ -15,6 +18,7 @@ export const TOOL_REGISTRY = {
     todo_write: todoWrite,
     get_weather: getWeather,
     web_search: makeWebSearchTool,
+    filesystem: () => makeFilesystemTools(process.cwd()),
 };
 // ============================================================================
 // Custom tool loading

@@ -47,6 +51,7 @@ export class LangchainAgent {
         const todoWriteToolCallIds = new Set();
         // --------------------------------------------------------------------------
         // Resolve tools: built-ins (string) + custom ({ type: "custom", modulePath })
+        // + filesystem ({ type: "filesystem", working_directory? })
         // --------------------------------------------------------------------------
         const enabledTools = [];
         const toolDefs = this.definition.tools ?? [];

@@ -56,13 +61,18 @@ export class LangchainAgent {
             if (typeof t === "string") {
                 builtInNames.push(t);
             }
-            else if (t &&
-
-
-
-
-
-
+            else if (t && typeof t === "object" && "type" in t) {
+                const type = t.type;
+                if (type === "custom" &&
+                    "modulePath" in t &&
+                    typeof t.modulePath === "string") {
+                    customToolPaths.push(t.modulePath);
+                }
+                else if (type === "filesystem") {
+                    const wd = t.working_directory ??
+                        process.cwd();
+                    enabledTools.push(...makeFilesystemTools(wd));
+                }
             }
         }
         // Built-in tools from registry

@@ -71,7 +81,18 @@ export class LangchainAgent {
             if (!entry) {
                 throw new Error(`Unknown built-in tool "${name}"`);
             }
-
+            if (typeof entry === "function") {
+                const result = entry();
+                if (Array.isArray(result)) {
+                    enabledTools.push(...result);
+                }
+                else {
+                    enabledTools.push(result);
+                }
+            }
+            else {
+                enabledTools.push(entry);
+            }
         }
         // Custom tools loaded from modulePaths
         if (customToolPaths.length > 0) {

@@ -90,14 +111,13 @@ export class LangchainAgent {
             agentConfig.systemPrompt = this.definition.systemPrompt;
         }
         const agent = createAgent(agentConfig);
-        const
-
-
-
-
-
-
-        }, {
+        const messages = req.prompt
+            .filter((promptMsg) => promptMsg.type === "text")
+            .map((promptMsg) => ({
+            type: "human",
+            content: promptMsg.text,
+        }));
+        const stream = agent.stream({ messages }, {
             streamMode: ["updates", "messages"],
         });
         for await (const [streamMode, chunk] of await stream) {

@@ -114,6 +134,15 @@ export class LangchainAgent {
                 throw new Error(`Unhandled updates message chunk types: ${JSON.stringify(updatesMessages)}`);
             }
             for (const msg of updatesMessages) {
+                // Extract token usage metadata if available
+                const tokenUsage = msg.usage_metadata
+                    ? {
+                        inputTokens: msg.usage_metadata.input_tokens,
+                        outputTokens: msg.usage_metadata.output_tokens,
+                        totalTokens: msg.usage_metadata.total_tokens,
+                    }
+                    : undefined;
+                logger.debug("Token usage:", tokenUsage);
                 for (const toolCall of msg.tool_calls ?? []) {
                     if (toolCall.id == null) {
                         throw new Error(`Tool call is missing id: ${JSON.stringify(toolCall)}`);

@@ -149,11 +178,15 @@ export class LangchainAgent {
                         kind: "other",
                         status: "pending",
                         rawInput: toolCall.args,
+                        ...(tokenUsage ? { tokenUsage } : {}),
+                        _meta: { messageId: req.messageId },
                     };
                     yield {
                         sessionUpdate: "tool_call_update",
                         toolCallId: toolCall.id,
                         status: "in_progress",
+                        ...(tokenUsage ? { tokenUsage } : {}),
+                        _meta: { messageId: req.messageId },
                     };
                 }
             }

@@ -202,10 +235,17 @@ export class LangchainAgent {
                    // Skip tool_call_update for todo_write tools
                    continue;
                }
+                // Send status update (metadata only, no content)
                yield {
                    sessionUpdate: "tool_call_update",
                    toolCallId: aiMessage.tool_call_id,
                    status: "completed",
+                    _meta: { messageId: req.messageId },
+                };
+                // Send tool output separately (via direct SSE, bypassing PostgreSQL NOTIFY)
+                yield {
+                    sessionUpdate: "tool_output",
+                    toolCallId: aiMessage.tool_call_id,
                    content: [
                        {
                            type: "content",

@@ -216,6 +256,7 @@ export class LangchainAgent {
                        },
                    ],
                    rawOutput: { content: aiMessage.content },
+                    _meta: { messageId: req.messageId },
                };
            }
            else {
package/dist/runner/langchain/tools/filesystem.d.ts
ADDED

@@ -0,0 +1,66 @@
+import { z } from "zod";
+export declare function makeFilesystemTools(workingDirectory: string): readonly [import("langchain").DynamicStructuredTool<z.ZodObject<{
+    pattern: z.ZodString;
+    path: z.ZodOptional<z.ZodString>;
+    glob: z.ZodOptional<z.ZodString>;
+    output_mode: z.ZodOptional<z.ZodEnum<{
+        content: "content";
+        files_with_matches: "files_with_matches";
+        count: "count";
+    }>>;
+    "-B": z.ZodOptional<z.ZodNumber>;
+    "-A": z.ZodOptional<z.ZodNumber>;
+    "-C": z.ZodOptional<z.ZodNumber>;
+    "-n": z.ZodOptional<z.ZodBoolean>;
+    "-i": z.ZodOptional<z.ZodBoolean>;
+    type: z.ZodOptional<z.ZodString>;
+    head_limit: z.ZodOptional<z.ZodNumber>;
+    multiline: z.ZodOptional<z.ZodBoolean>;
+}, z.core.$strip>, {
+    pattern: string;
+    path?: string | undefined;
+    glob?: string | undefined;
+    output_mode?: "content" | "files_with_matches" | "count" | undefined;
+    "-B"?: number | undefined;
+    "-A"?: number | undefined;
+    "-C"?: number | undefined;
+    "-n"?: boolean | undefined;
+    "-i"?: boolean | undefined;
+    type?: string | undefined;
+    head_limit?: number | undefined;
+    multiline?: boolean | undefined;
+}, {
+    pattern: string;
+    path?: string | undefined;
+    glob?: string | undefined;
+    output_mode?: "content" | "files_with_matches" | "count" | undefined;
+    "-B"?: number | undefined;
+    "-A"?: number | undefined;
+    "-C"?: number | undefined;
+    "-n"?: boolean | undefined;
+    "-i"?: boolean | undefined;
+    type?: string | undefined;
+    head_limit?: number | undefined;
+    multiline?: boolean | undefined;
+}, unknown>, import("langchain").DynamicStructuredTool<z.ZodObject<{
+    file_path: z.ZodString;
+    offset: z.ZodOptional<z.ZodNumber>;
+    limit: z.ZodOptional<z.ZodNumber>;
+}, z.core.$strip>, {
+    file_path: string;
+    offset?: number | undefined;
+    limit?: number | undefined;
+}, {
+    file_path: string;
+    offset?: number | undefined;
+    limit?: number | undefined;
+}, unknown>, import("langchain").DynamicStructuredTool<z.ZodObject<{
+    file_path: z.ZodString;
+    content: z.ZodString;
+}, z.core.$strip>, {
+    file_path: string;
+    content: string;
+}, {
+    file_path: string;
+    content: string;
+}, unknown>];
package/dist/runner/langchain/tools/filesystem.js
ADDED

@@ -0,0 +1,261 @@
+import { spawn } from "node:child_process";
+import { once } from "node:events";
+import * as fs from "node:fs/promises";
+import * as path from "node:path";
+import { SandboxManager, } from "@anthropic-ai/sandbox-runtime";
+import { tool } from "langchain";
+import { z } from "zod";
+/**
+ * Lazily initialize Sandbox Runtime with write access limited to workingDirectory.
+ * Read access defaults to Sandbox Runtime's defaults (read allowed everywhere),
+ * but commands run with cwd=workingDirectory, so rg/reads are scoped naturally.
+ */
+let initialized = false;
+async function ensureSandbox(workingDirectory) {
+    if (initialized)
+        return;
+    const cfg = {
+        network: {
+            // No outbound network needed for Grep/Read/Write; block by default.
+            allowedDomains: [],
+            deniedDomains: [],
+        },
+        filesystem: {
+            // Allow writes only within the configured sandbox directory.
+            allowWrite: [workingDirectory],
+            denyWrite: [],
+            // Optional: harden reads a bit (deny common sensitive dirs)
+            denyRead: ["~/.ssh", "~/.gnupg", "/etc/ssh"],
+        },
+    };
+    await SandboxManager.initialize(cfg);
+    initialized = true;
+}
+/** Small shell-escape for args that we pass via `shell: true`. */
+function shEscape(s) {
+    return `'${s.replace(/'/g, `'\\''`)}'`;
+}
+/** Run a command string inside the sandbox, returning { stdout, stderr, code }. */
+async function runSandboxed(cmd, cwd) {
+    const wrapped = await SandboxManager.wrapWithSandbox(cmd);
+    const child = spawn(wrapped, { shell: true, cwd });
+    const stdout = [];
+    const stderr = [];
+    child.stdout?.on("data", (d) => stdout.push(Buffer.from(d)));
+    child.stderr?.on("data", (d) => stderr.push(Buffer.from(d)));
+    const [code] = (await once(child, "exit"));
+    return {
+        stdout: Buffer.concat(stdout),
+        stderr: Buffer.concat(stderr),
+        code: code ?? 1,
+    };
+}
+/** Check that ripgrep is available inside the sandbox. Throw with a helpful note if not. */
+async function assertRipgrep(workingDirectory) {
+    const { code } = await runSandboxed("rg --version", workingDirectory);
+    if (code !== 0) {
+        throw new Error("ripgrep (rg) is required for the Grep tool. Please install it (e.g., `brew install ripgrep` on macOS or your distro package on Linux).");
+    }
+}
+/** Validate that a path is absolute */
+function assertAbsolutePath(filePath, paramName) {
+    if (!path.isAbsolute(filePath)) {
+        throw new Error(`${paramName} must be an absolute path, got: ${filePath}`);
+    }
+}
+/** Validate that a path is within the working directory bounds */
+function assertWithinWorkingDirectory(filePath, workingDirectory) {
+    const resolved = path.resolve(filePath);
+    const normalizedWd = path.resolve(workingDirectory);
+    if (!resolved.startsWith(normalizedWd + path.sep) &&
+        resolved !== normalizedWd) {
+        throw new Error(`Path ${filePath} is outside the allowed working directory ${workingDirectory}`);
+    }
+}
+export function makeFilesystemTools(workingDirectory) {
+    const resolvedWd = path.resolve(workingDirectory);
+    const grep = tool(async ({ pattern, path: searchPath, glob, output_mode, "-B": before, "-A": after, "-C": context, "-n": lineNumbers, "-i": ignoreCase, type: fileType, head_limit, multiline, }) => {
+        await ensureSandbox(resolvedWd);
+        await assertRipgrep(resolvedWd);
+        let target = resolvedWd;
+        if (searchPath) {
+            assertAbsolutePath(searchPath, "path");
+            assertWithinWorkingDirectory(searchPath, resolvedWd);
+            target = path.resolve(searchPath);
+        }
+        // Build rg command
+        const parts = ["rg"];
+        // Output format
+        const mode = output_mode ?? "files_with_matches";
+        if (mode === "files_with_matches") {
+            parts.push("--files-with-matches");
+        }
+        else if (mode === "count") {
+            parts.push("--count");
+        }
+        else if (mode === "content") {
+            // Default content mode
+            if (lineNumbers !== false) {
+                parts.push("--line-number");
+            }
+            if (context !== undefined) {
+                parts.push(`-C${context}`);
+            }
+            else {
+                if (before !== undefined)
+                    parts.push(`-B${before}`);
+                if (after !== undefined)
+                    parts.push(`-A${after}`);
+            }
+        }
+        // Search options
+        if (ignoreCase)
+            parts.push("-i");
+        if (multiline)
+            parts.push("-U", "--multiline-dotall");
+        if (fileType)
+            parts.push("--type", fileType);
+        if (glob)
+            parts.push("-g", shEscape(glob));
+        // Pattern and target
+        parts.push(shEscape(pattern), shEscape(target));
+        // Head limit (done via pipe)
+        let cmd = parts.join(" ");
+        if (head_limit !== undefined) {
+            cmd = `${cmd} | head -n ${head_limit}`;
+        }
+        const { stdout, stderr, code } = await runSandboxed(cmd, resolvedWd);
+        // rg returns non-zero on "no matches" — treat as empty results
+        if (code !== 0 && stdout.length === 0) {
+            const err = stderr.toString("utf8");
+            // If stderr looks like a real error (not just "no matches"), surface it
+            if (err && !/no such file or directory|nothing matched/i.test(err)) {
+                throw new Error(`ripgrep failed:\n${err}`);
+            }
+            return mode === "count" ? "0" : "";
+        }
+        return stdout.toString("utf8");
+    }, {
+        name: "Grep",
+        description: 'A powerful search tool built on ripgrep\n\n Usage:\n - ALWAYS use Grep for search tasks. NEVER invoke `grep` or `rg` as a Bash command. The Grep tool has been optimized for correct permissions and access.\n - Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")\n - Filter files with glob parameter (e.g., "*.js", "**/*.tsx") or type parameter (e.g., "js", "py", "rust")\n - Output modes: "content" shows matching lines, "files_with_matches" shows only file paths (default), "count" shows match counts\n - Use Task tool for open-ended searches requiring multiple rounds\n - Pattern syntax: Uses ripgrep (not grep) - literal braces need escaping (use `interface\\{\\}` to find `interface{}` in Go code)\n - Multiline matching: By default patterns match within single lines only. For cross-line patterns like `struct \\{[\\s\\S]*?field`, use `multiline: true`\n',
+        schema: z.object({
+            pattern: z
+                .string()
+                .describe("The regular expression pattern to search for in file contents"),
+            path: z
+                .string()
+                .optional()
+                .describe("File or directory to search in (rg PATH). Defaults to current working directory."),
+            glob: z
+                .string()
+                .optional()
+                .describe('Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}") - maps to rg --glob'),
+            output_mode: z
+                .enum(["content", "files_with_matches", "count"])
+                .optional()
+                .describe('Output mode: "content" shows matching lines (supports -A/-B/-C context, -n line numbers, head_limit), "files_with_matches" shows file paths (supports head_limit), "count" shows match counts (supports head_limit). Defaults to "files_with_matches".'),
+            "-B": z
+                .number()
+                .optional()
+                .describe('Number of lines to show before each match (rg -B). Requires output_mode: "content", ignored otherwise.'),
+            "-A": z
+                .number()
+                .optional()
+                .describe('Number of lines to show after each match (rg -A). Requires output_mode: "content", ignored otherwise.'),
+            "-C": z
+                .number()
+                .optional()
+                .describe('Number of lines to show before and after each match (rg -C). Requires output_mode: "content", ignored otherwise.'),
+            "-n": z
+                .boolean()
+                .optional()
+                .describe('Show line numbers in output (rg -n). Requires output_mode: "content", ignored otherwise.'),
+            "-i": z
+                .boolean()
+                .optional()
+                .describe("Case insensitive search (rg -i)"),
+            type: z
+                .string()
+                .optional()
+                .describe("File type to search (rg --type). Common types: js, py, rust, go, java, etc. More efficient than include for standard file types."),
+            head_limit: z
+                .number()
+                .optional()
+                .describe('Limit output to first N lines/entries, equivalent to "| head -N". Works across all output modes: content (limits output lines), files_with_matches (limits file paths), count (limits count entries). When unspecified, shows all results from ripgrep.'),
+            multiline: z
+                .boolean()
+                .optional()
+                .describe("Enable multiline mode where . matches newlines and patterns can span lines (rg -U --multiline-dotall). Default: false."),
+        }),
+    });
+    const read = tool(async ({ file_path, offset, limit }) => {
+        await ensureSandbox(resolvedWd);
+        assertAbsolutePath(file_path, "file_path");
+        assertWithinWorkingDirectory(file_path, resolvedWd);
+        const target = path.resolve(file_path);
+        // Read the file using sandboxed cat
+        const cmd = `cat ${shEscape(target)}`;
+        const { stdout, stderr, code } = await runSandboxed(cmd, resolvedWd);
+        if (code !== 0) {
+            throw new Error(`Read failed for ${file_path}:\n${stderr.toString("utf8") || "Unknown error"}`);
+        }
+        // Handle offset and limit
+        let lines = stdout.toString("utf8").split(/\r?\n/);
+        if (offset !== undefined) {
+            lines = lines.slice(offset);
+        }
+        if (limit !== undefined) {
+            lines = lines.slice(0, limit);
+        }
+        // Truncate long lines
+        const truncatedLines = lines.map((line) => line.length > 2000 ? `${line.slice(0, 2000)}...` : line);
+        // Format with line numbers (cat -n style)
+        const startLine = (offset ?? 0) + 1;
+        const formatted = truncatedLines
+            .map((line, idx) => `${startLine + idx}→${line}`)
+            .join("\n");
+        return formatted;
+    }, {
+        name: "Read",
+        description: "Reads a file from the local filesystem. You can access any file directly by using this tool.\nAssume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.\n\nUsage:\n- The file_path parameter must be an absolute path, not a relative path\n- By default, it reads up to 2000 lines starting from the beginning of the file\n- You can optionally specify a line offset and limit (especially handy for long files), but it's recommended to read the whole file by not providing these parameters\n- Any lines longer than 2000 characters will be truncated\n- Results are returned using cat -n format, with line numbers starting at 1\n- This tool allows Claude Code to read images (eg PNG, JPG, etc). When reading an image file the contents are presented visually as Claude Code is a multimodal LLM.\n- This tool can read PDF files (.pdf). PDFs are processed page by page, extracting both text and visual content for analysis.\n- This tool can read Jupyter notebooks (.ipynb files) and returns all cells with their outputs, combining code, text, and visualizations.\n- This tool can only read files, not directories. To read a directory, use an ls command via the Bash tool.\n- You can call multiple tools in a single response. It is always better to speculatively read multiple potentially useful files in parallel.\n- You will regularly be asked to read screenshots. If the user provides a path to a screenshot, ALWAYS use this tool to view the file at the path. This tool will work with all temporary file paths.\n- If you read a file that exists but has empty contents you will receive a system reminder warning in place of file contents.",
+        schema: z.object({
+            file_path: z.string().describe("The absolute path to the file to read"),
+            offset: z
+                .number()
+                .optional()
+                .describe("The line number to start reading from. Only provide if the file is too large to read at once"),
+            limit: z
+                .number()
+                .optional()
+                .describe("The number of lines to read. Only provide if the file is too large to read at once."),
+        }),
+    });
+    const write = tool(async ({ file_path, content }) => {
+        await ensureSandbox(resolvedWd);
+        assertAbsolutePath(file_path, "file_path");
+        assertWithinWorkingDirectory(file_path, resolvedWd);
+        const target = path.resolve(file_path);
+        const dir = path.dirname(target);
+        // Make sure parent exists (in *parent* process, just for convenience of here-doc).
+        // This does not write file contents; the write itself happens inside the sandbox.
+        await fs.mkdir(dir, { recursive: true });
+        // Safe here-doc to avoid shell interpolation
+        const cmd = `bash -c 'mkdir -p ${shEscape(dir)} && cat > ${shEscape(target)} <<'EOF'\n` +
+            `${content}\nEOF\n'`;
+        const { stderr, code } = await runSandboxed(cmd, resolvedWd);
+        if (code !== 0) {
+            throw new Error(`Write failed for ${file_path}:\n${stderr.toString("utf8") || "Unknown error"}`);
+        }
+        return `Successfully wrote ${Buffer.byteLength(content, "utf8")} bytes to ${file_path}`;
+    }, {
+        name: "Write",
+        description: "Writes a file to the local filesystem.\n\nUsage:\n- This tool will overwrite the existing file if there is one at the provided path.\n- If this is an existing file, you MUST use the Read tool first to read the file's contents. This tool will fail if you did not read the file first.\n- ALWAYS prefer editing existing files in the codebase. NEVER write new files unless explicitly required.\n- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.\n- Only use emojis if the user explicitly requests it. Avoid writing emojis to files unless asked.",
+        schema: z.object({
+            file_path: z
+                .string()
+                .describe("The absolute path to the file to write (must be absolute, not relative)"),
+            content: z.string().describe("The content to write to the file"),
+        }),
+    });
+    return [grep, read, write];
+}
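Because makeFilesystemTools returns ordinary LangChain tools (Grep, Read, Write), they can also be exercised directly. A rough sketch under a few assumptions: ripgrep and @anthropic-ai/sandbox-runtime work on the host, the import specifier matches the dist/ layout above, and the directory path is a placeholder:

// Sketch only; the import specifier and paths are illustrative.
import { makeFilesystemTools } from "./runner/langchain/tools/filesystem.js";

const workdir = "/tmp/agent-workdir"; // placeholder absolute path
const [grep, read, write] = makeFilesystemTools(workdir);

// Each tool runs its command through the sandbox wrapper, with writes confined to workdir.
await write.invoke({ file_path: `${workdir}/notes.txt`, content: "hello sandbox\n" });
console.log(await read.invoke({ file_path: `${workdir}/notes.txt` }));
console.log(await grep.invoke({ pattern: "hello", output_mode: "content" }));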
package/dist/runner/tools.d.ts
CHANGED

@@ -1,10 +1,13 @@
 import { z } from "zod";
 /** Built-in tool types. */
-export declare const zBuiltInToolType: z.ZodUnion<readonly [z.ZodLiteral<"todo_write">, z.ZodLiteral<"get_weather">, z.ZodLiteral<"web_search">]>;
+export declare const zBuiltInToolType: z.ZodUnion<readonly [z.ZodLiteral<"todo_write">, z.ZodLiteral<"get_weather">, z.ZodLiteral<"web_search">, z.ZodLiteral<"filesystem">]>;
 /** Tool type - can be a built-in tool string or custom tool object. */
-export declare const zToolType: z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"todo_write">, z.ZodLiteral<"get_weather">, z.ZodLiteral<"web_search">]>, z.ZodObject<{
+export declare const zToolType: z.ZodUnion<readonly [z.ZodUnion<readonly [z.ZodLiteral<"todo_write">, z.ZodLiteral<"get_weather">, z.ZodLiteral<"web_search">, z.ZodLiteral<"filesystem">]>, z.ZodObject<{
     type: z.ZodLiteral<"custom">;
     modulePath: z.ZodString;
+}, z.core.$strip>, z.ZodObject<{
+    type: z.ZodLiteral<"filesystem">;
+    working_directory: z.ZodOptional<z.ZodString>;
 }, z.core.$strip>]>;
 export type ToolType = z.infer<typeof zToolType>;
 export type BuiltInToolType = z.infer<typeof zBuiltInToolType>;
package/dist/runner/tools.js
CHANGED

@@ -4,11 +4,21 @@ export const zBuiltInToolType = z.union([
     z.literal("todo_write"),
     z.literal("get_weather"),
     z.literal("web_search"),
+    z.literal("filesystem"),
 ]);
 /** Custom tool schema. */
 const zCustomTool = z.object({
     type: z.literal("custom"),
     modulePath: z.string(),
 });
+/** Filesystem tool schema. */
+const zFilesystemTool = z.object({
+    type: z.literal("filesystem"),
+    working_directory: z.string().optional(),
+});
 /** Tool type - can be a built-in tool string or custom tool object. */
-export const zToolType = z.union([
+export const zToolType = z.union([
+    zBuiltInToolType,
+    zCustomTool,
+    zFilesystemTool,
+]);
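Taken together, zToolType now accepts three shapes: a built-in tool name (including the new "filesystem" literal), a custom tool module reference, and a filesystem tool with an optional working directory. A small sketch of values that should validate, assuming zToolType is imported the way the dist files reference it (the module specifier and paths are placeholders):

// Sketch only; import specifier and paths are illustrative.
import { zToolType } from "./runner/tools.js";

zToolType.parse("web_search");   // built-in tool by name
zToolType.parse("filesystem");   // new built-in literal added in this release
zToolType.parse({ type: "custom", modulePath: "./my-tool.js" });                    // custom tool module (placeholder path)
zToolType.parse({ type: "filesystem", working_directory: "/tmp/agent-workdir" });   // optional working_directory (placeholder path)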