bashkit 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -0
- package/dist/cloudflare/index.d.ts +20 -0
- package/dist/cloudflare/index.js +1251 -0
- package/dist/durable/durable-session.d.ts +220 -0
- package/dist/durable/index.d.ts +41 -0
- package/dist/durable/index.js +159 -0
- package/dist/durable/schema.d.ts +51 -0
- package/dist/durable/types.d.ts +208 -0
- package/dist/index.d.ts +2 -2
- package/dist/index.js +646 -113
- package/dist/react/index.d.ts +42 -0
- package/dist/react/index.js +10 -0
- package/dist/react/types.d.ts +333 -0
- package/dist/react/use-agent.d.ts +33 -0
- package/dist/react/use-durable-chat.d.ts +39 -0
- package/dist/sandbox/ripgrep.d.ts +11 -0
- package/dist/tools/task.d.ts +6 -4
- package/dist/utils/debug.d.ts +83 -0
- package/dist/utils/index.d.ts +1 -0
- package/dist/workflow.d.ts +52 -0
- package/dist/workflow.js +1051 -0
- package/package.json +1 -1
- package/dist/tools/web-constants.d.ts +0 -5
package/dist/workflow.js
ADDED
@@ -0,0 +1,1051 @@
import { createRequire } from "node:module";
var __create = Object.create;
var __getProtoOf = Object.getPrototypeOf;
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
  for (let key of __getOwnPropNames(mod))
    if (!__hasOwnProp.call(to, key))
      __defProp(to, key, {
        get: () => mod[key],
        enumerable: true
      });
  return to;
};
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, {
      get: all[name],
      enumerable: true,
      configurable: true,
      set: (newValue) => all[name] = () => newValue
    });
};
var __esm = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
var __require = /* @__PURE__ */ createRequire(import.meta.url);

// src/tools/read.ts
var exports_read = {};
__export(exports_read, {
  readInputSchema: () => readInputSchema,
  createReadTool: () => createReadTool,
  READ_DESCRIPTION: () => READ_DESCRIPTION
});
import { tool, zodSchema } from "ai";
import { z } from "zod";
function createReadTool(sandbox, config) {
  return tool({
    description: READ_DESCRIPTION,
    inputSchema: zodSchema(readInputSchema),
    strict: config?.strict,
    needsApproval: config?.needsApproval,
    providerOptions: config?.providerOptions,
    execute: async ({
      file_path,
      offset,
      limit
    }) => {
      if (config?.allowedPaths) {
        const isAllowed = config.allowedPaths.some((allowed) => file_path.startsWith(allowed));
        if (!isAllowed) {
          return { error: `Path not allowed: ${file_path}` };
        }
      }
      try {
        const exists = await sandbox.fileExists(file_path);
        if (!exists) {
          return { error: `Path not found: ${file_path}` };
        }
        const isDir = await sandbox.isDirectory(file_path);
        if (isDir) {
          const entries = await sandbox.readDir(file_path);
          return {
            type: "directory",
            entries,
            count: entries.length
          };
        }
        const content = await sandbox.readFile(file_path);
        const nullByteIndex = content.indexOf("\x00");
        if (nullByteIndex !== -1 && nullByteIndex < 1000) {
          const ext = file_path.split(".").pop()?.toLowerCase();
          const binaryExtensions = [
            "pdf",
            "png",
            "jpg",
            "jpeg",
            "gif",
            "zip",
            "tar",
            "gz",
            "exe",
            "bin",
            "so",
            "dylib"
          ];
          if (binaryExtensions.includes(ext || "")) {
            return {
              error: `Cannot read binary file: ${file_path} (file exists, ${content.length} bytes). Use appropriate tools to process ${ext?.toUpperCase()} files (e.g., Python scripts for PDFs).`
            };
          }
        }
        const allLines = content.split(`
`);
        const totalLines = allLines.length;
        const maxLinesWithoutLimit = config?.maxFileSize || 500;
        if (!limit && totalLines > maxLinesWithoutLimit) {
          return {
            error: `File is large (${totalLines} lines). Use 'offset' and 'limit' to read in chunks. Example: offset=1, limit=100 for first 100 lines.`
          };
        }
        const startLine = offset ? offset - 1 : 0;
        const endLine = limit ? startLine + limit : allLines.length;
        const selectedLines = allLines.slice(startLine, endLine);
        const lines = selectedLines.map((line, i) => ({
          line_number: startLine + i + 1,
          content: line
        }));
        return {
          type: "text",
          content: selectedLines.join(`
`),
          lines,
          total_lines: totalLines
        };
      } catch (error) {
        return {
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  });
}
var readInputSchema, READ_DESCRIPTION = `Reads a file from the local filesystem. You can access any file directly by using this tool.
Assume this tool is able to read all files on the machine. If the User provides a path to a file assume that path is valid. It is okay to read a file that does not exist; an error will be returned.

Usage:
- The file_path parameter must be an absolute path, not a relative path
- By default, it reads up to 500 lines starting from the beginning of the file
- You can optionally specify a line offset and limit (especially handy for long files)
- Results are returned with line numbers starting at 1
- This tool can only read text files, not binary files (images, PDFs, etc.)
- This tool can only read files, not directories. To read a directory, use an ls command via the Bash tool.
- It is always better to speculatively read multiple potentially useful files in parallel
- If you read a file that exists but has empty contents you will receive a warning in place of file contents`;
var init_read = __esm(() => {
  readInputSchema = z.object({
    file_path: z.string().describe("Absolute path to file or directory"),
    offset: z.number().optional().describe("Line number to start reading from (1-indexed)"),
    limit: z.number().optional().describe("Maximum number of lines to read")
  });
});

// src/tools/write.ts
var exports_write = {};
__export(exports_write, {
  writeInputSchema: () => writeInputSchema,
  createWriteTool: () => createWriteTool,
  WRITE_DESCRIPTION: () => WRITE_DESCRIPTION
});
import { tool as tool2, zodSchema as zodSchema2 } from "ai";
import { z as z2 } from "zod";
function createWriteTool(sandbox, config) {
  return tool2({
    description: WRITE_DESCRIPTION,
    inputSchema: zodSchema2(writeInputSchema),
    strict: config?.strict,
    needsApproval: config?.needsApproval,
    providerOptions: config?.providerOptions,
    execute: async ({
      file_path,
      content
    }) => {
      const byteLength = Buffer.byteLength(content, "utf-8");
      if (config?.maxFileSize && byteLength > config.maxFileSize) {
        return {
          error: `File content exceeds maximum size of ${config.maxFileSize} bytes (got ${byteLength})`
        };
      }
      if (config?.allowedPaths) {
        const isAllowed = config.allowedPaths.some((allowed) => file_path.startsWith(allowed));
        if (!isAllowed) {
          return { error: `Path not allowed: ${file_path}` };
        }
      }
      try {
        await sandbox.writeFile(file_path, content);
        return {
          message: `Successfully wrote to ${file_path}`,
          bytes_written: byteLength,
          file_path
        };
      } catch (error) {
        return {
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  });
}
var writeInputSchema, WRITE_DESCRIPTION = `Writes content to a file on the filesystem.

**Important guidelines:**
- This tool will overwrite existing files at the provided path
- If modifying an existing file, you MUST use the Read tool first to read the file's contents
- ALWAYS prefer editing existing files over creating new ones
- NEVER proactively create documentation files (*.md) or README files unless explicitly requested
- The file_path must be an absolute path, not relative

**When to use Write vs Edit:**
- Use Write for creating new files or completely replacing file contents
- Use Edit for making targeted changes to existing files (preferred for modifications)`;
var init_write = __esm(() => {
  writeInputSchema = z2.object({
    file_path: z2.string().describe("Path to the file to write"),
    content: z2.string().describe("Content to write to the file")
  });
});

// src/tools/edit.ts
var exports_edit = {};
__export(exports_edit, {
  editInputSchema: () => editInputSchema,
  createEditTool: () => createEditTool,
  EDIT_DESCRIPTION: () => EDIT_DESCRIPTION
});
import { tool as tool3, zodSchema as zodSchema3 } from "ai";
import { z as z3 } from "zod";
function createEditTool(sandbox, config) {
  return tool3({
    description: EDIT_DESCRIPTION,
    inputSchema: zodSchema3(editInputSchema),
    strict: config?.strict,
    needsApproval: config?.needsApproval,
    providerOptions: config?.providerOptions,
    execute: async ({
      file_path,
      old_string,
      new_string,
      replace_all = false
    }) => {
      if (old_string === new_string) {
        return { error: "old_string and new_string must be different" };
      }
      if (config?.allowedPaths) {
        const isAllowed = config.allowedPaths.some((allowed) => file_path.startsWith(allowed));
        if (!isAllowed) {
          return { error: `Path not allowed: ${file_path}` };
        }
      }
      try {
        const exists = await sandbox.fileExists(file_path);
        if (!exists) {
          return { error: `File not found: ${file_path}` };
        }
        const content = await sandbox.readFile(file_path);
        const occurrences = content.split(old_string).length - 1;
        if (occurrences === 0) {
          return { error: `String not found in file: "${old_string}"` };
        }
        if (!replace_all && occurrences > 1) {
          return {
            error: `String appears ${occurrences} times in file. Use replace_all=true to replace all, or provide a more unique string.`
          };
        }
        let newContent;
        let replacements;
        if (replace_all) {
          newContent = content.split(old_string).join(new_string);
          replacements = occurrences;
        } else {
          newContent = content.replace(old_string, new_string);
          replacements = 1;
        }
        await sandbox.writeFile(file_path, newContent);
        return {
          message: `Successfully edited ${file_path}`,
          file_path,
          replacements
        };
      } catch (error) {
        return {
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  });
}
var editInputSchema, EDIT_DESCRIPTION = `Performs exact string replacements in files.

**Important guidelines:**
- You MUST use the Read tool first before editing any file
- Preserve exact indentation (tabs/spaces) when replacing text
- The old_string must be unique in the file, or the edit will fail
- If old_string appears multiple times, either provide more context to make it unique, or use replace_all=true

**Parameters:**
- old_string: The exact text to find and replace (must match exactly, including whitespace)
- new_string: The replacement text (must be different from old_string)
- replace_all: Set to true to replace all occurrences (useful for renaming variables)

**When to use:**
- Making targeted changes to existing files
- Renaming variables or functions (with replace_all=true)
- Updating specific sections`;
var init_edit = __esm(() => {
  editInputSchema = z3.object({
    file_path: z3.string().describe("The absolute path to the file to modify"),
    old_string: z3.string().describe("The text to replace"),
    new_string: z3.string().describe("The text to replace it with (must be different from old_string)"),
    replace_all: z3.boolean().optional().describe("Replace all occurrences of old_string (default false)")
  });
});

// src/tools/bash.ts
var exports_bash = {};
__export(exports_bash, {
  createBashTool: () => createBashTool,
  bashInputSchema: () => bashInputSchema,
  BASH_DESCRIPTION: () => BASH_DESCRIPTION
});
import { tool as tool4, zodSchema as zodSchema4 } from "ai";
import { z as z4 } from "zod";
function createBashTool(sandbox, config) {
  const maxOutputLength = config?.maxOutputLength ?? 30000;
  const defaultTimeout = config?.timeout ?? 120000;
  return tool4({
    description: BASH_DESCRIPTION,
    inputSchema: zodSchema4(bashInputSchema),
    strict: config?.strict,
    needsApproval: config?.needsApproval,
    providerOptions: config?.providerOptions,
    execute: async ({
      command,
      timeout,
      description: _description,
      run_in_background: _run_in_background
    }) => {
      if (config?.blockedCommands) {
        for (const blocked of config.blockedCommands) {
          if (command.includes(blocked)) {
            return {
              error: `Command blocked: contains '${blocked}'`
            };
          }
        }
      }
      try {
        const effectiveTimeout = Math.min(timeout ?? defaultTimeout, 600000);
        const result = await sandbox.exec(command, {
          timeout: effectiveTimeout
        });
        let stdout = result.stdout;
        let stderr = result.stderr;
        if (stdout.length > maxOutputLength) {
          stdout = stdout.slice(0, maxOutputLength) + `
[output truncated, ${stdout.length - maxOutputLength} chars omitted]`;
        }
        if (stderr.length > maxOutputLength) {
          stderr = stderr.slice(0, maxOutputLength) + `
[output truncated, ${stderr.length - maxOutputLength} chars omitted]`;
        }
        return {
          stdout,
          stderr,
          exit_code: result.exitCode,
          interrupted: result.interrupted,
          duration_ms: result.durationMs
        };
      } catch (error) {
        return {
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  });
}
var bashInputSchema, BASH_DESCRIPTION = `Executes a bash command in a persistent shell session with optional timeout.

IMPORTANT: For file operations (reading, writing, editing, searching, finding files) - use the specialized tools instead of bash commands.

Before executing the command, please follow these steps:

1. Directory Verification:
- If the command will create new directories or files, first use \`ls\` to verify the parent directory exists and is the correct location
- For example, before running "mkdir foo/bar", first use \`ls foo\` to check that "foo" exists

2. Command Execution:
- Always quote file paths that contain spaces with double quotes (e.g., cd "/path/with spaces")
- Examples of proper quoting:
- cd "/Users/name/My Documents" (correct)
- cd /Users/name/My Documents (incorrect - will fail)
- After ensuring proper quoting, execute the command

Usage notes:
- The command argument is required
- You can specify an optional timeout in milliseconds (max 600000ms / 10 minutes). Default is 120000ms (2 minutes).
- It is very helpful if you write a clear, concise description of what this command does in 5-10 words
- If the output exceeds 30000 characters, output will be truncated
- Avoid using \`find\`, \`grep\`, \`cat\`, \`head\`, \`tail\`, \`sed\`, \`awk\`, or \`echo\` commands. Instead, use dedicated tools:
- File search: Use Glob (NOT find or ls)
- Content search: Use Grep (NOT grep or rg)
- Read files: Use Read (NOT cat/head/tail)
- Edit files: Use Edit (NOT sed/awk)
- Write files: Use Write (NOT echo >/cat <<EOF)
- When issuing multiple commands:
- If commands are independent, make multiple Bash tool calls in parallel
- If commands depend on each other, use '&&' to chain them (e.g., \`git add . && git commit -m "message"\`)
- Use ';' only when you need sequential execution but don't care if earlier commands fail
- DO NOT use newlines to separate commands
- Try to maintain your current working directory by using absolute paths and avoiding \`cd\``;
var init_bash = __esm(() => {
  bashInputSchema = z4.object({
    command: z4.string().describe("The command to execute"),
    timeout: z4.number().optional().describe("Optional timeout in milliseconds (max 600000)"),
    description: z4.string().optional().describe("Clear, concise description of what this command does in 5-10 words"),
    run_in_background: z4.boolean().optional().describe("Set to true to run this command in the background")
  });
});

// src/tools/glob.ts
var exports_glob = {};
__export(exports_glob, {
  globInputSchema: () => globInputSchema,
  createGlobTool: () => createGlobTool,
  GLOB_DESCRIPTION: () => GLOB_DESCRIPTION
});
import { tool as tool5, zodSchema as zodSchema5 } from "ai";
import { z as z5 } from "zod";
function createGlobTool(sandbox, config) {
  return tool5({
    description: GLOB_DESCRIPTION,
    inputSchema: zodSchema5(globInputSchema),
    strict: config?.strict,
    needsApproval: config?.needsApproval,
    providerOptions: config?.providerOptions,
    execute: async ({
      pattern,
      path
    }) => {
      const searchPath = path || ".";
      if (config?.allowedPaths) {
        const isAllowed = config.allowedPaths.some((allowed) => searchPath.startsWith(allowed));
        if (!isAllowed) {
          return { error: `Path not allowed: ${searchPath}` };
        }
      }
      try {
        const result = await sandbox.exec(`find ${searchPath} -type f -name "${pattern}" 2>/dev/null | head -1000`, { timeout: config?.timeout });
        if (result.exitCode !== 0 && result.stderr) {
          return { error: result.stderr };
        }
        const matches = result.stdout.split(`
`).filter(Boolean).map((p) => p.trim());
        return {
          matches,
          count: matches.length,
          search_path: searchPath
        };
      } catch (error) {
        return {
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  });
}
var globInputSchema, GLOB_DESCRIPTION = `
- Fast file pattern matching tool that works with any codebase size
- Supports glob patterns like "**/*.js" or "src/**/*.ts"
- Returns matching file paths sorted by modification time
- Use this tool when you need to find files by name patterns
- When you are doing an open ended search that may require multiple rounds of globbing and grepping, use the Task tool instead
- It is always better to speculatively perform multiple searches in parallel if they are potentially useful
`;
var init_glob = __esm(() => {
  globInputSchema = z5.object({
    pattern: z5.string().describe('Glob pattern to match files (e.g., "**/*.ts", "src/**/*.js", "*.md")'),
    path: z5.string().optional().describe("Directory to search in (defaults to working directory)")
  });
});

// src/tools/grep.ts
var exports_grep = {};
__export(exports_grep, {
  grepInputSchema: () => grepInputSchema,
  createGrepTool: () => createGrepTool,
  GREP_DESCRIPTION: () => GREP_DESCRIPTION
});
import { tool as tool6, zodSchema as zodSchema6 } from "ai";
import { z as z6 } from "zod";
function createGrepTool(sandbox, config) {
  return tool6({
    description: GREP_DESCRIPTION,
    inputSchema: zodSchema6(grepInputSchema),
    strict: config?.strict,
    needsApproval: config?.needsApproval,
    providerOptions: config?.providerOptions,
    execute: async (input) => {
      const {
        pattern,
        path,
        glob,
        type,
        output_mode = "files_with_matches",
        "-i": caseInsensitive,
        "-B": beforeContext,
        "-A": afterContext,
        "-C": context,
        head_limit,
        offset = 0,
        multiline
      } = input;
      const searchPath = path || ".";
      if (config?.allowedPaths) {
        const isAllowed = config.allowedPaths.some((allowed) => searchPath.startsWith(allowed));
        if (!isAllowed) {
          return { error: `Path not allowed: ${searchPath}` };
        }
      }
      try {
        if (!sandbox.rgPath) {
          return {
            error: "Ripgrep not available. Call ensureSandboxTools(sandbox) before using Grep with remote sandboxes."
          };
        }
        const cmd = buildRipgrepCommand({
          rgPath: sandbox.rgPath,
          pattern,
          searchPath,
          output_mode,
          caseInsensitive,
          beforeContext,
          afterContext,
          context,
          glob,
          type,
          multiline
        });
        const result = await sandbox.exec(cmd, { timeout: config?.timeout });
        if (output_mode === "files_with_matches") {
          return parseFilesOutput(result.stdout);
        } else if (output_mode === "count") {
          return parseCountOutput(result.stdout);
        } else {
          return parseContentOutput(result.stdout, head_limit, offset);
        }
      } catch (error) {
        return {
          error: error instanceof Error ? error.message : "Unknown error"
        };
      }
    }
  });
}
function buildRipgrepCommand(opts) {
  const flags = ["--json"];
  if (opts.caseInsensitive)
    flags.push("-i");
  if (opts.multiline)
    flags.push("-U", "--multiline-dotall");
  if (opts.output_mode === "content") {
    if (opts.context) {
      flags.push(`-C ${opts.context}`);
    } else {
      if (opts.beforeContext)
        flags.push(`-B ${opts.beforeContext}`);
      if (opts.afterContext)
        flags.push(`-A ${opts.afterContext}`);
    }
  }
  if (opts.glob)
    flags.push(`-g "${opts.glob}"`);
  if (opts.type)
    flags.push(`-t ${opts.type}`);
  const flagStr = flags.join(" ");
  return `${opts.rgPath} ${flagStr} "${opts.pattern}" ${opts.searchPath} 2>/dev/null`;
}
function parseFilesOutput(stdout) {
  const files = new Set;
  for (const line of stdout.split(`
`).filter(Boolean)) {
    try {
      const msg = JSON.parse(line);
      if (msg.type === "begin") {
        const data = msg.data;
        files.add(data.path.text);
      }
    } catch {}
  }
  return {
    files: Array.from(files),
    count: files.size
  };
}
function parseCountOutput(stdout) {
  const counts = new Map;
  for (const line of stdout.split(`
`).filter(Boolean)) {
    try {
      const msg = JSON.parse(line);
      if (msg.type === "end") {
        const data = msg.data;
        counts.set(data.path.text, data.stats.matches);
      }
    } catch {}
  }
  const countsArray = Array.from(counts.entries()).map(([file, count]) => ({
    file,
    count
  }));
  const total = countsArray.reduce((sum, c) => sum + c.count, 0);
  return {
    counts: countsArray,
    total
  };
}
function parseContentOutput(stdout, head_limit, offset = 0) {
  const fileData = new Map;
  for (const line of stdout.split(`
`).filter(Boolean)) {
    try {
      const msg = JSON.parse(line);
      if (msg.type === "begin") {
        const data = msg.data;
        fileData.set(data.path.text, { matches: [], contexts: [] });
      } else if (msg.type === "context") {
        const data = msg.data;
        const fd = fileData.get(data.path.text);
        if (fd) {
          fd.contexts.push({
            line_number: data.line_number,
            text: data.lines.text.replace(/\n$/, "")
          });
        }
      } else if (msg.type === "match") {
        const data = msg.data;
        const fd = fileData.get(data.path.text);
        if (fd) {
          fd.matches.push({
            line_number: data.line_number,
            text: data.lines.text.replace(/\n$/, "")
          });
        }
      }
    } catch {}
  }
  const allMatches = [];
  for (const [file, { matches, contexts }] of fileData) {
    matches.sort((a, b) => a.line_number - b.line_number);
    contexts.sort((a, b) => a.line_number - b.line_number);
    const matchContexts = new Map;
    for (const match of matches) {
      matchContexts.set(match.line_number, { before: [], after: [] });
    }
    for (const ctx of contexts) {
      let bestMatch = null;
      let bestDistance = Infinity;
      let isBefore = false;
      for (const match of matches) {
        const distance = Math.abs(ctx.line_number - match.line_number);
        if (distance < bestDistance) {
          bestDistance = distance;
          bestMatch = match;
          isBefore = ctx.line_number < match.line_number;
        }
      }
      if (bestMatch) {
        const mc = matchContexts.get(bestMatch.line_number);
        if (mc) {
          if (isBefore) {
            mc.before.push(ctx.text);
          } else {
            mc.after.push(ctx.text);
          }
        }
      }
    }
    for (const match of matches) {
      const mc = matchContexts.get(match.line_number);
      allMatches.push({
        file,
        line_number: match.line_number,
        line: match.text,
        before_context: mc?.before ?? [],
        after_context: mc?.after ?? []
      });
    }
  }
  const grepMatches = allMatches.map((m) => ({
    file: m.file,
    line_number: m.line_number,
    line: m.line,
    before_context: m.before_context.length > 0 ? m.before_context : undefined,
    after_context: m.after_context.length > 0 ? m.after_context : undefined
  }));
  let result = grepMatches;
  if (offset > 0) {
    result = result.slice(offset);
  }
  if (head_limit && head_limit > 0) {
    result = result.slice(0, head_limit);
  }
  return {
    matches: result,
    total_matches: result.length
  };
}
var grepInputSchema, GREP_DESCRIPTION = `A powerful content search tool built on ripgrep with regex support.

**Usage:**
- ALWAYS use Grep for search tasks. NEVER invoke \`grep\` or \`rg\` as a Bash command.
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
- Filter files with glob parameter (e.g., "*.js", "**/*.tsx") or type parameter (e.g., "js", "py", "rust")

**Output modes:**
- "content": Shows matching lines with optional context
- "files_with_matches": Shows only file paths (default)
- "count": Shows match counts per file

**Context options (content mode only):**
- -B: Lines to show before each match
- -A: Lines to show after each match
- -C: Lines to show before and after each match

**Pagination:**
- Use offset to skip results (useful for pagination)
- Use head_limit to limit total results returned`;
var init_grep = __esm(() => {
  grepInputSchema = z6.object({
    pattern: z6.string().describe("The regular expression pattern to search for in file contents"),
    path: z6.string().optional().describe("File or directory to search in (defaults to cwd)"),
    glob: z6.string().optional().describe('Glob pattern to filter files (e.g. "*.js", "*.{ts,tsx}")'),
    type: z6.string().optional().describe('File type to search (e.g. "js", "py", "rust")'),
    output_mode: z6.enum(["content", "files_with_matches", "count"]).optional().describe('Output mode: "content" shows matching lines, "files_with_matches" shows file paths (default), "count" shows match counts'),
    "-i": z6.boolean().optional().describe("Case insensitive search"),
    "-n": z6.boolean().optional().describe("Show line numbers in output. Requires output_mode: 'content'. Defaults to true."),
    "-B": z6.number().optional().describe("Number of lines to show before each match. Requires output_mode: 'content'."),
    "-A": z6.number().optional().describe("Number of lines to show after each match. Requires output_mode: 'content'."),
    "-C": z6.number().optional().describe("Number of lines to show before and after each match. Requires output_mode: 'content'."),
    head_limit: z6.number().optional().describe("Limit output to first N lines/entries. Works across all output modes. Defaults to 0 (unlimited)."),
    offset: z6.number().optional().describe("Skip first N lines/entries before applying head_limit. Works across all output modes. Defaults to 0."),
    multiline: z6.boolean().optional().describe("Enable multiline mode where patterns can span lines. Default: false.")
  });
});

// src/sandbox/lazy-singleton.ts
function createLazySingleton(factory) {
  let promise = null;
  return {
    get: () => {
      if (!promise) {
        promise = factory();
      }
      return promise;
    },
    reset: () => {
      promise = null;
    }
  };
}

// src/sandbox/ripgrep.ts
async function getBundledRgPath() {
  try {
    const { rgPath } = await import("@vscode/ripgrep");
    return rgPath;
  } catch {
    return;
  }
}
function getBundledRgPathSync() {
  try {
    const { rgPath } = __require("@vscode/ripgrep");
    return rgPath;
  } catch {
    return;
  }
}

// src/sandbox/ensure-tools.ts
async function ensureSandboxTools(sandbox) {
  const bundledRgPath = await getBundledRgPath();
  if (bundledRgPath) {
    const bundledCheck = await sandbox.exec(`test -x "${bundledRgPath}" && echo found`);
    if (bundledCheck.stdout.includes("found")) {
      sandbox.rgPath = bundledRgPath;
      return;
    }
  }
  const tmpCheck = await sandbox.exec("test -x /tmp/rg && echo found");
  if (tmpCheck.stdout.includes("found")) {
    sandbox.rgPath = "/tmp/rg";
    return;
  }
  const systemCheck = await sandbox.exec("which rg 2>/dev/null");
  if (systemCheck.exitCode === 0 && systemCheck.stdout.trim()) {
    sandbox.rgPath = systemCheck.stdout.trim();
    return;
  }
  const archResult = await sandbox.exec("uname -m");
  const arch = archResult.stdout.trim();
  const ripgrepArch = ARCH_MAP[arch];
  if (!ripgrepArch) {
    throw new Error(`Unsupported architecture: ${arch}. Supported: ${Object.keys(ARCH_MAP).join(", ")}`);
  }
  const ripgrepUrl = `https://github.com/BurntSushi/ripgrep/releases/download/${RIPGREP_VERSION}/ripgrep-${RIPGREP_VERSION}-${ripgrepArch}.tar.gz`;
  const tarPath = `ripgrep-${RIPGREP_VERSION}-${ripgrepArch}/rg`;
  const installResult = await sandbox.exec(`
curl -sL "${ripgrepUrl}" |
tar xzf - -C /tmp --strip-components=1 ${tarPath} &&
chmod +x /tmp/rg
`);
  if (installResult.exitCode !== 0) {
    throw new Error(`Failed to install ripgrep: ${installResult.stderr}`);
  }
  sandbox.rgPath = "/tmp/rg";
}
var RIPGREP_VERSION = "14.1.0", ARCH_MAP;
var init_ensure_tools = __esm(() => {
  ARCH_MAP = {
    x86_64: "x86_64-unknown-linux-musl",
    aarch64: "aarch64-unknown-linux-gnu",
    arm64: "aarch64-unknown-linux-gnu"
  };
});

// src/sandbox/e2b.ts
var exports_e2b = {};
__export(exports_e2b, {
  createE2BSandbox: () => createE2BSandbox
});
async function createE2BSandbox(config = {}) {
  let sandboxId = config.sandboxId;
  const workingDirectory = config.cwd || "/home/user";
  const timeout = config.timeout ?? 300000;
  const sandbox = createLazySingleton(async () => {
    let E2BSandboxSDK;
    try {
      const module = await import("@e2b/code-interpreter");
      E2BSandboxSDK = module.Sandbox;
    } catch {
      throw new Error("E2BSandbox requires @e2b/code-interpreter. Install with: npm install @e2b/code-interpreter");
    }
    let sbx;
    if (config.sandboxId) {
      sbx = await E2BSandboxSDK.connect(config.sandboxId);
    } else {
      sbx = await E2BSandboxSDK.create({
        apiKey: config.apiKey,
        timeoutMs: timeout,
        metadata: config.metadata
      });
      sandboxId = sbx.sandboxId;
    }
    return sbx;
  });
  const exec = async (command, options) => {
    const sbx = await sandbox.get();
    const startTime = performance.now();
    try {
      const result = await sbx.commands.run(command, {
        cwd: options?.cwd || workingDirectory,
        timeoutMs: options?.timeout
      });
      const durationMs = Math.round(performance.now() - startTime);
      return {
        stdout: result.stdout,
        stderr: result.stderr,
        exitCode: result.exitCode,
        durationMs,
        interrupted: false
      };
    } catch (error) {
      const durationMs = Math.round(performance.now() - startTime);
      if (error instanceof Error && error.message.toLowerCase().includes("timeout")) {
        return {
          stdout: "",
          stderr: "Command timed out",
          exitCode: 124,
          durationMs,
          interrupted: true
        };
      }
      if (error instanceof Error) {
        const exitMatch = error.message.match(/exit status (\d+)/i);
        const exitCode = exitMatch ? parseInt(exitMatch[1], 10) : 1;
        return {
          stdout: "",
          stderr: error.message,
          exitCode,
          durationMs,
          interrupted: false
        };
      }
      throw error;
    }
  };
  let rgPath;
  const sandboxObj = {
    exec,
    get id() {
      return sandboxId;
    },
    get rgPath() {
      return rgPath;
    },
    set rgPath(path) {
      rgPath = path;
    },
    async readFile(path) {
      const result = await exec(`cat "${path}"`);
      if (result.exitCode !== 0) {
        throw new Error(`Failed to read file: ${result.stderr}`);
      }
      return result.stdout;
    },
    async writeFile(path, content) {
      const sbx = await sandbox.get();
      await sbx.files.write(path, content);
    },
    async readDir(path) {
      const result = await exec(`ls -1 "${path}"`);
      if (result.exitCode !== 0) {
        throw new Error(`Failed to read directory: ${result.stderr}`);
      }
      return result.stdout.split(`
`).filter(Boolean);
    },
    async fileExists(path) {
      const result = await exec(`test -e "${path}"`);
      return result.exitCode === 0;
    },
    async isDirectory(path) {
      const result = await exec(`test -d "${path}"`);
      return result.exitCode === 0;
    },
    async destroy() {
      try {
        const sbx = await sandbox.get();
        await sbx.kill();
      } catch {}
      sandbox.reset();
    }
  };
  if (config.ensureTools !== false) {
    await ensureSandboxTools(sandboxObj);
  }
  return sandboxObj;
}
var init_e2b = __esm(() => {
  init_ensure_tools();
});

// src/workflow.ts
init_read();
init_write();
init_edit();
init_bash();
init_glob();
init_grep();
import { tool as tool7, zodSchema as zodSchema7 } from "ai";
function createDurableAgentTools(sandboxId, config) {
  const toolsConfig = config?.tools || {};
  const _sandboxId = sandboxId;
  const _apiKey = config?.apiKey;
  const _readConfig = toolsConfig.Read;
  const _bashConfig = toolsConfig.Bash;
  const tools = {
    Read: tool7({
      description: READ_DESCRIPTION,
      inputSchema: zodSchema7(readInputSchema),
      execute: async function readFile(params, options) {
        "use step";
        const { createReadTool: createReadTool2 } = await Promise.resolve().then(() => (init_read(), exports_read));
        const { createE2BSandbox: createE2BSandbox2 } = await Promise.resolve().then(() => (init_e2b(), exports_e2b));
        const sandbox = await createE2BSandbox2({ sandboxId: _sandboxId, apiKey: _apiKey, ensureTools: false });
        const readTool = createReadTool2(sandbox, _readConfig);
        if (!readTool.execute)
          throw new Error("Read tool missing execute");
        return readTool.execute(params, options);
      }
    }),
    Write: tool7({
      description: WRITE_DESCRIPTION,
      inputSchema: zodSchema7(writeInputSchema),
      execute: async function writeFile(params, options) {
        "use step";
        const { createWriteTool: createWriteTool2 } = await Promise.resolve().then(() => (init_write(), exports_write));
        const { createE2BSandbox: createE2BSandbox2 } = await Promise.resolve().then(() => (init_e2b(), exports_e2b));
        const sandbox = await createE2BSandbox2({ sandboxId: _sandboxId, apiKey: _apiKey, ensureTools: false });
        const writeTool = createWriteTool2(sandbox);
        if (!writeTool.execute)
          throw new Error("Write tool missing execute");
        return writeTool.execute(params, options);
      }
    }),
    Edit: tool7({
      description: EDIT_DESCRIPTION,
      inputSchema: zodSchema7(editInputSchema),
      execute: async function editFile(params, options) {
        "use step";
        const { createEditTool: createEditTool2 } = await Promise.resolve().then(() => (init_edit(), exports_edit));
        const { createE2BSandbox: createE2BSandbox2 } = await Promise.resolve().then(() => (init_e2b(), exports_e2b));
        const sandbox = await createE2BSandbox2({ sandboxId: _sandboxId, apiKey: _apiKey, ensureTools: false });
        const editTool = createEditTool2(sandbox);
        if (!editTool.execute)
          throw new Error("Edit tool missing execute");
        return editTool.execute(params, options);
      }
    }),
    Bash: tool7({
      description: BASH_DESCRIPTION,
      inputSchema: zodSchema7(bashInputSchema),
      execute: async function runBash(params, options) {
        "use step";
        const { createBashTool: createBashTool2 } = await Promise.resolve().then(() => (init_bash(), exports_bash));
        const { createE2BSandbox: createE2BSandbox2 } = await Promise.resolve().then(() => (init_e2b(), exports_e2b));
        const sandbox = await createE2BSandbox2({ sandboxId: _sandboxId, apiKey: _apiKey, ensureTools: false });
        const bashTool = createBashTool2(sandbox, _bashConfig);
        if (!bashTool.execute)
          throw new Error("Bash tool missing execute");
        return bashTool.execute(params, options);
      }
    }),
    Glob: tool7({
      description: GLOB_DESCRIPTION,
      inputSchema: zodSchema7(globInputSchema),
      execute: async function globFiles(params, options) {
        "use step";
        const { createGlobTool: createGlobTool2 } = await Promise.resolve().then(() => (init_glob(), exports_glob));
        const { createE2BSandbox: createE2BSandbox2 } = await Promise.resolve().then(() => (init_e2b(), exports_e2b));
        const sandbox = await createE2BSandbox2({ sandboxId: _sandboxId, apiKey: _apiKey, ensureTools: false });
        const globTool = createGlobTool2(sandbox);
        if (!globTool.execute)
          throw new Error("Glob tool missing execute");
        return globTool.execute(params, options);
      }
    }),
    Grep: tool7({
      description: GREP_DESCRIPTION,
      inputSchema: zodSchema7(grepInputSchema),
      execute: async function grepSearch(params, options) {
        "use step";
        const { createGrepTool: createGrepTool2 } = await Promise.resolve().then(() => (init_grep(), exports_grep));
        const { createE2BSandbox: createE2BSandbox2 } = await Promise.resolve().then(() => (init_e2b(), exports_e2b));
        const sandbox = await createE2BSandbox2({ sandboxId: _sandboxId, apiKey: _apiKey, ensureTools: false });
        const grepTool = createGrepTool2(sandbox);
        if (!grepTool.execute)
          throw new Error("Grep tool missing execute");
        return grepTool.execute(params, options);
      }
    })
  };
  return { tools };
}
export {
  createDurableAgentTools
};
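
For orientation, a minimal usage sketch of the new createDurableAgentTools export follows. It is not part of the diff: the "bashkit/workflow" import path, the generateText/openai calls from the AI SDK, and the model id are illustrative assumptions.

// Hypothetical usage sketch (not from the package); assumes a "bashkit/workflow"
// subpath export and the Vercel AI SDK's generateText API.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";
import { createDurableAgentTools } from "bashkit/workflow";

// Reuse an existing E2B sandbox; each tool call reconnects to it by id inside a "use step" function.
const { tools } = createDurableAgentTools("existing-e2b-sandbox-id", {
  apiKey: process.env.E2B_API_KEY,
  tools: { Bash: { timeout: 60000 } }
});

const result = await generateText({
  model: openai("gpt-4o"),
  tools,
  prompt: "Find every TODO comment in /home/user/project and summarize it."
});
console.log(result.text);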