@heysalad/cheri-cli 0.10.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +95 -0
- package/package.json +12 -3
- package/src/commands/agent.js +427 -119
- package/src/commands/login.js +30 -10
- package/src/commands/memory.js +2 -2
- package/src/commands/status.js +1 -1
- package/src/commands/usage.js +13 -9
- package/src/commands/workspace.js +1 -1
- package/src/lib/approval.js +120 -0
- package/src/lib/command-safety.js +171 -0
- package/src/lib/config-store.js +140 -45
- package/src/lib/context.js +103 -37
- package/src/lib/diff-tracker.js +156 -0
- package/src/lib/logger.js +31 -12
- package/src/lib/markdown.js +80 -0
- package/src/lib/mcp/client.js +258 -0
- package/src/lib/multi-agent.js +153 -0
- package/src/lib/providers/index.js +290 -0
- package/src/lib/sandbox.js +164 -0
- package/src/lib/sessions/index.js +1 -2
- package/src/lib/tools/file-tools.js +56 -23
- package/src/lib/tools/search-tools.js +1 -1
- package/src/lib/ui.js +554 -0
- package/src/repl.js +5 -0
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
// Sandboxing for command execution
|
|
2
|
+
// Linux: uses unshare + seccomp-like restrictions via spawn options
|
|
3
|
+
// Fallback: restricted environment variables and timeout enforcement
|
|
4
|
+
|
|
5
|
+
import { spawn } from "child_process";
|
|
6
|
+
import { platform } from "os";
|
|
7
|
+
|
|
8
|
+
const IS_LINUX = platform() === "linux";
|
|
9
|
+
|
|
10
|
+
// Sandbox policy levels.
// Frozen so this shared constant cannot be mutated at runtime by consumers.
export const SandboxLevel = Object.freeze({
  NONE: "none", // No sandbox (legacy behavior)
  BASIC: "basic", // Timeout + restricted env + no network
  STRICT: "strict", // BASIC + filesystem restrictions via unshare
});
|
|
16
|
+
|
|
17
|
+
// Check if unshare(1) is available (Linux namespace isolation).
// The probe result is cached for the lifetime of the process.
let _unshareAvailable = null;

/**
 * Probe once for the `unshare` binary.
 * @returns {Promise<boolean>} true if `unshare` is on PATH.
 */
async function hasUnshare() {
  if (_unshareAvailable !== null) return _unshareAvailable;
  try {
    const { execSync } = await import("child_process");
    // `command -v` is a POSIX shell builtin; `which` is not guaranteed
    // to exist on minimal systems.
    execSync("command -v unshare", { stdio: "pipe" });
    _unshareAvailable = true;
  } catch {
    _unshareAvailable = false;
  }
  return _unshareAvailable;
}
|
|
30
|
+
|
|
31
|
+
// Build a sanitized copy of the environment for sandboxed children:
// strip credential-bearing variables and tag the child as sandboxed.
function buildSandboxEnv(allowNetwork = false) {
  // Known credential / secret-bearing variables to withhold from children.
  const sensitive = new Set([
    "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN",
    "AWS_BEARER_TOKEN_BEDROCK", "GITHUB_TOKEN", "GH_TOKEN",
    "OPENAI_API_KEY", "ANTHROPIC_API_KEY", "STRIPE_SECRET_KEY",
    "DATABASE_URL", "MONGO_URI", "REDIS_URL",
    "SSH_AUTH_SOCK", "GPG_AGENT_INFO",
    "npm_config_//registry.npmjs.org/:_authToken",
  ]);

  // Copy everything except the sensitive keys.
  const env = Object.fromEntries(
    Object.entries(process.env).filter(([key]) => !sensitive.has(key))
  );

  // Let child processes detect that they are running sandboxed.
  env.CHERI_SANDBOXED = "1";
  env.CHERI_SANDBOX_LEVEL = allowNetwork ? "basic" : "strict";

  return env;
}
|
|
52
|
+
|
|
53
|
+
/**
 * Execute a shell command inside a sandbox.
 *
 * @param {string} command - Shell command to run via `sh -c`.
 * @param {object} [options]
 * @param {string} [options.cwd] - Working directory (default: process.cwd()).
 * @param {number} [options.timeout=120000] - Wall-clock limit in ms; the
 *   process (group, on Linux) is SIGKILLed when exceeded.
 * @param {number} [options.maxBuffer] - Cap on captured stdout/stderr (10MiB).
 * @param {string} [options.level=SandboxLevel.BASIC] - Sandbox policy level.
 * @param {boolean} [options.allowNetwork=false] - When false under STRICT on
 *   Linux, the command runs in an empty network namespace via `unshare --net`.
 * @param {string[]} [options.allowWrite=[]] - Reserved: extra writable paths;
 *   not yet enforced by any level.
 * @returns {Promise<{stdout: string, stderr: string, exitCode: number, timedOut: boolean}>}
 *   Never rejects; spawn failures are reported via stderr/exitCode.
 */
export async function sandboxExec(command, options = {}) {
  const {
    cwd = process.cwd(),
    timeout = 120000,
    maxBuffer = 10 * 1024 * 1024,
    level = SandboxLevel.BASIC,
    allowNetwork = false,
    allowWrite = [], // reserved for future filesystem policies
  } = options;

  // NONE keeps the legacy behavior of inheriting the full environment.
  const env = level === SandboxLevel.NONE ? process.env : buildSandboxEnv(allowNetwork);

  // Resolve the launcher BEFORE constructing the Promise: an async Promise
  // executor swallows rejections and can leave the promise pending forever.
  let cmd = "sh";
  let args = ["-c", command];
  if (level === SandboxLevel.STRICT && IS_LINUX && !allowNetwork && (await hasUnshare())) {
    // unshare --net creates a new network namespace with no interfaces,
    // so the command has no network access.
    cmd = "unshare";
    args = ["--net", "--map-root-user", "sh", "-c", command];
  }

  return new Promise((resolve) => {
    let stdout = "";
    let stderr = "";
    let timedOut = false;
    let killed = false;

    const proc = spawn(cmd, args, {
      cwd,
      env,
      stdio: ["pipe", "pipe", "pipe"],
      // Own process group on Linux so the timeout can kill the whole tree.
      detached: IS_LINUX,
    });

    const timer = setTimeout(() => {
      timedOut = true;
      killed = true;
      try {
        if (IS_LINUX && proc.pid) {
          process.kill(-proc.pid, "SIGKILL"); // negative pid = process group
        } else {
          proc.kill("SIGKILL");
        }
      } catch {}
    }, timeout);

    proc.stdout.on("data", (data) => {
      if (killed) return; // don't append past the truncation marker
      stdout += data.toString();
      if (stdout.length > maxBuffer) {
        stdout = stdout.slice(0, maxBuffer) + "\n...(truncated)";
        killed = true;
        try { proc.kill("SIGKILL"); } catch {}
      }
    });

    proc.stderr.on("data", (data) => {
      if (stderr.length > maxBuffer) return; // already truncated
      stderr += data.toString();
      if (stderr.length > maxBuffer) {
        stderr = stderr.slice(0, maxBuffer) + "\n...(truncated)";
      }
    });

    proc.on("close", (code) => {
      clearTimeout(timer);
      resolve({
        stdout: stdout.trim(),
        stderr: stderr.trim(),
        exitCode: code ?? 1,
        timedOut,
      });
    });

    proc.on("error", (err) => {
      clearTimeout(timer);
      resolve({
        stdout: "",
        stderr: err.message,
        exitCode: 1,
        timedOut: false,
      });
    });

    // No interactive input: close stdin so commands reading it terminate.
    proc.stdin.end();
  });
}
|
|
151
|
+
|
|
152
|
+
/**
 * Human-readable description of a sandbox level for status displays.
 * @param {string} level - One of the SandboxLevel values.
 * @returns {string}
 */
export function getSandboxInfo(level) {
  if (level === SandboxLevel.STRICT) {
    // Network isolation relies on unshare, which is Linux-only.
    return IS_LINUX ? "strict (network isolated)" : "basic (strict requires Linux)";
  }
  if (level === SandboxLevel.BASIC) {
    return "basic (env sanitized, timeout enforced)";
  }
  return "none";
}
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import { existsSync, mkdirSync, writeFileSync, readFileSync, readdirSync } from "fs";
|
|
1
|
+
import { existsSync, mkdirSync, writeFileSync, readFileSync, readdirSync, unlinkSync } from "fs";
|
|
2
2
|
import { join } from "path";
|
|
3
3
|
import { homedir } from "os";
|
|
4
4
|
|
|
@@ -48,7 +48,6 @@ export function listSessions() {
|
|
|
48
48
|
export function deleteSession(sessionId) {
|
|
49
49
|
const filePath = join(SESSIONS_DIR, `${sessionId}.json`);
|
|
50
50
|
if (existsSync(filePath)) {
|
|
51
|
-
const { unlinkSync } = require("fs");
|
|
52
51
|
unlinkSync(filePath);
|
|
53
52
|
return true;
|
|
54
53
|
}
|
|
@@ -1,6 +1,8 @@
|
|
|
1
|
-
import { readFileSync, writeFileSync, mkdirSync, existsSync } from "fs";
|
|
1
|
+
import { readFileSync, writeFileSync, mkdirSync, existsSync, statSync } from "fs";
|
|
2
2
|
import { dirname, resolve } from "path";
|
|
3
3
|
|
|
4
|
+
const MAX_FILE_SIZE = 5 * 1024 * 1024; // 5MB limit
|
|
5
|
+
|
|
4
6
|
export const readFile = {
|
|
5
7
|
name: "read_file",
|
|
6
8
|
description: "Read the contents of a file at the given path. Returns the file contents as a string.",
|
|
@@ -12,12 +14,32 @@ export const readFile = {
|
|
|
12
14
|
required: ["path"],
|
|
13
15
|
},
|
|
14
16
|
handler: async ({ path }) => {
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
17
|
+
try {
|
|
18
|
+
const resolved = resolve(path);
|
|
19
|
+
if (!existsSync(resolved)) {
|
|
20
|
+
return { error: `File not found: ${resolved}` };
|
|
21
|
+
}
|
|
22
|
+
const stat = statSync(resolved);
|
|
23
|
+
if (stat.isDirectory()) {
|
|
24
|
+
return { error: `Path is a directory, not a file: ${resolved}` };
|
|
25
|
+
}
|
|
26
|
+
if (stat.size > MAX_FILE_SIZE) {
|
|
27
|
+
return { error: `File too large (${(stat.size / 1024 / 1024).toFixed(1)}MB). Max: 5MB` };
|
|
28
|
+
}
|
|
29
|
+
// Detect binary files by checking for null bytes in first 8KB
|
|
30
|
+
const buf = Buffer.alloc(Math.min(8192, stat.size));
|
|
31
|
+
const fd = await import("fs").then(fs => fs.openSync(resolved, "r"));
|
|
32
|
+
const { readSync, closeSync } = await import("fs");
|
|
33
|
+
readSync(fd, buf, 0, buf.length, 0);
|
|
34
|
+
closeSync(fd);
|
|
35
|
+
if (buf.includes(0)) {
|
|
36
|
+
return { error: `Binary file detected: ${resolved}. Cannot read binary files as text.` };
|
|
37
|
+
}
|
|
38
|
+
const content = readFileSync(resolved, "utf-8");
|
|
39
|
+
return { path: resolved, content, lines: content.split("\n").length };
|
|
40
|
+
} catch (err) {
|
|
41
|
+
return { error: `Failed to read file: ${err.message}` };
|
|
18
42
|
}
|
|
19
|
-
const content = readFileSync(resolved, "utf-8");
|
|
20
|
-
return { path: resolved, content, lines: content.split("\n").length };
|
|
21
43
|
},
|
|
22
44
|
};
|
|
23
45
|
|
|
@@ -33,13 +55,17 @@ export const writeFile = {
|
|
|
33
55
|
required: ["path", "content"],
|
|
34
56
|
},
|
|
35
57
|
handler: async ({ path, content }) => {
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
58
|
+
try {
|
|
59
|
+
const resolved = resolve(path);
|
|
60
|
+
const dir = dirname(resolved);
|
|
61
|
+
if (!existsSync(dir)) {
|
|
62
|
+
mkdirSync(dir, { recursive: true });
|
|
63
|
+
}
|
|
64
|
+
writeFileSync(resolved, content, "utf-8");
|
|
65
|
+
return { path: resolved, bytesWritten: Buffer.byteLength(content, "utf-8") };
|
|
66
|
+
} catch (err) {
|
|
67
|
+
return { error: `Failed to write file: ${err.message}` };
|
|
40
68
|
}
|
|
41
|
-
writeFileSync(resolved, content, "utf-8");
|
|
42
|
-
return { path: resolved, bytesWritten: Buffer.byteLength(content, "utf-8") };
|
|
43
69
|
},
|
|
44
70
|
};
|
|
45
71
|
|
|
@@ -56,17 +82,24 @@ export const editFile = {
|
|
|
56
82
|
required: ["path", "old_string", "new_string"],
|
|
57
83
|
},
|
|
58
84
|
handler: async ({ path, old_string, new_string }) => {
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
85
|
+
try {
|
|
86
|
+
const resolved = resolve(path);
|
|
87
|
+
if (!existsSync(resolved)) {
|
|
88
|
+
return { error: `File not found: ${resolved}` };
|
|
89
|
+
}
|
|
90
|
+
const content = readFileSync(resolved, "utf-8");
|
|
91
|
+
if (!content.includes(old_string)) {
|
|
92
|
+
return { error: "old_string not found in file. Make sure it matches exactly, including whitespace." };
|
|
93
|
+
}
|
|
94
|
+
const count = content.split(old_string).length - 1;
|
|
95
|
+
if (count > 1) {
|
|
96
|
+
return { error: `old_string found ${count} times. It must be unique — add more surrounding context.` };
|
|
97
|
+
}
|
|
98
|
+
const newContent = content.replace(old_string, new_string);
|
|
99
|
+
writeFileSync(resolved, newContent, "utf-8");
|
|
100
|
+
return { path: resolved, replacements: 1 };
|
|
101
|
+
} catch (err) {
|
|
102
|
+
return { error: `Failed to edit file: ${err.message}` };
|
|
66
103
|
}
|
|
67
|
-
const count = content.split(old_string).length - 1;
|
|
68
|
-
const newContent = content.replace(old_string, new_string);
|
|
69
|
-
writeFileSync(resolved, newContent, "utf-8");
|
|
70
|
-
return { path: resolved, replacements: 1, totalOccurrences: count };
|
|
71
104
|
},
|
|
72
105
|
};
|
|
@@ -43,7 +43,7 @@ export const searchContent = {
|
|
|
43
43
|
handler: async ({ pattern, path, include }) => {
|
|
44
44
|
const dir = resolve(path || ".");
|
|
45
45
|
try {
|
|
46
|
-
let cmd = `grep -rn --include
|
|
46
|
+
let cmd = `grep -rn --include=${JSON.stringify(include || "*")} ${JSON.stringify(pattern)} ${JSON.stringify(dir)} 2>/dev/null | head -50`;
|
|
47
47
|
const result = execSync(cmd, { encoding: "utf-8", timeout: 10_000 });
|
|
48
48
|
const lines = result.trim().split("\n").filter(Boolean);
|
|
49
49
|
const matches = lines.map((line) => {
|