@heysalad/cheri-cli 0.9.0 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,285 @@
1
+ // Multi-provider abstraction
2
+ // Supports: Cheri Cloud (default), OpenAI, Anthropic, Ollama, custom OpenAI-compatible
3
+ import { getConfigValue } from "../config-store.js";
4
+
5
+ /**
6
+ * Provider interface:
7
+ * - chatStream(messages, tools, options) => Response (SSE stream)
8
+ * - chatSync(messages, tools, options) => parsed JSON response
9
+ */
10
+
11
// ── Cheri Cloud Provider (default) ──────────────────────────────────────────
class CheriCloudProvider {
  /**
   * @param {string} token - Bearer token obtained via `cheri login`.
   * @param {string} [baseUrl] - API origin; defaults to the hosted cloud.
   */
  constructor(token, baseUrl) {
    this.token = token;
    this.baseUrl = baseUrl || "https://cheri.heysalad.app";
  }

  /**
   * Stream a chat completion from Cheri Cloud.
   * @param {Array<object>} messages - OpenAI-style chat messages
   * @param {Array<object>} [tools] - OpenAI-style tool definitions
   * @param {{model?: string}} [options]
   * @returns {Promise<Response>} raw SSE response
   * @throws {Error} on any non-2xx status, using the server's `error` field when parseable
   */
  async chatStream(messages, tools = [], options = {}) {
    const payload = { messages, stream: true };
    if (tools.length > 0) {
      payload.tools = tools;
    }
    if (options.model) {
      payload.model = options.model;
    }

    const res = await fetch(`${this.baseUrl}/api/chat/completions`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${this.token}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify(payload),
    });

    if (res.ok) {
      return res;
    }

    const text = await res.text();
    let msg = text;
    try {
      msg = JSON.parse(text).error || text;
    } catch {
      // body was not JSON; keep the raw text
    }
    throw new Error(`AI error (${res.status}): ${msg}`);
  }
}
41
+
42
// ── OpenAI-Compatible Provider ──────────────────────────────────────────────
class OpenAIProvider {
  /**
   * Generic client for any OpenAI-compatible /chat/completions endpoint
   * (OpenAI, DeepSeek, Groq, Together, Ollama, OpenRouter, xAI, Mistral, …).
   * @param {string} apiKey - sent as a Bearer token
   * @param {string} baseUrl - API root, e.g. "https://api.openai.com/v1"
   * @param {string} defaultModel - used when options.model is not given
   */
  constructor(apiKey, baseUrl, defaultModel) {
    this.apiKey = apiKey;
    this.baseUrl = baseUrl;
    this.defaultModel = defaultModel;
  }

  /**
   * Stream a chat completion.
   * @param {Array<object>} messages
   * @param {Array<object>} [tools]
   * @param {{model?: string, temperature?: number, max_tokens?: number}} [options]
   * @returns {Promise<Response>} raw SSE response
   * @throws {Error} on non-2xx, with the first 200 chars of the body
   */
  async chatStream(messages, tools = [], options = {}) {
    const payload = {
      model: options.model || this.defaultModel,
      messages,
      stream: true,
    };
    if (tools.length > 0) {
      payload.tools = tools;
    }
    // `!== undefined` so an explicit temperature of 0 is still forwarded.
    if (options.temperature !== undefined) {
      payload.temperature = options.temperature;
    }
    if (options.max_tokens) {
      payload.max_tokens = options.max_tokens;
    }

    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify(payload),
    });

    if (res.ok) {
      return res;
    }
    const text = await res.text();
    throw new Error(`Provider error (${res.status}): ${text.slice(0, 200)}`);
  }
}
76
+
77
// ── Anthropic Provider ──────────────────────────────────────────────────────
class AnthropicProvider {
  /**
   * @param {string} apiKey - Anthropic API key (sent as `x-api-key`).
   * @param {string} [defaultModel] - model used when options.model is absent.
   */
  constructor(apiKey, defaultModel) {
    this.apiKey = apiKey;
    this.defaultModel = defaultModel || "claude-sonnet-4-20250514";
  }

  /**
   * Stream a chat completion from Anthropic, translating both the request
   * (OpenAI message/tool format → Anthropic Messages API) and the response
   * stream (Anthropic SSE → OpenAI chunk format).
   * @param {Array<object>} messages - OpenAI-style chat messages
   * @param {Array<object>} [tools] - OpenAI-style tool definitions
   * @param {{model?: string, max_tokens?: number}} [options]
   * @returns {Promise<Response>} SSE response emitting OpenAI-style chunks
   * @throws {Error} on non-2xx responses
   */
  async chatStream(messages, tools = [], options = {}) {
    // Convert OpenAI format to Anthropic format.
    // All system messages are concatenated into the top-level `system` string.
    const system = messages.filter(m => m.role === "system").map(m => m.content).join("\n");
    const anthropicMessages = [];

    for (const msg of messages) {
      if (msg.role === "system") continue;

      if (msg.role === "user") {
        anthropicMessages.push({ role: "user", content: msg.content });
      } else if (msg.role === "assistant") {
        const content = [];
        if (msg.content) content.push({ type: "text", text: msg.content });
        if (msg.tool_calls) {
          for (const tc of msg.tool_calls) {
            // OpenAI carries arguments as a JSON string; Anthropic wants an object.
            let input = {};
            try { input = JSON.parse(tc.function.arguments); } catch {}
            content.push({ type: "tool_use", id: tc.id, name: tc.function.name, input });
          }
        }
        anthropicMessages.push({ role: "assistant", content });
      } else if (msg.role === "tool") {
        // Merge consecutive tool results into one user message — Anthropic
        // requires tool_result blocks to live inside a user turn.
        const last = anthropicMessages[anthropicMessages.length - 1];
        const block = { type: "tool_result", tool_use_id: msg.tool_call_id, content: msg.content };
        if (last?.role === "user" && Array.isArray(last.content)) {
          last.content.push(block);
        } else {
          anthropicMessages.push({ role: "user", content: [block] });
        }
      }
    }

    const anthropicTools = tools.map(t => ({
      name: t.function.name,
      description: t.function.description,
      input_schema: t.function.parameters,
    }));

    const body = {
      model: options.model || this.defaultModel,
      max_tokens: options.max_tokens || 8192, // Anthropic requires max_tokens
      system,
      messages: anthropicMessages,
      stream: true,
    };
    if (anthropicTools.length > 0) body.tools = anthropicTools;

    const res = await fetch("https://api.anthropic.com/v1/messages", {
      method: "POST",
      headers: {
        "x-api-key": this.apiKey,
        "anthropic-version": "2023-06-01",
        "Content-Type": "application/json",
      },
      body: JSON.stringify(body),
    });

    if (!res.ok) {
      const text = await res.text();
      throw new Error(`Anthropic error (${res.status}): ${text.slice(0, 200)}`);
    }

    // Anthropic's SSE event format differs from OpenAI's; wrap the stream in
    // a converter so downstream consumers only ever see OpenAI-style chunks.
    return this._convertStream(res, options.model || this.defaultModel);
  }

  /**
   * Convert an Anthropic Messages SSE stream into an OpenAI-compatible
   * chat-completion SSE stream.
   *
   * Bug fix: `input_json_delta` chunks previously hard-coded
   * `tool_calls[].index = 0` while the opening chunk used Anthropic's
   * content-block index, so streamed tool arguments could be attributed to
   * the wrong tool call whenever the tool_use block was not block 0 (the
   * usual case, since a text block occupies index 0). We now assign
   * sequential OpenAI tool-call indices (0, 1, …) and map each Anthropic
   * block index to its slot, matching OpenAI's convention of counting tool
   * calls rather than content blocks.
   *
   * @param {Response} res - raw Anthropic streaming response
   * @param {string} model - model name stamped onto every emitted chunk
   * @returns {Response} SSE response terminated by `data: [DONE]`
   */
  async _convertStream(res, model) {
    const reader = res.body.getReader();
    const { readable, writable } = new TransformStream();
    const writer = writable.getWriter();
    const encoder = new TextEncoder();

    (async () => {
      const decoder = new TextDecoder();
      let buffer = "";
      // Anthropic content-block index → OpenAI tool_calls index.
      const toolSlotByBlock = new Map();
      let nextToolSlot = 0;

      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split("\n");
          buffer = lines.pop() || ""; // keep the trailing partial line

          for (const line of lines) {
            if (!line.startsWith("data: ")) continue;
            const data = line.slice(6).trim();
            if (!data || data === "[DONE]") continue;

            try {
              const event = JSON.parse(data);
              let chunk = null;

              switch (event.type) {
                case "content_block_start":
                  if (event.content_block?.type === "tool_use") {
                    const slot = nextToolSlot++;
                    toolSlotByBlock.set(event.index, slot);
                    chunk = { choices: [{ index: 0, delta: { tool_calls: [{ index: slot, id: event.content_block.id, type: "function", function: { name: event.content_block.name, arguments: "" } }] }, finish_reason: null }] };
                  }
                  break;
                case "content_block_delta":
                  if (event.delta?.type === "text_delta") {
                    chunk = { choices: [{ index: 0, delta: { content: event.delta.text }, finish_reason: null }] };
                  } else if (event.delta?.type === "input_json_delta") {
                    const slot = toolSlotByBlock.get(event.index) ?? 0;
                    chunk = { choices: [{ index: 0, delta: { tool_calls: [{ index: slot, function: { arguments: event.delta.partial_json } }] }, finish_reason: null }] };
                  }
                  break;
                case "message_delta":
                  if (event.delta?.stop_reason) {
                    const fr = event.delta.stop_reason === "tool_use" ? "tool_calls" : "stop";
                    chunk = { choices: [{ index: 0, delta: {}, finish_reason: fr }] };
                  }
                  break;
              }

              if (chunk) {
                chunk.model = model;
                await writer.write(encoder.encode(`data: ${JSON.stringify(chunk)}\n\n`));
              }
            } catch {
              // Skip malformed SSE lines rather than aborting the stream.
            }
          }
        }
        await writer.write(encoder.encode("data: [DONE]\n\n"));
      } catch {
        // Reader failure: fall through so the writer is closed and consumers finish.
      } finally {
        await writer.close();
      }
    })();

    return new Response(readable, {
      headers: { "Content-Type": "text/event-stream" },
    });
  }
}
223
+
224
// ── Provider Factory ────────────────────────────────────────────────────────

// Registry of built-in providers.
//   envKey       env var consulted when no key is stored under `ai.keys.<name>`
//                (null = no key required / auth handled elsewhere)
//   baseUrl      OpenAI-compatible API root (absent for `cheri` and `anthropic`,
//                which are handled by dedicated branches in createProvider)
//   defaultModel model used when `ai.model` is not configured
const PROVIDER_CONFIGS = {
  cheri: { envKey: null },
  openai: { envKey: "OPENAI_API_KEY", baseUrl: "https://api.openai.com/v1", defaultModel: "gpt-4o" },
  anthropic: { envKey: "ANTHROPIC_API_KEY", defaultModel: "claude-sonnet-4-20250514" },
  deepseek: { envKey: "DEEPSEEK_API_KEY", baseUrl: "https://api.deepseek.com/v1", defaultModel: "deepseek-chat" },
  groq: { envKey: "GROQ_API_KEY", baseUrl: "https://api.groq.com/openai/v1", defaultModel: "llama-3.3-70b-versatile" },
  together: { envKey: "TOGETHER_API_KEY", baseUrl: "https://api.together.xyz/v1", defaultModel: "meta-llama/Llama-3.3-70B-Instruct-Turbo" },
  ollama: { envKey: null, baseUrl: "http://localhost:11434/v1", defaultModel: "llama3.2" },
  openrouter:{ envKey: "OPENROUTER_API_KEY", baseUrl: "https://openrouter.ai/api/v1", defaultModel: "anthropic/claude-sonnet-4" },
  xai: { envKey: "XAI_API_KEY", baseUrl: "https://api.x.ai/v1", defaultModel: "grok-3" },
  mistral: { envKey: "MISTRAL_API_KEY", baseUrl: "https://api.mistral.ai/v1", defaultModel: "mistral-large-latest" },
};
238
+
239
/**
 * Create a provider based on config.
 *
 * Resolution order for the provider name: explicit argument, the
 * `ai.provider` config value, then the "cheri" cloud default. API keys come
 * from `ai.keys.<name>` config, falling back to the provider's conventional
 * environment variable when one is defined in PROVIDER_CONFIGS.
 *
 * @param {string} [providerName] - provider to instantiate
 * @returns {CheriCloudProvider|AnthropicProvider|OpenAIProvider}
 * @throws {Error} when not logged in (cheri), no API key is configured,
 *   or a custom provider has no baseUrl set
 */
export function createProvider(providerName) {
  const name = providerName || getConfigValue("ai.provider") || "cheri";

  // Default: hosted Cheri Cloud, authenticated via the stored login token.
  if (name === "cheri") {
    const token = getConfigValue("token");
    const baseUrl = getConfigValue("apiUrl") || "https://cheri.heysalad.app";
    if (!token) throw new Error("Not logged in. Run 'cheri login' first.");
    return new CheriCloudProvider(token, baseUrl);
  }

  // Anthropic has its own wire format, so it gets a dedicated class.
  if (name === "anthropic") {
    const apiKey = getConfigValue(`ai.keys.${name}`) || process.env.ANTHROPIC_API_KEY;
    if (!apiKey) throw new Error(`No API key for ${name}. Set via: cheri config set ai.keys.anthropic <key>`);
    const model = getConfigValue("ai.model") || PROVIDER_CONFIGS.anthropic.defaultModel;
    return new AnthropicProvider(apiKey, model);
  }

  const config = PROVIDER_CONFIGS[name];

  // Unknown name: treat as a custom OpenAI-compatible endpoint configured
  // entirely through `ai.providers.<name>.*` keys.
  if (!config) {
    const baseUrl = getConfigValue(`ai.providers.${name}.baseUrl`);
    const apiKey = getConfigValue(`ai.keys.${name}`) || "none";
    const model = getConfigValue(`ai.providers.${name}.model`) || "default";
    if (!baseUrl) throw new Error(`Unknown provider '${name}'. Set baseUrl via: cheri config set ai.providers.${name}.baseUrl <url>`);
    return new OpenAIProvider(apiKey, baseUrl, model);
  }

  // Built-in OpenAI-compatible provider. Ollama runs locally and needs no
  // key; every other provider must have one configured.
  const apiKey = getConfigValue(`ai.keys.${name}`) || (config.envKey ? process.env[config.envKey] : "none");
  if ((!apiKey || apiKey === "none") && name !== "ollama") {
    throw new Error(`No API key for ${name}. Set via: cheri config set ai.keys.${name} <key>`);
  }

  const model = getConfigValue("ai.model") || config.defaultModel;
  return new OpenAIProvider(apiKey || "ollama", config.baseUrl, model);
}
279
+
280
/**
 * List available providers.
 * @returns {string[]} names of every built-in provider registry entry
 */
export function listProviders() {
  return Object.entries(PROVIDER_CONFIGS).map(([name]) => name);
}
@@ -0,0 +1,164 @@
1
+ // Sandboxing for command execution
2
+ // Linux: uses unshare + seccomp-like restrictions via spawn options
3
+ // Fallback: restricted environment variables and timeout enforcement
4
+
5
+ import { spawn } from "child_process";
6
+ import { platform } from "os";
7
+
8
// Evaluated once at module load; the host platform cannot change mid-process.
const IS_LINUX = platform() === "linux";
9
+
10
// Sandbox policy levels
// Frozen so this shared, exported constant cannot be mutated by callers.
export const SandboxLevel = Object.freeze({
  NONE: "none", // No sandbox (legacy behavior)
  BASIC: "basic", // Timeout + restricted env + no network
  STRICT: "strict", // BASIC + filesystem restrictions via unshare
});
16
+
17
// Check if unshare is available (Linux namespace isolation)
let _unshareAvailable = null; // memoized result of the `which` probe

/**
 * Probe (once) for the `unshare` binary used for namespace isolation.
 * @returns {Promise<boolean>} true when `unshare` is on PATH
 */
async function hasUnshare() {
  if (_unshareAvailable === null) {
    try {
      const { execSync } = await import("child_process");
      execSync("which unshare", { stdio: "pipe" });
      _unshareAvailable = true;
    } catch {
      _unshareAvailable = false;
    }
  }
  return _unshareAvailable;
}
30
+
31
// Restricted environment: strip sensitive vars, disable network hints
/**
 * Build a sanitized copy of process.env for sandboxed child processes.
 * Removes a fixed list of credential-bearing variables and adds markers so
 * cooperating children know they are sandboxed.
 *
 * NOTE(review): nothing here actually blocks network access — the
 * CHERI_SANDBOX_LEVEL marker only signals intent to child processes; confirm
 * callers rely on sandboxExec's unshare path for real isolation.
 *
 * @param {boolean} [allowNetwork=false] - reflected in CHERI_SANDBOX_LEVEL
 * @returns {object} the sanitized environment map
 */
function buildSandboxEnv(allowNetwork = false) {
  // Credential-bearing variables that must never leak into sandboxed children.
  const sensitiveVars = [
    "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN",
    "AWS_BEARER_TOKEN_BEDROCK", "GITHUB_TOKEN", "GH_TOKEN",
    "OPENAI_API_KEY", "ANTHROPIC_API_KEY", "STRIPE_SECRET_KEY",
    "DATABASE_URL", "MONGO_URI", "REDIS_URL",
    "SSH_AUTH_SOCK", "GPG_AGENT_INFO",
    "npm_config_//registry.npmjs.org/:_authToken",
  ];

  const env = { ...process.env };
  sensitiveVars.forEach((name) => {
    delete env[name];
  });

  // Signal to child processes that they're sandboxed
  env.CHERI_SANDBOXED = "1";
  env.CHERI_SANDBOX_LEVEL = allowNetwork ? "basic" : "strict";

  return env;
}
52
+
53
/**
 * Execute a command inside a sandbox.
 * Returns { stdout, stderr, exitCode, timedOut }
 *
 * Fixes vs. the previous version:
 *  - no more `new Promise(async …)` executor: a rejection inside an async
 *    executor is swallowed and leaves the promise pending forever, so the
 *    `await hasUnshare()` now happens before the Promise is constructed;
 *  - the unused `killed` flag is removed;
 *  - the maxBuffer overflow path now kills the whole process group on Linux
 *    (previously only the timeout path did), via a shared `killTree` helper.
 *
 * @param {string} command - shell command, run via `sh -c`
 * @param {object} [options]
 * @param {string} [options.cwd] - working directory (default: process.cwd())
 * @param {number} [options.timeout=120000] - ms before SIGKILL
 * @param {number} [options.maxBuffer] - max stdout/stderr bytes before truncation
 * @param {string} [options.level] - a SandboxLevel value (default BASIC)
 * @param {boolean} [options.allowNetwork=false]
 * @param {string[]} [options.allowWrite] - reserved; currently unused
 * @returns {Promise<{stdout: string, stderr: string, exitCode: number, timedOut: boolean}>}
 */
export async function sandboxExec(command, options = {}) {
  const {
    cwd = process.cwd(),
    timeout = 120000,
    maxBuffer = 10 * 1024 * 1024,
    level = SandboxLevel.BASIC,
    allowNetwork = false,
    allowWrite = [], // additional writable paths beyond cwd (reserved, unused)
  } = options;

  const env = level === SandboxLevel.NONE ? process.env : buildSandboxEnv(allowNetwork);

  // Resolve the launcher before constructing the Promise so no `await`
  // happens inside the executor.
  let cmd = "sh";
  let args = ["-c", command];
  if (level === SandboxLevel.STRICT && IS_LINUX && !allowNetwork && (await hasUnshare())) {
    // unshare --net creates a new, empty network namespace (no interfaces
    // → no network access for the child).
    cmd = "unshare";
    args = ["--net", "--map-root-user", "sh", "-c", command];
  }

  return new Promise((resolve) => {
    let stdout = "";
    let stderr = "";
    let timedOut = false;

    const proc = spawn(cmd, args, {
      cwd,
      env,
      stdio: ["pipe", "pipe", "pipe"],
      // Detached on Linux so the whole process group can be killed together.
      detached: IS_LINUX,
    });

    // Kill the child — and, on Linux, its entire process group.
    const killTree = () => {
      try {
        if (IS_LINUX && proc.pid) {
          process.kill(-proc.pid, "SIGKILL");
        } else {
          proc.kill("SIGKILL");
        }
      } catch {
        // process already exited
      }
    };

    const timer = setTimeout(() => {
      timedOut = true;
      killTree();
    }, timeout);

    proc.stdout.on("data", (data) => {
      stdout += data.toString();
      if (stdout.length > maxBuffer) {
        stdout = stdout.slice(0, maxBuffer) + "\n...(truncated)";
        killTree();
      }
    });

    proc.stderr.on("data", (data) => {
      stderr += data.toString();
      if (stderr.length > maxBuffer) {
        stderr = stderr.slice(0, maxBuffer) + "\n...(truncated)";
      }
    });

    proc.on("close", (code) => {
      clearTimeout(timer);
      resolve({
        stdout: stdout.trim(),
        stderr: stderr.trim(),
        exitCode: code ?? 1, // null = killed by signal; report failure
        timedOut,
      });
    });

    proc.on("error", (err) => {
      clearTimeout(timer);
      resolve({
        stdout: "",
        stderr: err.message,
        exitCode: 1,
        timedOut: false,
      });
    });

    // No input is ever piped to the child.
    proc.stdin.end();
  });
}
151
+
152
/**
 * Get sandbox info for display.
 * @param {string} level - one of the SandboxLevel values
 * @returns {string} human-readable description of the effective sandbox
 */
export function getSandboxInfo(level) {
  if (level === SandboxLevel.STRICT) {
    // Strict isolation relies on Linux namespaces; elsewhere it degrades to basic.
    return IS_LINUX ? "strict (network isolated)" : "basic (strict requires Linux)";
  }
  if (level === SandboxLevel.BASIC) {
    return "basic (env sanitized, timeout enforced)";
  }
  return "none";
}