@clawdreyhepburn/carapace 0.2.1 → 0.3.0

package/README.md CHANGED
@@ -38,19 +38,22 @@ The progression:
  ## Architecture

  ```
- +-------------+     +----------------------------+     +-----------------+
- |             |     |          Carapace          |     |  MCP Server A   |
- |  OpenClaw   |---->|                            |---->|  (filesystem)   |
- |  Agent      |     |  +----------------------+  |     +-----------------+
- |             |     |  |    Cedarling WASM    |  |     |  MCP Server B   |
- |  mcp_call   |---->|  |    (Cedar 4.4.2)     |  |---->|    (GitHub)     |
- |             |     |  +----------------------+  |     +-----------------+
- |  carapace   |     |                            |     +-----------------+
- |   _exec   --|---->|  Cedar: exec_command       |---->|  Shell (local)  |
- |             |     |                            |     +-----------------+
- |  carapace   |     |                            |     +-----------------+
- |   _fetch  --|---->|  Cedar: call_api           |---->|  HTTP (remote)  |
- |             |     |  +----------------------+  |     +-----------------+
+                     +----------------------------+
+                     |          Carapace          |
+ +-------------+     |                            |     +------------------+
+ |             |     |  +----------------------+  |     |   Anthropic /    |
+ |  OpenClaw   |---->|  |      LLM Proxy       |  |---->|   OpenAI API     |
+ |  Agent      |     |  | (intercepts tool_use)|  |     +------------------+
+ |             |     |  +----------------------+  |
+ |             |     |                            |     +-----------------+
+ |             |     |   Cedar evaluates          |     |  MCP Server A   |
+ |             |     |   every tool call          |---->|  (filesystem)   |
+ |             |     |                            |     +-----------------+
+ |             |     |  +----------------------+  |     |  MCP Server B   |
+ |             |     |  |    Cedarling WASM    |  |---->|    (GitHub)     |
+ |             |     |  |    (Cedar 4.4.2)     |  |     +-----------------+
+ |             |     |  +----------------------+  |
+ |             |     |  +----------------------+  |
  |             |     |  |  Local Control GUI   |  |
  +-------------+     |  +----------------------+  |
                      +--------------+--------------+
@@ -61,7 +64,11 @@ The progression:
  +-------------+
  ```

- **Every operation flows through Cedar evaluation.** MCP tool calls, shell commands, and outbound API requests are all authorized by Cedar policies before execution. If the policy says deny, the operation never happens. The agent gets a clear denial message with the reason.
+ ### Two enforcement modes
+
+ **LLM Proxy (recommended):** Carapace holds the real API key and proxies all LLM traffic. When the LLM suggests a tool call, Carapace evaluates it against Cedar *before returning the response to OpenClaw*. Denied tool calls are stripped from the response, so OpenClaw never sees them and can't execute them. **This cannot be bypassed.** The agent can't call tools that Cedar denies because it never receives the tool-call instruction.
+
+ **Tool-level gating:** Carapace registers Cedar-gated agent tools (`carapace_exec`, `carapace_fetch`, `mcp_call`) that authorize each operation before executing it. This requires denying the built-in tools via `openclaw carapace setup` to prevent bypass. Use this mode when you can't or don't want to proxy LLM traffic.

  ## Screenshots

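To make the stripping concrete, here is a minimal sketch of the rewrite the proxy applies to an Anthropic-style response. The shapes follow `filterContentBlocks` in `src/llm-proxy.ts`; the tool call and the policy outcome are hypothetical:

```typescript
// Hypothetical content array returned by the LLM, before filtering.
const upstreamContent = [
  { type: "text", text: "I'll clean up the old branch." },
  {
    type: "tool_use",
    id: "toolu_01", // illustrative id
    name: "exec",
    input: { command: "git push origin --delete main" },
  },
];

// After Cedar denies the exec call, Carapace replaces the tool_use block
// with a text block, so OpenClaw receives nothing it could execute.
const filteredContent = [
  { type: "text", text: "I'll clean up the old branch." },
  { type: "text", text: "\n🚫 DENIED by Cedar policy: exec\n" },
];
```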
@@ -155,7 +162,41 @@ In your OpenClaw config, add the servers you want Carapace to manage:
  }
  ```

- ### 2. Close the bypass gap
+ ### 2. Enable the LLM Proxy (recommended)
+
+ The LLM Proxy is the strongest enforcement mode. Carapace holds the real API key and intercepts tool calls before OpenClaw can execute them.
+
+ ```json5
+ {
+   plugins: {
+     entries: {
+       carapace: {
+         enabled: true,
+         config: {
+           proxy: {
+             enabled: true,
+             port: 19821,
+             upstream: {
+               anthropic: { apiKey: "sk-ant-your-real-key-here" }
+             }
+           }
+         }
+       }
+     }
+   },
+   // Point OpenClaw at the proxy instead of the real API
+   providers: {
+     anthropic: {
+       apiKey: "carapace-proxy", // dummy — proxy holds the real key
+       baseUrl: "http://127.0.0.1:19821"
+     }
+   }
+ }
+ ```
+
+ Now every tool call the LLM suggests goes through Cedar. If Cedar denies it, the tool call is stripped from the response before OpenClaw ever sees it.
+
+ ### 3. (Alternative) Close the bypass gap without the proxy

  By default, agents can still use OpenClaw's built-in `exec` and `web_fetch` tools, which bypass Cedar entirely. Run setup to close this:

@@ -173,18 +214,18 @@ openclaw carapace check

  > ⚠️ **Without this step, Carapace policies are advisory, not enforced.** The agent can simply choose to use the built-in tools instead. Always run `carapace setup` for real security.

- ### 3. Open the control GUI
+ ### 4. Open the control GUI

  Navigate to [http://localhost:19820](http://localhost:19820) in your browser. You'll see all discovered tools from all connected servers.

- ### 4. Enable tools
+ ### 5. Enable tools

  Toggle individual tools on/off. Each toggle writes a Cedar policy:

  - **Toggle ON** → creates a `permit` policy for that tool
  - **Toggle OFF** → creates a `forbid` policy for that tool

- ### 5. Create custom policies
+ ### 6. Create custom policies

  Click **"+ New Policy"** to open the visual builder, or edit policies directly in the Policies tab. Examples:
@@ -239,7 +280,7 @@ permit(

  > 📖 **Want more?** See [Recommended Policies](docs/RECOMMENDED-POLICIES.md) for real-world policies covering destructive commands, credential theft, data exfiltration, email deletion, and complete starter configurations.

- ### 6. Verify policies
+ ### 7. Verify policies

  Click **⚡ Verify** to validate that all policies are syntactically correct and consistent.

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@clawdreyhepburn/carapace",
-   "version": "0.2.1",
+   "version": "0.3.0",
    "description": "Immutable policy boundaries for MCP tool access. Powered by Cedar + Cedarling WASM.",
    "license": "Apache-2.0",
    "type": "module",
package/src/index.ts CHANGED
@@ -8,6 +8,7 @@
  import { CedarlingEngine } from "./cedar-engine-cedarling.js";
  import { McpAggregator } from "./mcp-aggregator.js";
  import { ControlGui } from "./gui/server.js";
+ import { LlmProxy } from "./llm-proxy.js";
  import type { PluginConfig } from "./types.js";

  export const id = "carapace";
@@ -64,6 +65,24 @@ export default function register(api: OpenClawPluginApi) {
      logger,
    });

+   // --- LLM Proxy: intercept tool calls at the API level ---
+   const proxyConfig = config.proxy;
+   const proxy = proxyConfig?.enabled ? new LlmProxy({
+     port: proxyConfig.port ?? 19821,
+     upstream: {
+       anthropic: proxyConfig.upstream?.anthropic ? {
+         url: proxyConfig.upstream.anthropic.url ?? "https://api.anthropic.com",
+         apiKey: proxyConfig.upstream.anthropic.apiKey,
+       } : undefined,
+       openai: proxyConfig.upstream?.openai ? {
+         url: proxyConfig.upstream.openai.url ?? "https://api.openai.com",
+         apiKey: proxyConfig.upstream.openai.apiKey,
+       } : undefined,
+     },
+     cedar,
+     logger,
+   }) : null;
+
    // --- Bypass detection: warn if built-in tools aren't denied ---
    const BYPASS_TOOLS = ["exec", "web_fetch", "web_search"];

@@ -121,17 +140,26 @@ export default function register(api: OpenClawPluginApi) {
      await gui.start();
      logger.info(`Control GUI at http://localhost:${config.guiPort ?? 19820}`);

-     // Check for bypass vulnerabilities
-     const bypasses = checkForBypasses();
-     if (bypasses.length > 0) {
-       logger.warn(
-         `⚠️ BYPASS RISK: Built-in tools [${bypasses.join(", ")}] are NOT denied. ` +
-         `Agents can use these to bypass Carapace Cedar policies. ` +
-         `Run "openclaw carapace setup" to fix this automatically.`
+     if (proxy) {
+       await proxy.start();
+       logger.info(
+         `🛡️ LLM Proxy active on http://127.0.0.1:${proxyConfig!.port ?? 19821} — ` +
+         `all tool calls go through Cedar`
        );
+     } else {
+       // Check for bypass vulnerabilities only when proxy is disabled
+       const bypasses = checkForBypasses();
+       if (bypasses.length > 0) {
+         logger.warn(
+           `⚠️ BYPASS RISK: Built-in tools [${bypasses.join(", ")}] are NOT denied and LLM proxy is not enabled. ` +
+           `Agents can use these to bypass Carapace Cedar policies. ` +
+           `Enable the LLM proxy (recommended) or run "openclaw carapace setup" to deny built-in tools.`
+         );
+       }
      }
    },
    async stop() {
+     if (proxy) await proxy.stop();
      await gui.stop();
      await aggregator.disconnectAll();
      logger.info("Carapace stopped");
@@ -408,7 +436,16 @@ export default function register(api: OpenClawPluginApi) {
      const tools = aggregator.listTools();
      const enabled = tools.filter((t) => t.enabled).length;
      console.log(`\n ${enabled}/${tools.length} tools enabled`);
-     console.log(` GUI: http://localhost:${config.guiPort ?? 19820}\n`);
+     console.log(` GUI: http://localhost:${config.guiPort ?? 19820}`);
+
+     if (proxy) {
+       const stats = proxy.getStats();
+       console.log(`\n 🛡️ LLM Proxy: http://127.0.0.1:${proxyConfig!.port ?? 19821}`);
+       console.log(` Requests: ${stats.requests} | Tool calls evaluated: ${stats.toolCallsEvaluated} | Denied: ${stats.toolCallsDenied}`);
+     } else {
+       console.log(`\n ⚠️ LLM Proxy: disabled`);
+     }
+     console.log();
    });

    cmd.command("tools").action(async () => {
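With the proxy enabled, the summary this action prints would look roughly like the following; the format is taken from the `console.log` calls above, and the counts are hypothetical:

```
 14/32 tools enabled
 GUI: http://localhost:19820

 🛡️ LLM Proxy: http://127.0.0.1:19821
 Requests: 42 | Tool calls evaluated: 17 | Denied: 3
```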
package/src/llm-proxy.ts ADDED
@@ -0,0 +1,648 @@
+ /**
+  * LLM Proxy — Sits between OpenClaw and the LLM provider.
+  *
+  * Intercepts tool_use blocks in LLM responses and evaluates them
+  * against Cedar policies before OpenClaw can execute them.
+  *
+  * The agent never gets the real API key. Carapace holds it.
+  * Denied tool calls are replaced with text blocks explaining why.
+  *
+  * Supports:
+  * - Anthropic Messages API (/v1/messages)
+  * - OpenAI Chat Completions API (/v1/chat/completions)
+  * - Both streaming and non-streaming (streaming is buffered, filtered, re-streamed)
+  */
+
+ import { createServer, type IncomingMessage, type ServerResponse } from "node:http";
+ import type { Logger } from "./types.js";
+
+ interface ToolUseBlock {
+   type: "tool_use";
+   id: string;
+   name: string;
+   input: Record<string, unknown>;
+ }
+
+ interface TextBlock {
+   type: "text";
+   text: string;
+ }
+
+ type ContentBlock = ToolUseBlock | TextBlock | { type: string; [key: string]: unknown };
+
+ interface CedarAuthorizer {
+   authorize(request: {
+     principal: string;
+     action: string;
+     resource: string;
+     context?: Record<string, unknown>;
+   }): Promise<{ decision: "allow" | "deny"; reasons: string[] }>;
+ }
+
+ export interface LlmProxyOpts {
+   port: number;
+   upstream: {
+     anthropic?: { url: string; apiKey: string };
+     openai?: { url: string; apiKey: string };
+   };
+   cedar: CedarAuthorizer;
+   logger: Logger;
+ }
+
+ export class LlmProxy {
+   private server: ReturnType<typeof createServer> | null = null;
+   private port: number;
+   private upstream: LlmProxyOpts["upstream"];
+   private cedar: CedarAuthorizer;
+   private logger: Logger;
+
+   // Stats
+   private stats = {
+     requests: 0,
+     toolCallsEvaluated: 0,
+     toolCallsDenied: 0,
+   };
+
+   constructor(opts: LlmProxyOpts) {
+     this.port = opts.port;
+     this.upstream = opts.upstream;
+     this.cedar = opts.cedar;
+     this.logger = opts.logger;
+   }
+
+   async start(): Promise<void> {
+     this.server = createServer(async (req, res) => {
+       try {
+         await this.handleRequest(req, res);
+       } catch (err: any) {
+         this.logger.error(`LLM Proxy error: ${err.message}`);
+         res.writeHead(502, { "Content-Type": "application/json" });
+         res.end(JSON.stringify({ error: { message: `Carapace proxy error: ${err.message}` } }));
+       }
+     });
+
+     return new Promise((resolve) => {
+       this.server!.listen(this.port, "127.0.0.1", () => {
+         this.logger.info(`LLM Proxy listening on http://127.0.0.1:${this.port}`);
+         resolve();
+       });
+     });
+   }
+
+   async stop(): Promise<void> {
+     if (!this.server) return;
+     return new Promise((resolve) => {
+       this.server!.close(() => resolve());
+     });
+   }
+
+   getStats() {
+     return { ...this.stats };
+   }
+
+   // ── Request handling ──
+
+   private async handleRequest(req: IncomingMessage, res: ServerResponse): Promise<void> {
+     this.stats.requests++;
+     const path = req.url ?? "/";
+
+     // Health check
+     if (path === "/health" || path === "/carapace/status") {
+       res.writeHead(200, { "Content-Type": "application/json" });
+       res.end(JSON.stringify({ ok: true, stats: this.stats }));
+       return;
+     }
+
+     // Detect provider from path
+     if (path.startsWith("/v1/messages")) {
+       await this.proxyAnthropic(req, res);
+     } else if (path.startsWith("/v1/chat/completions")) {
+       await this.proxyOpenAI(req, res);
+     } else {
+       // Pass through unknown paths (models list, etc.)
+       await this.passthrough(req, res, path);
+     }
+   }
+
+   // ── Anthropic Messages API ──
+
+   private async proxyAnthropic(req: IncomingMessage, res: ServerResponse): Promise<void> {
+     const upstream = this.upstream.anthropic;
+     if (!upstream) {
+       res.writeHead(501, { "Content-Type": "application/json" });
+       res.end(JSON.stringify({ error: { message: "Anthropic upstream not configured" } }));
+       return;
+     }
+
+     const body = await this.readBody(req);
+     let parsed: any;
+     try {
+       parsed = JSON.parse(body);
+     } catch {
+       res.writeHead(400, { "Content-Type": "application/json" });
+       res.end(JSON.stringify({ error: { message: "Invalid JSON body" } }));
+       return;
+     }
+
+     const isStreaming = parsed.stream === true;
+
+     // Forward to Anthropic (streaming responses are buffered, filtered, then re-streamed)
+     const upstreamResponse = await this.forwardToAnthropic(upstream, body, req, isStreaming);
+
+     if (!upstreamResponse.ok) {
+       // Forward error as-is
+       const errorBody = await upstreamResponse.text();
+       res.writeHead(upstreamResponse.status, {
+         "Content-Type": upstreamResponse.headers.get("content-type") ?? "application/json",
+       });
+       res.end(errorBody);
+       return;
+     }
+
+     if (isStreaming) {
+       await this.handleAnthropicStreaming(upstreamResponse, res);
+     } else {
+       await this.handleAnthropicNonStreaming(upstreamResponse, res);
+     }
+   }
+
+   private async forwardToAnthropic(
+     upstream: { url: string; apiKey: string },
+     body: string,
+     req: IncomingMessage,
+     isStreaming: boolean,
+   ): Promise<Response> {
+     // Build headers, replacing auth
+     const headers: Record<string, string> = {
+       "Content-Type": "application/json",
+       "x-api-key": upstream.apiKey,
+       "anthropic-version": (req.headers["anthropic-version"] as string) ?? "2023-06-01",
+     };
+
+     // Forward anthropic-beta if present
+     const beta = req.headers["anthropic-beta"];
+     if (beta) headers["anthropic-beta"] = beta as string;
+
+     return fetch(`${upstream.url}/v1/messages`, {
+       method: "POST",
+       headers,
+       body,
+     });
+   }
+
+   private async handleAnthropicNonStreaming(upstreamResponse: Response, res: ServerResponse): Promise<void> {
+     const responseBody = await upstreamResponse.text();
+     let parsed: any;
+     try {
+       parsed = JSON.parse(responseBody);
+     } catch {
+       res.writeHead(200, { "Content-Type": "application/json" });
+       res.end(responseBody);
+       return;
+     }
+
+     // Filter tool_use blocks
+     if (parsed.content && Array.isArray(parsed.content)) {
+       parsed.content = await this.filterContentBlocks(parsed.content);
+
+       // Update stop_reason if all tool_use blocks were denied
+       const hasToolUse = parsed.content.some((b: any) => b.type === "tool_use");
+       if (!hasToolUse && parsed.stop_reason === "tool_use") {
+         parsed.stop_reason = "end_turn";
+       }
+     }
+
+     const filtered = JSON.stringify(parsed);
+     res.writeHead(200, {
+       "Content-Type": "application/json",
+     });
+     res.end(filtered);
+   }
+
+   private async handleAnthropicStreaming(upstreamResponse: Response, res: ServerResponse): Promise<void> {
+     // Buffer the full streaming response, then filter and re-stream
+     const reader = upstreamResponse.body?.getReader();
+     if (!reader) {
+       res.writeHead(502);
+       res.end();
+       return;
+     }
+
+     // Collect all SSE events
+     const decoder = new TextDecoder();
+     let buffer = "";
+     const events: Array<{ event: string; data: string }> = [];
+
+     while (true) {
+       const { done, value } = await reader.read();
+       if (done) break;
+       buffer += decoder.decode(value, { stream: true });
+
+       // Parse SSE events from buffer
+       const lines = buffer.split("\n");
+       buffer = lines.pop() ?? ""; // Keep incomplete line
+
+       let currentEvent = "";
+       for (const line of lines) {
+         if (line.startsWith("event: ")) {
+           currentEvent = line.slice(7).trim();
+         } else if (line.startsWith("data: ")) {
+           events.push({ event: currentEvent, data: line.slice(6) });
+           currentEvent = "";
+         }
+       }
+     }
+
+     // Find tool_use content blocks and evaluate them
+     const toolBlocks = new Map<number, { name: string; inputJson: string; id: string }>();
+     const deniedIndices = new Set<number>();
+     let currentBlockIndex = -1;
+
+     for (const ev of events) {
+       if (ev.event === "content_block_start") {
+         try {
+           const d = JSON.parse(ev.data);
+           currentBlockIndex = d.index ?? -1;
+           if (d.content_block?.type === "tool_use") {
+             toolBlocks.set(currentBlockIndex, {
+               name: d.content_block.name,
+               id: d.content_block.id,
+               inputJson: "",
+             });
+           }
+         } catch {}
+       } else if (ev.event === "content_block_delta") {
+         try {
+           const d = JSON.parse(ev.data);
+           const idx = d.index ?? currentBlockIndex;
+           const block = toolBlocks.get(idx);
+           if (block && d.delta?.type === "input_json_delta") {
+             block.inputJson += d.delta.partial_json ?? "";
+           }
+         } catch {}
+       }
+     }
+
+     // Evaluate each tool call against Cedar
+     for (const [idx, block] of toolBlocks) {
+       const decision = await this.evaluateToolCall(block.name, block.inputJson);
+       if (decision === "deny") {
+         deniedIndices.add(idx);
+         this.logger.info(`LLM Proxy DENIED tool call: ${block.name} (block ${idx})`);
+       }
+     }
+
+     if (deniedIndices.size === 0) {
+       // No denials — forward everything as-is
+       res.writeHead(200, {
+         "Content-Type": "text/event-stream",
+         "Cache-Control": "no-cache",
+         "Connection": "keep-alive",
+       });
+       for (const ev of events) {
+         if (ev.event) res.write(`event: ${ev.event}\n`);
+         res.write(`data: ${ev.data}\n\n`);
+       }
+       res.end();
+       return;
+     }
+
+     // Rewrite stream: replace denied tool blocks with text blocks
+     res.writeHead(200, {
+       "Content-Type": "text/event-stream",
+       "Cache-Control": "no-cache",
+       "Connection": "keep-alive",
+     });
+
+     let skipBlock = false;
+     let skipBlockIndex = -1;
+
+     for (const ev of events) {
+       if (ev.event === "content_block_start") {
+         try {
+           const d = JSON.parse(ev.data);
+           const idx = d.index ?? -1;
+           if (deniedIndices.has(idx)) {
+             skipBlock = true;
+             skipBlockIndex = idx;
+             const block = toolBlocks.get(idx)!;
+             // Emit a text block instead
+             const replacement = {
+               index: idx,
+               content_block: {
+                 type: "text",
+                 text: "",
+               },
+             };
+             res.write(`event: content_block_start\ndata: ${JSON.stringify(replacement)}\n\n`);
+             // Emit the denial text as a delta
+             const denialText = `\n🚫 DENIED by Cedar policy: ${block.name}\n`;
+             const delta = {
+               index: idx,
+               delta: { type: "text_delta", text: denialText },
+             };
+             res.write(`event: content_block_delta\ndata: ${JSON.stringify(delta)}\n\n`);
+             continue;
+           }
+         } catch {}
+         skipBlock = false;
+       }
+
+       if (ev.event === "content_block_delta") {
+         try {
+           const d = JSON.parse(ev.data);
+           if (deniedIndices.has(d.index ?? skipBlockIndex)) continue;
+         } catch {}
+       }
+
+       if (ev.event === "content_block_stop") {
+         try {
+           const d = JSON.parse(ev.data);
+           if (deniedIndices.has(d.index ?? skipBlockIndex)) {
+             // Emit the stop for our replacement text block
+             res.write(`event: content_block_stop\ndata: ${ev.data}\n\n`);
+             skipBlock = false;
+             continue;
+           }
+         } catch {}
+       }
+
+       if (skipBlock) continue;
+
+       // Fix message_delta stop_reason if all tools were denied
+       if (ev.event === "message_delta") {
+         try {
+           const d = JSON.parse(ev.data);
+           const remainingTools = [...toolBlocks.keys()].filter((i) => !deniedIndices.has(i));
+           if (remainingTools.length === 0 && d.delta?.stop_reason === "tool_use") {
+             d.delta.stop_reason = "end_turn";
+             res.write(`event: message_delta\ndata: ${JSON.stringify(d)}\n\n`);
+             continue;
+           }
+         } catch {}
+       }
+
+       // Forward event as-is
+       if (ev.event) res.write(`event: ${ev.event}\n`);
+       res.write(`data: ${ev.data}\n\n`);
+     }
+
+     res.end();
+   }
+
+   // ── OpenAI Chat Completions API ──
+
+   private async proxyOpenAI(req: IncomingMessage, res: ServerResponse): Promise<void> {
+     const upstream = this.upstream.openai;
+     if (!upstream) {
+       res.writeHead(501, { "Content-Type": "application/json" });
+       res.end(JSON.stringify({ error: { message: "OpenAI upstream not configured" } }));
+       return;
+     }
+
+     const body = await this.readBody(req);
+     let parsed: any;
+     try {
+       parsed = JSON.parse(body);
+     } catch {
+       res.writeHead(400, { "Content-Type": "application/json" });
+       res.end(JSON.stringify({ error: { message: "Invalid JSON body" } }));
+       return;
+     }
+
+     const isStreaming = parsed.stream === true;
+
+     // For streaming: force non-streaming, filter, then re-stream
+     const forwardBody = isStreaming ? JSON.stringify({ ...parsed, stream: false }) : body;
+
+     const headers: Record<string, string> = {
+       "Content-Type": "application/json",
+       "Authorization": `Bearer ${upstream.apiKey}`,
+     };
+
+     const upstreamResponse = await fetch(`${upstream.url}/v1/chat/completions`, {
+       method: "POST",
+       headers,
+       body: forwardBody,
+     });
+
+     if (!upstreamResponse.ok) {
+       const errorBody = await upstreamResponse.text();
+       res.writeHead(upstreamResponse.status, { "Content-Type": "application/json" });
+       res.end(errorBody);
+       return;
+     }
+
+     const responseBody = await upstreamResponse.text();
+     let response: any;
+     try {
+       response = JSON.parse(responseBody);
+     } catch {
+       res.writeHead(200, { "Content-Type": "application/json" });
+       res.end(responseBody);
+       return;
+     }
+
+     // Filter tool_calls from choices
+     if (response.choices) {
+       for (const choice of response.choices) {
+         if (choice.message?.tool_calls) {
+           const filtered = [];
+           const denials: string[] = [];
+
+           for (const tc of choice.message.tool_calls) {
+             const decision = await this.evaluateToolCall(
+               tc.function?.name ?? tc.name,
+               typeof tc.function?.arguments === "string"
+                 ? tc.function.arguments
+                 : JSON.stringify(tc.function?.arguments ?? {}),
+             );
+
+             if (decision === "allow") {
+               filtered.push(tc);
+             } else {
+               denials.push(
+                 `🚫 DENIED by Cedar policy: ${tc.function?.name ?? tc.name}`,
+               );
+               this.logger.info(`LLM Proxy DENIED tool call: ${tc.function?.name}`);
+             }
+           }
+
+           choice.message.tool_calls = filtered.length > 0 ? filtered : undefined;
+
+           // Add denial messages to content
+           if (denials.length > 0) {
+             const existing = choice.message.content ?? "";
+             choice.message.content = (existing + "\n" + denials.join("\n")).trim();
+           }
+
+           // Fix finish_reason if all tool calls were denied
+           if (filtered.length === 0 && choice.finish_reason === "tool_calls") {
+             choice.finish_reason = "stop";
+           }
+         }
+       }
+     }
+
+     if (isStreaming) {
+       // Re-stream as SSE (single chunk since we forced non-streaming)
+       res.writeHead(200, {
+         "Content-Type": "text/event-stream",
+         "Cache-Control": "no-cache",
+       });
+       // Convert to streaming format
+       const chunk = { ...response, object: "chat.completion.chunk" };
+       for (const choice of chunk.choices ?? []) {
+         choice.delta = choice.message;
+         delete choice.message;
+       }
+       res.write(`data: ${JSON.stringify(chunk)}\n\n`);
+       res.write("data: [DONE]\n\n");
+       res.end();
+     } else {
+       res.writeHead(200, { "Content-Type": "application/json" });
+       res.end(JSON.stringify(response));
+     }
+   }
+
+   // ── Passthrough for non-chat endpoints ──
+
+   private async passthrough(req: IncomingMessage, res: ServerResponse, path: string): Promise<void> {
+     // Try Anthropic first, then OpenAI
+     const upstream = this.upstream.anthropic ?? this.upstream.openai;
+     if (!upstream) {
+       res.writeHead(501);
+       res.end();
+       return;
+     }
+
+     const body = req.method !== "GET" ? await this.readBody(req) : undefined;
+
+     const headers: Record<string, string> = { "Content-Type": "application/json" };
+     if (this.upstream.anthropic) {
+       headers["x-api-key"] = upstream.apiKey;
+       headers["anthropic-version"] = "2023-06-01";
+     } else {
+       headers["Authorization"] = `Bearer ${upstream.apiKey}`;
+     }
+
+     const response = await fetch(`${upstream.url}${path}`, {
+       method: req.method ?? "GET",
+       headers,
+       body,
+     });
+
+     const responseBody = await response.text();
+     res.writeHead(response.status, {
+       "Content-Type": response.headers.get("content-type") ?? "application/json",
+     });
+     res.end(responseBody);
+   }
+
+   // ── Cedar evaluation ──
+
+   private async evaluateToolCall(toolName: string, inputJson: string): Promise<"allow" | "deny"> {
+     this.stats.toolCallsEvaluated++;
+
+     let parsedInput: Record<string, unknown> = {};
+     try {
+       parsedInput = JSON.parse(inputJson || "{}");
+     } catch {}
+
+     // Determine resource type based on tool name
+     let resourceType = "Tool";
+     let action = "call_tool";
+     let resourceId = toolName;
+     let context: Record<string, unknown> = {};
+
+     // Map known OpenClaw built-in tools to resource types
+     if (toolName === "exec" || toolName === "process") {
+       resourceType = "Shell";
+       action = "exec_command";
+       // Extract binary name from the command argument
+       const cmd = (parsedInput.command as string) ?? "";
+       resourceId = cmd.trim().split(/\s+/)[0]?.replace(/^.*\//, "") || toolName;
+       // Map to schema-known context attributes
+       context = {
+         args: cmd,
+         workdir: (parsedInput.workdir as string) ?? "",
+       };
+     } else if (toolName === "web_fetch" || toolName === "web_search") {
+       resourceType = "API";
+       action = "call_api";
+       // Extract domain from URL
+       const url = (parsedInput.url as string) ?? (parsedInput.query as string) ?? "";
+       try {
+         if (url.startsWith("http")) {
+           resourceId = new URL(url).hostname;
+         } else {
+           resourceId = toolName;
+         }
+       } catch {
+         resourceId = toolName;
+       }
+       // Map to schema-known context attributes
+       context = {
+         url,
+         method: (parsedInput.method as string) ?? "GET",
+         body: (parsedInput.body as string) ?? "",
+       };
+     }
+
+     const decision = await this.cedar.authorize({
+       principal: `Agent::"openclaw"`,
+       action: `Action::"${action}"`,
+       resource: `${resourceType}::"${resourceId}"`,
+       context,
+     });
+
+     if (decision.decision === "deny") {
+       this.stats.toolCallsDenied++;
+     }
+
+     return decision.decision;
+   }
+
+   // ── Content block filtering (Anthropic non-streaming) ──
+
+   private async filterContentBlocks(blocks: ContentBlock[]): Promise<ContentBlock[]> {
+     const result: ContentBlock[] = [];
+
+     for (const block of blocks) {
+       if (block.type !== "tool_use") {
+         result.push(block);
+         continue;
+       }
+
+       const toolBlock = block as ToolUseBlock;
+       const decision = await this.evaluateToolCall(
+         toolBlock.name,
+         JSON.stringify(toolBlock.input),
+       );
+
+       if (decision === "allow") {
+         result.push(block);
+       } else {
+         // Replace with denial text
+         result.push({
+           type: "text",
+           text: `\n🚫 DENIED by Cedar policy: ${toolBlock.name}\n`,
+         });
+         this.logger.info(`LLM Proxy DENIED tool call: ${toolBlock.name}`);
+       }
+     }
+
+     return result;
+   }
+
+   // ── Utilities ──
+
+   private readBody(req: IncomingMessage): Promise<string> {
+     return new Promise((resolve, reject) => {
+       const chunks: Buffer[] = [];
+       req.on("data", (chunk) => chunks.push(chunk));
+       req.on("end", () => resolve(Buffer.concat(chunks).toString()));
+       req.on("error", reject);
+     });
+   }
+ }
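For reference, a minimal sketch of how `evaluateToolCall`'s mapping looks from the caller's side. The `loggingCedar` stub is hypothetical (the real engine wired in by `index.ts` is `CedarlingEngine`), and the `console as any` logger assumes the plugin's `Logger` is console-compatible:

```typescript
import { LlmProxy } from "./llm-proxy.js";

// Hypothetical CedarAuthorizer stub: logs every authorization request
// and allows everything. Only useful for seeing the mapping below.
const loggingCedar = {
  async authorize(request: {
    principal: string;
    action: string;
    resource: string;
    context?: Record<string, unknown>;
  }) {
    console.log(request.principal, request.action, request.resource);
    return { decision: "allow" as const, reasons: [] as string[] };
  },
};

// Mapping performed by evaluateToolCall:
//   exec      { command: "git push ..." } -> Action::"exec_command", Shell::"git"
//   web_fetch { url: "https://a.b/c" }    -> Action::"call_api",     API::"a.b"
//   any other tool name                   -> Action::"call_tool",    Tool::"<name>"

const proxy = new LlmProxy({
  port: 19821,
  upstream: {
    anthropic: { url: "https://api.anthropic.com", apiKey: process.env.ANTHROPIC_API_KEY ?? "" },
  },
  cedar: loggingCedar,
  logger: console as any, // assumption: Logger is console-compatible
});
await proxy.start();
```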
package/src/types.ts CHANGED
@@ -23,6 +23,15 @@ export interface PluginConfig {
    policyDir?: string;
    defaultPolicy?: "deny-all" | "allow-all";
    verify?: boolean;
+   /** LLM proxy configuration — sits between agent and LLM provider */
+   proxy?: {
+     enabled?: boolean;
+     port?: number; // default: 19821
+     upstream?: {
+       anthropic?: { url?: string; apiKey: string };
+       openai?: { url?: string; apiKey: string };
+     };
+   };
  }

  export interface ServerConfig {
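A minimal `PluginConfig` exercising the new `proxy` field might look like the sketch below; the values are illustrative, and it assumes the interface's remaining fields are all optional:

```typescript
import type { PluginConfig } from "./types.js";

// Illustrative config: proxy enabled on the default port, Anthropic upstream only.
const config: PluginConfig = {
  proxy: {
    enabled: true,
    port: 19821, // the default
    upstream: {
      // url is optional and defaults to https://api.anthropic.com
      anthropic: { apiKey: "sk-ant-your-real-key-here" },
    },
  },
};
```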