@clawdreyhepburn/carapace 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,648 @@
1
+ /**
2
+ * LLM Proxy — Sits between OpenClaw and the LLM provider.
3
+ *
4
+ * Intercepts tool_use blocks in LLM responses and evaluates them
5
+ * against Cedar policies before OpenClaw can execute them.
6
+ *
7
+ * The agent never gets the real API key. Carapace holds it.
8
+ * Denied tool calls are replaced with text blocks explaining why.
9
+ *
10
+ * Supports:
11
+ * - Anthropic Messages API (/v1/messages)
12
+ * - OpenAI Chat Completions API (/v1/chat/completions)
13
+ * - Both streaming and non-streaming (streaming is buffered, filtered, re-streamed)
14
+ */
15
+
16
+ import { createServer, type IncomingMessage, type ServerResponse } from "node:http";
17
+ import type { Logger } from "./types.js";
18
+
19
/** A `tool_use` content block in an LLM response — a tool call requested by the model. */
interface ToolUseBlock {
  type: "tool_use";
  id: string; // block ID assigned by the provider
  name: string; // tool name (e.g. "exec", "web_fetch")
  input: Record<string, unknown>; // parsed tool arguments
}
25
+
26
/** A plain-text content block; denied tool calls are replaced with one of these. */
interface TextBlock {
  type: "text";
  text: string;
}
30
+
31
/** Any response content block: the two shapes we inspect, plus a passthrough for unknown types. */
type ContentBlock = ToolUseBlock | TextBlock | { type: string; [key: string]: unknown };
32
+
33
/**
 * Minimal interface to the Cedar policy engine: evaluates one
 * principal/action/resource request and returns allow or deny.
 */
interface CedarAuthorizer {
  authorize(request: {
    principal: string; // e.g. `Agent::"openclaw"`
    action: string; // e.g. `Action::"call_tool"`
    resource: string; // e.g. `Shell::"rm"` or `API::"example.com"`
    context?: Record<string, unknown>; // extra attributes visible to policies
  }): Promise<{ decision: "allow" | "deny"; reasons: string[] }>;
}
41
+
42
/** Construction options for the LLM proxy. */
export interface LlmProxyOpts {
  /** Local TCP port to listen on (the server binds to 127.0.0.1 only). */
  port: number;
  /** Upstream provider endpoints; at least one should be configured. */
  upstream: {
    anthropic?: { url: string; apiKey: string }; // base URL + real API key (held by Carapace, never the agent)
    openai?: { url: string; apiKey: string };
  };
  /** Policy engine used to allow/deny intercepted tool calls. */
  cedar: CedarAuthorizer;
  logger: Logger;
}
51
+
52
+ export class LlmProxy {
53
+ private server: ReturnType<typeof createServer> | null = null;
54
+ private port: number;
55
+ private upstream: LlmProxyOpts["upstream"];
56
+ private cedar: CedarAuthorizer;
57
+ private logger: Logger;
58
+
59
+ // Stats
60
+ private stats = {
61
+ requests: 0,
62
+ toolCallsEvaluated: 0,
63
+ toolCallsDenied: 0,
64
+ };
65
+
66
+ constructor(opts: LlmProxyOpts) {
67
+ this.port = opts.port;
68
+ this.upstream = opts.upstream;
69
+ this.cedar = opts.cedar;
70
+ this.logger = opts.logger;
71
+ }
72
+
73
+ async start(): Promise<void> {
74
+ this.server = createServer(async (req, res) => {
75
+ try {
76
+ await this.handleRequest(req, res);
77
+ } catch (err: any) {
78
+ this.logger.error(`LLM Proxy error: ${err.message}`);
79
+ res.writeHead(502, { "Content-Type": "application/json" });
80
+ res.end(JSON.stringify({ error: { message: `Carapace proxy error: ${err.message}` } }));
81
+ }
82
+ });
83
+
84
+ return new Promise((resolve) => {
85
+ this.server!.listen(this.port, "127.0.0.1", () => {
86
+ this.logger.info(`LLM Proxy listening on http://127.0.0.1:${this.port}`);
87
+ resolve();
88
+ });
89
+ });
90
+ }
91
+
92
+ async stop(): Promise<void> {
93
+ if (!this.server) return;
94
+ return new Promise((resolve) => {
95
+ this.server!.close(() => resolve());
96
+ });
97
+ }
98
+
99
+ getStats() {
100
+ return { ...this.stats };
101
+ }
102
+
103
+ // ── Request handling ──
104
+
105
+ private async handleRequest(req: IncomingMessage, res: ServerResponse): Promise<void> {
106
+ this.stats.requests++;
107
+ const path = req.url ?? "/";
108
+
109
+ // Health check
110
+ if (path === "/health" || path === "/carapace/status") {
111
+ res.writeHead(200, { "Content-Type": "application/json" });
112
+ res.end(JSON.stringify({ ok: true, stats: this.stats }));
113
+ return;
114
+ }
115
+
116
+ // Detect provider from path
117
+ if (path.startsWith("/v1/messages")) {
118
+ await this.proxyAnthropic(req, res);
119
+ } else if (path.startsWith("/v1/chat/completions")) {
120
+ await this.proxyOpenAI(req, res);
121
+ } else {
122
+ // Pass through unknown paths (models list, etc.)
123
+ await this.passthrough(req, res, path);
124
+ }
125
+ }
126
+
127
+ // ── Anthropic Messages API ──
128
+
129
+ private async proxyAnthropic(req: IncomingMessage, res: ServerResponse): Promise<void> {
130
+ const upstream = this.upstream.anthropic;
131
+ if (!upstream) {
132
+ res.writeHead(501, { "Content-Type": "application/json" });
133
+ res.end(JSON.stringify({ error: { message: "Anthropic upstream not configured" } }));
134
+ return;
135
+ }
136
+
137
+ const body = await this.readBody(req);
138
+ let parsed: any;
139
+ try {
140
+ parsed = JSON.parse(body);
141
+ } catch {
142
+ res.writeHead(400, { "Content-Type": "application/json" });
143
+ res.end(JSON.stringify({ error: { message: "Invalid JSON body" } }));
144
+ return;
145
+ }
146
+
147
+ const isStreaming = parsed.stream === true;
148
+
149
+ // Forward to Anthropic (always non-streaming for filtering)
150
+ const upstreamResponse = await this.forwardToAnthropic(upstream, body, req, isStreaming);
151
+
152
+ if (!upstreamResponse.ok) {
153
+ // Forward error as-is
154
+ const errorBody = await upstreamResponse.text();
155
+ res.writeHead(upstreamResponse.status, {
156
+ "Content-Type": upstreamResponse.headers.get("content-type") ?? "application/json",
157
+ });
158
+ res.end(errorBody);
159
+ return;
160
+ }
161
+
162
+ if (isStreaming) {
163
+ await this.handleAnthropicStreaming(upstreamResponse, res);
164
+ } else {
165
+ await this.handleAnthropicNonStreaming(upstreamResponse, res);
166
+ }
167
+ }
168
+
169
+ private async forwardToAnthropic(
170
+ upstream: { url: string; apiKey: string },
171
+ body: string,
172
+ req: IncomingMessage,
173
+ isStreaming: boolean,
174
+ ): Promise<Response> {
175
+ // Build headers, replacing auth
176
+ const headers: Record<string, string> = {
177
+ "Content-Type": "application/json",
178
+ "x-api-key": upstream.apiKey,
179
+ "anthropic-version": (req.headers["anthropic-version"] as string) ?? "2023-06-01",
180
+ };
181
+
182
+ // Forward anthropic-beta if present
183
+ const beta = req.headers["anthropic-beta"];
184
+ if (beta) headers["anthropic-beta"] = beta as string;
185
+
186
+ return fetch(`${upstream.url}/v1/messages`, {
187
+ method: "POST",
188
+ headers,
189
+ body,
190
+ });
191
+ }
192
+
193
+ private async handleAnthropicNonStreaming(upstreamResponse: Response, res: ServerResponse): Promise<void> {
194
+ const responseBody = await upstreamResponse.text();
195
+ let parsed: any;
196
+ try {
197
+ parsed = JSON.parse(responseBody);
198
+ } catch {
199
+ res.writeHead(200, { "Content-Type": "application/json" });
200
+ res.end(responseBody);
201
+ return;
202
+ }
203
+
204
+ // Filter tool_use blocks
205
+ if (parsed.content && Array.isArray(parsed.content)) {
206
+ parsed.content = await this.filterContentBlocks(parsed.content);
207
+
208
+ // Update stop_reason if all tool_use blocks were denied
209
+ const hasToolUse = parsed.content.some((b: any) => b.type === "tool_use");
210
+ if (!hasToolUse && parsed.stop_reason === "tool_use") {
211
+ parsed.stop_reason = "end_turn";
212
+ }
213
+ }
214
+
215
+ const filtered = JSON.stringify(parsed);
216
+ res.writeHead(200, {
217
+ "Content-Type": "application/json",
218
+ });
219
+ res.end(filtered);
220
+ }
221
+
222
+ private async handleAnthropicStreaming(upstreamResponse: Response, res: ServerResponse): Promise<void> {
223
+ // Buffer the full streaming response, then filter and re-stream
224
+ const reader = upstreamResponse.body?.getReader();
225
+ if (!reader) {
226
+ res.writeHead(502);
227
+ res.end();
228
+ return;
229
+ }
230
+
231
+ // Collect all SSE events
232
+ const decoder = new TextDecoder();
233
+ let buffer = "";
234
+ const events: Array<{ event: string; data: string }> = [];
235
+
236
+ while (true) {
237
+ const { done, value } = await reader.read();
238
+ if (done) break;
239
+ buffer += decoder.decode(value, { stream: true });
240
+
241
+ // Parse SSE events from buffer
242
+ const lines = buffer.split("\n");
243
+ buffer = lines.pop() ?? ""; // Keep incomplete line
244
+
245
+ let currentEvent = "";
246
+ for (const line of lines) {
247
+ if (line.startsWith("event: ")) {
248
+ currentEvent = line.slice(7).trim();
249
+ } else if (line.startsWith("data: ")) {
250
+ events.push({ event: currentEvent, data: line.slice(6) });
251
+ currentEvent = "";
252
+ }
253
+ }
254
+ }
255
+
256
+ // Find tool_use content blocks and evaluate them
257
+ const toolBlocks = new Map<number, { name: string; inputJson: string; id: string }>();
258
+ const deniedIndices = new Set<number>();
259
+ let currentBlockIndex = -1;
260
+
261
+ for (const ev of events) {
262
+ if (ev.event === "content_block_start") {
263
+ try {
264
+ const d = JSON.parse(ev.data);
265
+ currentBlockIndex = d.index ?? -1;
266
+ if (d.content_block?.type === "tool_use") {
267
+ toolBlocks.set(currentBlockIndex, {
268
+ name: d.content_block.name,
269
+ id: d.content_block.id,
270
+ inputJson: "",
271
+ });
272
+ }
273
+ } catch {}
274
+ } else if (ev.event === "content_block_delta") {
275
+ try {
276
+ const d = JSON.parse(ev.data);
277
+ const idx = d.index ?? currentBlockIndex;
278
+ const block = toolBlocks.get(idx);
279
+ if (block && d.delta?.type === "input_json_delta") {
280
+ block.inputJson += d.delta.partial_json ?? "";
281
+ }
282
+ } catch {}
283
+ }
284
+ }
285
+
286
+ // Evaluate each tool call against Cedar
287
+ for (const [idx, block] of toolBlocks) {
288
+ const decision = await this.evaluateToolCall(block.name, block.inputJson);
289
+ if (decision === "deny") {
290
+ deniedIndices.add(idx);
291
+ this.logger.info(`LLM Proxy DENIED tool call: ${block.name} (block ${idx})`);
292
+ }
293
+ }
294
+
295
+ if (deniedIndices.size === 0) {
296
+ // No denials — forward everything as-is
297
+ res.writeHead(200, {
298
+ "Content-Type": "text/event-stream",
299
+ "Cache-Control": "no-cache",
300
+ "Connection": "keep-alive",
301
+ });
302
+ for (const ev of events) {
303
+ if (ev.event) res.write(`event: ${ev.event}\n`);
304
+ res.write(`data: ${ev.data}\n\n`);
305
+ }
306
+ res.end();
307
+ return;
308
+ }
309
+
310
+ // Rewrite stream: replace denied tool blocks with text blocks
311
+ res.writeHead(200, {
312
+ "Content-Type": "text/event-stream",
313
+ "Cache-Control": "no-cache",
314
+ "Connection": "keep-alive",
315
+ });
316
+
317
+ let skipBlock = false;
318
+ let skipBlockIndex = -1;
319
+
320
+ for (const ev of events) {
321
+ if (ev.event === "content_block_start") {
322
+ try {
323
+ const d = JSON.parse(ev.data);
324
+ const idx = d.index ?? -1;
325
+ if (deniedIndices.has(idx)) {
326
+ skipBlock = true;
327
+ skipBlockIndex = idx;
328
+ const block = toolBlocks.get(idx)!;
329
+ // Emit a text block instead
330
+ const replacement = {
331
+ index: idx,
332
+ content_block: {
333
+ type: "text",
334
+ text: "",
335
+ },
336
+ };
337
+ res.write(`event: content_block_start\ndata: ${JSON.stringify(replacement)}\n\n`);
338
+ // Emit the denial text as a delta
339
+ const denialText = `\n🚫 DENIED by Cedar policy: ${block.name}\n`;
340
+ const delta = {
341
+ index: idx,
342
+ delta: { type: "text_delta", text: denialText },
343
+ };
344
+ res.write(`event: content_block_delta\ndata: ${JSON.stringify(delta)}\n\n`);
345
+ continue;
346
+ }
347
+ } catch {}
348
+ skipBlock = false;
349
+ }
350
+
351
+ if (ev.event === "content_block_delta") {
352
+ try {
353
+ const d = JSON.parse(ev.data);
354
+ if (deniedIndices.has(d.index ?? skipBlockIndex)) continue;
355
+ } catch {}
356
+ }
357
+
358
+ if (ev.event === "content_block_stop") {
359
+ try {
360
+ const d = JSON.parse(ev.data);
361
+ if (deniedIndices.has(d.index ?? skipBlockIndex)) {
362
+ // Emit the stop for our replacement text block
363
+ res.write(`event: content_block_stop\ndata: ${ev.data}\n\n`);
364
+ skipBlock = false;
365
+ continue;
366
+ }
367
+ } catch {}
368
+ }
369
+
370
+ if (skipBlock) continue;
371
+
372
+ // Fix message_delta stop_reason if all tools were denied
373
+ if (ev.event === "message_delta") {
374
+ try {
375
+ const d = JSON.parse(ev.data);
376
+ const remainingTools = [...toolBlocks.keys()].filter((i) => !deniedIndices.has(i));
377
+ if (remainingTools.length === 0 && d.delta?.stop_reason === "tool_use") {
378
+ d.delta.stop_reason = "end_turn";
379
+ res.write(`event: message_delta\ndata: ${JSON.stringify(d)}\n\n`);
380
+ continue;
381
+ }
382
+ } catch {}
383
+ }
384
+
385
+ // Forward event as-is
386
+ if (ev.event) res.write(`event: ${ev.event}\n`);
387
+ res.write(`data: ${ev.data}\n\n`);
388
+ }
389
+
390
+ res.end();
391
+ }
392
+
393
+ // ── OpenAI Chat Completions API ──
394
+
395
+ private async proxyOpenAI(req: IncomingMessage, res: ServerResponse): Promise<void> {
396
+ const upstream = this.upstream.openai;
397
+ if (!upstream) {
398
+ res.writeHead(501, { "Content-Type": "application/json" });
399
+ res.end(JSON.stringify({ error: { message: "OpenAI upstream not configured" } }));
400
+ return;
401
+ }
402
+
403
+ const body = await this.readBody(req);
404
+ let parsed: any;
405
+ try {
406
+ parsed = JSON.parse(body);
407
+ } catch {
408
+ res.writeHead(400, { "Content-Type": "application/json" });
409
+ res.end(JSON.stringify({ error: { message: "Invalid JSON body" } }));
410
+ return;
411
+ }
412
+
413
+ const isStreaming = parsed.stream === true;
414
+
415
+ // For streaming: force non-streaming, filter, then re-stream
416
+ const forwardBody = isStreaming ? JSON.stringify({ ...parsed, stream: false }) : body;
417
+
418
+ const headers: Record<string, string> = {
419
+ "Content-Type": "application/json",
420
+ "Authorization": `Bearer ${upstream.apiKey}`,
421
+ };
422
+
423
+ const upstreamResponse = await fetch(`${upstream.url}/v1/chat/completions`, {
424
+ method: "POST",
425
+ headers,
426
+ body: forwardBody,
427
+ });
428
+
429
+ if (!upstreamResponse.ok) {
430
+ const errorBody = await upstreamResponse.text();
431
+ res.writeHead(upstreamResponse.status, { "Content-Type": "application/json" });
432
+ res.end(errorBody);
433
+ return;
434
+ }
435
+
436
+ const responseBody = await upstreamResponse.text();
437
+ let response: any;
438
+ try {
439
+ response = JSON.parse(responseBody);
440
+ } catch {
441
+ res.writeHead(200, { "Content-Type": "application/json" });
442
+ res.end(responseBody);
443
+ return;
444
+ }
445
+
446
+ // Filter tool_calls from choices
447
+ if (response.choices) {
448
+ for (const choice of response.choices) {
449
+ if (choice.message?.tool_calls) {
450
+ const filtered = [];
451
+ const denials: string[] = [];
452
+
453
+ for (const tc of choice.message.tool_calls) {
454
+ const decision = await this.evaluateToolCall(
455
+ tc.function?.name ?? tc.name,
456
+ typeof tc.function?.arguments === "string"
457
+ ? tc.function.arguments
458
+ : JSON.stringify(tc.function?.arguments ?? {}),
459
+ );
460
+
461
+ if (decision === "allow") {
462
+ filtered.push(tc);
463
+ } else {
464
+ denials.push(
465
+ `🚫 DENIED by Cedar policy: ${tc.function?.name ?? tc.name}`,
466
+ );
467
+ this.logger.info(`LLM Proxy DENIED tool call: ${tc.function?.name}`);
468
+ }
469
+ }
470
+
471
+ choice.message.tool_calls = filtered.length > 0 ? filtered : undefined;
472
+
473
+ // Add denial messages to content
474
+ if (denials.length > 0) {
475
+ const existing = choice.message.content ?? "";
476
+ choice.message.content = (existing + "\n" + denials.join("\n")).trim();
477
+ }
478
+
479
+ // Fix finish_reason if all tool calls were denied
480
+ if (filtered.length === 0 && choice.finish_reason === "tool_calls") {
481
+ choice.finish_reason = "stop";
482
+ }
483
+ }
484
+ }
485
+ }
486
+
487
+ if (isStreaming) {
488
+ // Re-stream as SSE (single chunk since we forced non-streaming)
489
+ res.writeHead(200, {
490
+ "Content-Type": "text/event-stream",
491
+ "Cache-Control": "no-cache",
492
+ });
493
+ // Convert to streaming format
494
+ const chunk = { ...response, object: "chat.completion.chunk" };
495
+ for (const choice of chunk.choices ?? []) {
496
+ choice.delta = choice.message;
497
+ delete choice.message;
498
+ }
499
+ res.write(`data: ${JSON.stringify(chunk)}\n\n`);
500
+ res.write("data: [DONE]\n\n");
501
+ res.end();
502
+ } else {
503
+ res.writeHead(200, { "Content-Type": "application/json" });
504
+ res.end(JSON.stringify(response));
505
+ }
506
+ }
507
+
508
+ // ── Passthrough for non-chat endpoints ──
509
+
510
+ private async passthrough(req: IncomingMessage, res: ServerResponse, path: string): Promise<void> {
511
+ // Try Anthropic first, then OpenAI
512
+ const upstream = this.upstream.anthropic ?? this.upstream.openai;
513
+ if (!upstream) {
514
+ res.writeHead(501);
515
+ res.end();
516
+ return;
517
+ }
518
+
519
+ const body = req.method !== "GET" ? await this.readBody(req) : undefined;
520
+
521
+ const headers: Record<string, string> = { "Content-Type": "application/json" };
522
+ if (this.upstream.anthropic) {
523
+ headers["x-api-key"] = upstream.apiKey;
524
+ headers["anthropic-version"] = "2023-06-01";
525
+ } else {
526
+ headers["Authorization"] = `Bearer ${upstream.apiKey}`;
527
+ }
528
+
529
+ const response = await fetch(`${upstream.url}${path}`, {
530
+ method: req.method ?? "GET",
531
+ headers,
532
+ body,
533
+ });
534
+
535
+ const responseBody = await response.text();
536
+ res.writeHead(response.status, {
537
+ "Content-Type": response.headers.get("content-type") ?? "application/json",
538
+ });
539
+ res.end(responseBody);
540
+ }
541
+
542
+ // ── Cedar evaluation ──
543
+
544
+ private async evaluateToolCall(toolName: string, inputJson: string): Promise<"allow" | "deny"> {
545
+ this.stats.toolCallsEvaluated++;
546
+
547
+ let parsedInput: Record<string, unknown> = {};
548
+ try {
549
+ parsedInput = JSON.parse(inputJson || "{}");
550
+ } catch {}
551
+
552
+ // Determine resource type based on tool name
553
+ let resourceType = "Tool";
554
+ let action = "call_tool";
555
+ let resourceId = toolName;
556
+ let context: Record<string, unknown> = {};
557
+
558
+ // Map known OpenClaw built-in tools to resource types
559
+ if (toolName === "exec" || toolName === "process") {
560
+ resourceType = "Shell";
561
+ action = "exec_command";
562
+ // Extract binary name from the command argument
563
+ const cmd = (parsedInput.command as string) ?? "";
564
+ resourceId = cmd.trim().split(/\s+/)[0]?.replace(/^.*\//, "") || toolName;
565
+ // Map to schema-known context attributes
566
+ context = {
567
+ args: cmd,
568
+ workdir: (parsedInput.workdir as string) ?? "",
569
+ };
570
+ } else if (toolName === "web_fetch" || toolName === "web_search") {
571
+ resourceType = "API";
572
+ action = "call_api";
573
+ // Extract domain from URL
574
+ const url = (parsedInput.url as string) ?? (parsedInput.query as string) ?? "";
575
+ try {
576
+ if (url.startsWith("http")) {
577
+ resourceId = new URL(url).hostname;
578
+ } else {
579
+ resourceId = toolName;
580
+ }
581
+ } catch {
582
+ resourceId = toolName;
583
+ }
584
+ // Map to schema-known context attributes
585
+ context = {
586
+ url,
587
+ method: (parsedInput.method as string) ?? "GET",
588
+ body: (parsedInput.body as string) ?? "",
589
+ };
590
+ }
591
+
592
+ const decision = await this.cedar.authorize({
593
+ principal: `Agent::"openclaw"`,
594
+ action: `Action::"${action}"`,
595
+ resource: `${resourceType}::"${resourceId}"`,
596
+ context,
597
+ });
598
+
599
+ if (decision.decision === "deny") {
600
+ this.stats.toolCallsDenied++;
601
+ }
602
+
603
+ return decision.decision;
604
+ }
605
+
606
+ // ── Content block filtering (Anthropic non-streaming) ──
607
+
608
+ private async filterContentBlocks(blocks: ContentBlock[]): Promise<ContentBlock[]> {
609
+ const result: ContentBlock[] = [];
610
+
611
+ for (const block of blocks) {
612
+ if (block.type !== "tool_use") {
613
+ result.push(block);
614
+ continue;
615
+ }
616
+
617
+ const toolBlock = block as ToolUseBlock;
618
+ const decision = await this.evaluateToolCall(
619
+ toolBlock.name,
620
+ JSON.stringify(toolBlock.input),
621
+ );
622
+
623
+ if (decision === "allow") {
624
+ result.push(block);
625
+ } else {
626
+ // Replace with denial text
627
+ result.push({
628
+ type: "text",
629
+ text: `\n🚫 DENIED by Cedar policy: ${toolBlock.name}\n`,
630
+ });
631
+ this.logger.info(`LLM Proxy DENIED tool call: ${toolBlock.name}`);
632
+ }
633
+ }
634
+
635
+ return result;
636
+ }
637
+
638
+ // ── Utilities ──
639
+
640
+ private readBody(req: IncomingMessage): Promise<string> {
641
+ return new Promise((resolve, reject) => {
642
+ const chunks: Buffer[] = [];
643
+ req.on("data", (chunk) => chunks.push(chunk));
644
+ req.on("end", () => resolve(Buffer.concat(chunks).toString()));
645
+ req.on("error", reject);
646
+ });
647
+ }
648
+ }
package/src/types.ts CHANGED
@@ -23,6 +23,15 @@ export interface PluginConfig {
23
23
  policyDir?: string;
24
24
  defaultPolicy?: "deny-all" | "allow-all";
25
25
  verify?: boolean;
26
+ /** LLM proxy configuration — sits between agent and LLM provider */
27
+ proxy?: {
28
+ enabled?: boolean;
29
+ port?: number; // default: 19821
30
+ upstream?: {
31
+ anthropic?: { url?: string; apiKey: string };
32
+ openai?: { url?: string; apiKey: string };
33
+ };
34
+ };
26
35
  }
27
36
 
28
37
  export interface ServerConfig {