@cdot65/prisma-airs 0.1.4 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/hooks/prisma-airs-audit/HOOK.md +47 -0
- package/hooks/prisma-airs-audit/handler.ts +167 -0
- package/hooks/prisma-airs-context/HOOK.md +41 -0
- package/hooks/prisma-airs-context/handler.ts +295 -0
- package/hooks/prisma-airs-outbound/HOOK.md +43 -0
- package/hooks/prisma-airs-outbound/handler.test.ts +296 -0
- package/hooks/prisma-airs-outbound/handler.ts +341 -0
- package/hooks/prisma-airs-tools/HOOK.md +40 -0
- package/hooks/prisma-airs-tools/handler.ts +279 -0
- package/index.ts +3 -3
- package/openclaw.plugin.json +75 -4
- package/package.json +2 -2
- package/src/scan-cache.test.ts +167 -0
- package/src/scan-cache.ts +134 -0
- package/src/scanner.ts +15 -7
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
/**
 * Tests for prisma-airs-outbound hook handler
 *
 * Covers the hook's full decision matrix: allow, warn, DLP masking,
 * full block, fail-closed/fail-open scan failures, disabled scanning,
 * and empty/undefined content.
 */

import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import handler from "./handler";

// Mock the scanner module so no real AIRS API calls are made
vi.mock("../../src/scanner", () => ({
  scan: vi.fn(),
}));

import { scan } from "../../src/scanner";
const mockScan = vi.mocked(scan);

describe("prisma-airs-outbound handler", () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Suppress console output during tests
    vi.spyOn(console, "log").mockImplementation(() => {});
    vi.spyOn(console, "error").mockImplementation(() => {});
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  // Shared fixture: a representative outbound message event
  const baseEvent = {
    content: "This is a test response",
    to: "user@example.com",
    channel: "slack",
    metadata: {
      sessionKey: "test-session",
    },
  };

  // Shared fixture: hook context with the plugin fully enabled
  const baseCtx = {
    channelId: "slack",
    conversationId: "conv-123",
    cfg: {
      plugins: {
        entries: {
          "prisma-airs": {
            config: {
              outbound_scanning_enabled: true,
              profile_name: "default",
              app_name: "test-app",
              fail_closed: true,
              dlp_mask_only: true,
            },
          },
        },
      },
    },
  };

  describe("allow action", () => {
    it("should return undefined for allowed responses", async () => {
      mockScan.mockResolvedValue({
        action: "allow",
        severity: "SAFE",
        categories: ["safe"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: false, urlCats: false },
        latencyMs: 50,
      });

      // undefined means the message passes through unmodified
      const result = await handler(baseEvent, baseCtx);
      expect(result).toBeUndefined();
    });
  });

  describe("warn action", () => {
    it("should allow through with warning logged", async () => {
      mockScan.mockResolvedValue({
        action: "warn",
        severity: "MEDIUM",
        categories: ["url_filtering_response"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: false, urlCats: true },
        latencyMs: 50,
      });

      // Warn passes the message through but must emit a log entry
      const result = await handler(baseEvent, baseCtx);
      expect(result).toBeUndefined();
      expect(console.log).toHaveBeenCalled();
    });
  });

  describe("block action - DLP masking", () => {
    it("should mask SSN in response", async () => {
      mockScan.mockResolvedValue({
        action: "block",
        severity: "HIGH",
        categories: ["dlp_response"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: true, urlCats: false },
        latencyMs: 50,
      });

      const eventWithSSN = {
        ...baseEvent,
        content: "Your SSN is 123-45-6789",
      };

      // DLP-only block with dlp_mask_only=true: content is redacted, not blocked
      const result = await handler(eventWithSSN, baseCtx);
      expect(result?.content).toContain("[SSN REDACTED]");
      expect(result?.content).not.toContain("123-45-6789");
    });

    it("should mask credit card numbers", async () => {
      mockScan.mockResolvedValue({
        action: "block",
        severity: "HIGH",
        categories: ["dlp_response"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: true, urlCats: false },
        latencyMs: 50,
      });

      const eventWithCard = {
        ...baseEvent,
        content: "Your card number is 4111-1111-1111-1111",
      };

      const result = await handler(eventWithCard, baseCtx);
      expect(result?.content).toContain("[CARD REDACTED]");
    });

    it("should mask email addresses", async () => {
      mockScan.mockResolvedValue({
        action: "block",
        severity: "HIGH",
        categories: ["dlp_response"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: true, urlCats: false },
        latencyMs: 50,
      });

      const eventWithEmail = {
        ...baseEvent,
        content: "Contact us at secret@company.com",
      };

      const result = await handler(eventWithEmail, baseCtx);
      expect(result?.content).toContain("[EMAIL REDACTED]");
    });
  });

  describe("block action - full block", () => {
    it("should block responses with malicious code", async () => {
      mockScan.mockResolvedValue({
        action: "block",
        severity: "CRITICAL",
        categories: ["malicious_code"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: false, urlCats: false },
        latencyMs: 50,
      });

      // Always-block category: content replaced with a refusal message
      const result = await handler(baseEvent, baseCtx);
      expect(result?.content).toContain("security policy");
      expect(result?.content).toContain("malicious code");
    });

    it("should block responses with toxicity", async () => {
      mockScan.mockResolvedValue({
        action: "block",
        severity: "HIGH",
        categories: ["toxicity"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: false, urlCats: false },
        latencyMs: 50,
      });

      const result = await handler(baseEvent, baseCtx);
      expect(result?.content).toContain("security policy");
    });

    it("should block even DLP violations when combined with other threats", async () => {
      mockScan.mockResolvedValue({
        action: "block",
        severity: "CRITICAL",
        categories: ["dlp_response", "malicious_code"],
        scanId: "scan_123",
        reportId: "report_456",
        profileName: "default",
        promptDetected: { injection: false, dlp: false, urlCats: false },
        responseDetected: { dlp: true, urlCats: false },
        latencyMs: 50,
      });

      const eventWithSSN = {
        ...baseEvent,
        content: "Your SSN is 123-45-6789",
      };

      const result = await handler(eventWithSSN, baseCtx);
      // Should be a full block, not masking
      expect(result?.content).toContain("security policy");
      expect(result?.content).not.toContain("[SSN REDACTED]");
    });
  });

  describe("fail-closed behavior", () => {
    it("should block on scan failure when fail_closed is true", async () => {
      mockScan.mockRejectedValue(new Error("API timeout"));

      const result = await handler(baseEvent, baseCtx);
      expect(result?.content).toContain("security verification issue");
    });

    it("should allow through on scan failure when fail_closed is false", async () => {
      mockScan.mockRejectedValue(new Error("API timeout"));

      // Same config as baseCtx but with fail_closed flipped off
      const ctxFailOpen = {
        ...baseCtx,
        cfg: {
          plugins: {
            entries: {
              "prisma-airs": {
                config: {
                  ...baseCtx.cfg?.plugins?.entries?.["prisma-airs"]?.config,
                  fail_closed: false,
                },
              },
            },
          },
        },
      };

      const result = await handler(baseEvent, ctxFailOpen);
      expect(result).toBeUndefined();
    });
  });

  describe("disabled scanning", () => {
    it("should skip scanning when disabled", async () => {
      const ctxDisabled = {
        ...baseCtx,
        cfg: {
          plugins: {
            entries: {
              "prisma-airs": {
                config: {
                  outbound_scanning_enabled: false,
                },
              },
            },
          },
        },
      };

      // With scanning disabled the scanner must never be invoked
      const result = await handler(baseEvent, ctxDisabled);
      expect(result).toBeUndefined();
      expect(mockScan).not.toHaveBeenCalled();
    });
  });

  describe("empty content", () => {
    it("should skip empty content", async () => {
      const emptyEvent = { ...baseEvent, content: "" };
      const result = await handler(emptyEvent, baseCtx);
      expect(result).toBeUndefined();
      expect(mockScan).not.toHaveBeenCalled();
    });

    it("should skip undefined content", async () => {
      const noContentEvent = { ...baseEvent, content: undefined };
      const result = await handler(noContentEvent, baseCtx);
      expect(result).toBeUndefined();
      expect(mockScan).not.toHaveBeenCalled();
    });
  });
});
|
|
@@ -0,0 +1,341 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Prisma AIRS Outbound Security Scanner (message_sending)
|
|
3
|
+
*
|
|
4
|
+
* Scans ALL outbound responses for:
|
|
5
|
+
* - WildFire: malicious URLs and content
|
|
6
|
+
* - Toxicity: harmful/abusive content
|
|
7
|
+
* - URL Filtering: disallowed URL categories
|
|
8
|
+
* - DLP: sensitive data leakage
|
|
9
|
+
* - Malicious Code: malware/exploits
|
|
10
|
+
* - Custom Topics: org-specific policy violations
|
|
11
|
+
* - Grounding: hallucination detection
|
|
12
|
+
*
|
|
13
|
+
* CAN BLOCK via { cancel: true } or modify via { content: "..." }
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import { scan, type ScanResult } from "../../src/scanner";
|
|
17
|
+
|
|
18
|
+
// Event shape from OpenClaw message_sending hook.
// Only `content` is scanned; `metadata.sessionKey` is used for log correlation.
interface MessageSendingEvent {
  content?: string;
  to?: string;
  channel?: string;
  metadata?: {
    sessionKey?: string;
    messageId?: string;
  };
}

// Context passed to hook by the OpenClaw runtime.
interface HookContext {
  channelId?: string;
  accountId?: string;
  conversationId?: string;
  cfg?: PluginConfig;
}

// Plugin config structure as nested under cfg.plugins.entries["prisma-airs"].config.
interface PluginConfig {
  plugins?: {
    entries?: {
      "prisma-airs"?: {
        config?: {
          outbound_scanning_enabled?: boolean;
          profile_name?: string;
          app_name?: string;
          fail_closed?: boolean;
          dlp_mask_only?: boolean;
        };
      };
    };
  };
}

// Hook result type - can modify content or cancel.
// Returning undefined (void) lets the original message through unchanged.
interface HookResult {
  content?: string;
  cancel?: boolean;
}

// Map AIRS categories to user-friendly messages shown in block responses.
// Unknown categories fall back to the raw name with underscores replaced.
const CATEGORY_MESSAGES: Record<string, string> = {
  // Core detections
  prompt_injection: "prompt injection attempt",
  dlp_prompt: "sensitive data in input",
  dlp_response: "sensitive data leakage",
  url_filtering_prompt: "disallowed URL in input",
  url_filtering_response: "disallowed URL in response",
  malicious_url: "malicious URL detected",
  toxicity: "inappropriate content",
  toxic_content: "inappropriate content",
  malicious_code: "malicious code detected",
  agent_threat: "AI agent threat",
  grounding: "response grounding violation",
  ungrounded: "ungrounded response",
  custom_topic: "policy violation",
  topic_violation: "policy violation",
  db_security: "database security threat",
  safe: "safe",
  benign: "safe",
  api_error: "security scan error",
  "scan-failure": "security scan failed",
};

// Categories that can be masked instead of blocked (DLP family only).
const MASKABLE_CATEGORIES = ["dlp_response", "dlp_prompt", "dlp"];

// Categories that always require full block, regardless of dlp_mask_only.
const ALWAYS_BLOCK_CATEGORIES = [
  "malicious_code",
  "malicious_url",
  "toxicity",
  "toxic_content",
  "agent_threat",
  "prompt_injection",
  "db_security",
  "scan-failure",
];
|
|
98
|
+
|
|
99
|
+
/**
|
|
100
|
+
* Get plugin configuration
|
|
101
|
+
*/
|
|
102
|
+
function getPluginConfig(ctx: HookContext): {
|
|
103
|
+
enabled: boolean;
|
|
104
|
+
profileName: string;
|
|
105
|
+
appName: string;
|
|
106
|
+
failClosed: boolean;
|
|
107
|
+
dlpMaskOnly: boolean;
|
|
108
|
+
} {
|
|
109
|
+
const cfg = ctx.cfg?.plugins?.entries?.["prisma-airs"]?.config;
|
|
110
|
+
return {
|
|
111
|
+
enabled: cfg?.outbound_scanning_enabled !== false,
|
|
112
|
+
profileName: cfg?.profile_name ?? "default",
|
|
113
|
+
appName: cfg?.app_name ?? "openclaw",
|
|
114
|
+
failClosed: cfg?.fail_closed ?? true, // Default fail-closed
|
|
115
|
+
dlpMaskOnly: cfg?.dlp_mask_only ?? true, // Default mask instead of block for DLP
|
|
116
|
+
};
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
/**
|
|
120
|
+
* Mask sensitive data in content
|
|
121
|
+
*
|
|
122
|
+
* Uses regex patterns for common PII types.
|
|
123
|
+
* TODO: Use AIRS API match offsets for precision masking when available.
|
|
124
|
+
*/
|
|
125
|
+
function maskSensitiveData(content: string): string {
|
|
126
|
+
let masked = content;
|
|
127
|
+
|
|
128
|
+
// Social Security Numbers (XXX-XX-XXXX)
|
|
129
|
+
masked = masked.replace(/\b\d{3}-\d{2}-\d{4}\b/g, "[SSN REDACTED]");
|
|
130
|
+
|
|
131
|
+
// Credit Card Numbers (with or without spaces/dashes)
|
|
132
|
+
masked = masked.replace(/\b(?:\d{4}[-\s]?){3}\d{4}\b/g, "[CARD REDACTED]");
|
|
133
|
+
|
|
134
|
+
// Email addresses
|
|
135
|
+
masked = masked.replace(
|
|
136
|
+
/\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b/g,
|
|
137
|
+
"[EMAIL REDACTED]"
|
|
138
|
+
);
|
|
139
|
+
|
|
140
|
+
// API keys and tokens (common patterns)
|
|
141
|
+
masked = masked.replace(
|
|
142
|
+
/\b(?:sk-|pk-|api[_-]?key[_-]?|token[_-]?|secret[_-]?|password[_-]?)[a-zA-Z0-9_-]{16,}\b/gi,
|
|
143
|
+
"[API KEY REDACTED]"
|
|
144
|
+
);
|
|
145
|
+
|
|
146
|
+
// AWS keys
|
|
147
|
+
masked = masked.replace(/\b(?:AKIA|ABIA|ACCA|ASIA)[A-Z0-9]{16}\b/g, "[AWS KEY REDACTED]");
|
|
148
|
+
|
|
149
|
+
// Generic long alphanumeric strings that look like secrets (40+ chars)
|
|
150
|
+
masked = masked.replace(/\b[a-zA-Z0-9_-]{40,}\b/g, (match) => {
|
|
151
|
+
// Only redact if it looks like a key (has mixed case or numbers)
|
|
152
|
+
if (/[a-z]/.test(match) && /[A-Z]/.test(match) && /[0-9]/.test(match)) {
|
|
153
|
+
return "[SECRET REDACTED]";
|
|
154
|
+
}
|
|
155
|
+
return match;
|
|
156
|
+
});
|
|
157
|
+
|
|
158
|
+
// US Phone numbers
|
|
159
|
+
masked = masked.replace(
|
|
160
|
+
/\b(?:\+1[-.\s]?)?\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}\b/g,
|
|
161
|
+
"[PHONE REDACTED]"
|
|
162
|
+
);
|
|
163
|
+
|
|
164
|
+
// IP addresses (private ranges especially)
|
|
165
|
+
masked = masked.replace(
|
|
166
|
+
/\b(?:10\.\d{1,3}\.\d{1,3}\.\d{1,3}|172\.(?:1[6-9]|2\d|3[01])\.\d{1,3}\.\d{1,3}|192\.168\.\d{1,3}\.\d{1,3})\b/g,
|
|
167
|
+
"[IP REDACTED]"
|
|
168
|
+
);
|
|
169
|
+
|
|
170
|
+
return masked;
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
/**
|
|
174
|
+
* Build user-friendly block message
|
|
175
|
+
*/
|
|
176
|
+
function buildBlockMessage(result: ScanResult): string {
|
|
177
|
+
const reasons = result.categories
|
|
178
|
+
.map((cat) => CATEGORY_MESSAGES[cat] || cat.replace(/_/g, " "))
|
|
179
|
+
.filter((r) => r !== "safe")
|
|
180
|
+
.join(", ");
|
|
181
|
+
|
|
182
|
+
return (
|
|
183
|
+
`I apologize, but I'm unable to provide that response due to security policy` +
|
|
184
|
+
(reasons ? ` (${reasons})` : "") +
|
|
185
|
+
`. Please rephrase your request or contact support if you believe this is an error.`
|
|
186
|
+
);
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
/**
|
|
190
|
+
* Determine if result should be masked vs blocked
|
|
191
|
+
*/
|
|
192
|
+
function shouldMaskOnly(result: ScanResult, config: { dlpMaskOnly: boolean }): boolean {
|
|
193
|
+
if (!config.dlpMaskOnly) return false;
|
|
194
|
+
|
|
195
|
+
// Check if any always-block categories are present
|
|
196
|
+
const hasBlockingCategory = result.categories.some((cat) =>
|
|
197
|
+
ALWAYS_BLOCK_CATEGORIES.includes(cat)
|
|
198
|
+
);
|
|
199
|
+
if (hasBlockingCategory) return false;
|
|
200
|
+
|
|
201
|
+
// Check if all categories are maskable
|
|
202
|
+
const allMaskable = result.categories.every(
|
|
203
|
+
(cat) => MASKABLE_CATEGORIES.includes(cat) || cat === "safe" || cat === "benign"
|
|
204
|
+
);
|
|
205
|
+
|
|
206
|
+
return allMaskable;
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
/**
 * Main hook handler for the message_sending event.
 *
 * Flow: resolve config → skip if disabled or no content → scan the
 * outbound text via the AIRS scanner → act on the verdict:
 *   - allow: pass through (return undefined)
 *   - warn:  pass through, emit a warn log entry
 *   - block: mask DLP-only hits when configured, otherwise replace the
 *            content with a refusal message
 * Scan failures honor fail_closed: block with a generic message, or
 * fail-open and pass the content through.
 *
 * @param event - The outbound message (content, recipient, metadata).
 * @param ctx - Hook context carrying the plugin configuration.
 * @returns A HookResult with replacement content, or undefined to
 *   leave the message unchanged.
 */
const handler = async (
  event: MessageSendingEvent,
  ctx: HookContext
): Promise<HookResult | void> => {
  const config = getPluginConfig(ctx);

  // Check if outbound scanning is enabled
  if (!config.enabled) {
    return;
  }

  // Validate we have content to scan (skip empty/whitespace-only/non-string)
  const content = event.content;
  if (!content || typeof content !== "string" || content.trim().length === 0) {
    return;
  }

  // Correlation key for structured logs: event metadata first, then conversation id.
  const sessionKey = event.metadata?.sessionKey || ctx.conversationId || "unknown";

  let result: ScanResult;

  try {
    // Scan the outbound response
    result = await scan({
      response: content,
      profileName: config.profileName,
      appName: config.appName,
    });
  } catch (err) {
    // Structured error log; scan failures never propagate to the caller.
    console.error(
      JSON.stringify({
        event: "prisma_airs_outbound_scan_error",
        timestamp: new Date().toISOString(),
        sessionKey,
        error: err instanceof Error ? err.message : String(err),
      })
    );

    // Fail-closed: block on scan failure
    if (config.failClosed) {
      return {
        content:
          "I apologize, but I'm unable to provide a response at this time due to a security verification issue. Please try again.",
      };
    }

    return; // Fail-open
  }

  // Log the scan result (structured JSON for downstream log processing)
  console.log(
    JSON.stringify({
      event: "prisma_airs_outbound_scan",
      timestamp: new Date().toISOString(),
      sessionKey,
      action: result.action,
      severity: result.severity,
      categories: result.categories,
      scanId: result.scanId,
      reportId: result.reportId,
      latencyMs: result.latencyMs,
      responseDetected: result.responseDetected,
    })
  );

  // Handle allow - no modification needed
  if (result.action === "allow") {
    return;
  }

  // Handle warn - log but allow through
  if (result.action === "warn") {
    console.log(
      JSON.stringify({
        event: "prisma_airs_outbound_warn",
        timestamp: new Date().toISOString(),
        sessionKey,
        severity: result.severity,
        categories: result.categories,
        scanId: result.scanId,
      })
    );
    return; // Allow through with warning logged
  }

  // Handle block
  if (result.action === "block") {
    // Check if we should mask instead of block (DLP-only)
    if (shouldMaskOnly(result, config)) {
      const maskedContent = maskSensitiveData(content);

      // Only return modified content if masking actually changed something;
      // if the regexes found nothing to redact, fall through to a full block.
      if (maskedContent !== content) {
        console.log(
          JSON.stringify({
            event: "prisma_airs_outbound_mask",
            timestamp: new Date().toISOString(),
            sessionKey,
            categories: result.categories,
            scanId: result.scanId,
          })
        );

        return {
          content: maskedContent,
        };
      }
    }

    // Full block - replace content entirely
    console.log(
      JSON.stringify({
        event: "prisma_airs_outbound_block",
        timestamp: new Date().toISOString(),
        sessionKey,
        action: result.action,
        severity: result.severity,
        categories: result.categories,
        scanId: result.scanId,
        reportId: result.reportId,
      })
    );

    return {
      content: buildBlockMessage(result),
    };
  }
  // NOTE(review): any other action value falls through and allows the
  // message — confirm the scanner only ever returns allow/warn/block.
};

export default handler;
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: prisma-airs-tools
|
|
3
|
+
description: "Block dangerous tool calls when security threats are detected"
|
|
4
|
+
metadata: { "openclaw": { "emoji": "🛑", "events": ["before_tool_call"] } }
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Prisma AIRS Tool Gating
|
|
8
|
+
|
|
9
|
+
Blocks dangerous tool calls when security warnings are active from inbound scanning.
|
|
10
|
+
|
|
11
|
+
## Behavior
|
|
12
|
+
|
|
13
|
+
This hook runs before each tool call and checks if the current session has an active security warning (from `message_received` or `before_agent_start` scanning). Based on the detected threat categories, it blocks specific tools that could be dangerous.
|
|
14
|
+
|
|
15
|
+
## Tool Blocking Matrix
|
|
16
|
+
|
|
17
|
+
| Threat Category | Blocked Tools |
|
|
18
|
+
| ------------------------------- | ----------------------------- |
|
|
19
|
+
| `agent-threat` | ALL external tools |
|
|
20
|
+
| `sql-injection` / `db-security` | exec, database, query, sql |
|
|
21
|
+
| `malicious-code` | exec, write, edit, eval, bash |
|
|
22
|
+
| `prompt-injection` | exec, gateway, message, cron |
|
|
23
|
+
| `malicious-url` | web_fetch, browser, curl |
|
|
24
|
+
|
|
25
|
+
## High-Risk Tools (Default)
|
|
26
|
+
|
|
27
|
+
These tools are blocked on ANY detected threat:
|
|
28
|
+
|
|
29
|
+
- `exec` - Command execution
|
|
30
|
+
- `Bash` - Shell access
|
|
31
|
+
- `write` - File writing
|
|
32
|
+
- `edit` - File editing
|
|
33
|
+
- `gateway` - Gateway operations
|
|
34
|
+
- `message` - Sending messages
|
|
35
|
+
- `cron` - Scheduled tasks
|
|
36
|
+
|
|
37
|
+
## Configuration
|
|
38
|
+
|
|
39
|
+
- `tool_gating_enabled`: Enable/disable (default: true)
|
|
40
|
+
- `high_risk_tools`: List of tools to block on any threat
|