clawmoat 0.2.1 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -0
- package/Dockerfile +22 -0
- package/README.md +144 -5
- package/SECURITY.md +63 -0
- package/bin/clawmoat.js +186 -1
- package/docs/ai-agent-security-scanner.html +691 -0
- package/docs/apple-touch-icon.png +0 -0
- package/docs/blog/host-guardian-launch.html +345 -0
- package/docs/blog/host-guardian-launch.md +249 -0
- package/docs/blog/index.html +2 -0
- package/docs/blog/langchain-security-tutorial.html +319 -0
- package/docs/blog/owasp-agentic-ai-top10.html +2 -0
- package/docs/blog/securing-ai-agents.html +2 -0
- package/docs/compare.html +2 -0
- package/docs/favicon.png +0 -0
- package/docs/icon-192.png +0 -0
- package/docs/index.html +258 -65
- package/docs/integrations/langchain.html +2 -0
- package/docs/integrations/openai.html +2 -0
- package/docs/integrations/openclaw.html +2 -0
- package/docs/logo.png +0 -0
- package/docs/logo.svg +60 -0
- package/docs/mark-with-moat.svg +33 -0
- package/docs/mark.png +0 -0
- package/docs/mark.svg +30 -0
- package/docs/og-image.png +0 -0
- package/docs/playground.html +440 -0
- package/docs/positioning-v2.md +155 -0
- package/docs/report-demo.html +399 -0
- package/docs/thanks.html +2 -0
- package/examples/github-action-workflow.yml +94 -0
- package/logo.png +0 -0
- package/logo.svg +60 -0
- package/mark-with-moat.svg +33 -0
- package/mark.png +0 -0
- package/mark.svg +30 -0
- package/package.json +1 -1
- package/server/index.js +9 -5
- package/skill/README.md +57 -0
- package/skill/SKILL.md +49 -30
- package/skill/scripts/audit.sh +28 -0
- package/skill/scripts/scan.sh +32 -0
- package/skill/scripts/test.sh +13 -0
- package/src/guardian/alerts.js +138 -0
- package/src/guardian/index.js +686 -0
- package/src/guardian/network-log.js +281 -0
- package/src/guardian/skill-integrity.js +290 -0
- package/src/index.js +37 -0
- package/src/middleware/openclaw.js +76 -1
- package/src/scanners/excessive-agency.js +88 -0
- package/wiki/Architecture.md +103 -0
- package/wiki/CLI-Reference.md +167 -0
- package/wiki/FAQ.md +135 -0
- package/wiki/Home.md +70 -0
- package/wiki/Policy-Engine.md +229 -0
- package/wiki/Scanner-Modules.md +224 -0
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ClawMoat Network Egress Logger
|
|
3
|
+
*
|
|
4
|
+
* Parses session JSONL files for outbound network activity,
|
|
5
|
+
* maintains domain allowlists, and flags suspicious destinations.
|
|
6
|
+
*
|
|
7
|
+
* @module clawmoat/guardian/network-log
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
const fs = require('fs');
|
|
11
|
+
const path = require('path');
|
|
12
|
+
const { URL } = require('url');
|
|
13
|
+
|
|
14
|
+
// Known-bad domains commonly used for exfiltration
const KNOWN_BAD_DOMAINS = [
  // Request-capture / webhook-inspection services
  'webhook.site',
  'requestbin.com',
  'pipedream.net',
  // Tunnel services that expose local ports to the internet
  'ngrok.io',
  'ngrok-free.app',
  'ngrok.app',
  // Out-of-band interaction / pentest callback endpoints
  'burpcollaborator.net',
  'interact.sh',
  'oastify.com',
  'canarytokens.com',
  'dnslog.cn',
  // More request-capture endpoints
  'beeceptor.com',
  'hookbin.com',
  'requestcatcher.com',
  'mockbin.org',
  'postb.in',
  'ptsv2.com',
  // Anonymous file-upload / paste services
  'transfer.sh',
  'file.io',
  '0x0.st',
  'hastebin.com',
  'pastebin.com',
  'paste.ee',
  'dpaste.org',
  // More tunneling services
  'serveo.net',
  'localtunnel.me',
  'localhost.run',
];
|
|
44
|
+
|
|
45
|
+
// Default safe domains
// Common developer destinations treated as expected egress; callers can
// extend this set via the NetworkEgressLogger `allowlist` option.
const DEFAULT_ALLOWLIST = [
  'github.com',
  'api.github.com',
  'raw.githubusercontent.com',
  'npmjs.org',
  'registry.npmjs.org',
  'google.com',
  'googleapis.com',
  'stackoverflow.com',
  'developer.mozilla.org',
  'nodejs.org',
  'docs.python.org',
];
|
|
59
|
+
|
|
60
|
+
/**
 * Extract http/https URLs from a text string.
 *
 * Trailing sentence punctuation is stripped from each match, so
 * "see https://x.com." yields "https://x.com" rather than capturing
 * the final dot (the greedy character class otherwise swallows it).
 *
 * @param {string} text - Arbitrary text that may contain URLs.
 * @returns {string[]} Extracted URLs (empty array when none found).
 */
function extractUrls(text) {
  if (!text) return [];
  const urlRegex = /https?:\/\/[^\s"'<>\]\)]+/gi;
  const matches = text.match(urlRegex) || [];
  // Fix: drop trailing punctuation that is almost never part of the URL.
  return matches.map((u) => u.replace(/[.,;:!?]+$/, ''));
}
|
|
70
|
+
|
|
71
|
+
/**
 * Return the lowercase hostname of a URL string.
 *
 * @param {string} urlStr - Candidate URL.
 * @returns {string|null} Lowercase hostname, or null when the string
 *   cannot be parsed as a URL.
 */
function extractDomain(urlStr) {
  let parsed;
  try {
    parsed = new URL(urlStr);
  } catch {
    return null;
  }
  return parsed.hostname.toLowerCase();
}
|
|
84
|
+
|
|
85
|
+
/**
 * Parse a session JSONL file for network activity.
 *
 * Each line is parsed as JSON. Assistant `web_fetch`/`web_search` tool
 * calls contribute their URL (or query, when it embeds a URL), and
 * `exec` tool calls whose command mentions curl/wget/fetch/http
 * contribute any URLs found in the command string. Unreadable files
 * and malformed lines are skipped silently (best-effort parsing).
 *
 * @param {string} filePath - Path to .jsonl file
 * @returns {{ urls: string[], domains: Set<string>, toolCalls: Array }}
 */
function parseSessionFile(filePath) {
  const result = { urls: [], domains: new Set(), toolCalls: [] };

  let raw;
  try {
    raw = fs.readFileSync(filePath, 'utf8');
  } catch {
    return result;
  }

  for (const line of raw.split('\n')) {
    if (!line) continue;

    let entry;
    try {
      entry = JSON.parse(line);
    } catch {
      continue;
    }

    if (entry.role !== 'assistant' || !Array.isArray(entry.content)) continue;

    for (const part of entry.content) {
      if (part.type !== 'toolCall') continue;

      const name = part.name || '';
      const args = part.arguments || {};

      if (name === 'web_fetch' || name === 'web_search') {
        const target = args.url || args.query || '';
        const found = extractUrls(target);
        if (found.length) {
          result.urls.push(...found);
        } else if (target.startsWith('http')) {
          // Fall back to the raw value when the regex did not match.
          result.urls.push(target);
        }
        result.toolCalls.push({ tool: name, args, session: filePath });
      }

      if (name === 'exec') {
        const cmd = args.command || '';
        // Only commands that look network-capable are recorded.
        if (/\b(curl|wget|fetch|http)\b/i.test(cmd)) {
          result.urls.push(...extractUrls(cmd));
          result.toolCalls.push({ tool: 'exec', args, session: filePath });
        }
      }
    }
  }

  for (const u of result.urls) {
    const d = extractDomain(u);
    if (d) result.domains.add(d);
  }

  return result;
}
|
|
145
|
+
|
|
146
|
+
/**
 * Tracks outbound network activity extracted from session logs.
 *
 * Keeps an allowlist of expected domains and a blocklist of known
 * exfiltration endpoints; emits alerts through the optional onAlert
 * callback for known-bad domains and for first-seen domains.
 */
class NetworkEgressLogger {
  /**
   * @param {Object} opts
   * @param {string[]} [opts.allowlist] - Additional allowed domains
   * @param {string[]} [opts.badDomains] - Additional known-bad domains
   * @param {Function} [opts.onAlert] - Callback for alerts
   */
  constructor(opts = {}) {
    const extraAllowed = opts.allowlist || [];
    const extraBad = opts.badDomains || [];
    this.allowlist = new Set([...DEFAULT_ALLOWLIST, ...extraAllowed]);
    this.badDomains = new Set([...KNOWN_BAD_DOMAINS, ...extraBad]);
    this.seenDomains = new Set();
    this.onAlert = opts.onAlert || null;
    this.log = []; // entries: { timestamp, url, domain, status }
  }

  /**
   * Scan a session directory for network egress.
   * @param {string} sessionsDir - Path to sessions directory
   * @param {Object} [opts]
   * @param {number} [opts.maxAge] - Only scan files modified within this many ms
   * @returns {{ totalUrls: number, domains: string[], flagged: Array, badDomains: Array, firstSeen: string[] }}
   */
  scanSessions(sessionsDir, opts = {}) {
    if (!fs.existsSync(sessionsDir)) {
      return { totalUrls: 0, domains: [], flagged: [], badDomains: [], firstSeen: [] };
    }

    const jsonlFiles = fs
      .readdirSync(sessionsDir)
      .filter((name) => name.endsWith('.jsonl'));

    const allUrls = [];
    const allDomains = new Set();
    const badFound = [];
    const firstSeen = [];

    for (const name of jsonlFiles) {
      const filePath = path.join(sessionsDir, name);

      // Optional recency filter: skip files older than maxAge ms.
      if (opts.maxAge) {
        let mtime;
        try {
          mtime = fs.statSync(filePath).mtimeMs;
        } catch {
          continue;
        }
        if (Date.now() - mtime > opts.maxAge) continue;
      }

      const parsed = parseSessionFile(filePath);
      allUrls.push(...parsed.urls);

      for (const domain of parsed.domains) {
        allDomains.add(domain);

        // Known exfiltration endpoint: critical alert.
        if (this._isBadDomain(domain)) {
          badFound.push({
            domain,
            file: name,
            urls: parsed.urls.filter((u) => extractDomain(u) === domain),
          });
          this._alert({
            severity: 'critical',
            type: 'bad_domain',
            message: `Known-bad domain contacted: ${domain}`,
            details: { domain, session: name },
          });
        }

        // Never-before-seen, non-allowlisted domain: informational alert.
        if (!this.seenDomains.has(domain) && !this.allowlist.has(domain)) {
          firstSeen.push(domain);
          this._alert({
            severity: 'info',
            type: 'first_seen_domain',
            message: `First-seen domain: ${domain}`,
            details: { domain, session: name },
          });
        }

        this.seenDomains.add(domain);
      }
    }

    // Anything outside the allowlist is flagged (includes bad domains).
    const flagged = [...allDomains].filter((d) => !this.allowlist.has(d));

    return {
      totalUrls: allUrls.length,
      domains: [...allDomains],
      flagged,
      badDomains: badFound,
      firstSeen,
    };
  }

  /**
   * Check a single URL against the rules.
   * @param {string} url
   * @returns {{ allowed: boolean, domain: string|null, reason: string|null }}
   */
  checkUrl(url) {
    const domain = extractDomain(url);
    if (domain === null) {
      return { allowed: true, domain: null, reason: null };
    }

    if (this._isBadDomain(domain)) {
      return { allowed: false, domain, reason: `Known-bad domain: ${domain}` };
    }

    // A non-allowlisted domain is advisory only: still allowed, but
    // the reason string lets callers surface it.
    return this.allowlist.has(domain)
      ? { allowed: true, domain, reason: null }
      : { allowed: true, domain, reason: `Not in allowlist: ${domain}` };
  }

  // True when the domain (or any parent of it, e.g. xyz.ngrok.io) is
  // in the bad-domain set.
  _isBadDomain(domain) {
    if (this.badDomains.has(domain)) return true;
    return [...this.badDomains].some((bad) => domain.endsWith('.' + bad));
  }

  // Record the alert in the in-memory log and forward to the callback.
  _alert(alert) {
    this.log.push({ timestamp: Date.now(), ...alert });
    if (this.onAlert) this.onAlert(alert);
  }
}
|
|
273
|
+
|
|
274
|
+
// Public API: the logger class plus the pure helpers and default domain
// lists (the lists are exported for testing and external configuration).
module.exports = {
  NetworkEgressLogger,
  extractUrls,
  extractDomain,
  parseSessionFile,
  KNOWN_BAD_DOMAINS,
  DEFAULT_ALLOWLIST,
};
|
|
@@ -0,0 +1,290 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ClawMoat Skill Integrity Checker
|
|
3
|
+
*
|
|
4
|
+
* Hashes skill files on startup, detects modifications, and flags
|
|
5
|
+
* suspicious patterns in skill content.
|
|
6
|
+
*
|
|
7
|
+
* @module clawmoat/guardian/skill-integrity
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
const fs = require('fs');
|
|
11
|
+
const path = require('path');
|
|
12
|
+
const crypto = require('crypto');
|
|
13
|
+
|
|
14
|
+
const HASH_FILE = '.clawmoat-hashes.json';
|
|
15
|
+
|
|
16
|
+
// Suspicious patterns that may indicate malicious skills.
// NOTE: compiled WITHOUT the /g flag on purpose. These RegExp objects are
// module-level and shared (they are also exported); a global regex keeps
// per-object lastIndex state, so repeated .test()/.exec() calls by
// consumers would silently skip matches. Only the first match per rule is
// ever needed, so /g adds nothing here.
const SUSPICIOUS_PATTERNS = [
  { pattern: /\bcurl\s+(?:https?:\/\/|ftp:\/\/)\S+/i, label: 'curl to external URL', severity: 'warning' },
  { pattern: /\bwget\s+(?:https?:\/\/|ftp:\/\/)\S+/i, label: 'wget to external URL', severity: 'warning' },
  { pattern: /\beval\s*\(/i, label: 'eval() usage', severity: 'critical' },
  { pattern: /\bnew\s+Function\s*\(/i, label: 'new Function() usage', severity: 'critical' },
  { pattern: /\batob\s*\(/i, label: 'base64 decode (atob)', severity: 'warning' },
  { pattern: /\bbtoa\s*\(/i, label: 'base64 encode (btoa)', severity: 'warning' },
  { pattern: /Buffer\.from\s*\([^)]*,\s*['"]base64['"]\s*\)/i, label: 'Buffer base64 decode', severity: 'warning' },
  { pattern: /\bbase64\b.*(?:decode|encode)/i, label: 'base64 operation', severity: 'warning' },
  { pattern: /(?:\/etc\/passwd|\/etc\/shadow|~\/\.ssh|~\/\.aws|~\/\.gnupg)/, label: 'sensitive file reference', severity: 'critical' },
  { pattern: /\bexec\s*\(\s*['"`]/i, label: 'exec() with string', severity: 'warning' },
  { pattern: /\bchild_process\b/i, label: 'child_process usage', severity: 'warning' },
  { pattern: /\brequire\s*\(\s*['"]child_process['"]\s*\)/i, label: 'require child_process', severity: 'warning' },
  { pattern: /(?:nc|netcat)\s+-[a-z]*e\s/i, label: 'reverse shell pattern', severity: 'critical' },
  { pattern: /\|\s*(?:bash|sh|zsh)\b/i, label: 'pipe to shell', severity: 'critical' },
];
|
|
33
|
+
|
|
34
|
+
/**
 * Compute the SHA-256 digest of a file's contents.
 *
 * @param {string} filePath - File to hash.
 * @returns {string|null} Hex-encoded hash, or null if the file cannot be read.
 */
function hashFile(filePath) {
  let data;
  try {
    data = fs.readFileSync(filePath);
  } catch {
    return null;
  }
  const digest = crypto.createHash('sha256');
  digest.update(data);
  return digest.digest('hex');
}
|
|
47
|
+
|
|
48
|
+
/**
 * Recursively collect skill files under a directory.
 *
 * Picks up SKILL.md files plus *.js / *.sh / *.py / *.ts scripts,
 * skipping node_modules and .git subtrees. Unreadable directories are
 * ignored (best-effort walk); a missing root yields an empty array.
 *
 * @param {string} dir - Skills directory
 * @returns {string[]} Array of file paths
 */
function findSkillFiles(dir) {
  const collected = [];
  if (!fs.existsSync(dir)) return collected;

  const skipDirs = new Set(['node_modules', '.git']);
  const scriptRe = /\.(js|sh|py|ts)$/;

  const visit = (current) => {
    let entries;
    try {
      entries = fs.readdirSync(current, { withFileTypes: true });
    } catch {
      return; // unreadable directory: skip silently
    }
    for (const ent of entries) {
      const fullPath = path.join(current, ent.name);
      if (ent.isDirectory()) {
        if (!skipDirs.has(ent.name)) visit(fullPath);
      } else if (ent.name === 'SKILL.md' || scriptRe.test(ent.name)) {
        collected.push(fullPath);
      }
    }
  };

  visit(dir);
  return collected;
}
|
|
78
|
+
|
|
79
|
+
/**
 * Scan file content against SUSPICIOUS_PATTERNS.
 *
 * Records at most one finding per rule (the first match), with the
 * matched text truncated to 100 characters for reporting.
 *
 * @param {string} content - File content
 * @param {string} filePath - File path (for reporting)
 * @returns {{ suspicious: boolean, findings: Array }}
 */
function scanForSuspicious(content, filePath) {
  const findings = [];

  for (const rule of SUSPICIOUS_PATTERNS) {
    // Shared module-level regexes may carry /g state; reset before use.
    rule.pattern.lastIndex = 0;
    const hit = rule.pattern.exec(content);
    if (!hit) continue;

    findings.push({
      file: filePath,
      label: rule.label,
      severity: rule.severity,
      matched: hit[0].substring(0, 100),
    });
  }

  return { suspicious: findings.length > 0, findings };
}
|
|
102
|
+
|
|
103
|
+
/**
 * Watches a skills directory for tampering.
 *
 * init() hashes every skill file (SHA-256) and compares against a hash
 * lockfile from the previous run; audit() re-verifies the lockfile;
 * watch() monitors the directory for live modifications. Alerts are
 * delivered through the optional onAlert callback.
 */
class SkillIntegrityChecker {
  /**
   * @param {Object} opts
   * @param {string} opts.skillsDir - Path to skills directory
   * @param {string} [opts.hashFile] - Path to hash lockfile
   * @param {Function} [opts.onAlert] - Callback for alerts: (alert) => void
   */
  constructor(opts = {}) {
    this.skillsDir = opts.skillsDir || '';
    this.hashFilePath = opts.hashFile || path.join(this.skillsDir, HASH_FILE);
    this.onAlert = opts.onAlert || null;
    this.hashes = {}; // relative path -> sha256 hex
    this.watcher = null;
  }

  /**
   * Initialize: hash all skill files, alert on files whose hash differs
   * from the stored lockfile, scan contents for suspicious patterns, and
   * persist the fresh hashes.
   * @returns {{ files: number, new: number, changed: number, suspicious: Array }}
   */
  init() {
    const files = findSkillFiles(this.skillsDir);
    const currentHashes = {};
    const suspiciousFindings = [];
    let newFiles = 0;
    let changedFiles = 0;

    // Previously recorded hashes (empty object on first run).
    const storedHashes = this._loadHashes();

    for (const file of files) {
      const hash = hashFile(file);
      if (!hash) continue; // unreadable file: skip silently

      const rel = path.relative(this.skillsDir, file);
      currentHashes[rel] = hash;

      if (!storedHashes[rel]) {
        newFiles++;
      } else if (storedHashes[rel] !== hash) {
        changedFiles++;
        this._alert({
          severity: 'warning',
          type: 'skill_modified',
          message: `Skill file modified: ${rel}`,
          details: { file: rel, oldHash: storedHashes[rel], newHash: hash },
        });
      }

      // Content scan for suspicious patterns (best-effort; read errors ignored).
      try {
        const content = fs.readFileSync(file, 'utf8');
        const scan = scanForSuspicious(content, rel);
        if (scan.suspicious) {
          suspiciousFindings.push(...scan.findings);
        }
      } catch {}
    }

    this.hashes = currentHashes;
    this._saveHashes(currentHashes);

    return {
      files: files.length,
      new: newFiles,
      changed: changedFiles,
      suspicious: suspiciousFindings,
    };
  }

  /**
   * Audit: verify all current skill files against stored hashes.
   * @returns {{ ok: boolean, files: number, changed: string[], missing: string[], suspicious: Array }}
   */
  audit() {
    const storedHashes = this._loadHashes();
    const files = findSkillFiles(this.skillsDir);
    const changed = [];
    const missing = [];
    const suspiciousFindings = [];

    // Check that every file in the lockfile still exists and matches.
    for (const [rel, storedHash] of Object.entries(storedHashes)) {
      const full = path.join(this.skillsDir, rel);
      const currentHash = hashFile(full);
      if (!currentHash) {
        missing.push(rel);
      } else if (currentHash !== storedHash) {
        changed.push(rel);
      }
    }

    // Content scan across all currently present skill files.
    for (const file of files) {
      try {
        const content = fs.readFileSync(file, 'utf8');
        const rel = path.relative(this.skillsDir, file);
        const scan = scanForSuspicious(content, rel);
        if (scan.suspicious) suspiciousFindings.push(...scan.findings);
      } catch {}
    }

    return {
      ok: changed.length === 0 && missing.length === 0 && suspiciousFindings.length === 0,
      files: Object.keys(storedHashes).length,
      changed,
      missing,
      suspicious: suspiciousFindings,
    };
  }

  /**
   * Watch skills directory for changes (real-time monitoring).
   * NOTE(review): fs.watch with { recursive: true } throws on Linux before
   * Node 20 — confirm minimum supported Node version, or guard the call.
   * @returns {fs.FSWatcher|null}
   */
  watch() {
    if (!fs.existsSync(this.skillsDir)) return null;

    this.watcher = fs.watch(this.skillsDir, { recursive: true }, (eventType, filename) => {
      if (!filename) return;
      if (filename === HASH_FILE || filename.includes('node_modules')) return;

      // `filename` may be a nested relative path (e.g. "myskill/SKILL.md"),
      // so compare against its basename. Fix: the previous check compared the
      // whole path to 'SKILL.md' and missed manifests in subdirectories.
      const base = path.basename(filename);
      const ext = path.extname(filename);
      if (base !== 'SKILL.md' && !['.js', '.sh', '.py', '.ts'].includes(ext)) return;

      const full = path.join(this.skillsDir, filename);
      const hash = hashFile(full);
      const stored = this.hashes[filename];

      if (hash && stored && hash !== stored) {
        // Fix: messages previously contained the literal text "$(unknown)"
        // instead of interpolating the changed file's name.
        this._alert({
          severity: 'warning',
          type: 'skill_modified',
          message: `Skill file changed: ${filename}`,
          details: { file: filename, oldHash: stored, newHash: hash },
        });
        this.hashes[filename] = hash;
        this._saveHashes(this.hashes);
      } else if (hash && !stored) {
        this._alert({
          severity: 'info',
          type: 'skill_added',
          message: `New skill file: ${filename}`,
          details: { file: filename, hash },
        });
        this.hashes[filename] = hash;
        this._saveHashes(this.hashes);
      }
    });

    return this.watcher;
  }

  /** Stop watching (no-op when not watching). */
  stop() {
    if (this.watcher) {
      this.watcher.close();
      this.watcher = null;
    }
  }

  // Read the hash lockfile; a missing or corrupt file yields {}.
  _loadHashes() {
    try {
      return JSON.parse(fs.readFileSync(this.hashFilePath, 'utf8'));
    } catch {
      return {};
    }
  }

  // Persist hashes to the lockfile (best-effort; write errors ignored).
  _saveHashes(hashes) {
    try {
      const dir = path.dirname(this.hashFilePath);
      if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
      fs.writeFileSync(this.hashFilePath, JSON.stringify(hashes, null, 2) + '\n');
    } catch {}
  }

  // Forward an alert to the callback, if one was provided.
  _alert(alert) {
    if (this.onAlert) this.onAlert(alert);
  }
}
|
|
283
|
+
|
|
284
|
+
// Public API: the checker class plus the pure helpers and the pattern
// table (exported for testing and external reuse).
module.exports = {
  SkillIntegrityChecker,
  hashFile,
  findSkillFiles,
  scanForSuspicious,
  SUSPICIOUS_PATTERNS,
};
|
package/src/index.js
CHANGED
|
@@ -28,8 +28,10 @@ const { scanPII } = require('./scanners/pii');
|
|
|
28
28
|
const { scanUrls } = require('./scanners/urls');
|
|
29
29
|
const { scanMemoryPoison } = require('./scanners/memory-poison');
|
|
30
30
|
const { scanExfiltration } = require('./scanners/exfiltration');
|
|
31
|
+
const { scanExcessiveAgency } = require('./scanners/excessive-agency');
|
|
31
32
|
const { scanSkill, scanSkillContent } = require('./scanners/supply-chain');
|
|
32
33
|
const { evaluateToolCall } = require('./policies/engine');
|
|
34
|
+
const { HostGuardian, TIERS } = require('./guardian');
|
|
33
35
|
const { SecurityLogger } = require('./utils/logger');
|
|
34
36
|
const { loadConfig } = require('./utils/config');
|
|
35
37
|
|
|
@@ -82,6 +84,29 @@ class ClawMoat {
|
|
|
82
84
|
onEvent: opts.onEvent,
|
|
83
85
|
});
|
|
84
86
|
this.stats = { scanned: 0, blocked: 0, warnings: 0 };
|
|
87
|
+
|
|
88
|
+
// Initialize Host Guardian if configured
|
|
89
|
+
if (this.config.guardian) {
|
|
90
|
+
this.guardian = new HostGuardian({
|
|
91
|
+
mode: this.config.guardian.mode || 'standard',
|
|
92
|
+
workspace: this.config.guardian.workspace,
|
|
93
|
+
safeZones: this.config.guardian.safe_zones,
|
|
94
|
+
forbiddenZones: this.config.guardian.forbidden_zones,
|
|
95
|
+
logFile: opts.logFile,
|
|
96
|
+
quiet: opts.quiet,
|
|
97
|
+
onViolation: opts.onViolation,
|
|
98
|
+
});
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
/**
|
|
103
|
+
* Create and return a Host Guardian instance for laptop-hosted agent security.
|
|
104
|
+
* Can be used standalone without full ClawMoat config.
|
|
105
|
+
* @param {Object} opts - Guardian options (mode, workspace, safeZones, etc.)
|
|
106
|
+
* @returns {HostGuardian}
|
|
107
|
+
*/
|
|
108
|
+
static createGuardian(opts = {}) {
|
|
109
|
+
return new HostGuardian(opts);
|
|
85
110
|
}
|
|
86
111
|
|
|
87
112
|
/**
|
|
@@ -122,6 +147,15 @@ class ClawMoat {
|
|
|
122
147
|
}
|
|
123
148
|
}
|
|
124
149
|
|
|
150
|
+
// Excessive agency scan
|
|
151
|
+
if (this.config.detection?.excessive_agency !== false) {
|
|
152
|
+
const ea = scanExcessiveAgency(text, opts);
|
|
153
|
+
if (!ea.clean) {
|
|
154
|
+
results.findings.push(...ea.findings);
|
|
155
|
+
results.safe = false;
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
|
|
125
159
|
// Memory poisoning scan
|
|
126
160
|
if (this.config.detection?.memory_poison !== false) {
|
|
127
161
|
const mp = scanMemoryPoison(text, opts);
|
|
@@ -311,6 +345,9 @@ module.exports.scanPII = scanPII;
|
|
|
311
345
|
module.exports.scanUrls = scanUrls;
|
|
312
346
|
module.exports.scanMemoryPoison = scanMemoryPoison;
|
|
313
347
|
module.exports.scanExfiltration = scanExfiltration;
|
|
348
|
+
module.exports.scanExcessiveAgency = scanExcessiveAgency;
|
|
314
349
|
module.exports.scanSkill = scanSkill;
|
|
315
350
|
module.exports.scanSkillContent = scanSkillContent;
|
|
316
351
|
module.exports.evaluateToolCall = evaluateToolCall;
|
|
352
|
+
module.exports.HostGuardian = HostGuardian;
|
|
353
|
+
module.exports.TIERS = TIERS;
|