hackmyagent 0.10.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +111 -257
- package/dist/arp/index.d.ts +5 -1
- package/dist/arp/index.d.ts.map +1 -1
- package/dist/arp/index.js +38 -1
- package/dist/arp/index.js.map +1 -1
- package/dist/arp/monitors/skill-capability-monitor.d.ts +119 -0
- package/dist/arp/monitors/skill-capability-monitor.d.ts.map +1 -0
- package/dist/arp/monitors/skill-capability-monitor.js +258 -0
- package/dist/arp/monitors/skill-capability-monitor.js.map +1 -0
- package/dist/arp/telemetry/forwarder.d.ts +62 -0
- package/dist/arp/telemetry/forwarder.d.ts.map +1 -0
- package/dist/arp/telemetry/forwarder.js +106 -0
- package/dist/arp/telemetry/forwarder.js.map +1 -0
- package/dist/arp/telemetry/gtin.d.ts +87 -0
- package/dist/arp/telemetry/gtin.d.ts.map +1 -0
- package/dist/arp/telemetry/gtin.js +239 -0
- package/dist/arp/telemetry/gtin.js.map +1 -0
- package/dist/arp/telemetry/index.d.ts +6 -0
- package/dist/arp/telemetry/index.d.ts.map +1 -0
- package/dist/arp/telemetry/index.js +17 -0
- package/dist/arp/telemetry/index.js.map +1 -0
- package/dist/arp/types.d.ts +10 -0
- package/dist/arp/types.d.ts.map +1 -1
- package/dist/attack/index.d.ts +1 -1
- package/dist/attack/index.d.ts.map +1 -1
- package/dist/attack/index.js +5 -1
- package/dist/attack/index.js.map +1 -1
- package/dist/attack/payloads/context-window.d.ts +7 -0
- package/dist/attack/payloads/context-window.d.ts.map +1 -0
- package/dist/attack/payloads/context-window.js +110 -0
- package/dist/attack/payloads/context-window.js.map +1 -0
- package/dist/attack/payloads/index.d.ts +5 -1
- package/dist/attack/payloads/index.d.ts.map +1 -1
- package/dist/attack/payloads/index.js +17 -1
- package/dist/attack/payloads/index.js.map +1 -1
- package/dist/attack/payloads/memory-weaponization.d.ts +7 -0
- package/dist/attack/payloads/memory-weaponization.d.ts.map +1 -0
- package/dist/attack/payloads/memory-weaponization.js +110 -0
- package/dist/attack/payloads/memory-weaponization.js.map +1 -0
- package/dist/attack/payloads/supply-chain.d.ts +7 -0
- package/dist/attack/payloads/supply-chain.d.ts.map +1 -0
- package/dist/attack/payloads/supply-chain.js +110 -0
- package/dist/attack/payloads/supply-chain.js.map +1 -0
- package/dist/attack/payloads/tool-shadow.d.ts +8 -0
- package/dist/attack/payloads/tool-shadow.d.ts.map +1 -0
- package/dist/attack/payloads/tool-shadow.js +209 -0
- package/dist/attack/payloads/tool-shadow.js.map +1 -0
- package/dist/attack/scanner.d.ts.map +1 -1
- package/dist/attack/scanner.js +4 -0
- package/dist/attack/scanner.js.map +1 -1
- package/dist/attack/types.d.ts +1 -1
- package/dist/attack/types.d.ts.map +1 -1
- package/dist/attack/types.js +20 -0
- package/dist/attack/types.js.map +1 -1
- package/dist/checker/index.d.ts +2 -0
- package/dist/checker/index.d.ts.map +1 -1
- package/dist/checker/index.js +8 -1
- package/dist/checker/index.js.map +1 -1
- package/dist/checker/skill-dependency-graph.d.ts +55 -0
- package/dist/checker/skill-dependency-graph.d.ts.map +1 -0
- package/dist/checker/skill-dependency-graph.js +288 -0
- package/dist/checker/skill-dependency-graph.js.map +1 -0
- package/dist/cli.js +481 -66
- package/dist/cli.js.map +1 -1
- package/dist/hardening/index.d.ts +5 -0
- package/dist/hardening/index.d.ts.map +1 -1
- package/dist/hardening/index.js +11 -1
- package/dist/hardening/index.js.map +1 -1
- package/dist/hardening/scanner.d.ts +40 -0
- package/dist/hardening/scanner.d.ts.map +1 -1
- package/dist/hardening/scanner.js +988 -11
- package/dist/hardening/scanner.js.map +1 -1
- package/dist/hardening/security-check.d.ts +2 -0
- package/dist/hardening/security-check.d.ts.map +1 -1
- package/dist/hardening/skill-capability-validator.d.ts +31 -0
- package/dist/hardening/skill-capability-validator.d.ts.map +1 -0
- package/dist/hardening/skill-capability-validator.js +237 -0
- package/dist/hardening/skill-capability-validator.js.map +1 -0
- package/dist/hardening/skill-context.d.ts +22 -0
- package/dist/hardening/skill-context.d.ts.map +1 -0
- package/dist/hardening/skill-context.js +127 -0
- package/dist/hardening/skill-context.js.map +1 -0
- package/dist/hardening/taxonomy.d.ts +17 -0
- package/dist/hardening/taxonomy.d.ts.map +1 -0
- package/dist/hardening/taxonomy.js +152 -0
- package/dist/hardening/taxonomy.js.map +1 -0
- package/dist/index.d.ts +12 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +36 -3
- package/dist/index.js.map +1 -1
- package/dist/plugins/credvault.js +2 -2
- package/dist/plugins/credvault.js.map +1 -1
- package/dist/plugins/secretless.d.ts +15 -0
- package/dist/plugins/secretless.d.ts.map +1 -0
- package/dist/plugins/secretless.js +199 -0
- package/dist/plugins/secretless.js.map +1 -0
- package/dist/plugins/signcrypt.js +2 -2
- package/dist/plugins/signcrypt.js.map +1 -1
- package/dist/plugins/skillguard.js +2 -2
- package/dist/plugins/skillguard.js.map +1 -1
- package/dist/registry/client.d.ts +1 -1
- package/dist/registry/client.d.ts.map +1 -1
- package/dist/registry/client.js +4 -1
- package/dist/registry/client.js.map +1 -1
- package/dist/registry/publish.d.ts.map +1 -1
- package/dist/registry/publish.js +7 -1
- package/dist/registry/publish.js.map +1 -1
- package/dist/resolve-mcp.d.ts +21 -0
- package/dist/resolve-mcp.d.ts.map +1 -0
- package/dist/resolve-mcp.js +42 -0
- package/dist/resolve-mcp.js.map +1 -0
- package/dist/scanner/external-scanner.d.ts.map +1 -1
- package/dist/scanner/external-scanner.js +48 -14
- package/dist/scanner/external-scanner.js.map +1 -1
- package/dist/scanner/types.d.ts +1 -0
- package/dist/scanner/types.d.ts.map +1 -1
- package/dist/soul/scanner.d.ts.map +1 -1
- package/dist/soul/scanner.js +2 -1
- package/dist/soul/scanner.js.map +1 -1
- package/dist/telemetry/contribute.d.ts +60 -0
- package/dist/telemetry/contribute.d.ts.map +1 -0
- package/dist/telemetry/contribute.js +169 -0
- package/dist/telemetry/contribute.js.map +1 -0
- package/dist/telemetry/index.d.ts +6 -0
- package/dist/telemetry/index.d.ts.map +1 -0
- package/dist/telemetry/index.js +18 -0
- package/dist/telemetry/index.js.map +1 -0
- package/dist/telemetry/opt-in.d.ts +46 -0
- package/dist/telemetry/opt-in.d.ts.map +1 -0
- package/dist/telemetry/opt-in.js +220 -0
- package/dist/telemetry/opt-in.js.map +1 -0
- package/package.json +9 -3
|
@@ -39,8 +39,12 @@ var __importStar = (this && this.__importStar) || (function () {
|
|
|
39
39
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
40
40
|
exports.HardeningScanner = void 0;
|
|
41
41
|
const fs = __importStar(require("fs/promises"));
|
|
42
|
+
const crypto = __importStar(require("crypto"));
|
|
42
43
|
const path = __importStar(require("path"));
|
|
43
44
|
const semantic_1 = require("../semantic");
|
|
45
|
+
const taxonomy_1 = require("./taxonomy");
|
|
46
|
+
const skill_context_1 = require("./skill-context");
|
|
47
|
+
const skill_capability_validator_1 = require("./skill-capability-validator");
|
|
44
48
|
/**
|
|
45
49
|
* Defines which checks apply to which project types
|
|
46
50
|
* Key: check ID prefix or full ID
|
|
@@ -88,6 +92,18 @@ const CHECK_PROJECT_TYPES = {
|
|
|
88
92
|
'SEC-': ['webapp', 'api', 'mcp'],
|
|
89
93
|
// Semantic analysis - applies to all project types
|
|
90
94
|
'SEM-': ['all'],
|
|
95
|
+
// Unicode steganography - applies to all projects
|
|
96
|
+
'UNICODE-STEGO-': ['all'],
|
|
97
|
+
// Agent memory/context checks
|
|
98
|
+
'MEM-': ['all'],
|
|
99
|
+
// RAG poisoning checks
|
|
100
|
+
'RAG-': ['all'],
|
|
101
|
+
// Agent identity checks
|
|
102
|
+
'AIM-': ['all'],
|
|
103
|
+
// Agent DNA integrity checks
|
|
104
|
+
'DNA-': ['all'],
|
|
105
|
+
// Skill memory manipulation checks
|
|
106
|
+
'SKILL-MEM-': ['openclaw', 'mcp'],
|
|
91
107
|
};
|
|
92
108
|
// Patterns for detecting exposed credentials
|
|
93
109
|
// Each pattern is carefully tuned to minimize false positives
|
|
@@ -344,6 +360,26 @@ class HardeningScanner {
|
|
|
344
360
|
// OpenClaw CVE-specific checks
|
|
345
361
|
const cveFindings = await this.checkOpenclawCVE(targetDir, shouldFix);
|
|
346
362
|
findings.push(...cveFindings);
|
|
363
|
+
// Unicode steganography checks (GlassWorm detection)
|
|
364
|
+
const unicodeStegoFindings = await this.checkUnicodeSteganography(targetDir, shouldFix);
|
|
365
|
+
findings.push(...unicodeStegoFindings);
|
|
366
|
+
// Memory/context poisoning checks
|
|
367
|
+
const memFindings = await this.checkMemoryPoisoning(targetDir, shouldFix);
|
|
368
|
+
findings.push(...memFindings);
|
|
369
|
+
// RAG poisoning checks
|
|
370
|
+
const ragFindings = await this.checkRAGPoisoning(targetDir, shouldFix);
|
|
371
|
+
findings.push(...ragFindings);
|
|
372
|
+
// Agent identity checks
|
|
373
|
+
const aimFindings = await this.checkAgentIdentity(targetDir, shouldFix);
|
|
374
|
+
findings.push(...aimFindings);
|
|
375
|
+
// Agent DNA integrity checks
|
|
376
|
+
const dnaFindings = await this.checkAgentDNA(targetDir, shouldFix);
|
|
377
|
+
findings.push(...dnaFindings);
|
|
378
|
+
// Skill memory manipulation checks
|
|
379
|
+
const skillMemFindings = await this.checkSkillMemory(targetDir, shouldFix);
|
|
380
|
+
findings.push(...skillMemFindings);
|
|
381
|
+
// Enrich findings with attack taxonomy mapping
|
|
382
|
+
(0, taxonomy_1.enrichWithTaxonomy)(findings);
|
|
347
383
|
// Layer 2: Structural analysis (always on)
|
|
348
384
|
let layer2Count = 0;
|
|
349
385
|
let layer3Count = 0;
|
|
@@ -3846,19 +3882,33 @@ dist/
|
|
|
3846
3882
|
const lines = content.split('\n').map(line => line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) : line);
|
|
3847
3883
|
// SKILL-001: Unsigned Skill
|
|
3848
3884
|
const hasSignature = content.includes('opena2a_signature:') ||
|
|
3849
|
-
content.includes('-----BEGIN SIGNATURE-----')
|
|
3885
|
+
content.includes('-----BEGIN SIGNATURE-----') ||
|
|
3886
|
+
content.includes('<!-- opena2a-guard hash=');
|
|
3887
|
+
let skill001Fixed = false;
|
|
3888
|
+
if (!hasSignature && autoFix) {
|
|
3889
|
+
const hash = crypto.createHash('sha256').update(content).digest('hex');
|
|
3890
|
+
const signedDate = new Date().toISOString();
|
|
3891
|
+
const signatureBlock = `\n<!-- opena2a-guard hash="sha256:${hash}" signed="${signedDate}" -->`;
|
|
3892
|
+
content = content + signatureBlock;
|
|
3893
|
+
await fs.writeFile(skillFile, content);
|
|
3894
|
+
skill001Fixed = true;
|
|
3895
|
+
}
|
|
3850
3896
|
findings.push({
|
|
3851
3897
|
checkId: 'SKILL-001',
|
|
3852
3898
|
name: 'Unsigned Skill',
|
|
3853
3899
|
description: 'Skill file lacks cryptographic signature for authenticity verification',
|
|
3854
3900
|
category: 'skill',
|
|
3855
3901
|
severity: 'medium',
|
|
3856
|
-
passed: hasSignature,
|
|
3902
|
+
passed: hasSignature || skill001Fixed,
|
|
3857
3903
|
message: hasSignature
|
|
3858
3904
|
? 'Skill has cryptographic signature'
|
|
3859
|
-
:
|
|
3905
|
+
: skill001Fixed
|
|
3906
|
+
? 'Skill was unsigned - signature added'
|
|
3907
|
+
: 'Skill is unsigned - cannot verify authenticity or integrity',
|
|
3860
3908
|
file: relativePath,
|
|
3861
|
-
fixable:
|
|
3909
|
+
fixable: true,
|
|
3910
|
+
fixed: skill001Fixed,
|
|
3911
|
+
fixMessage: skill001Fixed ? 'Added SHA-256 signature block to skill file' : undefined,
|
|
3862
3912
|
fix: 'Sign the skill using: openclaw sign skill.md --key ~/.openclaw/signing-key.pem',
|
|
3863
3913
|
});
|
|
3864
3914
|
// SKILL-002: Remote Fetch Pattern
|
|
@@ -3907,26 +3957,45 @@ dist/
|
|
|
3907
3957
|
}
|
|
3908
3958
|
}
|
|
3909
3959
|
// SKILL-004: Filesystem Write Outside Sandbox
|
|
3910
|
-
const filesystemWildcardPattern = /filesystem:\s*\*|filesystem:\s
|
|
3960
|
+
const filesystemWildcardPattern = /filesystem:\s*\*|filesystem:\s*~\/|filesystem:\s*\//gi;
|
|
3961
|
+
let skill004FileModified = false;
|
|
3911
3962
|
for (let i = 0; i < lines.length; i++) {
|
|
3912
3963
|
const line = lines[i];
|
|
3913
3964
|
filesystemWildcardPattern.lastIndex = 0;
|
|
3914
3965
|
if (filesystemWildcardPattern.test(line)) {
|
|
3966
|
+
let fixApplied = false;
|
|
3967
|
+
if (autoFix) {
|
|
3968
|
+
const originalLine = lines[i];
|
|
3969
|
+
lines[i] = lines[i].replace(/filesystem:\s*\*/gi, 'filesystem:./');
|
|
3970
|
+
lines[i] = lines[i].replace(/filesystem:\s*~\//gi, 'filesystem:./data/');
|
|
3971
|
+
if (lines[i] !== originalLine) {
|
|
3972
|
+
fixApplied = true;
|
|
3973
|
+
skill004FileModified = true;
|
|
3974
|
+
}
|
|
3975
|
+
}
|
|
3915
3976
|
findings.push({
|
|
3916
3977
|
checkId: 'SKILL-004',
|
|
3917
3978
|
name: 'Filesystem Write Outside Sandbox',
|
|
3918
3979
|
description: 'Skill requests broad filesystem access outside sandbox',
|
|
3919
3980
|
category: 'skill',
|
|
3920
3981
|
severity: 'critical',
|
|
3921
|
-
passed:
|
|
3922
|
-
message:
|
|
3982
|
+
passed: fixApplied,
|
|
3983
|
+
message: fixApplied
|
|
3984
|
+
? `Broad filesystem access restricted: "${lines[i].trim()}"`
|
|
3985
|
+
: `Broad filesystem access requested: "${line.trim()}"`,
|
|
3923
3986
|
file: relativePath,
|
|
3924
3987
|
line: i + 1,
|
|
3925
|
-
fixable:
|
|
3988
|
+
fixable: true,
|
|
3989
|
+
fixed: fixApplied,
|
|
3990
|
+
fixMessage: fixApplied ? 'Restricted filesystem access to sandbox scope' : undefined,
|
|
3926
3991
|
fix: 'Restrict filesystem access to specific directories (e.g., filesystem:./data/*)',
|
|
3927
3992
|
});
|
|
3928
3993
|
}
|
|
3929
3994
|
}
|
|
3995
|
+
if (skill004FileModified) {
|
|
3996
|
+
content = lines.join('\n');
|
|
3997
|
+
await fs.writeFile(skillFile, content);
|
|
3998
|
+
}
|
|
3930
3999
|
// SKILL-005: Credential File Access
|
|
3931
4000
|
for (let i = 0; i < lines.length; i++) {
|
|
3932
4001
|
const line = lines[i];
|
|
@@ -4050,12 +4119,16 @@ dist/
|
|
|
4050
4119
|
break; // One typosquatting finding per skill file
|
|
4051
4120
|
}
|
|
4052
4121
|
}
|
|
4053
|
-
// SKILL-010: Env File Exfiltration
|
|
4122
|
+
// SKILL-010: Env File Exfiltration (context-aware)
|
|
4054
4123
|
const envFilePattern = /\.env|dotenv|process\.env|environ|getenv/gi;
|
|
4055
4124
|
for (let i = 0; i < lines.length; i++) {
|
|
4056
4125
|
const line = lines[i];
|
|
4057
4126
|
envFilePattern.lastIndex = 0;
|
|
4058
4127
|
if (envFilePattern.test(line)) {
|
|
4128
|
+
const section = (0, skill_context_1.classifySkillSection)(content, i);
|
|
4129
|
+
if ((0, skill_context_1.isLikelyFalsePositive)('SKILL-010', line, section, content)) {
|
|
4130
|
+
continue;
|
|
4131
|
+
}
|
|
4059
4132
|
findings.push({
|
|
4060
4133
|
checkId: 'SKILL-010',
|
|
4061
4134
|
name: 'Env File Exfiltration',
|
|
@@ -4071,12 +4144,16 @@ dist/
|
|
|
4071
4144
|
});
|
|
4072
4145
|
}
|
|
4073
4146
|
}
|
|
4074
|
-
// SKILL-011: Browser Data Access
|
|
4147
|
+
// SKILL-011: Browser Data Access (context-aware)
|
|
4075
4148
|
const browserDataPattern = /chrome|firefox|cookies|localStorage|sessionStorage|browser.*data|chromium|safari.*cookies/gi;
|
|
4076
4149
|
for (let i = 0; i < lines.length; i++) {
|
|
4077
4150
|
const line = lines[i];
|
|
4078
4151
|
browserDataPattern.lastIndex = 0;
|
|
4079
4152
|
if (browserDataPattern.test(line)) {
|
|
4153
|
+
const section = (0, skill_context_1.classifySkillSection)(content, i);
|
|
4154
|
+
if ((0, skill_context_1.isLikelyFalsePositive)('SKILL-011', line, section, content)) {
|
|
4155
|
+
continue;
|
|
4156
|
+
}
|
|
4080
4157
|
findings.push({
|
|
4081
4158
|
checkId: 'SKILL-011',
|
|
4082
4159
|
name: 'Browser Data Access',
|
|
@@ -4092,12 +4169,16 @@ dist/
|
|
|
4092
4169
|
});
|
|
4093
4170
|
}
|
|
4094
4171
|
}
|
|
4095
|
-
// SKILL-012: Crypto Wallet Access
|
|
4172
|
+
// SKILL-012: Crypto Wallet Access (context-aware)
|
|
4096
4173
|
const cryptoWalletPattern = /wallet|solana|phantom|metamask|ledger|seed\s*phrase|mnemonic|\.sol\b|\.eth\b|private\s*key/gi;
|
|
4097
4174
|
for (let i = 0; i < lines.length; i++) {
|
|
4098
4175
|
const line = lines[i];
|
|
4099
4176
|
cryptoWalletPattern.lastIndex = 0;
|
|
4100
4177
|
if (cryptoWalletPattern.test(line)) {
|
|
4178
|
+
const section = (0, skill_context_1.classifySkillSection)(content, i);
|
|
4179
|
+
if ((0, skill_context_1.isLikelyFalsePositive)('SKILL-012', line, section, content)) {
|
|
4180
|
+
continue;
|
|
4181
|
+
}
|
|
4101
4182
|
findings.push({
|
|
4102
4183
|
checkId: 'SKILL-012',
|
|
4103
4184
|
name: 'Crypto Wallet Access',
|
|
@@ -4113,6 +4194,84 @@ dist/
|
|
|
4113
4194
|
});
|
|
4114
4195
|
}
|
|
4115
4196
|
}
|
|
4197
|
+
// SKILL-018: Undeclared Capability Validation
|
|
4198
|
+
const declaredCaps = (0, skill_capability_validator_1.parseDeclaredCapabilities)(content);
|
|
4199
|
+
const inferredCaps = (0, skill_capability_validator_1.inferActualCapabilities)(content);
|
|
4200
|
+
const capFindings = (0, skill_capability_validator_1.validateCapabilities)(declaredCaps, inferredCaps, relativePath);
|
|
4201
|
+
findings.push(...capFindings);
|
|
4202
|
+
// SKILL-019: Stale Skill Signature
|
|
4203
|
+
const signatureMatch = content.match(/<!-- opena2a-guard hash="sha256:([a-f0-9]+)" signed="([^"]+)"(?: expires_at="([^"]+)")? -->/);
|
|
4204
|
+
if (signatureMatch) {
|
|
4205
|
+
const storedHash = signatureMatch[1];
|
|
4206
|
+
const signatureBlock = signatureMatch[0];
|
|
4207
|
+
// Compute hash of content excluding the signature block
|
|
4208
|
+
const contentWithoutSig = content.replace(signatureBlock, '').replace(/\n$/, '');
|
|
4209
|
+
const computedHash = crypto.createHash('sha256').update(contentWithoutSig).digest('hex');
|
|
4210
|
+
if (storedHash !== computedHash) {
|
|
4211
|
+
let skill019Fixed = false;
|
|
4212
|
+
if (autoFix) {
|
|
4213
|
+
const newHash = crypto.createHash('sha256').update(contentWithoutSig).digest('hex');
|
|
4214
|
+
const newDate = new Date().toISOString();
|
|
4215
|
+
const expiresAt = signatureMatch[3]
|
|
4216
|
+
? ` expires_at="${new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString()}"`
|
|
4217
|
+
: '';
|
|
4218
|
+
const newSigBlock = `<!-- opena2a-guard hash="sha256:${newHash}" signed="${newDate}"${expiresAt} -->`;
|
|
4219
|
+
content = content.replace(signatureBlock, newSigBlock);
|
|
4220
|
+
await fs.writeFile(skillFile, content);
|
|
4221
|
+
skill019Fixed = true;
|
|
4222
|
+
}
|
|
4223
|
+
findings.push({
|
|
4224
|
+
checkId: 'SKILL-019',
|
|
4225
|
+
name: 'Stale Skill Signature',
|
|
4226
|
+
description: 'Skill content has changed since it was signed - signature hash mismatch',
|
|
4227
|
+
category: 'skill',
|
|
4228
|
+
severity: 'medium',
|
|
4229
|
+
passed: skill019Fixed,
|
|
4230
|
+
message: skill019Fixed
|
|
4231
|
+
? 'Stale signature detected and re-signed'
|
|
4232
|
+
: 'Signature hash does not match current content - skill may have been tampered with',
|
|
4233
|
+
file: relativePath,
|
|
4234
|
+
fixable: true,
|
|
4235
|
+
fixed: skill019Fixed,
|
|
4236
|
+
fixMessage: skill019Fixed ? 'Re-computed hash and updated signature block' : undefined,
|
|
4237
|
+
fix: 'Re-sign the skill to update the hash: hackmyagent secure --fix',
|
|
4238
|
+
});
|
|
4239
|
+
}
|
|
4240
|
+
// HEARTBEAT-007: Expired Heartbeat (check expires_at in signature block)
|
|
4241
|
+
if (signatureMatch[3]) {
|
|
4242
|
+
const expiresAt = new Date(signatureMatch[3]);
|
|
4243
|
+
const now = new Date();
|
|
4244
|
+
if (expiresAt < now) {
|
|
4245
|
+
let hb007Fixed = false;
|
|
4246
|
+
if (autoFix) {
|
|
4247
|
+
const contentWithoutSig = content.replace(signatureMatch[0], '').replace(/\n$/, '');
|
|
4248
|
+
const newHash = crypto.createHash('sha256').update(contentWithoutSig).digest('hex');
|
|
4249
|
+
const newDate = new Date().toISOString();
|
|
4250
|
+
const newExpiry = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
|
|
4251
|
+
const newSigBlock = `<!-- opena2a-guard hash="sha256:${newHash}" signed="${newDate}" expires_at="${newExpiry}" -->`;
|
|
4252
|
+
content = content.replace(signatureMatch[0], newSigBlock);
|
|
4253
|
+
await fs.writeFile(skillFile, content);
|
|
4254
|
+
hb007Fixed = true;
|
|
4255
|
+
}
|
|
4256
|
+
findings.push({
|
|
4257
|
+
checkId: 'HEARTBEAT-007',
|
|
4258
|
+
name: 'Expired Heartbeat',
|
|
4259
|
+
description: 'Skill signature has expired and needs renewal',
|
|
4260
|
+
category: 'skill',
|
|
4261
|
+
severity: 'high',
|
|
4262
|
+
passed: hb007Fixed,
|
|
4263
|
+
message: hb007Fixed
|
|
4264
|
+
? 'Expired signature renewed with 7-day validity'
|
|
4265
|
+
: `Skill signature expired at ${signatureMatch[3]}`,
|
|
4266
|
+
file: relativePath,
|
|
4267
|
+
fixable: true,
|
|
4268
|
+
fixed: hb007Fixed,
|
|
4269
|
+
fixMessage: hb007Fixed ? 'Updated expiry to 7 days from now and re-signed' : undefined,
|
|
4270
|
+
fix: 'Re-sign the skill with a new expiry: hackmyagent secure --fix',
|
|
4271
|
+
});
|
|
4272
|
+
}
|
|
4273
|
+
}
|
|
4274
|
+
}
|
|
4116
4275
|
}
|
|
4117
4276
|
return findings;
|
|
4118
4277
|
}
|
|
@@ -5452,6 +5611,824 @@ dist/
|
|
|
5452
5611
|
}
|
|
5453
5612
|
return findings;
|
|
5454
5613
|
}
|
|
5614
|
+
/**
|
|
5615
|
+
* Recursively find source files (.ts, .js, .mjs, .cjs, .tsx, .jsx)
|
|
5616
|
+
* Skips node_modules, dist, .git, and hidden directories
|
|
5617
|
+
*/
|
|
5618
|
+
async findSourceFiles(dir, baseDir, depth = 0) {
|
|
5619
|
+
if (depth > 10)
|
|
5620
|
+
return [];
|
|
5621
|
+
const sourceExtensions = new Set(['.ts', '.js', '.mjs', '.cjs', '.tsx', '.jsx']);
|
|
5622
|
+
const skipDirs = new Set(['node_modules', 'dist', '.git']);
|
|
5623
|
+
const files = [];
|
|
5624
|
+
let entries;
|
|
5625
|
+
try {
|
|
5626
|
+
entries = await fs.readdir(dir, { withFileTypes: true });
|
|
5627
|
+
}
|
|
5628
|
+
catch {
|
|
5629
|
+
return files;
|
|
5630
|
+
}
|
|
5631
|
+
for (const entry of entries) {
|
|
5632
|
+
const fullPath = path.join(dir, entry.name);
|
|
5633
|
+
// Validate path is within directory (no path traversal)
|
|
5634
|
+
if (!this.isPathWithinDirectory(fullPath, baseDir)) {
|
|
5635
|
+
continue;
|
|
5636
|
+
}
|
|
5637
|
+
if (entry.isDirectory()) {
|
|
5638
|
+
// Skip node_modules, dist, .git, and hidden directories
|
|
5639
|
+
if (skipDirs.has(entry.name))
|
|
5640
|
+
continue;
|
|
5641
|
+
if (entry.name.startsWith('.'))
|
|
5642
|
+
continue;
|
|
5643
|
+
// Skip symlinks to prevent path traversal
|
|
5644
|
+
try {
|
|
5645
|
+
const stats = await fs.lstat(fullPath);
|
|
5646
|
+
if (stats.isSymbolicLink())
|
|
5647
|
+
continue;
|
|
5648
|
+
}
|
|
5649
|
+
catch {
|
|
5650
|
+
continue;
|
|
5651
|
+
}
|
|
5652
|
+
const subFiles = await this.findSourceFiles(fullPath, baseDir, depth + 1);
|
|
5653
|
+
files.push(...subFiles);
|
|
5654
|
+
}
|
|
5655
|
+
else if (entry.isFile()) {
|
|
5656
|
+
const ext = path.extname(entry.name).toLowerCase();
|
|
5657
|
+
if (sourceExtensions.has(ext)) {
|
|
5658
|
+
files.push(fullPath);
|
|
5659
|
+
}
|
|
5660
|
+
}
|
|
5661
|
+
}
|
|
5662
|
+
return files;
|
|
5663
|
+
}
|
|
5664
|
+
/**
|
|
5665
|
+
* Walk a directory recursively and return files matching the given extensions.
|
|
5666
|
+
* Skips node_modules, dist, .git, and hidden directories.
|
|
5667
|
+
*/
|
|
5668
|
+
async walkDirectory(dir, extensions, depth = 0, maxDepth = 10) {
|
|
5669
|
+
if (depth > maxDepth)
|
|
5670
|
+
return [];
|
|
5671
|
+
const extSet = new Set(extensions.map((e) => e.toLowerCase()));
|
|
5672
|
+
const skipDirs = new Set(['node_modules', 'dist', '.git', '__pycache__', '.venv']);
|
|
5673
|
+
const files = [];
|
|
5674
|
+
let entries;
|
|
5675
|
+
try {
|
|
5676
|
+
entries = await fs.readdir(dir, { withFileTypes: true });
|
|
5677
|
+
}
|
|
5678
|
+
catch {
|
|
5679
|
+
return files;
|
|
5680
|
+
}
|
|
5681
|
+
for (const entry of entries) {
|
|
5682
|
+
const fullPath = path.join(dir, entry.name);
|
|
5683
|
+
if (entry.isSymbolicLink())
|
|
5684
|
+
continue;
|
|
5685
|
+
if (entry.isDirectory()) {
|
|
5686
|
+
if (skipDirs.has(entry.name))
|
|
5687
|
+
continue;
|
|
5688
|
+
if (entry.name.startsWith('.'))
|
|
5689
|
+
continue;
|
|
5690
|
+
const subFiles = await this.walkDirectory(fullPath, extensions, depth + 1, maxDepth);
|
|
5691
|
+
files.push(...subFiles);
|
|
5692
|
+
}
|
|
5693
|
+
else if (entry.isFile()) {
|
|
5694
|
+
const ext = path.extname(entry.name).toLowerCase();
|
|
5695
|
+
if (extSet.has(ext)) {
|
|
5696
|
+
files.push(fullPath);
|
|
5697
|
+
}
|
|
5698
|
+
}
|
|
5699
|
+
}
|
|
5700
|
+
return files;
|
|
5701
|
+
}
|
|
5702
|
+
/**
|
|
5703
|
+
* Check for memory/context poisoning risks
|
|
5704
|
+
* Detects patterns that could allow attackers to poison agent memory or conversation context
|
|
5705
|
+
*/
|
|
5706
|
+
async checkMemoryPoisoning(targetDir, _autoFix) {
|
|
5707
|
+
const findings = [];
|
|
5708
|
+
// MEM-001: Unvalidated memory persistence
|
|
5709
|
+
// Check for memory/context files that accept external input without validation
|
|
5710
|
+
const memoryFiles = ['memory.json', 'context.json', '.memory', 'agent-memory.json', 'conversation-history.json'];
|
|
5711
|
+
for (const memFile of memoryFiles) {
|
|
5712
|
+
const filePath = path.join(targetDir, memFile);
|
|
5713
|
+
try {
|
|
5714
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5715
|
+
// Check if memory file is world-writable or contains unvalidated external refs
|
|
5716
|
+
if (content.includes('$ref') || content.includes('__proto__') || content.includes('constructor')) {
|
|
5717
|
+
findings.push({
|
|
5718
|
+
checkId: 'MEM-001',
|
|
5719
|
+
name: 'Unvalidated memory persistence',
|
|
5720
|
+
description: 'Memory file contains prototype pollution vectors or unvalidated external references that could be exploited to inject malicious context',
|
|
5721
|
+
category: 'memory-poisoning',
|
|
5722
|
+
severity: 'high',
|
|
5723
|
+
passed: false,
|
|
5724
|
+
message: `Memory file ${memFile} contains potentially dangerous patterns ($ref, __proto__, constructor)`,
|
|
5725
|
+
fixable: false,
|
|
5726
|
+
file: memFile,
|
|
5727
|
+
fix: 'Sanitize all memory entries before persistence. Remove __proto__ and constructor keys. Validate $ref URIs.',
|
|
5728
|
+
});
|
|
5729
|
+
}
|
|
5730
|
+
}
|
|
5731
|
+
catch { /* file doesn't exist - skip */ }
|
|
5732
|
+
}
|
|
5733
|
+
// MEM-002: No memory integrity verification
|
|
5734
|
+
// Check if conversation/memory files have integrity checks
|
|
5735
|
+
const configFiles = ['agent-config.json', 'config.json', 'settings.json', '.agent.json'];
|
|
5736
|
+
for (const cfgFile of configFiles) {
|
|
5737
|
+
const filePath = path.join(targetDir, cfgFile);
|
|
5738
|
+
try {
|
|
5739
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5740
|
+
const config = JSON.parse(content);
|
|
5741
|
+
if (config.memory || config.context || config.conversationHistory) {
|
|
5742
|
+
const hasIntegrity = config.memoryIntegrity || config.contextVerification ||
|
|
5743
|
+
config.memory?.signatureVerification || config.memory?.hashValidation;
|
|
5744
|
+
if (!hasIntegrity) {
|
|
5745
|
+
findings.push({
|
|
5746
|
+
checkId: 'MEM-002',
|
|
5747
|
+
name: 'No memory integrity verification',
|
|
5748
|
+
description: 'Agent configuration enables memory/context persistence without integrity verification. An attacker with file access could inject malicious context.',
|
|
5749
|
+
category: 'memory-poisoning',
|
|
5750
|
+
severity: 'medium',
|
|
5751
|
+
passed: false,
|
|
5752
|
+
message: `${cfgFile} enables memory persistence without integrity checks`,
|
|
5753
|
+
fixable: false,
|
|
5754
|
+
file: cfgFile,
|
|
5755
|
+
fix: 'Enable memory integrity verification: add hash validation or signature checks for persisted context.',
|
|
5756
|
+
});
|
|
5757
|
+
}
|
|
5758
|
+
}
|
|
5759
|
+
}
|
|
5760
|
+
catch { /* skip */ }
|
|
5761
|
+
}
|
|
5762
|
+
// MEM-003: Context window overflow risk
|
|
5763
|
+
// Check for agents that load large context without size limits
|
|
5764
|
+
for (const cfgFile of configFiles) {
|
|
5765
|
+
const filePath = path.join(targetDir, cfgFile);
|
|
5766
|
+
try {
|
|
5767
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5768
|
+
const config = JSON.parse(content);
|
|
5769
|
+
if (config.contextWindow || config.maxTokens || config.memory) {
|
|
5770
|
+
const hasLimits = config.maxContextSize || config.contextWindow?.maxSize ||
|
|
5771
|
+
config.memory?.maxEntries || config.memory?.maxSize;
|
|
5772
|
+
if (!hasLimits) {
|
|
5773
|
+
findings.push({
|
|
5774
|
+
checkId: 'MEM-003',
|
|
5775
|
+
name: 'No context size limits',
|
|
5776
|
+
description: 'Agent loads context/memory without size limits. An attacker could craft inputs that overflow the context window, pushing safety instructions out of scope.',
|
|
5777
|
+
category: 'memory-poisoning',
|
|
5778
|
+
severity: 'medium',
|
|
5779
|
+
passed: false,
|
|
5780
|
+
message: `${cfgFile} has no context size limits configured`,
|
|
5781
|
+
fixable: false,
|
|
5782
|
+
file: cfgFile,
|
|
5783
|
+
fix: 'Set explicit context size limits: maxContextSize, memory.maxEntries, or memory.maxSize.',
|
|
5784
|
+
});
|
|
5785
|
+
}
|
|
5786
|
+
}
|
|
5787
|
+
}
|
|
5788
|
+
catch { /* skip */ }
|
|
5789
|
+
}
|
|
5790
|
+
// MEM-004: Shared memory without isolation
|
|
5791
|
+
// Check for multi-agent setups with shared memory
|
|
5792
|
+
const multiAgentFiles = ['agents.json', 'orchestrator.json', 'multi-agent.json', '.agents'];
|
|
5793
|
+
for (const maFile of multiAgentFiles) {
|
|
5794
|
+
const filePath = path.join(targetDir, maFile);
|
|
5795
|
+
try {
|
|
5796
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5797
|
+
const config = JSON.parse(content);
|
|
5798
|
+
const agents = config.agents || config.workers || [];
|
|
5799
|
+
if (Array.isArray(agents) && agents.length > 1) {
|
|
5800
|
+
const sharedMem = config.sharedMemory || config.shared?.memory || config.commonContext;
|
|
5801
|
+
if (sharedMem) {
|
|
5802
|
+
const hasIsolation = sharedMem.isolation || sharedMem.sandboxed || sharedMem.perAgent;
|
|
5803
|
+
if (!hasIsolation) {
|
|
5804
|
+
findings.push({
|
|
5805
|
+
checkId: 'MEM-004',
|
|
5806
|
+
name: 'Shared memory without isolation',
|
|
5807
|
+
description: 'Multiple agents share memory without isolation boundaries. A compromised agent could poison the shared context to influence other agents.',
|
|
5808
|
+
category: 'memory-poisoning',
|
|
5809
|
+
severity: 'high',
|
|
5810
|
+
passed: false,
|
|
5811
|
+
message: `${maFile} configures shared memory for ${agents.length} agents without isolation`,
|
|
5812
|
+
fixable: false,
|
|
5813
|
+
file: maFile,
|
|
5814
|
+
fix: 'Enable memory isolation: set sharedMemory.isolation=true or use per-agent memory scopes.',
|
|
5815
|
+
});
|
|
5816
|
+
}
|
|
5817
|
+
}
|
|
5818
|
+
}
|
|
5819
|
+
}
|
|
5820
|
+
catch { /* skip */ }
|
|
5821
|
+
}
|
|
5822
|
+
// MEM-005: Conversation history injection
|
|
5823
|
+
// Check source files for patterns that build prompts from unvalidated history
|
|
5824
|
+
try {
|
|
5825
|
+
const srcDir = path.join(targetDir, 'src');
|
|
5826
|
+
const srcExists = await fs.access(srcDir).then(() => true).catch(() => false);
|
|
5827
|
+
if (srcExists) {
|
|
5828
|
+
const files = await this.walkDirectory(srcDir, ['.ts', '.js', '.py', '.mjs']);
|
|
5829
|
+
for (const file of files.slice(0, 50)) {
|
|
5830
|
+
try {
|
|
5831
|
+
const content = await fs.readFile(file, 'utf-8');
|
|
5832
|
+
const lines = content.split('\n');
|
|
5833
|
+
for (let i = 0; i < lines.length; i++) {
|
|
5834
|
+
const line = lines[i];
|
|
5835
|
+
// Detect direct concatenation of history into system prompts
|
|
5836
|
+
if ((line.includes('systemPrompt') || line.includes('system_prompt') || line.includes('system_message')) &&
|
|
5837
|
+
(line.includes('history') || line.includes('previousMessages') || line.includes('conversation'))) {
|
|
5838
|
+
if (!line.includes('sanitize') && !line.includes('validate') && !line.includes('filter')) {
|
|
5839
|
+
findings.push({
|
|
5840
|
+
checkId: 'MEM-005',
|
|
5841
|
+
name: 'Conversation history injection',
|
|
5842
|
+
description: 'System prompt includes unvalidated conversation history. An attacker could craft messages in history that inject instructions into the system prompt.',
|
|
5843
|
+
category: 'memory-poisoning',
|
|
5844
|
+
severity: 'high',
|
|
5845
|
+
passed: false,
|
|
5846
|
+
message: 'System prompt concatenates unvalidated conversation history',
|
|
5847
|
+
fixable: false,
|
|
5848
|
+
file: path.relative(targetDir, file),
|
|
5849
|
+
line: i + 1,
|
|
5850
|
+
fix: 'Sanitize conversation history before including in system prompts. Strip instruction-like patterns.',
|
|
5851
|
+
});
|
|
5852
|
+
break; // One finding per file
|
|
5853
|
+
}
|
|
5854
|
+
}
|
|
5855
|
+
}
|
|
5856
|
+
}
|
|
5857
|
+
catch { /* skip unreadable */ }
|
|
5858
|
+
}
|
|
5859
|
+
}
|
|
5860
|
+
}
|
|
5861
|
+
catch { /* skip */ }
|
|
5862
|
+
return findings;
|
|
5863
|
+
}
|
|
5864
|
+
/**
|
|
5865
|
+
* Check for RAG (Retrieval-Augmented Generation) poisoning risks
|
|
5866
|
+
* Detects patterns that could allow attackers to inject malicious content into RAG pipelines
|
|
5867
|
+
*/
|
|
5868
|
+
async checkRAGPoisoning(targetDir, _autoFix) {
|
|
5869
|
+
const findings = [];
|
|
5870
|
+
// RAG-001: Unvalidated retrieval sources
|
|
5871
|
+
const ragConfigFiles = ['rag.json', 'retrieval.json', 'vector-store.json', 'embeddings.json'];
|
|
5872
|
+
for (const ragFile of ragConfigFiles) {
|
|
5873
|
+
const filePath = path.join(targetDir, ragFile);
|
|
5874
|
+
try {
|
|
5875
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5876
|
+
const config = JSON.parse(content);
|
|
5877
|
+
const sources = config.sources || config.dataSources || config.indices || [];
|
|
5878
|
+
if (Array.isArray(sources)) {
|
|
5879
|
+
for (const source of sources) {
|
|
5880
|
+
const sourceUrl = source.url || source.endpoint || source.uri || '';
|
|
5881
|
+
if (sourceUrl && !source.verified && !source.trustedSource && !source.signatureCheck) {
|
|
5882
|
+
findings.push({
|
|
5883
|
+
checkId: 'RAG-001',
|
|
5884
|
+
name: 'Unvalidated RAG retrieval source',
|
|
5885
|
+
description: 'RAG pipeline retrieves from an unverified source. An attacker who controls the source could inject malicious content into agent responses.',
|
|
5886
|
+
category: 'rag-poisoning',
|
|
5887
|
+
severity: 'high',
|
|
5888
|
+
passed: false,
|
|
5889
|
+
message: `RAG source ${sourceUrl} has no verification or trust validation`,
|
|
5890
|
+
fixable: false,
|
|
5891
|
+
file: ragFile,
|
|
5892
|
+
fix: 'Add source verification: set trustedSource=true only for validated endpoints, or enable signatureCheck.',
|
|
5893
|
+
});
|
|
5894
|
+
}
|
|
5895
|
+
}
|
|
5896
|
+
}
|
|
5897
|
+
}
|
|
5898
|
+
catch { /* skip */ }
|
|
5899
|
+
}
|
|
5900
|
+
// RAG-002: No content sanitization in retrieval pipeline
|
|
5901
|
+
try {
|
|
5902
|
+
const srcDir = path.join(targetDir, 'src');
|
|
5903
|
+
const srcExists = await fs.access(srcDir).then(() => true).catch(() => false);
|
|
5904
|
+
if (srcExists) {
|
|
5905
|
+
const files = await this.walkDirectory(srcDir, ['.ts', '.js', '.py', '.mjs']);
|
|
5906
|
+
for (const file of files.slice(0, 50)) {
|
|
5907
|
+
try {
|
|
5908
|
+
const content = await fs.readFile(file, 'utf-8');
|
|
5909
|
+
const lines = content.split('\n');
|
|
5910
|
+
for (let i = 0; i < lines.length; i++) {
|
|
5911
|
+
const line = lines[i];
|
|
5912
|
+
if ((line.includes('retrieve') || line.includes('vectorSearch') || line.includes('similarity_search') ||
|
|
5913
|
+
line.includes('query_engine')) &&
|
|
5914
|
+
(line.includes('context') || line.includes('prompt') || line.includes('augment'))) {
|
|
5915
|
+
// Check surrounding lines for sanitization
|
|
5916
|
+
const surroundingLines = lines.slice(Math.max(0, i - 3), Math.min(lines.length, i + 4)).join(' ');
|
|
5917
|
+
if (!surroundingLines.includes('sanitize') && !surroundingLines.includes('validate') &&
|
|
5918
|
+
!surroundingLines.includes('filter') && !surroundingLines.includes('escape')) {
|
|
5919
|
+
findings.push({
|
|
5920
|
+
checkId: 'RAG-002',
|
|
5921
|
+
name: 'No RAG content sanitization',
|
|
5922
|
+
description: 'Retrieved content is passed to the LLM without sanitization. Poisoned documents could inject instructions into the prompt.',
|
|
5923
|
+
category: 'rag-poisoning',
|
|
5924
|
+
severity: 'high',
|
|
5925
|
+
passed: false,
|
|
5926
|
+
message: 'Retrieved content flows to LLM without sanitization',
|
|
5927
|
+
fixable: false,
|
|
5928
|
+
file: path.relative(targetDir, file),
|
|
5929
|
+
line: i + 1,
|
|
5930
|
+
fix: 'Sanitize retrieved content before including in prompts. Strip instruction-like patterns and markup.',
|
|
5931
|
+
});
|
|
5932
|
+
break;
|
|
5933
|
+
}
|
|
5934
|
+
}
|
|
5935
|
+
}
|
|
5936
|
+
}
|
|
5937
|
+
catch { /* skip */ }
|
|
5938
|
+
}
|
|
5939
|
+
}
|
|
5940
|
+
}
|
|
5941
|
+
catch { /* skip */ }
|
|
5942
|
+
// RAG-003: Public-writable vector store
|
|
5943
|
+
for (const ragFile of ragConfigFiles) {
|
|
5944
|
+
const filePath = path.join(targetDir, ragFile);
|
|
5945
|
+
try {
|
|
5946
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5947
|
+
const config = JSON.parse(content);
|
|
5948
|
+
if (config.writeAccess === 'public' || config.allowPublicIngestion || config.openIngestion) {
|
|
5949
|
+
findings.push({
|
|
5950
|
+
checkId: 'RAG-003',
|
|
5951
|
+
name: 'Public-writable vector store',
|
|
5952
|
+
description: 'Vector store allows public write access. An attacker could insert poisoned documents that will be retrieved and influence agent responses.',
|
|
5953
|
+
category: 'rag-poisoning',
|
|
5954
|
+
severity: 'critical',
|
|
5955
|
+
passed: false,
|
|
5956
|
+
message: `${ragFile} allows public write access to vector store`,
|
|
5957
|
+
fixable: false,
|
|
5958
|
+
file: ragFile,
|
|
5959
|
+
fix: 'Restrict vector store write access. Require authentication for document ingestion.',
|
|
5960
|
+
});
|
|
5961
|
+
}
|
|
5962
|
+
}
|
|
5963
|
+
catch { /* skip */ }
|
|
5964
|
+
}
|
|
5965
|
+
// RAG-004: No provenance tracking on retrieved content
|
|
5966
|
+
for (const ragFile of ragConfigFiles) {
|
|
5967
|
+
const filePath = path.join(targetDir, ragFile);
|
|
5968
|
+
try {
|
|
5969
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
5970
|
+
const config = JSON.parse(content);
|
|
5971
|
+
if (config.sources || config.dataSources || config.indices) {
|
|
5972
|
+
if (!config.provenance && !config.sourceTracking && !config.metadata?.trackSource) {
|
|
5973
|
+
findings.push({
|
|
5974
|
+
checkId: 'RAG-004',
|
|
5975
|
+
name: 'No provenance tracking',
|
|
5976
|
+
description: 'RAG pipeline does not track provenance of retrieved content. Without provenance, poisoned content cannot be traced back to its source.',
|
|
5977
|
+
category: 'rag-poisoning',
|
|
5978
|
+
severity: 'medium',
|
|
5979
|
+
passed: false,
|
|
5980
|
+
message: `${ragFile} has no content provenance tracking`,
|
|
5981
|
+
fixable: false,
|
|
5982
|
+
file: ragFile,
|
|
5983
|
+
fix: 'Enable provenance tracking: set sourceTracking=true to track which source each document came from.',
|
|
5984
|
+
});
|
|
5985
|
+
}
|
|
5986
|
+
}
|
|
5987
|
+
}
|
|
5988
|
+
catch { /* skip */ }
|
|
5989
|
+
}
|
|
5990
|
+
return findings;
|
|
5991
|
+
}
|
|
5992
|
+
/**
|
|
5993
|
+
* Check for agent identity spoofing risks
|
|
5994
|
+
* Detects missing or weak agent identity verification
|
|
5995
|
+
*/
|
|
5996
|
+
async checkAgentIdentity(targetDir, _autoFix) {
|
|
5997
|
+
const findings = [];
|
|
5998
|
+
// AIM-001: No agent identity declaration
|
|
5999
|
+
const identityFiles = ['agent-card.json', 'agent.json', '.well-known/agent.json', 'aim.json'];
|
|
6000
|
+
let hasIdentity = false;
|
|
6001
|
+
for (const idFile of identityFiles) {
|
|
6002
|
+
const filePath = path.join(targetDir, idFile);
|
|
6003
|
+
try {
|
|
6004
|
+
await fs.access(filePath);
|
|
6005
|
+
hasIdentity = true;
|
|
6006
|
+
const content = await fs.readFile(filePath, 'utf-8');
|
|
6007
|
+
const config = JSON.parse(content);
|
|
6008
|
+
// AIM-002: Identity without cryptographic binding
|
|
6009
|
+
if (config.agentId || config.name || config.identity) {
|
|
6010
|
+
if (!config.publicKey && !config.keyId && !config.jwk && !config.x509) {
|
|
6011
|
+
findings.push({
|
|
6012
|
+
checkId: 'AIM-002',
|
|
6013
|
+
name: 'Identity without cryptographic binding',
|
|
6014
|
+
description: 'Agent declares an identity but has no cryptographic key binding. Any agent could claim this identity without proof.',
|
|
6015
|
+
category: 'identity-spoofing',
|
|
6016
|
+
severity: 'high',
|
|
6017
|
+
passed: false,
|
|
6018
|
+
message: `${idFile} declares identity without cryptographic key binding`,
|
|
6019
|
+
fixable: false,
|
|
6020
|
+
file: idFile,
|
|
6021
|
+
fix: 'Bind agent identity to a cryptographic key pair. Add publicKey or keyId field to the agent card.',
|
|
6022
|
+
});
|
|
6023
|
+
}
|
|
6024
|
+
}
|
|
6025
|
+
// AIM-003: No identity verification endpoint
|
|
6026
|
+
if (config.agentId || config.identity) {
|
|
6027
|
+
if (!config.verificationEndpoint && !config.oidcIssuer && !config.wellKnown) {
|
|
6028
|
+
findings.push({
|
|
6029
|
+
checkId: 'AIM-003',
|
|
6030
|
+
name: 'No identity verification endpoint',
|
|
6031
|
+
description: 'Agent identity has no verification endpoint. Other agents cannot verify this agent\'s identity claims.',
|
|
6032
|
+
category: 'identity-spoofing',
|
|
6033
|
+
severity: 'medium',
|
|
6034
|
+
passed: false,
|
|
6035
|
+
message: `${idFile} has no identity verification endpoint (verificationEndpoint, oidcIssuer, or wellKnown)`,
|
|
6036
|
+
fixable: false,
|
|
6037
|
+
file: idFile,
|
|
6038
|
+
fix: 'Add a verification endpoint: verificationEndpoint URL or oidcIssuer for federated identity.',
|
|
6039
|
+
});
|
|
6040
|
+
}
|
|
6041
|
+
}
|
|
6042
|
+
}
|
|
6043
|
+
catch { /* skip */ }
|
|
6044
|
+
}
|
|
6045
|
+
// Also check package.json or A2A agent card
|
|
6046
|
+
if (!hasIdentity) {
|
|
6047
|
+
try {
|
|
6048
|
+
const pkgPath = path.join(targetDir, 'package.json');
|
|
6049
|
+
const pkgContent = await fs.readFile(pkgPath, 'utf-8');
|
|
6050
|
+
const pkg = JSON.parse(pkgContent);
|
|
6051
|
+
if (pkg.agentCard || pkg.a2a || pkg.keywords?.some((k) => k.includes('agent') || k.includes('a2a'))) {
|
|
6052
|
+
findings.push({
|
|
6053
|
+
checkId: 'AIM-001',
|
|
6054
|
+
name: 'No agent identity declaration',
|
|
6055
|
+
description: 'Project appears to be an AI agent but has no formal identity declaration. Without identity, the agent cannot be verified by other agents or registries.',
|
|
6056
|
+
category: 'identity-spoofing',
|
|
6057
|
+
severity: 'medium',
|
|
6058
|
+
passed: false,
|
|
6059
|
+
message: 'Agent project has no identity declaration file (agent-card.json, agent.json, aim.json)',
|
|
6060
|
+
fixable: false,
|
|
6061
|
+
file: 'package.json',
|
|
6062
|
+
fix: 'Create an agent-card.json with agentId, name, publicKey, and capabilities fields.',
|
|
6063
|
+
});
|
|
6064
|
+
}
|
|
6065
|
+
}
|
|
6066
|
+
catch { /* skip */ }
|
|
6067
|
+
}
|
|
6068
|
+
return findings;
|
|
6069
|
+
}
|
|
6070
|
+
/**
|
|
6071
|
+
* Check for agent DNA/behavioral fingerprint forgery risks
|
|
6072
|
+
* Detects integrity issues with agent behavioral profiles
|
|
6073
|
+
*/
|
|
6074
|
+
async checkAgentDNA(targetDir, _autoFix) {
|
|
6075
|
+
const findings = [];
|
|
6076
|
+
// DNA-001: No behavioral fingerprint
|
|
6077
|
+
const dnaFiles = ['agent-dna.json', '.agent-dna', 'behavioral-profile.json'];
|
|
6078
|
+
const soulFileNames = ['SOUL.md', 'system-prompt.md', '.cursorrules', 'CLAUDE.md'];
|
|
6079
|
+
let hasDna = false;
|
|
6080
|
+
let hasSoul = false;
|
|
6081
|
+
let foundSoulFile = '';
|
|
6082
|
+
for (const dnaFile of dnaFiles) {
|
|
6083
|
+
try {
|
|
6084
|
+
await fs.access(path.join(targetDir, dnaFile));
|
|
6085
|
+
hasDna = true;
|
|
6086
|
+
const content = await fs.readFile(path.join(targetDir, dnaFile), 'utf-8');
|
|
6087
|
+
const config = JSON.parse(content);
|
|
6088
|
+
// DNA-002: Unsigned behavioral profile
|
|
6089
|
+
if (!config.signature && !config.hash && !config.contentHash) {
|
|
6090
|
+
findings.push({
|
|
6091
|
+
checkId: 'DNA-002',
|
|
6092
|
+
name: 'Unsigned behavioral profile',
|
|
6093
|
+
description: 'Agent DNA/behavioral profile exists but is not signed. An attacker could modify the profile to change agent behavior without detection.',
|
|
6094
|
+
category: 'agent-dna',
|
|
6095
|
+
severity: 'high',
|
|
6096
|
+
passed: false,
|
|
6097
|
+
message: `${dnaFile} has no signature or content hash`,
|
|
6098
|
+
fixable: false,
|
|
6099
|
+
file: dnaFile,
|
|
6100
|
+
fix: 'Sign the behavioral profile: add a contentHash (SHA-256) or signature field verified at startup.',
|
|
6101
|
+
});
|
|
6102
|
+
}
|
|
6103
|
+
// DNA-003: No behavioral drift detection
|
|
6104
|
+
if (!config.baselineHash && !config.driftThreshold && !config.monitoringEnabled) {
|
|
6105
|
+
findings.push({
|
|
6106
|
+
checkId: 'DNA-003',
|
|
6107
|
+
name: 'No behavioral drift detection',
|
|
6108
|
+
description: 'Agent DNA has no drift detection configured. Gradual behavioral changes would go undetected.',
|
|
6109
|
+
category: 'agent-dna',
|
|
6110
|
+
severity: 'medium',
|
|
6111
|
+
passed: false,
|
|
6112
|
+
message: `${dnaFile} has no behavioral drift detection (baselineHash, driftThreshold, monitoring)`,
|
|
6113
|
+
fixable: false,
|
|
6114
|
+
file: dnaFile,
|
|
6115
|
+
fix: 'Enable behavioral drift detection: set baselineHash and driftThreshold for continuous monitoring.',
|
|
6116
|
+
});
|
|
6117
|
+
}
|
|
6118
|
+
}
|
|
6119
|
+
catch { /* skip */ }
|
|
6120
|
+
}
|
|
6121
|
+
for (const soulFile of soulFileNames) {
|
|
6122
|
+
try {
|
|
6123
|
+
await fs.access(path.join(targetDir, soulFile));
|
|
6124
|
+
hasSoul = true;
|
|
6125
|
+
if (!foundSoulFile)
|
|
6126
|
+
foundSoulFile = soulFile;
|
|
6127
|
+
}
|
|
6128
|
+
catch { /* skip */ }
|
|
6129
|
+
}
|
|
6130
|
+
// If agent has a SOUL/system prompt but no DNA fingerprint
|
|
6131
|
+
if (hasSoul && !hasDna) {
|
|
6132
|
+
// Check if this is actually an agent project
|
|
6133
|
+
try {
|
|
6134
|
+
const pkgPath = path.join(targetDir, 'package.json');
|
|
6135
|
+
const pkgContent = await fs.readFile(pkgPath, 'utf-8');
|
|
6136
|
+
const pkg = JSON.parse(pkgContent);
|
|
6137
|
+
if (pkg.agentCard || pkg.a2a || pkg.keywords?.some((k) => k.includes('agent'))) {
|
|
6138
|
+
findings.push({
|
|
6139
|
+
checkId: 'DNA-001',
|
|
6140
|
+
name: 'No behavioral fingerprint',
|
|
6141
|
+
description: 'Agent has behavioral instructions (SOUL.md/system prompt) but no behavioral fingerprint. Without a fingerprint, behavioral integrity cannot be verified.',
|
|
6142
|
+
category: 'agent-dna',
|
|
6143
|
+
severity: 'medium',
|
|
6144
|
+
passed: false,
|
|
6145
|
+
message: 'Agent has behavioral instructions but no DNA fingerprint file',
|
|
6146
|
+
fixable: false,
|
|
6147
|
+
file: foundSoulFile || 'SOUL.md',
|
|
6148
|
+
fix: 'Create agent-dna.json with contentHash of SOUL.md, baselineHash, and signature for integrity verification.',
|
|
6149
|
+
});
|
|
6150
|
+
}
|
|
6151
|
+
}
|
|
6152
|
+
catch { /* skip */ }
|
|
6153
|
+
}
|
|
6154
|
+
return findings;
|
|
6155
|
+
}
|
|
6156
|
+
/**
|
|
6157
|
+
* Check for skill-based memory manipulation risks
|
|
6158
|
+
*/
|
|
6159
|
+
async checkSkillMemory(targetDir, _autoFix) {
|
|
6160
|
+
const findings = [];
|
|
6161
|
+
// SKILL-MEM-001: Skills with memory write access
|
|
6162
|
+
// Check SKILL.md for memory manipulation patterns
|
|
6163
|
+
try {
|
|
6164
|
+
const skillMdPath = path.join(targetDir, 'SKILL.md');
|
|
6165
|
+
const content = await fs.readFile(skillMdPath, 'utf-8');
|
|
6166
|
+
const lowerContent = content.toLowerCase();
|
|
6167
|
+
if ((lowerContent.includes('memory') || lowerContent.includes('context') || lowerContent.includes('state')) &&
|
|
6168
|
+
(lowerContent.includes('write') || lowerContent.includes('modify') || lowerContent.includes('update') || lowerContent.includes('set'))) {
|
|
6169
|
+
if (!lowerContent.includes('read-only') && !lowerContent.includes('readonly') && !lowerContent.includes('immutable')) {
|
|
6170
|
+
findings.push({
|
|
6171
|
+
checkId: 'SKILL-MEM-001',
|
|
6172
|
+
name: 'Skill with unrestricted memory access',
|
|
6173
|
+
description: 'A skill declares memory/context write capabilities without explicit restrictions. A malicious skill could manipulate agent memory to alter future behavior.',
|
|
6174
|
+
category: 'skill-memory',
|
|
6175
|
+
severity: 'high',
|
|
6176
|
+
passed: false,
|
|
6177
|
+
message: 'SKILL.md declares memory write access without read-only constraints',
|
|
6178
|
+
fixable: false,
|
|
6179
|
+
file: 'SKILL.md',
|
|
6180
|
+
fix: 'Restrict skill memory access: declare explicit read-only or scoped-write permissions in SKILL.md.',
|
|
6181
|
+
});
|
|
6182
|
+
}
|
|
6183
|
+
}
|
|
6184
|
+
}
|
|
6185
|
+
catch { /* no SKILL.md */ }
|
|
6186
|
+
// Check skills directory for memory manipulation patterns
|
|
6187
|
+
try {
|
|
6188
|
+
const skillsDir = path.join(targetDir, 'skills');
|
|
6189
|
+
const dirExists = await fs.access(skillsDir).then(() => true).catch(() => false);
|
|
6190
|
+
if (dirExists) {
|
|
6191
|
+
const files = await this.walkDirectory(skillsDir, ['.ts', '.js', '.py', '.md']);
|
|
6192
|
+
for (const file of files.slice(0, 30)) {
|
|
6193
|
+
try {
|
|
6194
|
+
const content = await fs.readFile(file, 'utf-8');
|
|
6195
|
+
if ((content.includes('writeMemory') || content.includes('setContext') ||
|
|
6196
|
+
content.includes('updateState') || content.includes('persistMemory')) &&
|
|
6197
|
+
!content.includes('readOnly') && !content.includes('read_only')) {
|
|
6198
|
+
findings.push({
|
|
6199
|
+
checkId: 'SKILL-MEM-001',
|
|
6200
|
+
name: 'Skill with unrestricted memory access',
|
|
6201
|
+
description: 'Skill file contains memory write operations without read-only guards.',
|
|
6202
|
+
category: 'skill-memory',
|
|
6203
|
+
severity: 'high',
|
|
6204
|
+
passed: false,
|
|
6205
|
+
message: 'Skill writes to agent memory without restrictions',
|
|
6206
|
+
fixable: false,
|
|
6207
|
+
file: path.relative(targetDir, file),
|
|
6208
|
+
fix: 'Add read-only guards or scope memory writes to skill-specific namespaces.',
|
|
6209
|
+
});
|
|
6210
|
+
break; // One per skill dir
|
|
6211
|
+
}
|
|
6212
|
+
}
|
|
6213
|
+
catch { /* skip */ }
|
|
6214
|
+
}
|
|
6215
|
+
}
|
|
6216
|
+
}
|
|
6217
|
+
catch { /* skip */ }
|
|
6218
|
+
return findings;
|
|
6219
|
+
}
|
|
6220
|
+
/**
|
|
6221
|
+
* Check for Unicode steganography attacks (GlassWorm detection)
|
|
6222
|
+
* Detects invisible codepoints, decoder patterns, eval on empty strings,
|
|
6223
|
+
* and tag character block presence in source files.
|
|
6224
|
+
*/
|
|
6225
|
+
async checkUnicodeSteganography(targetDir, _autoFix) {
|
|
6226
|
+
const findings = [];
|
|
6227
|
+
const sourceFiles = await this.findSourceFiles(targetDir, targetDir);
|
|
6228
|
+
for (const filePath of sourceFiles) {
|
|
6229
|
+
const relativePath = path.relative(targetDir, filePath);
|
|
6230
|
+
let rawBuffer;
|
|
6231
|
+
try {
|
|
6232
|
+
rawBuffer = await fs.readFile(filePath);
|
|
6233
|
+
}
|
|
6234
|
+
catch {
|
|
6235
|
+
continue;
|
|
6236
|
+
}
|
|
6237
|
+
// Skip files larger than MAX_FILE_SIZE
|
|
6238
|
+
if (rawBuffer.length > MAX_FILE_SIZE)
|
|
6239
|
+
continue;
|
|
6240
|
+
// UNICODE-STEGO-001: Invisible Codepoint Detection
|
|
6241
|
+
// Scan for variation selectors U+FE00-FE0F (UTF-8: EF B8 80-8F)
|
|
6242
|
+
// and tag characters U+E0100-E01EF (UTF-8: F3 A0 84 80 - F3 A0 87 AF)
|
|
6243
|
+
let hasVariationSelectors = false;
|
|
6244
|
+
let variationSelectorLine = 1;
|
|
6245
|
+
let hasTagCharsIn001 = false;
|
|
6246
|
+
let tagCharLine001 = 1;
|
|
6247
|
+
let currentLine = 1;
|
|
6248
|
+
for (let i = 0; i < rawBuffer.length; i++) {
|
|
6249
|
+
if (rawBuffer[i] === 0x0A) {
|
|
6250
|
+
currentLine++;
|
|
6251
|
+
continue;
|
|
6252
|
+
}
|
|
6253
|
+
// Variation selectors: EF B8 80-8F
|
|
6254
|
+
if (rawBuffer[i] === 0xEF &&
|
|
6255
|
+
i + 2 < rawBuffer.length &&
|
|
6256
|
+
rawBuffer[i + 1] === 0xB8 &&
|
|
6257
|
+
rawBuffer[i + 2] >= 0x80 &&
|
|
6258
|
+
rawBuffer[i + 2] <= 0x8F) {
|
|
6259
|
+
if (!hasVariationSelectors) {
|
|
6260
|
+
hasVariationSelectors = true;
|
|
6261
|
+
variationSelectorLine = currentLine;
|
|
6262
|
+
}
|
|
6263
|
+
}
|
|
6264
|
+
// Tag characters in U+E0100-E01EF: F3 A0 84 80 through F3 A0 87 AF
|
|
6265
|
+
if (rawBuffer[i] === 0xF3 &&
|
|
6266
|
+
i + 2 < rawBuffer.length &&
|
|
6267
|
+
rawBuffer[i + 1] === 0xA0 &&
|
|
6268
|
+
rawBuffer[i + 2] >= 0x84 &&
|
|
6269
|
+
rawBuffer[i + 2] <= 0x87) {
|
|
6270
|
+
if (!hasTagCharsIn001) {
|
|
6271
|
+
hasTagCharsIn001 = true;
|
|
6272
|
+
tagCharLine001 = currentLine;
|
|
6273
|
+
}
|
|
6274
|
+
}
|
|
6275
|
+
}
|
|
6276
|
+
if (hasVariationSelectors || hasTagCharsIn001) {
|
|
6277
|
+
const detectedTypes = [];
|
|
6278
|
+
if (hasVariationSelectors)
|
|
6279
|
+
detectedTypes.push('variation selectors (U+FE00-FE0F)');
|
|
6280
|
+
if (hasTagCharsIn001)
|
|
6281
|
+
detectedTypes.push('tag characters (U+E0100-E01EF)');
|
|
6282
|
+
findings.push({
|
|
6283
|
+
checkId: 'UNICODE-STEGO-001',
|
|
6284
|
+
name: 'Invisible Unicode Codepoints Detected',
|
|
6285
|
+
description: 'Source file contains invisible Unicode codepoints that can hide malicious payloads (GlassWorm attack vector)',
|
|
6286
|
+
category: 'unicode-stego',
|
|
6287
|
+
severity: 'critical',
|
|
6288
|
+
passed: false,
|
|
6289
|
+
message: `Found ${detectedTypes.join(' and ')} in ${relativePath}`,
|
|
6290
|
+
file: relativePath,
|
|
6291
|
+
line: hasVariationSelectors ? variationSelectorLine : tagCharLine001,
|
|
6292
|
+
fixable: false,
|
|
6293
|
+
fix: 'Inspect the file with a hex editor (e.g., xxd) to identify and remove invisible Unicode codepoints. Run: xxd ' + relativePath + ' | grep -E "fe0[0-9a-f]|f3a0"',
|
|
6294
|
+
});
|
|
6295
|
+
}
|
|
6296
|
+
// UNICODE-STEGO-002: GlassWorm Decoder Pattern
|
|
6297
|
+
// Detect .codePointAt( combined with hex literals in the variation selector or tag range
|
|
6298
|
+
const content = rawBuffer.toString('utf-8');
|
|
6299
|
+
const lines = content.split('\n');
|
|
6300
|
+
let hasCodePointAt = false;
|
|
6301
|
+
let hasHexLiteral = false;
|
|
6302
|
+
let decoderLine = 1;
|
|
6303
|
+
const hexPattern = /0x(?:FE0[0-9A-Fa-f]|fe0[0-9a-f]|E010[0-9A-Fa-f]|e010[0-9a-f]|E01[0-9A-Ea-e][0-9A-Fa-f]|e01[0-9a-e][0-9a-f])/;
|
|
6304
|
+
for (let i = 0; i < lines.length; i++) {
|
|
6305
|
+
const line = lines[i];
|
|
6306
|
+
if (line.length > MAX_LINE_LENGTH)
|
|
6307
|
+
continue;
|
|
6308
|
+
if (line.includes('.codePointAt(')) {
|
|
6309
|
+
hasCodePointAt = true;
|
|
6310
|
+
if (!decoderLine || decoderLine === 1)
|
|
6311
|
+
decoderLine = i + 1;
|
|
6312
|
+
}
|
|
6313
|
+
if (hexPattern.test(line)) {
|
|
6314
|
+
hasHexLiteral = true;
|
|
6315
|
+
}
|
|
6316
|
+
}
|
|
6317
|
+
if (hasCodePointAt && hasHexLiteral) {
|
|
6318
|
+
findings.push({
|
|
6319
|
+
checkId: 'UNICODE-STEGO-002',
|
|
6320
|
+
name: 'GlassWorm Decoder Pattern Detected',
|
|
6321
|
+
description: 'Source file contains .codePointAt() usage combined with Unicode variation selector or tag character hex literals - this is the decoder half of a GlassWorm attack',
|
|
6322
|
+
category: 'unicode-stego',
|
|
6323
|
+
severity: 'critical',
|
|
6324
|
+
passed: false,
|
|
6325
|
+
message: `Found GlassWorm decoder pattern (.codePointAt + hex range literals) in ${relativePath}`,
|
|
6326
|
+
file: relativePath,
|
|
6327
|
+
line: decoderLine,
|
|
6328
|
+
fixable: false,
|
|
6329
|
+
fix: 'Review the file for suspicious .codePointAt() logic that decodes hidden data from variation selectors (0xFE00-0xFE0F) or tag characters (0xE0100-0xE01EF). Remove the decoder function and audit the file for tampering.',
|
|
6330
|
+
});
|
|
6331
|
+
}
|
|
6332
|
+
// UNICODE-STEGO-003: Eval on Empty String
|
|
6333
|
+
// Find eval() or Function() calls where the string argument has few visible chars but many bytes
|
|
6334
|
+
const evalPattern = /(?:eval|Function)\s*\(\s*(['"`])([\s\S]*?)\1\s*\)/g;
|
|
6335
|
+
let evalMatch;
|
|
6336
|
+
while ((evalMatch = evalPattern.exec(content)) !== null) {
|
|
6337
|
+
const matchedStr = evalMatch[2];
|
|
6338
|
+
// Count truly visible characters by excluding invisible Unicode ranges:
|
|
6339
|
+
// - Control characters (U+0000-001F, U+007F-009F)
|
|
6340
|
+
// - Variation selectors (U+FE00-FE0F)
|
|
6341
|
+
// - Zero-width characters (U+200B-200F, U+2060, U+FEFF)
|
|
6342
|
+
// - Tag characters (U+E0000-E01EF)
|
|
6343
|
+
// - Combining marks and other invisible codepoints
|
|
6344
|
+
let visibleChars = 0;
|
|
6345
|
+
for (const ch of matchedStr) {
|
|
6346
|
+
const cp = ch.codePointAt(0);
|
|
6347
|
+
if (cp <= 0x1F)
|
|
6348
|
+
continue; // C0 controls
|
|
6349
|
+
if (cp >= 0x7F && cp <= 0x9F)
|
|
6350
|
+
continue; // C1 controls
|
|
6351
|
+
if (cp >= 0x200B && cp <= 0x200F)
|
|
6352
|
+
continue; // zero-width chars
|
|
6353
|
+
if (cp === 0x2060 || cp === 0xFEFF)
|
|
6354
|
+
continue; // word joiner, BOM
|
|
6355
|
+
if (cp >= 0xFE00 && cp <= 0xFE0F)
|
|
6356
|
+
continue; // variation selectors
|
|
6357
|
+
if (cp >= 0xE0000 && cp <= 0xE01EF)
|
|
6358
|
+
continue; // tag characters
|
|
6359
|
+
if (cp >= 0xE0100 && cp <= 0xE01EF)
|
|
6360
|
+
continue; // variation selector supplement
|
|
6361
|
+
visibleChars++;
|
|
6362
|
+
}
|
|
6363
|
+
const byteLength = Buffer.byteLength(matchedStr, 'utf-8');
|
|
6364
|
+
if (visibleChars < 5 && byteLength > 100) {
|
|
6365
|
+
// Find the line number
|
|
6366
|
+
const offset = evalMatch.index;
|
|
6367
|
+
let evalLine = 1;
|
|
6368
|
+
for (let j = 0; j < offset && j < content.length; j++) {
|
|
6369
|
+
if (content[j] === '\n')
|
|
6370
|
+
evalLine++;
|
|
6371
|
+
}
|
|
6372
|
+
findings.push({
|
|
6373
|
+
checkId: 'UNICODE-STEGO-003',
|
|
6374
|
+
name: 'Eval on String with Hidden Payload',
|
|
6375
|
+
description: 'eval() or Function() is called with a string that has very few visible characters but a large byte footprint - indicates invisible Unicode payload',
|
|
6376
|
+
category: 'unicode-stego',
|
|
6377
|
+
severity: 'critical',
|
|
6378
|
+
passed: false,
|
|
6379
|
+
message: `Found eval/Function with ${visibleChars} visible chars but ${byteLength} bytes in ${relativePath}`,
|
|
6380
|
+
file: relativePath,
|
|
6381
|
+
line: evalLine,
|
|
6382
|
+
fixable: false,
|
|
6383
|
+
fix: 'Remove the eval/Function call and inspect the string argument with a hex editor. The string likely contains invisible Unicode characters encoding a malicious payload. Run: node -e "const fs=require(\'fs\'); const s=fs.readFileSync(\'' + relativePath + '\',\'utf8\'); console.log([...s].filter(c=>c.codePointAt(0)>0x200).map(c=>c.codePointAt(0).toString(16)))"',
|
|
6384
|
+
});
|
|
6385
|
+
break; // One finding per file
|
|
6386
|
+
}
|
|
6387
|
+
}
|
|
6388
|
+
// UNICODE-STEGO-004: Tag Character Block Presence
|
|
6389
|
+
// Scan for any U+E0000-U+E01EF characters (broader than 001, covers entire tag block)
|
|
6390
|
+
// UTF-8 encoding: F3 A0 80 80 through F3 A0 87 AF
|
|
6391
|
+
let hasTagBlock = false;
|
|
6392
|
+
let tagBlockLine = 1;
|
|
6393
|
+
currentLine = 1;
|
|
6394
|
+
for (let i = 0; i < rawBuffer.length; i++) {
|
|
6395
|
+
if (rawBuffer[i] === 0x0A) {
|
|
6396
|
+
currentLine++;
|
|
6397
|
+
continue;
|
|
6398
|
+
}
|
|
6399
|
+
if (rawBuffer[i] === 0xF3 &&
|
|
6400
|
+
i + 3 < rawBuffer.length &&
|
|
6401
|
+
rawBuffer[i + 1] === 0xA0 &&
|
|
6402
|
+
rawBuffer[i + 2] >= 0x80 &&
|
|
6403
|
+
rawBuffer[i + 2] <= 0x87) {
|
|
6404
|
+
hasTagBlock = true;
|
|
6405
|
+
tagBlockLine = currentLine;
|
|
6406
|
+
break;
|
|
6407
|
+
}
|
|
6408
|
+
}
|
|
6409
|
+
if (hasTagBlock) {
|
|
6410
|
+
// Only add UNICODE-STEGO-004 if we did not already flag tag chars in 001
|
|
6411
|
+
// (004 is broader - covers U+E0000-U+E01EF, 001 only covers U+E0100-E01EF)
|
|
6412
|
+
const already001 = findings.some((f) => f.checkId === 'UNICODE-STEGO-001' && f.file === relativePath);
|
|
6413
|
+
if (!already001) {
|
|
6414
|
+
findings.push({
|
|
6415
|
+
checkId: 'UNICODE-STEGO-004',
|
|
6416
|
+
name: 'Unicode Tag Character Block Detected',
|
|
6417
|
+
description: 'Source file contains characters from the Unicode Tag block (U+E0000-U+E01EF) which have no visible rendering and can be used to hide data',
|
|
6418
|
+
category: 'unicode-stego',
|
|
6419
|
+
severity: 'high',
|
|
6420
|
+
passed: false,
|
|
6421
|
+
message: `Found Unicode tag block characters in ${relativePath}`,
|
|
6422
|
+
file: relativePath,
|
|
6423
|
+
line: tagBlockLine,
|
|
6424
|
+
fixable: false,
|
|
6425
|
+
fix: 'Inspect the file with a hex editor to identify tag block characters (byte sequence starting with F3 A0). These characters are invisible and have no legitimate use in source code. Run: xxd ' + relativePath + ' | grep "f3a0"',
|
|
6426
|
+
});
|
|
6427
|
+
}
|
|
6428
|
+
}
|
|
6429
|
+
}
|
|
6430
|
+
return findings;
|
|
6431
|
+
}
|
|
5455
6432
|
}
|
|
5456
6433
|
// CommonJS export of the scanner class (compiled output of this module).
exports.HardeningScanner = HardeningScanner;
|
|
5457
6434
|
// Files that may be created or modified during auto-fix
|