white-hat-scanner 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -0
- package/dist/analyzer.js +852 -0
- package/dist/contest.js +144 -0
- package/dist/disclosure.js +85 -0
- package/dist/discovery.js +260 -0
- package/dist/index.js +88 -0
- package/dist/notifier.js +51 -0
- package/dist/redis.js +36 -0
- package/dist/scorer.js +33 -0
- package/dist/submission.js +103 -0
- package/dist/test/smoke.js +511 -0
- package/package.json +23 -0
- package/research/bounty-economics.md +145 -0
- package/research/tooling-landscape.md +216 -0
- package/research/vuln-pattern-library.md +401 -0
- package/src/analyzer.ts +974 -0
- package/src/contest.ts +172 -0
- package/src/disclosure.ts +111 -0
- package/src/discovery.ts +297 -0
- package/src/index.ts +105 -0
- package/src/notifier.ts +58 -0
- package/src/redis.ts +31 -0
- package/src/scorer.ts +46 -0
- package/src/submission.ts +124 -0
- package/src/test/smoke.ts +457 -0
- package/system/architecture.md +488 -0
- package/system/scanner-mvp.md +305 -0
- package/targets/active-bounty-programs.md +111 -0
- package/tsconfig.json +15 -0
package/dist/contest.js
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Code4rena and Sherlock contest calendar scraper.
|
|
4
|
+
*
|
|
5
|
+
* Fetches recently-created audit contest repos from `code-423n4` and `sherlock-audit`
|
|
6
|
+
* GitHub orgs. Active audit contests are live protocols with known codebases — ideal
|
|
7
|
+
* scan targets because the code is publicly accessible and the protocol is actively
|
|
8
|
+
* seeking security review.
|
|
9
|
+
*
|
|
10
|
+
* The contest repo itself (or the protocol code it mirrors) is used as the github field
|
|
11
|
+
* so our analyzer can clone and run Slither alongside Claude review.
|
|
12
|
+
*/
|
|
13
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
14
|
+
exports.fetchContestProtocols = exports.parseProtocolName = void 0;
|
|
15
|
+
const redis_1 = require("./redis");
|
|
16
|
+
const CONTEST_ORGS = ['code-423n4', 'sherlock-audit'];
|
|
17
|
+
const GITHUB_API = 'https://api.github.com';
|
|
18
|
+
// Default TVL for contest protocols — they lack on-chain TVL data but are
|
|
19
|
+
// vetted protocols worth auditing (otherwise they wouldn't pay for audits).
|
|
20
|
+
const CONTEST_DEFAULT_TVL = 50000000;
|
|
21
|
+
// Only fetch repos created within this window (active/recent contests)
|
|
22
|
+
const CONTEST_MAX_AGE_DAYS = 30;
|
|
23
|
+
/**
|
|
24
|
+
* Parse a human-readable protocol name from a contest repo name.
|
|
25
|
+
* Code4rena repos: "2024-01-protocolname" → "protocolname"
|
|
26
|
+
* Sherlock repos: "2024-protocolname-audit" → "protocolname"
|
|
27
|
+
*/
|
|
28
|
+
/**
 * Derive a human-readable protocol name from a contest repo name.
 * Code4rena: "2024-01-protocolname" -> "Protocolname (C4)"
 * Sherlock:  "2024-protocolname-audit" -> "Protocolname (Sherlock)"
 * Falls back to the raw repo name when nothing is left after stripping.
 *
 * @param {string} org - contest org ('code-423n4' or 'sherlock-audit')
 * @param {string} repoName - raw GitHub repository name
 * @returns {string} title-cased protocol name tagged with its contest source
 */
function parseProtocolName(org, repoName) {
    // Drop a leading YYYY-MM- or YYYY- date prefix, then a trailing
    // "-audit" suffix (Sherlock naming convention).
    const stripped = repoName
        .replace(/^\d{4}-\d{2}-/, '')
        .replace(/^\d{4}-/, '')
        .replace(/-audit$/, '');
    // Title-case the remaining hyphen/underscore-separated words.
    const titled = stripped
        .split(/[-_]/)
        .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
        .join(' ')
        .trim();
    // Nothing survived the stripping — return the raw repo name untouched.
    if (!titled) {
        return repoName;
    }
    const source = org === 'code-423n4' ? 'C4' : 'Sherlock';
    return `${titled} (${source})`;
}
|
|
45
|
+
exports.parseProtocolName = parseProtocolName;
|
|
46
|
+
/**
|
|
47
|
+
* Filter: skip repos that are clearly meta/admin repos, not protocol contest repos.
|
|
48
|
+
*/
|
|
49
|
+
/**
 * Decide whether a repo looks like an actual protocol contest repo, as
 * opposed to an org meta/admin repo, a fork, or an archived repo.
 *
 * @param {string} org - contest org name
 * @param {{fork: boolean, archived: boolean, name: string}} repo - GitHub repo object
 * @returns {boolean} true when the repo should be treated as a contest target
 */
function isContestRepo(org, repo) {
    if (repo.fork || repo.archived) {
        return false;
    }
    const name = repo.name.toLowerCase();
    // Org-level meta/admin repos that never contain contest code.
    const metaPatterns = [
        /^\.github$/,
        /^org-/,
        /^template/,
        /^docs$/,
        /^website$/,
        /^judging/,
        /^judge/,
        /^findings/,
        /^leaderboard/,
        /^governance/,
    ];
    for (const pattern of metaPatterns) {
        if (pattern.test(name)) {
            return false;
        }
    }
    // Code4rena contest repos are named "YYYY-MM-protocol" — require the year prefix.
    if (org === 'code-423n4' && !/^\d{4}-/.test(name)) {
        return false;
    }
    // Sherlock contest repos contain "audit" or start with a year.
    if (org === 'sherlock-audit' && !name.includes('audit') && !/^\d{4}-/.test(name)) {
        return false;
    }
    return true;
}
|
|
82
|
+
/**
 * Fetch recent contest repos for one org from the GitHub REST API.
 * Walks up to 3 pages of public repos sorted by creation date (newest first)
 * and stops as soon as a repo older than CONTEST_MAX_AGE_DAYS is seen.
 *
 * @param {string} org - GitHub org name
 * @returns {Promise<Array>} Protocol objects for qualifying contest repos
 * @throws {Error} on any non-404 GitHub API error status
 */
async function fetchOrgContests(org) {
    const cutoff = new Date(Date.now() - CONTEST_MAX_AGE_DAYS * 24 * 60 * 60 * 1000);
    const protocols = [];
    let reachedCutoff = false;
    for (let page = 1; page <= 3 && !reachedCutoff; page++) {
        const url = `${GITHUB_API}/orgs/${org}/repos?type=public&sort=created&direction=desc&per_page=50&page=${page}`;
        const res = await fetch(url, {
            headers: {
                'User-Agent': 'white-hat-scanner/1.0',
                Accept: 'application/vnd.github+json',
            },
            signal: AbortSignal.timeout(20000),
        });
        if (!res.ok) {
            if (res.status === 404) {
                break; // org might not exist
            }
            throw new Error(`GitHub API ${org} returned ${res.status}`);
        }
        const repos = await res.json();
        if (repos.length === 0) {
            break;
        }
        for (const repo of repos) {
            const createdAt = new Date(repo.created_at);
            // Results are sorted newest-first, so the first repo past the
            // cutoff means every remaining repo is older too — stop paging.
            if (createdAt < cutoff) {
                reachedCutoff = true;
                break;
            }
            if (!isContestRepo(org, repo)) {
                continue;
            }
            protocols.push({
                id: `contest-${org}-${repo.name}`,
                name: parseProtocolName(org, repo.name),
                github: repo.html_url,
                chain: 'ethereum', // most DeFi audit contests are EVM-based
                tvl: CONTEST_DEFAULT_TVL,
                listedAt: createdAt.getTime(),
            });
        }
    }
    return protocols;
}
|
|
126
|
+
/**
|
|
127
|
+
* Fetch active/recent audit contest protocols from Code4rena and Sherlock.
|
|
128
|
+
* Returns Protocol objects ready to be queued via queueNewProtocols().
|
|
129
|
+
*/
|
|
130
|
+
/**
 * Fetch active/recent audit contest protocols from every configured contest
 * org. Per-org failures are logged and swallowed so one org outage does not
 * block the others. Returns Protocol objects ready for queueNewProtocols().
 *
 * @returns {Promise<Array>} combined protocol list across all contest orgs
 */
async function fetchContestProtocols() {
    const results = [];
    for (const org of CONTEST_ORGS) {
        try {
            const found = await fetchOrgContests(org);
            await (0, redis_1.log)(`Contest scraper (${org}): found ${found.length} recent contest repos`);
            results.push(...found);
        }
        catch (err) {
            // Best-effort: log and keep going with the remaining orgs.
            await (0, redis_1.log)(`Contest scraper (${org}) error: ${err.message}`);
        }
    }
    return results;
}
|
|
144
|
+
exports.fetchContestProtocols = fetchContestProtocols;
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.draftDisclosureEmail = exports.checkDisclosureTimelines = exports.createDisclosureRecord = void 0;
|
|
4
|
+
const redis_1 = require("./redis");
|
|
5
|
+
const DISCLOSURE_DEADLINE_MS = 72 * 60 * 60 * 1000;
|
|
6
|
+
const FOLLOWUP_MS = 24 * 60 * 60 * 1000;
|
|
7
|
+
/**
 * Persist a new disclosure record plus the full finding payload in Redis.
 *
 * @param {object} result - analyzer output (protocolId, protocolName, riskLevel, ...)
 * @param {object} scored - scorer output; currently unused, kept for interface compatibility
 * @returns {Promise<object>} the stored disclosure record
 *
 * NOTE(review): the findings key is keyed per protocol, so a rescan overwrites
 * the previous finding — presumably intentional; confirm if history matters.
 */
async function createDisclosureRecord(result, scored) {
    const redis = (0, redis_1.getRedis)();
    const record = {
        id: `${result.protocolId}-${Date.now()}`,
        protocolName: result.protocolName,
        riskLevel: result.riskLevel,
        bounty: result.estimatedBounty,
        summary: result.disclosureSummary,
        status: 'pending',
        createdAt: Date.now(),
    };
    // Full finding payload lives under its own key; the record is the index entry.
    await redis.set(`whiteh:findings:${result.protocolId}`, JSON.stringify(result));
    await redis.lpush('whiteh:disclosures', JSON.stringify(record));
    // Cap the disclosure list at 500 entries.
    await redis.ltrim('whiteh:disclosures', 0, 499);
    await (0, redis_1.log)(`Disclosure record created for ${result.protocolName} (${result.riskLevel})`);
    return record;
}
|
|
25
|
+
exports.createDisclosureRecord = createDisclosureRecord;
|
|
26
|
+
/**
 * Scan stored disclosure records and log follow-up / deadline alerts.
 *
 * Fix: when a record's 72h window has passed we now persist status 'expired'
 * back into the list via LSET at the record's index. Previously nothing ever
 * set the status to 'expired', so the `status !== 'expired'` guard could never
 * be false and the same expiry was re-alerted on every hourly run, forever.
 *
 * NOTE: LSET-by-index races with a concurrent LPUSH (indexes shift). Acceptable
 * here because only this hourly job rewrites entries; worst case is one
 * duplicate alert, not data loss — flagging rather than adding locking.
 */
async function checkDisclosureTimelines() {
    const redis = (0, redis_1.getRedis)();
    const rawRecords = await redis.lrange('whiteh:disclosures', 0, -1);
    const now = Date.now();
    let warnings = 0;
    let expired = 0;
    for (let i = 0; i < rawRecords.length; i++) {
        let record;
        try {
            record = JSON.parse(rawRecords[i]);
        }
        catch {
            continue; // malformed entry — skip
        }
        // Closed-out records need no timeline tracking.
        if (record.status === 'public' || record.status === 'fixed')
            continue;
        // No contact yet means the 72h clock has not started.
        if (!record.contactedAt)
            continue;
        const deadline = record.contactedAt + DISCLOSURE_DEADLINE_MS;
        if (now > deadline && record.status !== 'expired') {
            await (0, redis_1.log)(`⏰ DEADLINE EXPIRED: ${record.protocolName} — 72h disclosure window passed, consider public disclosure`);
            expired++;
            // Persist the expiry so subsequent runs don't re-alert.
            record.status = 'expired';
            await redis.lset('whiteh:disclosures', i, JSON.stringify(record));
        }
        else if (deadline - now < FOLLOWUP_MS && record.status === 'contacted') {
            await (0, redis_1.log)(`⚠️ FOLLOW-UP DUE: ${record.protocolName} — ${Math.floor((deadline - now) / 3600000)}h until deadline`);
            warnings++;
        }
    }
    if (warnings > 0 || expired > 0) {
        await (0, redis_1.log)(`Timeline check: ${warnings} follow-ups due, ${expired} deadlines expired`);
    }
    else {
        await (0, redis_1.log)('Timeline check: all disclosures on track');
    }
}
|
|
61
|
+
exports.checkDisclosureTimelines = checkDisclosureTimelines;
|
|
62
|
+
/**
 * Render a plain-text responsible-disclosure email for a disclosure record.
 * Interpolates protocol name, risk level, optional bounty estimate, and the
 * finding summary into a fixed template. Pure function — no I/O.
 *
 * @param {object} record - disclosure record (protocolName, riskLevel, bounty, summary)
 * @returns {string} the full email text, including the Subject: line
 *
 * NOTE(review): the 72-hour window stated in this template is far shorter than
 * the widely used 90-day coordinated-disclosure norm — confirm this policy is
 * intentional and appropriate before any email based on this draft is sent.
 * NOTE(review): `bounty.toLocaleString()` output is locale-dependent; the
 * rendered amount varies with the host's default locale.
 */
function draftDisclosureEmail(record) {
    // Template lines below are string content — their column-0 position is
    // significant and must not be indented.
    return `Subject: Responsible Disclosure — Security Vulnerability in ${record.protocolName}

Dear ${record.protocolName} Security Team,

I am writing to responsibly disclose a security vulnerability I discovered in your protocol.

Risk Level: ${record.riskLevel}
${record.bounty > 0 ? `Estimated Bug Bounty: $${record.bounty.toLocaleString()}` : ''}

Summary:
${record.summary}

I am disclosing this under a 72-hour responsible disclosure policy. I will refrain from publishing details until:
1. You acknowledge receipt of this report
2. A fix is deployed, OR
3. 72 hours have elapsed from the timestamp of this email

Please respond to confirm receipt.

Regards,
White Hat Security Researcher`;
}
|
|
85
|
+
exports.draftDisclosureEmail = draftDisclosureEmail;
|
|
@@ -0,0 +1,260 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.dequeueProtocol = exports.pruneNoGithubFromQueue = exports.prioritizeExploitTargets = exports.queueNewProtocols = exports.discoverProtocols = exports.isExploitTarget = exports.EXPLOIT_HISTORY_TARGETS = void 0;
|
|
4
|
+
const redis_1 = require("./redis");
|
|
5
|
+
const contest_1 = require("./contest");
|
|
6
|
+
const DEFILLAMA_API = 'https://api.llama.fi/protocols';
|
|
7
|
+
const TVL_THRESHOLD = 10000000;
|
|
8
|
+
const MAX_AGE_DAYS = 7;
|
|
9
|
+
/**
|
|
10
|
+
* Protocols with known exploit history from Rekt.news / public incident records.
|
|
11
|
+
* These are high-priority scan targets: prior exploits indicate architectural
|
|
12
|
+
* weaknesses or recurring vuln classes that may still be present in redeployments
|
|
13
|
+
* or sister protocols. Matched case-insensitively against protocol names.
|
|
14
|
+
*
|
|
15
|
+
* Sources: rekt.news all-time losses, Immunefi incident reports, DeFiHackLabs
|
|
16
|
+
*/
|
|
17
|
+
exports.EXPLOIT_HISTORY_TARGETS = [
|
|
18
|
+
// Cross-chain bridges (historically highest losses)
|
|
19
|
+
'ronin', 'wormhole', 'nomad', 'multichain', 'anyswap', 'thorchain', 'harmony',
|
|
20
|
+
'poly network', 'polynetwork', 'orbit bridge', 'orbitbridge',
|
|
21
|
+
// Lending / money markets
|
|
22
|
+
'compound', 'aave', 'cream finance', 'cream', 'beanstalk', 'euler',
|
|
23
|
+
'inverse finance', 'mango markets', 'venus', 'moola',
|
|
24
|
+
// DEX / AMM
|
|
25
|
+
'uniswap', 'curve', 'balancer', 'kyber', 'dodo', 'pancakeswap',
|
|
26
|
+
'saddle finance', 'saddle',
|
|
27
|
+
// Yield / vault
|
|
28
|
+
'yearn', 'harvest finance', 'harvest', 'badger', 'rari capital',
|
|
29
|
+
'value defi', 'valuedefi', 'belt finance', 'belt', 'bunny finance', 'bunny',
|
|
30
|
+
// Stablecoin / algo-stable
|
|
31
|
+
'terra', 'anchor', 'fei', 'iron finance', 'ironfinance',
|
|
32
|
+
'deus finance', 'deus', 'mirror protocol',
|
|
33
|
+
// Other notable hacks
|
|
34
|
+
'bzx', 'fulcrum', 'indexed finance', 'indexed', 'uranium', 'spartan',
|
|
35
|
+
'merlin lab', 'merlin', 'ola finance', 'ola', 'pickle finance', 'pickle',
|
|
36
|
+
'alpha finance', 'alpha homora', 'mushroom', '88mph', 'cover protocol',
|
|
37
|
+
'kucoin', 'bitmart', 'vulcan forged', 'ronin network',
|
|
38
|
+
];
|
|
39
|
+
/**
|
|
40
|
+
* Returns true if the protocol name matches any known exploit target.
|
|
41
|
+
* Case-insensitive substring match.
|
|
42
|
+
*/
|
|
43
|
+
/**
 * Case-insensitive substring match of a protocol name against the
 * exploit-history target list.
 *
 * NOTE(review): very short tokens in the list ('ola', 'belt', 'bunny', ...)
 * can substring-match unrelated names (e.g. 'ola' inside 'Parabola') and
 * wrongly front-queue them; consider word-boundary matching.
 *
 * @param {string} name - protocol display name
 * @returns {boolean} true when the name contains any known exploit target token
 */
function isExploitTarget(name) {
    const needle = name.toLowerCase();
    for (const target of exports.EXPLOIT_HISTORY_TARGETS) {
        if (needle.includes(target)) {
            return true;
        }
    }
    return false;
}
|
|
47
|
+
exports.isExploitTarget = isExploitTarget;
|
|
48
|
+
// Protocols without a GitHub repo produce only speculative Claude-only reviews with no real
|
|
49
|
+
// code evidence, so they're not actionable for disclosure. Only queue them if their TVL is
|
|
50
|
+
// exceptionally large (≥ $500M) — large enough that an architectural threat model has value.
|
|
51
|
+
const NO_GITHUB_MIN_TVL = 500000000;
|
|
52
|
+
/**
 * Gather candidate protocols from DeFiLlama and the audit-contest calendars.
 * Each source is tried independently: a failure in one is logged and does not
 * prevent results from the other.
 *
 * @returns {Promise<Array>} combined candidate protocol list (possibly empty)
 */
async function discoverProtocols() {
    await (0, redis_1.log)('Starting protocol discovery from DeFiLlama + contest calendars...');
    let protocols = [];
    try {
        protocols = await fetchDefiLlama();
        await (0, redis_1.log)(`DeFiLlama: found ${protocols.length} new protocols with TVL > $${TVL_THRESHOLD.toLocaleString()}`);
    }
    catch (err) {
        await (0, redis_1.log)(`DeFiLlama fetch error: ${err.message}`);
    }
    try {
        const contestProtocols = await (0, contest_1.fetchContestProtocols)();
        if (contestProtocols.length > 0) {
            await (0, redis_1.log)(`Contest calendars: found ${contestProtocols.length} active audit contest protocols`);
            protocols = protocols.concat(contestProtocols);
        }
    }
    catch (err) {
        await (0, redis_1.log)(`Contest scraper error: ${err.message}`);
    }
    return protocols;
}
|
|
74
|
+
exports.discoverProtocols = discoverProtocols;
|
|
75
|
+
/**
 * Pull the protocol list from the DeFiLlama API and keep entries that cross
 * the TVL threshold and were listed within MAX_AGE_DAYS (entries with no
 * listedAt are treated as "new enough").
 *
 * NOTE(review): assumes `p.github` is an array of org-name strings — verify
 * against the live DeFiLlama schema; if it were a plain string, `p.github[0]`
 * would yield its first character.
 *
 * @returns {Promise<Array>} Protocol objects for qualifying entries
 * @throws {Error} when the API responds with a non-2xx status
 */
async function fetchDefiLlama() {
    const res = await fetch(DEFILLAMA_API, {
        headers: { 'User-Agent': 'white-hat-scanner/1.0' },
        signal: AbortSignal.timeout(30000),
    });
    if (!res.ok) {
        throw new Error(`DeFiLlama API returned ${res.status}`);
    }
    const data = await res.json();
    const cutoff = Date.now() - MAX_AGE_DAYS * 24 * 60 * 60 * 1000;
    const candidates = [];
    for (const p of data) {
        const tvl = p.tvl ?? 0;
        // DeFiLlama listedAt is epoch seconds; convert to ms (0 = unknown).
        const listedAt = p.listedAt ? p.listedAt * 1000 : 0;
        if (tvl < TVL_THRESHOLD) {
            continue;
        }
        if (listedAt && listedAt < cutoff) {
            continue;
        }
        // The github field holds org names; build a browsable URL from the first.
        const githubOrg = p.github && p.github.length > 0 ? p.github[0] : undefined;
        candidates.push({
            id: p.id,
            name: p.name,
            github: githubOrg ? `https://github.com/${githubOrg}` : undefined,
            chain: p.chain || (p.chains && p.chains[0]) || 'unknown',
            tvl,
            listedAt: listedAt || Date.now(),
        });
    }
    return candidates;
}
|
|
107
|
+
/**
 * Add newly-discovered protocols to the scan queue.
 * Skips: no-source protocols below the TVL floor, already-scanned protocols,
 * and protocols already queued (tracked via the whiteh:queued ID set).
 * Exploit-history targets are pushed to the FRONT of the queue.
 *
 * @param {Array} protocols - candidate protocols from discovery
 * @returns {Promise<number>} number of protocols actually queued
 */
async function queueNewProtocols(protocols) {
    const redis = (0, redis_1.getRedis)();
    let queued = 0;
    let skippedNoGithub = 0;
    let prioritized = 0;
    for (const p of protocols) {
        // No source code and not a whale — a scan would yield only speculative
        // architectural notes, so don't spend a scan slot on it.
        if (!p.github && p.tvl < NO_GITHUB_MIN_TVL) {
            skippedNoGithub++;
            continue;
        }
        if (await redis.sismember('whiteh:scanned', p.id)) {
            continue;
        }
        // The whiteh:queued ID set mirrors the queue. LPOS against the
        // JSON-object list could never match a bare ID, hence the side set.
        if (await redis.sismember('whiteh:queued', p.id)) {
            continue;
        }
        const entry = JSON.stringify(p);
        if (isExploitTarget(p.name)) {
            // Prior hacks suggest recurring weaknesses — scan these first.
            await redis.lpush('whiteh:queue', entry);
            prioritized++;
        }
        else {
            await redis.rpush('whiteh:queue', entry);
        }
        await redis.sadd('whiteh:queued', p.id);
        queued++;
    }
    if (queued > 0 || skippedNoGithub > 0) {
        const priorityNote = prioritized > 0 ? ` (${prioritized} exploit-history targets front-queued)` : '';
        await (0, redis_1.log)(`Queued ${queued} new protocols for scanning (skipped ${skippedNoGithub} with no source code)${priorityNote}`);
    }
    return queued;
}
|
|
148
|
+
exports.queueNewProtocols = queueNewProtocols;
|
|
149
|
+
/**
|
|
150
|
+
* Reorder existing queue entries so that exploit-history targets move to the front.
|
|
151
|
+
* Safe to call at startup — rewrites the queue atomically.
|
|
152
|
+
*/
|
|
153
|
+
/**
 * Reorder the existing scan queue so exploit-history targets move to the front.
 * Intended to run once at startup.
 *
 * Fix: the DEL + RPUSH rebuild is now a single MULTI/EXEC transaction, so no
 * reader can observe a half-empty queue and no concurrent push can land between
 * the delete and the rebuild. (The previous code's comment claimed atomicity
 * the two separate commands did not provide.) Entries pushed between the
 * initial LRANGE snapshot and EXEC are still clobbered — acceptable for a
 * startup-only maintenance pass; do not call this while discovery is running.
 *
 * @returns {Promise<number>} number of entries moved to the front
 */
async function prioritizeExploitTargets() {
    const redis = (0, redis_1.getRedis)();
    const all = await redis.lrange('whiteh:queue', 0, -1);
    const priority = [];
    const normal = [];
    for (const raw of all) {
        try {
            const p = JSON.parse(raw);
            (isExploitTarget(p.name) ? priority : normal).push(raw);
        }
        catch {
            normal.push(raw); // unparseable — keep in place, never prioritize
        }
    }
    if (priority.length === 0)
        return 0;
    const ordered = [...priority, ...normal];
    // Atomic rebuild (ordered is non-empty here since priority is non-empty).
    await redis.multi()
        .del('whiteh:queue')
        .rpush('whiteh:queue', ...ordered)
        .exec();
    await (0, redis_1.log)(`Queue reordered: ${priority.length} exploit-history targets moved to front, ${normal.length} normal`);
    return priority.length;
}
|
|
182
|
+
exports.prioritizeExploitTargets = prioritizeExploitTargets;
|
|
183
|
+
/**
|
|
184
|
+
* Prune existing queue entries that have no GitHub repo and TVL below threshold,
|
|
185
|
+
* and remove entries for protocols already in whiteh:scanned (duplicate cleanup).
|
|
186
|
+
* Rebuilds whiteh:queued set from the surviving entries.
|
|
187
|
+
* Called once at startup.
|
|
188
|
+
*/
|
|
189
|
+
/**
 * Prune queue entries that are no longer worth scanning — malformed JSON,
 * no-GitHub entries below the TVL floor, already-scanned protocols, and
 * in-queue duplicates — then rebuild the whiteh:queued ID set from survivors.
 * Called once at startup.
 *
 * Fixes:
 * - Surviving IDs are collected during the first pass instead of re-parsing
 *   every kept entry a second time afterwards.
 * - The rebuild (DEL queue, DEL set, RPUSH, SADD) now runs in one MULTI/EXEC
 *   transaction, so no reader observes the window where both keys were empty
 *   and the set is written in one SADD instead of N round trips.
 * - Duplicate check short-circuits on the local seen-set before hitting Redis.
 *
 * @returns {Promise<number>} number of pruned entries
 */
async function pruneNoGithubFromQueue() {
    const redis = (0, redis_1.getRedis)();
    const all = await redis.lrange('whiteh:queue', 0, -1);
    const keep = [];
    const keepIds = [];
    const seenIds = new Set();
    let pruned = 0;
    for (const raw of all) {
        let p;
        try {
            p = JSON.parse(raw);
        }
        catch {
            pruned++; // malformed — drop
            continue;
        }
        // No source code and below the TVL floor — not actionable.
        if (!p.github && p.tvl < NO_GITHUB_MIN_TVL) {
            pruned++;
            continue;
        }
        // Drop in-queue duplicates and entries already scanned.
        if (seenIds.has(p.id) || (await redis.sismember('whiteh:scanned', p.id))) {
            pruned++;
            continue;
        }
        seenIds.add(p.id);
        keep.push(raw);
        keepIds.push(p.id);
    }
    // Atomically replace the queue and its mirror ID set.
    const tx = redis.multi().del('whiteh:queue').del('whiteh:queued');
    if (keep.length > 0) {
        tx.rpush('whiteh:queue', ...keep);
        tx.sadd('whiteh:queued', ...keepIds);
    }
    await tx.exec();
    if (pruned > 0) {
        await (0, redis_1.log)(`Queue pruned: removed ${pruned} entries (no-source/already-scanned/duplicates), ${keep.length} remaining`);
    }
    return pruned;
}
|
|
235
|
+
exports.pruneNoGithubFromQueue = pruneNoGithubFromQueue;
|
|
236
|
+
/**
 * Pop the next unscanned protocol off the queue, silently discarding
 * malformed or already-scanned entries (bounded at 20 pops per call so a
 * queue full of stale duplicates cannot stall the caller).
 *
 * @returns {Promise<object|null>} the parsed protocol, or null when the queue
 *   is empty or only stale entries were found
 */
async function dequeueProtocol() {
    const redis = (0, redis_1.getRedis)();
    let attemptsLeft = 20;
    while (attemptsLeft-- > 0) {
        const raw = await redis.lpop('whiteh:queue');
        if (!raw) {
            return null;
        }
        let protocol;
        try {
            protocol = JSON.parse(raw);
        }
        catch {
            continue; // malformed — discard
        }
        // Keep the mirror ID set in sync with the list.
        await redis.srem('whiteh:queued', protocol.id);
        // Duplicates left over from before the whiteh:queued fix (or races
        // between discovery and scan) are dropped here and the next entry tried.
        if (!(await redis.sismember('whiteh:scanned', protocol.id))) {
            return protocol;
        }
    }
    return null;
}
|
|
260
|
+
exports.dequeueProtocol = dequeueProtocol;
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const discovery_1 = require("./discovery");
|
|
4
|
+
const analyzer_1 = require("./analyzer");
|
|
5
|
+
const scorer_1 = require("./scorer");
|
|
6
|
+
const disclosure_1 = require("./disclosure");
|
|
7
|
+
const notifier_1 = require("./notifier");
|
|
8
|
+
const submission_1 = require("./submission");
|
|
9
|
+
const redis_1 = require("./redis");
|
|
10
|
+
const DISCOVER_INTERVAL_MS = 6 * 60 * 60 * 1000;
|
|
11
|
+
const SCAN_INTERVAL_MS = 30 * 60 * 1000;
|
|
12
|
+
const DISCLOSURE_CHECK_INTERVAL_MS = 60 * 60 * 1000;
|
|
13
|
+
/**
 * One discovery pass: find candidate protocols, queue the new ones, log the
 * totals. Never throws — errors are logged (this runs on a timer).
 */
async function runDiscovery() {
    try {
        const found = await (0, discovery_1.discoverProtocols)();
        const queuedCount = await (0, discovery_1.queueNewProtocols)(found);
        await (0, redis_1.log)(`Discovery complete: ${found.length} protocols found, ${queuedCount} new queued`);
    }
    catch (err) {
        await (0, redis_1.log)(`Discovery error: ${err.message}`);
    }
}
|
|
23
|
+
/**
 * Process one protocol from the scan queue through the full pipeline:
 * analyze -> score -> mark scanned -> disclosure record -> alert -> submission draft.
 * Never throws — errors are logged (this runs on a timer).
 */
async function processScanQueue() {
    const redis = (0, redis_1.getRedis)();
    try {
        const pending = await redis.llen('whiteh:queue');
        await (0, redis_1.log)(`Scan queue: ${pending} protocols pending`);
        if (pending === 0) {
            return;
        }
        const protocol = await (0, discovery_1.dequeueProtocol)();
        if (!protocol) {
            return;
        }
        await (0, redis_1.log)(`Processing: ${protocol.name}`);
        const result = await (0, analyzer_1.analyzeProtocol)(protocol);
        const scored = (0, scorer_1.scoreFindings)(result);
        await redis.sadd('whiteh:scanned', protocol.id);
        // Disclosure records only when real source code was reviewed —
        // no-source findings are speculative and don't meet the disclosure bar.
        if (scored.sourceAvailable) {
            await (0, disclosure_1.createDisclosureRecord)(result, scored);
        }
        if (scored.needsAlert) {
            await (0, notifier_1.sendAlert)(scored);
        }
        // Immunefi submission draft for qualifying findings (human review required).
        await (0, submission_1.generateSubmissionDraft)(result, scored);
        await (0, redis_1.log)(`Completed: ${protocol.name} — ${scored.riskLevel} (severity: ${scored.severity.toFixed(1)}, bounty: $${scored.estimatedBounty})`);
    }
    catch (err) {
        await (0, redis_1.log)(`Scan queue error: ${err.message}`);
    }
}
|
|
53
|
+
/**
 * Timer-safe wrapper around checkDisclosureTimelines: logs failures instead
 * of letting them escape the interval callback.
 */
async function runDisclosureCheck() {
    try {
        await (0, disclosure_1.checkDisclosureTimelines)();
    }
    catch (err) {
        await (0, redis_1.log)(`Disclosure check error: ${err.message}`);
    }
}
|
|
61
|
+
/**
 * Service entry point: install process-level error handlers, run one-time
 * startup queue maintenance, do an immediate first pass of every job, then
 * arm the recurring timers (discovery 6h, scan 30m, disclosure check 1h).
 *
 * NOTE(review): both process-level handlers await the Redis-backed log; if
 * Redis itself is the failing component this can reject again (or delay the
 * exit in the uncaughtException path) — consider console-only logging there.
 */
async function main() {
    await (0, redis_1.log)('=== white-hat-scanner starting ===');
    await (0, redis_1.log)(`Node ${process.version} | PID ${process.pid}`);
    process.on('unhandledRejection', async (reason) => {
        await (0, redis_1.log)(`Unhandled rejection: ${reason}`);
    });
    process.on('uncaughtException', async (err) => {
        await (0, redis_1.log)(`Uncaught exception: ${err.message}`);
        process.exit(1);
    });
    // One-time startup: prune queue bloat from before the GitHub filter was added.
    await (0, discovery_1.pruneNoGithubFromQueue)();
    // One-time startup: reorder existing queue so exploit-history targets scan first.
    await (0, discovery_1.prioritizeExploitTargets)();
    // Run immediately on startup
    await runDiscovery();
    await processScanQueue();
    await runDisclosureCheck();
    // Set up recurring intervals
    // (each run* wrapper already catches its own errors; .catch here is a last resort)
    setInterval(() => { runDiscovery().catch(console.error); }, DISCOVER_INTERVAL_MS);
    setInterval(() => { processScanQueue().catch(console.error); }, SCAN_INTERVAL_MS);
    setInterval(() => { runDisclosureCheck().catch(console.error); }, DISCLOSURE_CHECK_INTERVAL_MS);
    await (0, redis_1.log)('Service running — discovery every 6h, scanning every 30m, disclosure check every 1h');
}
// Top-level launch: any startup failure is fatal.
main().catch(async (err) => {
    console.error('[fatal]', err);
    process.exit(1);
});
|
package/dist/notifier.js
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.sendAlert = void 0;
|
|
4
|
+
const redis_1 = require("./redis");
|
|
5
|
+
const NOTIFY_CHANNEL = 'cca:notify:money-brain';
|
|
6
|
+
const ALERT_DEDUP_TTL_MS = 24 * 60 * 60 * 1000; // 24 hours
|
|
7
|
+
/** Returns true if we have already sent an alert for this protocol within the dedup window. */
|
|
8
|
+
/**
 * True when an alert for this protocol was already recorded within the dedup
 * window. Only the newest 100 entries of whiteh:alerts are examined.
 *
 * @param {string} protocolName - display name used as the dedup key
 * @returns {Promise<boolean>}
 */
async function isRecentlyAlerted(protocolName) {
    const redis = (0, redis_1.getRedis)();
    const entries = await redis.lrange('whiteh:alerts', 0, 99);
    const cutoff = Date.now() - ALERT_DEDUP_TTL_MS;
    return entries.some((entry) => {
        try {
            const rec = JSON.parse(entry);
            return rec.protocol === protocolName && rec.ts >= cutoff;
        }
        catch {
            return false; // malformed record — ignore
        }
    });
}
|
|
22
|
+
/**
 * Publish a vulnerability alert to the notify channel and record it in the
 * internal alerts list (capped at 200 entries) for health-check queries.
 * Deduped per protocol within the 24h window via isRecentlyAlerted().
 *
 * @param {object} finding - scored finding (protocolName, riskLevel, estimatedBounty, ...)
 */
async function sendAlert(finding) {
    const redis = (0, redis_1.getRedis)();
    if (await isRecentlyAlerted(finding.protocolName)) {
        await (0, redis_1.log)(`Alert deduped for ${finding.protocolName} (${finding.riskLevel}) — already alerted within 24h`);
        return;
    }
    const bountyStr = finding.estimatedBounty > 0
        ? `$${(finding.estimatedBounty / 1000).toFixed(0)}k`
        : 'unknown';
    const emoji = finding.riskLevel === 'CRITICAL' ? '🚨' : '⚠️';
    const lines = [
        `${emoji} ${finding.riskLevel} VULN: ${finding.protocolName}`,
        `Estimated bounty: ${bountyStr}`,
        `Slither findings: ${finding.slitherCount}`,
        `Summary: ${finding.disclosureSummary.slice(0, 300)}`,
    ];
    await redis.lpush(NOTIFY_CHANNEL, JSON.stringify({ text: lines.join('\n') }));
    // Internal record for health-check queries.
    await redis.lpush('whiteh:alerts', JSON.stringify({
        ts: Date.now(),
        protocol: finding.protocolName,
        riskLevel: finding.riskLevel,
        bounty: finding.estimatedBounty,
        slitherCount: finding.slitherCount,
        sourceAvailable: finding.sourceAvailable,
    }));
    await redis.ltrim('whiteh:alerts', 0, 199);
    await (0, redis_1.log)(`Alert sent for ${finding.protocolName} (${finding.riskLevel})`);
}
|
|
51
|
+
exports.sendAlert = sendAlert;
|
package/dist/redis.js
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.closeRedis = exports.log = exports.getRedis = void 0;
|
|
7
|
+
const ioredis_1 = __importDefault(require("ioredis"));
|
|
8
|
+
let _redis = null;
|
|
9
|
+
/**
 * Lazily create (and memoize) the shared ioredis client.
 * Connection target comes from REDIS_URL, falling back to localhost:6379.
 *
 * @returns {import('ioredis').default} the singleton client
 */
function getRedis() {
    if (_redis) {
        return _redis;
    }
    const url = process.env.REDIS_URL || 'redis://localhost:6379';
    _redis = new ioredis_1.default(url, {
        // Back off 500ms per attempt, capped at 5s; retry commands indefinitely.
        retryStrategy: (times) => Math.min(times * 500, 5000),
        maxRetriesPerRequest: null,
        enableReadyCheck: false,
    });
    _redis.on('error', (e) => console.error('[redis]', e.message));
    _redis.on('connect', () => console.log('[redis] connected'));
    return _redis;
}
|
|
21
|
+
exports.getRedis = getRedis;
|
|
22
|
+
/**
 * Append a timestamped entry to the whiteh:log list (capped at 1000 entries)
 * and echo the message to stdout.
 *
 * @param {string} message - human-readable log line
 */
async function log(message) {
    const client = getRedis();
    await client.lpush('whiteh:log', JSON.stringify({ ts: Date.now(), message }));
    await client.ltrim('whiteh:log', 0, 999);
    console.log(`[whiteh] ${message}`);
}
|
|
29
|
+
exports.log = log;
|
|
30
|
+
/** Gracefully quit the shared client (if any) and reset the singleton. */
async function closeRedis() {
    if (!_redis) {
        return;
    }
    await _redis.quit();
    _redis = null;
}
|
|
36
|
+
exports.closeRedis = closeRedis;
|