@lhi/tdd-audit 1.8.4 → 1.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +81 -2
- package/docs/ai-remediation.md +182 -0
- package/docs/rest-api.md +230 -0
- package/index.js +52 -7
- package/lib/config.js +116 -0
- package/lib/github.js +93 -0
- package/lib/remediator.js +181 -0
- package/lib/reporter.js +164 -0
- package/lib/server.js +247 -0
- package/package.json +1 -1
package/lib/config.js
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
'use strict';

const fs = require('fs');
const path = require('path');

// Conventional config filename looked up in the working directory.
const CONFIG_FILE = '.tdd-audit.json';

// Base configuration; file config and CLI flags are layered on top (see loadConfig).
const DEFAULTS = {
  port: 3000,
  output: 'text', // 'text' | 'json' | 'sarif'
  severityThreshold: 'LOW', // minimum severity to include in output
  ignore: [], // path prefixes to skip
  provider: null, // 'anthropic' | 'openai' | 'gemini' | 'ollama'
  model: null,
  apiKey: null,
  baseUrl: null, // override base URL for OpenAI-compatible providers
  apiKeyEnv: null, // env var name to read the key from
  serverApiKey: null, // key required on REST API calls
  trustProxy: false, // trust X-Forwarded-For for rate limiting
};

// Template written by `tdd-audit init`
// (a superset of DEFAULTS with opinionated starter values; see writeInitConfig)
const INIT_TEMPLATE = {
  provider: 'openai',
  model: 'gpt-4o',
  apiKeyEnv: 'OPENAI_API_KEY',
  baseUrl: null,
  output: 'text',
  severityThreshold: 'LOW',
  port: 3000,
  serverApiKey: null,
  ignore: ['node_modules', 'dist', 'build', 'coverage'],
};
|
|
34
|
+
|
|
35
|
+
/**
 * Resolve the effective configuration.
 *
 * Precedence, lowest to highest: DEFAULTS → config file → CLI overrides.
 * The config file is either the explicit `cliOverrides.configPath` or
 * `.tdd-audit.json` in `cwd`. An unreadable/unparsable file is reported on
 * stderr and treated as empty rather than aborting.
 *
 * @param {string} [cwd=process.cwd()]
 * @param {object} [cliOverrides={}] - may include { configPath: '/abs/path/to/file.json' }
 * @returns {object} merged configuration, with apiKey resolved from apiKeyEnv when needed
 */
function loadConfig(cwd = process.cwd(), cliOverrides = {}) {
  // Explicit --config path wins over the cwd convention.
  const filePath = cliOverrides.configPath
    ? path.resolve(cliOverrides.configPath)
    : path.join(cwd, CONFIG_FILE);

  let fileConfig = {};
  if (fs.existsSync(filePath)) {
    try {
      fileConfig = JSON.parse(fs.readFileSync(filePath, 'utf8'));
    } catch (err) {
      // Best-effort: warn and fall through to defaults.
      process.stderr.write(`⚠️ Could not parse ${filePath}: ${err.message}\n`);
    }
  }

  const merged = { ...DEFAULTS, ...fileConfig };

  // Layer CLI overrides on top, skipping internal keys and empty values.
  const INTERNAL = new Set(['configPath']);
  for (const [key, val] of Object.entries(cliOverrides)) {
    if (INTERNAL.has(key)) continue;
    if (val === undefined || val === null) continue;
    merged[key] = val;
  }

  // Resolve apiKey from the named env var when no explicit key was given.
  if (!merged.apiKey && merged.apiKeyEnv) {
    merged.apiKey = process.env[merged.apiKeyEnv] || null;
  }

  return merged;
}
|
|
75
|
+
|
|
76
|
+
/**
 * Parse relevant CLI args into an overrides object for loadConfig.
 *
 * Each flag takes the following argument as its value; a flag whose value is
 * missing or empty is simply skipped. `--json` is a boolean shorthand that
 * forces `output: 'json'` and wins over `--format`.
 *
 * @param {string[]} args - process.argv.slice(2)
 * @returns {object} overrides suitable for loadConfig's second parameter
 */
function parseCliOverrides(args) {
  const get = (flag) => {
    const i = args.indexOf(flag);
    return i !== -1 ? args[i + 1] : undefined;
  };
  const overrides = {};
  const configPath = get('--config'); if (configPath) overrides.configPath = configPath;
  const port = get('--port'); if (port) overrides.port = Number(port);
  const provider = get('--provider'); if (provider) overrides.provider = provider;
  const model = get('--model'); if (model) overrides.model = model;
  const apiKey = get('--api-key'); if (apiKey) overrides.apiKey = apiKey;
  const baseUrl = get('--base-url'); if (baseUrl) overrides.baseUrl = baseUrl;
  const format = get('--format'); if (format) overrides.output = format;
  // FIX: previously read get('--api-key') again, so the REST-server key could
  // never be set independently and the LLM key leaked into serverApiKey.
  const srvKey = get('--server-api-key'); if (srvKey) overrides.serverApiKey = srvKey;
  if (args.includes('--json')) overrides.output = 'json';
  return overrides;
}
|
|
98
|
+
|
|
99
|
+
/**
 * Write a starter .tdd-audit.json to destPath (default: cwd/.tdd-audit.json).
 *
 * @param {string} [destPath] - target file; defaults to CONFIG_FILE in cwd
 * @param {boolean} [force=false] - overwrite an existing file when true
 * @returns {string} the path that was written
 * @throws {Error} when the target exists and force is false
 */
function writeInitConfig(destPath, force = false) {
  const target = destPath || path.join(process.cwd(), CONFIG_FILE);
  if (!force && fs.existsSync(target)) {
    throw new Error(`${target} already exists. Pass --force to overwrite.`);
  }
  // Pretty-print with a trailing newline so the file is editor/VCS friendly.
  const json = JSON.stringify(INIT_TEMPLATE, null, 2);
  fs.writeFileSync(target, `${json}\n`, 'utf8');
  return target;
}
|
|
115
|
+
|
|
116
|
+
module.exports = { loadConfig, parseCliOverrides, writeInitConfig, DEFAULTS, INIT_TEMPLATE, CONFIG_FILE };
|
package/lib/github.js
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// ─── GitHub REST helpers ──────────────────────────────────────────────────────
|
|
4
|
+
|
|
5
|
+
/**
 * Minimal wrapper around the GitHub REST API.
 *
 * @param {string} path - API path beginning with '/'
 * @param {string} token - bearer token
 * @param {string} [method='GET']
 * @param {object|null} [body=null] - JSON-serialized when provided
 * @returns {Promise<object|null>} parsed JSON, or null for 204 No Content
 * @throws {Error} on any non-2xx response (includes a truncated body excerpt)
 */
async function ghFetch(path, token, method = 'GET', body = null) {
  const headers = {
    'Accept': 'application/vnd.github+json',
    'Authorization': `Bearer ${token}`,
    'X-GitHub-Api-Version': '2022-11-28',
    'Content-Type': 'application/json',
  };
  const opts = body
    ? { method, headers, body: JSON.stringify(body) }
    : { method, headers };

  const res = await fetch(`https://api.github.com${path}`, opts);
  if (res.ok) {
    return res.status === 204 ? null : res.json();
  }
  // Include a short excerpt of the error body for diagnostics.
  const text = await res.text().catch(() => '');
  throw new Error(`GitHub API ${method} ${path} → ${res.status}: ${text.slice(0, 200)}`);
}
|
|
23
|
+
|
|
24
|
+
// ─── SARIF upload ─────────────────────────────────────────────────────────────
|
|
25
|
+
|
|
26
|
+
/**
 * Upload a SARIF report to GitHub code scanning.
 * Findings will appear inline in PRs and the Security tab.
 *
 * @param {object} opts
 * @param {string} opts.owner
 * @param {string} opts.repo
 * @param {string} opts.token - GitHub token with `security_events` write scope
 * @param {string} opts.ref - full git ref, e.g. "refs/heads/main"
 * @param {string} opts.commitSha
 * @param {object} opts.sarif - SARIF 2.1.0 object from toSarif()
 * @returns {Promise<object>}
 */
async function uploadSarif({ owner, repo, token, ref, commitSha, sarif }) {
  // The code-scanning endpoint expects the SARIF document base64-encoded.
  const payload = {
    ref,
    commit_sha: commitSha,
    sarif: Buffer.from(JSON.stringify(sarif)).toString('base64'),
    tool_name: '@lhi/tdd-audit',
  };
  return ghFetch(`/repos/${owner}/${repo}/code-scanning/sarifs`, token, 'POST', payload);
}
|
|
48
|
+
|
|
49
|
+
// ─── PR review comments ───────────────────────────────────────────────────────
|
|
50
|
+
|
|
51
|
+
/**
 * Post inline review comments on a pull request for each finding.
 * CRITICAL and HIGH findings request changes; others leave comments only.
 * Findings flagged likelyFalsePositive are skipped entirely.
 *
 * @param {object} opts
 * @param {string} opts.owner
 * @param {string} opts.repo
 * @param {number} opts.pull_number
 * @param {string} opts.token
 * @param {string} opts.commitSha - head SHA of the PR
 * @param {Array} opts.findings
 * @returns {Promise<object|null>} GitHub review object, or null when nothing to report
 */
async function postReviewComments({ owner, repo, pull_number, token, commitSha, findings }) {
  const real = findings.filter((f) => !f.likelyFalsePositive);
  if (real.length === 0) return null;

  const SEVERE = new Set(['CRITICAL', 'HIGH']);
  const hasCritical = real.some((f) => SEVERE.has(f.severity));

  // One inline comment per finding, anchored to the new (RIGHT) side of the diff.
  const comments = [];
  for (const f of real) {
    comments.push({
      path: f.file,
      line: f.line,
      side: 'RIGHT',
      body: `**[${f.severity}] ${f.name}**\n\`\`\`\n${f.snippet}\n\`\`\`\nRun \`/tdd-audit\` to remediate.`,
    });
  }

  const summary = hasCritical
    ? 'CRITICAL/HIGH findings require changes.'
    : 'See inline comments.';

  return ghFetch(`/repos/${owner}/${repo}/pulls/${pull_number}/reviews`, token, 'POST', {
    commit_id: commitSha,
    body: `**@lhi/tdd-audit** found ${real.length} issue(s). ${summary}`,
    event: hasCritical ? 'REQUEST_CHANGES' : 'COMMENT',
    comments,
  });
}
|
|
84
|
+
|
|
85
|
+
// ─── Parse "owner/repo" helper ────────────────────────────────────────────────
|
|
86
|
+
|
|
87
|
+
/**
 * Split an "owner/repo" string into its two parts.
 *
 * @param {string} repoStr
 * @returns {{owner: string, repo: string}}
 * @throws {Error} when either part is missing or the input is falsy
 */
function parseRepo(repoStr) {
  const parts = (repoStr || '').split('/');
  const [owner, repo] = parts;
  if (!owner || !repo) {
    throw new Error('--repo must be in "owner/repo" format');
  }
  return { owner, repo };
}
|
|
92
|
+
|
|
93
|
+
module.exports = { uploadSarif, postReviewComments, parseRepo };
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// ─── Provider endpoints ───────────────────────────────────────────────────────
|
|
4
|
+
|
|
5
|
+
// Registry of supported LLM backends. Each entry supplies:
//   url         — endpoint string (callProvider also accepts a function form)
//   openaiCompat — marks providers whose URL may be swapped via --base-url
//   headers     — (apiKey) => request headers
//   body        — (model, prompt) => JSON request payload; falls back to a
//                 provider default model when none is configured
//   extract     — (responseJson) => completion text ('' when absent)
const PROVIDERS = {
  anthropic: {
    url: 'https://api.anthropic.com/v1/messages',
    headers: (apiKey) => ({
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
    }),
    body: (model, prompt) => ({
      model: model || 'claude-opus-4-6',
      max_tokens: 8192,
      messages: [{ role: 'user', content: prompt }],
    }),
    extract: (data) => data?.content?.[0]?.text || '',
  },
  openai: {
    url: 'https://api.openai.com/v1/chat/completions',
    openaiCompat: true, // supports --base-url override for compatible APIs
    headers: (apiKey) => ({
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    }),
    body: (model, prompt) => ({
      model: model || 'gpt-4o',
      messages: [{ role: 'user', content: prompt }],
    }),
    extract: (data) => data?.choices?.[0]?.message?.content || '',
  },
  gemini: {
    // NOTE(review): the model name is baked into the URL, and body() ignores
    // the `model` argument — a configured --model has no effect for gemini.
    // Confirm whether this is intended or the URL should be model-templated.
    url: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent',
    headers: (apiKey) => ({ 'Content-Type': 'application/json', 'x-goog-api-key': apiKey }),
    body: (model, prompt) => ({
      contents: [{ parts: [{ text: prompt }] }],
    }),
    extract: (data) => data?.candidates?.[0]?.content?.parts?.[0]?.text || '',
  },
  ollama: {
    // Local Ollama daemon; no API key required.
    url: 'http://localhost:11434/api/generate',
    headers: () => ({ 'Content-Type': 'application/json' }),
    body: (model, prompt) => ({
      model: model || 'llama3',
      prompt,
      stream: false, // single JSON response instead of a token stream
    }),
    extract: (data) => data?.response || '',
  },
};
|
|
52
|
+
|
|
53
|
+
// ─── Prompt builder ───────────────────────────────────────────────────────────
|
|
54
|
+
|
|
55
|
+
// Hard cap on how much raw snippet text is embedded in a prompt.
const MAX_SNIPPET_CHARS = 500;

/**
 * Sanitize a raw code snippet before embedding it in an AI prompt.
 * Strips null bytes, collapses newlines (so injected line breaks cannot add
 * new top-level instruction lines), caps the length, and trims whitespace.
 *
 * @param {*} raw - anything; non-strings yield ''
 * @returns {string}
 */
function sanitizeSnippet(raw) {
  if (typeof raw !== 'string') return '';
  const withoutNulls = raw.replace(/\x00/g, '');
  const singleLine = withoutNulls.replace(/[\r\n]+/g, ' ');
  return singleLine.slice(0, MAX_SNIPPET_CHARS).trim();
}
|
|
70
|
+
|
|
71
|
+
/**
 * Build the Red-Green-Refactor remediation prompt for a single finding.
 * The snippet is passed through sanitizeSnippet first, so attacker-controlled
 * code cannot inject new instruction lines into the prompt.
 *
 * @param {object} finding - { name, severity, file, line, snippet }
 * @returns {string} a prompt instructing the model to answer with strict JSON
 *   of shape { exploitTest, patch, refactorChecks } (consumed by parseResponse)
 */
function buildRemediationPrompt(finding) {
  const snippet = sanitizeSnippet(finding.snippet);
  return `You are a security engineer applying the Red-Green-Refactor TDD remediation protocol.

VULNERABILITY FINDING:
- Type: ${finding.name}
- Severity: ${finding.severity}
- File: ${finding.file}
- Line: ${finding.line}
- Code snippet: <snippet>${snippet}</snippet>

TASK:
1. Write a Jest/supertest exploit test (Red phase) that proves this vulnerability exists.
   The test must be placed in __tests__/security/ and must FAIL before the fix.
2. Write the minimum code patch (Green phase) that closes the vulnerability.
   Show it as a unified diff against the original file.
3. Confirm what regression checks to run (Refactor phase).

Respond with valid JSON in exactly this shape:
{
  "exploitTest": {
    "filename": "__tests__/security/<slug>.test.js",
    "content": "<full test file content>"
  },
  "patch": {
    "filename": "<path to file being patched>",
    "diff": "<unified diff>"
  },
  "refactorChecks": ["<check 1>", "<check 2>"]
}`;
}
|
|
102
|
+
|
|
103
|
+
// ─── HTTP call ────────────────────────────────────────────────────────────────
|
|
104
|
+
|
|
105
|
+
/**
 * Send a prompt to the configured LLM backend and return the completion text.
 *
 * @param {string} provider - 'anthropic' | 'openai' | 'gemini' | 'ollama'
 * @param {string} apiKey
 * @param {string} model
 * @param {string} prompt
 * @param {string} [baseUrl] - override base URL for OpenAI-compatible providers
 *                             e.g. 'https://api.groq.com/openai/v1'
 *                                  'https://openrouter.ai/api/v1'
 *                                  'https://api.together.xyz/v1'
 * @returns {Promise<string>} extracted completion text ('' when absent)
 * @throws {Error} for an unknown provider or a non-2xx provider response
 */
async function callProvider(provider, apiKey, model, prompt, baseUrl) {
  const p = PROVIDERS[provider];
  if (!p) {
    const supported = Object.keys(PROVIDERS).join(', ');
    throw new Error(`Unknown provider "${provider}". Supported: ${supported}`);
  }

  // url may be a plain string or a function of the API key.
  let url = typeof p.url === 'function' ? p.url(apiKey) : p.url;
  if (baseUrl && p.openaiCompat) {
    // Any OpenAI-compatible service: strip trailing slash and append path.
    url = baseUrl.replace(/\/+$/, '') + '/chat/completions';
  }

  const res = await fetch(url, {
    method: 'POST',
    headers: p.headers(apiKey),
    body: JSON.stringify(p.body(model, prompt)),
  });
  if (!res.ok) {
    const text = await res.text().catch(() => '');
    throw new Error(`Provider ${provider} returned ${res.status}: ${text.slice(0, 200)}`);
  }

  const data = await res.json();
  return p.extract(data);
}
|
|
135
|
+
|
|
136
|
+
// ─── Parse model response ─────────────────────────────────────────────────────
|
|
137
|
+
|
|
138
|
+
/**
 * Extract and parse the JSON object from a raw model completion.
 * Models frequently wrap JSON in markdown fences or prose, so this grabs
 * the span from the first '{' to the last '}' and parses that.
 *
 * @param {string} text
 * @returns {object}
 * @throws {Error} when no JSON object is present (or it fails to parse)
 */
function parseResponse(text) {
  const match = text.match(/\{[\s\S]*\}/);
  if (match === null) {
    throw new Error('Model response did not contain a JSON object');
  }
  return JSON.parse(match[0]);
}
|
|
144
|
+
|
|
145
|
+
// ─── Main remediate function ──────────────────────────────────────────────────
|
|
146
|
+
|
|
147
|
+
/**
 * Run AI-powered remediation for a list of findings.
 *
 * Filters out likely false positives and anything below the severity
 * threshold, processes the rest most-severe-first, and never throws for an
 * individual finding — failures are recorded as { status: 'error' } entries.
 *
 * @param {object} opts
 * @param {Array} opts.findings - finding objects from quickScan
 * @param {string} opts.provider - 'anthropic' | 'openai' | 'gemini' | 'ollama'
 * @param {string} opts.apiKey
 * @param {string} [opts.model]
 * @param {string} [opts.baseUrl] - override base URL for OpenAI-compatible providers
 * @param {string} [opts.severity] - minimum severity to fix ('CRITICAL','HIGH','MEDIUM','LOW')
 * @returns {Promise<Array>} one result per targeted finding
 */
async function remediate({ findings, provider, apiKey, model, baseUrl, severity = 'LOW' }) {
  const ORDER = { CRITICAL: 0, HIGH: 1, MEDIUM: 2, LOW: 3 };
  const rank = (sev) => ORDER[sev] ?? 99; // unknown severities sort last / filter out
  const threshold = ORDER[severity.toUpperCase()] ?? 3;

  const targets = findings
    .filter((f) => !f.likelyFalsePositive && rank(f.severity) <= threshold)
    .sort((a, b) => rank(a.severity) - rank(b.severity));

  const results = [];
  // Sequential awaits on purpose — presumably to keep request volume within
  // provider rate limits; confirm before parallelizing.
  for (const finding of targets) {
    try {
      const prompt = buildRemediationPrompt(finding);
      const raw = await callProvider(provider, apiKey, model, prompt, baseUrl);
      results.push({ finding, status: 'remediated', ...parseResponse(raw) });
    } catch (err) {
      results.push({ finding, status: 'error', error: err.message });
    }
  }
  return results;
}
|
|
180
|
+
|
|
181
|
+
module.exports = { remediate, callProvider, buildRemediationPrompt, PROVIDERS };
|
package/lib/reporter.js
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const { version } = require('../package.json');
|
|
4
|
+
|
|
5
|
+
// ─── JSON ─────────────────────────────────────────────────────────────────────
|
|
6
|
+
|
|
7
|
+
/**
 * Return findings as a structured JSON-serialisable object.
 * Likely false positives are split out of the main list, and a per-severity
 * count is computed over the remaining ("real") findings only.
 *
 * @param {Array} findings
 * @param {string[]} [exempted=[]]
 * @returns {object}
 */
function toJson(findings, exempted = []) {
  const real = [];
  const noisy = [];
  for (const f of findings) {
    (f.likelyFalsePositive ? noisy : real).push(f);
  }

  const summary = { CRITICAL: 0, HIGH: 0, MEDIUM: 0, LOW: 0 };
  for (const f of real) {
    summary[f.severity] = (summary[f.severity] || 0) + 1;
  }

  return {
    version,
    summary,
    findings: real,
    likelyFalsePositives: noisy,
    exempted,
    scannedAt: new Date().toISOString(),
  };
}
|
|
29
|
+
|
|
30
|
+
// ─── SARIF ────────────────────────────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
// Maps our severity labels onto SARIF result levels (SARIF has no 'critical').
const SARIF_LEVEL = { CRITICAL: 'error', HIGH: 'error', MEDIUM: 'warning', LOW: 'note' };

// Maps our vuln names to CWE IDs for richer GitHub annotations
// (names with no entry get a placeholder helpUri in toSarif).
const CWE_MAP = {
  'SQL Injection': 'CWE-89',
  'Command Injection': 'CWE-78',
  'Path Traversal': 'CWE-22',
  'XSS': 'CWE-79',
  'IDOR': 'CWE-639',
  'Broken Auth': 'CWE-287',
  'Hardcoded Secret': 'CWE-798',
  'SSRF': 'CWE-918',
  'Open Redirect': 'CWE-601',
  'NoSQL Injection': 'CWE-943',
  'Mass Assignment': 'CWE-915',
  'Prototype Pollution': 'CWE-1321',
  'Weak Crypto': 'CWE-327',
  'Insecure Deserialization': 'CWE-502',
  'TLS Bypass': 'CWE-295',
  'Sensitive Storage': 'CWE-312',
  'JWT Alg None': 'CWE-347',
  'Secret Fallback': 'CWE-798',
  'eval() Injection': 'CWE-95',
  'Template Injection': 'CWE-94',
  'ReDoS': 'CWE-1333',
  'XXE': 'CWE-611',
  'CORS Wildcard': 'CWE-942',
  'Insecure Random': 'CWE-338',
  'Timing-Unsafe Comparison': 'CWE-208',
};
|
|
62
|
+
|
|
63
|
+
/**
 * Return findings as a SARIF 2.1.0 object (GitHub code scanning compatible).
 * One rule per distinct finding name (created lazily, in first-seen order);
 * likely false positives are excluded from the results.
 *
 * @param {Array} findings
 * @param {string} [projectDir=''] - used to build relative artifact URIs
 * @returns {object}
 */
function toSarif(findings, projectDir = '') {
  const rules = [];
  const ruleIndex = {};

  // Register a rule for this finding name (once) and return its index.
  const ensureRule = (f) => {
    if (ruleIndex[f.name] !== undefined) return ruleIndex[f.name];
    ruleIndex[f.name] = rules.length;
    const cwe = CWE_MAP[f.name];
    const rule = {
      id: f.name.replace(/\s+/g, '-').replace(/[()]/g, '').toLowerCase(),
      name: f.name,
      shortDescription: { text: f.name },
      fullDescription: { text: `${f.name} detected — severity: ${f.severity}` },
      defaultConfiguration: { level: SARIF_LEVEL[f.severity] || 'warning' },
    };
    if (cwe) {
      rule.relationships = [{ target: { id: cwe, toolComponent: { name: 'CWE' } } }];
    }
    rule.helpUri = `https://cwe.mitre.org/data/definitions/${cwe ? cwe.replace('CWE-', '') : '0'}.html`;
    rules.push(rule);
    return ruleIndex[f.name];
  };

  const results = [];
  for (const f of findings) {
    if (f.likelyFalsePositive) continue;
    const idx = ensureRule(f);
    results.push({
      ruleId: rules[idx].id,
      ruleIndex: idx,
      level: SARIF_LEVEL[f.severity] || 'warning',
      message: { text: f.snippet || f.name },
      locations: [{
        physicalLocation: {
          artifactLocation: {
            uri: f.file.replace(/\\/g, '/'), // SARIF URIs are forward-slash
            uriBaseId: '%SRCROOT%',
          },
          region: { startLine: f.line },
        },
      }],
    });
  }

  return {
    $schema: 'https://json.schemastore.org/sarif-2.1.0.json',
    version: '2.1.0',
    runs: [{
      tool: {
        driver: {
          name: '@lhi/tdd-audit',
          version,
          informationUri: 'https://www.npmjs.com/package/@lhi/tdd-audit',
          rules,
        },
      },
      results,
    }],
  };
}
|
|
121
|
+
|
|
122
|
+
// ─── Text (existing printFindings extracted for reuse) ────────────────────────
|
|
123
|
+
|
|
124
|
+
/**
 * Return a human-readable text report string (without printing it).
 * Real findings are grouped by severity (CRITICAL first); likely false
 * positives and exempted files are listed in trailing sections.
 *
 * @param {Array} findings
 * @param {string[]} [exempted=[]]
 * @returns {string}
 */
function toText(findings, exempted = []) {
  const out = [];

  if (findings.length === 0) {
    out.push(' ✅ No obvious vulnerability patterns detected.\n');
  } else {
    const real = findings.filter((f) => !f.likelyFalsePositive);
    const noisy = findings.filter((f) => f.likelyFalsePositive);

    // Unknown severities fall into the LOW bucket.
    const bySeverity = { CRITICAL: [], HIGH: [], MEDIUM: [], LOW: [] };
    for (const f of real) {
      (bySeverity[f.severity] || bySeverity.LOW).push(f);
    }
    const icons = { CRITICAL: '🔴', HIGH: '🟠', MEDIUM: '🟡', LOW: '🔵' };

    const noisyNote = noisy.length ? ` (+${noisy.length} in test files — see below)` : '';
    out.push(`\n Found ${real.length} potential issue(s)${noisyNote}:\n`);

    for (const [sev, list] of Object.entries(bySeverity)) {
      for (const f of list) {
        const badge = f.inTestFile ? ' [test file]' : '';
        out.push(` ${icons[sev]} [${sev}] ${f.name} — ${f.file}:${f.line}${badge}`);
        out.push(`    ${f.snippet}`);
      }
    }

    if (noisy.length) {
      out.push('\n ⚪ Likely intentional (in test files — verify manually):');
      for (const f of noisy) {
        out.push(`    ${f.name} — ${f.file}:${f.line}`);
      }
    }
    out.push('\n Run /tdd-audit in your agent to remediate.\n');
  }

  if (exempted.length) {
    out.push(' ⚠️ Files skipped via audit_status:safe (verify these exemptions are intentional):');
    for (const p of exempted) {
      out.push(`    ${p}`);
    }
    out.push('');
  }

  return out.join('\n');
}
|
|
163
|
+
|
|
164
|
+
module.exports = { toJson, toSarif, toText };
|