@lhi/tdd-audit 1.8.4 → 1.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -2
- package/docs/ai-remediation.md +107 -0
- package/docs/rest-api.md +188 -0
- package/index.js +35 -7
- package/lib/config.js +76 -0
- package/lib/github.js +93 -0
- package/lib/remediator.js +148 -0
- package/lib/reporter.js +164 -0
- package/lib/server.js +181 -0
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# @lhi/tdd-audit
|
|
2
2
|
|
|
3
|
-
> **v1.
|
|
3
|
+
> **v1.9.0** — Security skill installer for **Claude Code, Gemini CLI, Cursor, Codex, and OpenCode**. Patches vulnerabilities using a Red-Green-Refactor exploit-test protocol — prove the hole exists, apply the fix, prove it's closed.
|
|
4
4
|
|
|
5
5
|
## Install
|
|
6
6
|
|
|
@@ -41,11 +41,53 @@ On first run the installer:
|
|
|
41
41
|
|
|
42
42
|
The agent detects your stack, presents a CRITICAL → LOW findings report, waits for confirmation, then works through each vulnerability one at a time using Red-Green-Refactor. Pass `--scan` for a report-only run with no code changes.
|
|
43
43
|
|
|
44
|
+
## REST API + AI remediation
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
# Start the API server
|
|
48
|
+
npx @lhi/tdd-audit serve --port 3000 --api-key YOUR_SECRET
|
|
49
|
+
|
|
50
|
+
# Scan any path → JSON
|
|
51
|
+
curl -X POST http://localhost:3000/scan \
|
|
52
|
+
-H "Authorization: Bearer YOUR_SECRET" \
|
|
53
|
+
-d '{"path": "."}' | jq '.summary'
|
|
54
|
+
|
|
55
|
+
# Auto-fix with any AI provider
|
|
56
|
+
npx @lhi/tdd-audit --scan --fix critical \
|
|
57
|
+
--provider anthropic --api-key $ANTHROPIC_API_KEY --json
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
Supported providers: `anthropic` · `openai` · `gemini` · `ollama` (local)
|
|
61
|
+
|
|
62
|
+
## Output formats
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
npx @lhi/tdd-audit --scan --json # structured JSON
|
|
66
|
+
npx @lhi/tdd-audit --scan --format sarif # GitHub code scanning (inline PR annotations)
|
|
67
|
+
npx @lhi/tdd-audit --scan # human-readable text (default)
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## Config file
|
|
71
|
+
|
|
72
|
+
`.tdd-audit.json` in your project root — all CLI flags can be set here:
|
|
73
|
+
|
|
74
|
+
```json
|
|
75
|
+
{
|
|
76
|
+
"port": 3000,
|
|
77
|
+
"output": "json",
|
|
78
|
+
"provider": "anthropic",
|
|
79
|
+
"apiKeyEnv": "ANTHROPIC_API_KEY",
|
|
80
|
+
"severityThreshold": "HIGH"
|
|
81
|
+
}
|
|
82
|
+
```
|
|
83
|
+
|
|
44
84
|
## Documentation
|
|
45
85
|
|
|
46
86
|
| | |
|
|
47
87
|
|---|---|
|
|
48
|
-
| [
|
|
88
|
+
| [REST API](docs/rest-api.md) | Endpoints, auth, request/response schema, curl examples |
|
|
89
|
+
| [AI Remediation](docs/ai-remediation.md) | Provider setup, CLI flags, Ollama local mode |
|
|
90
|
+
| [Scanner](docs/scanner.md) | Architecture, detection logic, false-positive handling |
|
|
49
91
|
| [Vulnerability Patterns](docs/vulnerability-patterns.md) | All 34 patterns — descriptions, grep signatures, fix pointers |
|
|
50
92
|
| [TDD Protocol](docs/tdd-protocol.md) | Red-Green-Refactor in full, with framework templates for all 6 stacks |
|
|
51
93
|
| [Agentic AI Security](docs/agentic-ai-security.md) | ASI01–ASI10 — prompt injection, MCP supply chain, Actions injection |
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# AI Remediation
|
|
2
|
+
|
|
3
|
+
Pass a provider and API key to have tdd-audit autonomously generate exploit tests, patches, and regression checks for each finding — no agent required.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## CLI usage
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# Scan and auto-fix all CRITICAL findings via Anthropic
|
|
11
|
+
npx @lhi/tdd-audit --scan --fix critical \
|
|
12
|
+
--provider anthropic \
|
|
13
|
+
--api-key $ANTHROPIC_API_KEY
|
|
14
|
+
|
|
15
|
+
# Fix everything, use a specific model
|
|
16
|
+
npx @lhi/tdd-audit --scan --fix all \
|
|
17
|
+
--provider openai \
|
|
18
|
+
--model gpt-4o \
|
|
19
|
+
--api-key $OPENAI_API_KEY \
|
|
20
|
+
--json
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
## REST API usage
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
# 1. Scan and get findings
|
|
27
|
+
FINDINGS=$(curl -s -X POST http://localhost:3000/scan \
|
|
28
|
+
-H "Authorization: Bearer $SERVER_KEY" \
|
|
29
|
+
-H "Content-Type: application/json" \
|
|
30
|
+
-d '{"path": "."}' | jq '.findings')
|
|
31
|
+
|
|
32
|
+
# 2. Submit remediation job
|
|
33
|
+
JOB=$(curl -s -X POST http://localhost:3000/remediate \
|
|
34
|
+
-H "Authorization: Bearer $SERVER_KEY" \
|
|
35
|
+
-H "Content-Type: application/json" \
|
|
36
|
+
-d "{\"findings\": $FINDINGS, \"provider\": \"anthropic\", \"apiKey\": \"$ANTHROPIC_API_KEY\", \"severity\": \"HIGH\"}")
|
|
37
|
+
|
|
38
|
+
JOB_ID=$(echo $JOB | jq -r '.jobId')
|
|
39
|
+
|
|
40
|
+
# 3. Poll for results
|
|
41
|
+
curl -s "http://localhost:3000/jobs/$JOB_ID" \
|
|
42
|
+
-H "Authorization: Bearer $SERVER_KEY" | jq '.status'
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
## Supported providers
|
|
48
|
+
|
|
49
|
+
| Provider | `--provider` | Default model | Key env var |
|
|
50
|
+
|---|---|---|---|
|
|
51
|
+
| Anthropic | `anthropic` | `claude-opus-4-6` | `ANTHROPIC_API_KEY` |
|
|
52
|
+
| OpenAI | `openai` | `gpt-4o` | `OPENAI_API_KEY` |
|
|
53
|
+
| Google Gemini | `gemini` | `gemini-2.0-flash` | `GEMINI_API_KEY` |
|
|
54
|
+
| Ollama (local) | `ollama` | `llama3` | — |
|
|
55
|
+
|
|
56
|
+
---
|
|
57
|
+
|
|
58
|
+
## Config file
|
|
59
|
+
|
|
60
|
+
```json
|
|
61
|
+
{
|
|
62
|
+
"provider": "anthropic",
|
|
63
|
+
"model": "claude-opus-4-6",
|
|
64
|
+
"apiKeyEnv": "ANTHROPIC_API_KEY"
|
|
65
|
+
}
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
`apiKeyEnv` lets you name the environment variable to read the key from, so no key is ever written to disk.
|
|
69
|
+
|
|
70
|
+
---
|
|
71
|
+
|
|
72
|
+
## What the model returns
|
|
73
|
+
|
|
74
|
+
For each finding the remediator sends a structured prompt and expects back:
|
|
75
|
+
|
|
76
|
+
```json
|
|
77
|
+
{
|
|
78
|
+
"exploitTest": {
|
|
79
|
+
"filename": "__tests__/security/xss-comments.test.js",
|
|
80
|
+
"content": "..."
|
|
81
|
+
},
|
|
82
|
+
"patch": {
|
|
83
|
+
"filename": "src/routes/comments.js",
|
|
84
|
+
"diff": "--- a/src/routes/comments.js\n+++ ..."
|
|
85
|
+
},
|
|
86
|
+
"refactorChecks": ["npm test", "npm run test:security"]
|
|
87
|
+
}
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
The result is returned as-is from the API — review and apply patches manually or pipe into your own automation.
|
|
91
|
+
|
|
92
|
+
---
|
|
93
|
+
|
|
94
|
+
## Ollama (fully local / air-gapped)
|
|
95
|
+
|
|
96
|
+
```bash
|
|
97
|
+
# Start Ollama with a code model
|
|
98
|
+
ollama pull codellama
|
|
99
|
+
ollama serve
|
|
100
|
+
|
|
101
|
+
# Run tdd-audit against it
|
|
102
|
+
npx @lhi/tdd-audit --scan --fix high \
|
|
103
|
+
--provider ollama \
|
|
104
|
+
--model codellama
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
No API key required. Ollama must be running on `http://localhost:11434`.
|
package/docs/rest-api.md
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
# REST API
|
|
2
|
+
|
|
3
|
+
`tdd-audit serve` turns the scanner into an authenticated HTTP API. Use it to integrate vulnerability scanning into dashboards, CI pipelines, bots, or any tooling that speaks JSON.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Start the server
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
npx @lhi/tdd-audit serve --port 3000 --api-key YOUR_SECRET
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
Or via config file (`.tdd-audit.json` in your project root):
|
|
14
|
+
|
|
15
|
+
```json
|
|
16
|
+
{
|
|
17
|
+
"port": 3000,
|
|
18
|
+
"serverApiKey": "YOUR_SECRET",
|
|
19
|
+
"output": "json"
|
|
20
|
+
}
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
If `--api-key` / `serverApiKey` is omitted the server starts unauthenticated with a warning. Always set one in production.
|
|
24
|
+
|
|
25
|
+
---
|
|
26
|
+
|
|
27
|
+
## Authentication
|
|
28
|
+
|
|
29
|
+
All endpoints except `GET /health` require:
|
|
30
|
+
|
|
31
|
+
```
|
|
32
|
+
Authorization: Bearer YOUR_SECRET
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
Missing or wrong key → `401 Unauthorized`.
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## Endpoints
|
|
40
|
+
|
|
41
|
+
### `GET /health`
|
|
42
|
+
|
|
43
|
+
No auth required.
|
|
44
|
+
|
|
45
|
+
```json
|
|
46
|
+
{ "status": "ok", "version": "1.9.0" }
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
---
|
|
50
|
+
|
|
51
|
+
### `POST /scan`
|
|
52
|
+
|
|
53
|
+
Scan a local path and return structured findings.
|
|
54
|
+
|
|
55
|
+
**Request**
|
|
56
|
+
```json
|
|
57
|
+
{
|
|
58
|
+
"path": ".",
|
|
59
|
+
"format": "json"
|
|
60
|
+
}
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
| Field | Type | Default | Description |
|
|
64
|
+
|---|---|---|---|
|
|
65
|
+
| `path` | string | cwd | Absolute or relative path to scan. Must be inside cwd. |
|
|
66
|
+
| `format` | `"json"` \| `"sarif"` | `"json"` | Output format |
|
|
67
|
+
|
|
68
|
+
**Response — JSON format**
|
|
69
|
+
```json
|
|
70
|
+
{
|
|
71
|
+
"version": "1.9.0",
|
|
72
|
+
"summary": { "CRITICAL": 1, "HIGH": 3, "MEDIUM": 1, "LOW": 0 },
|
|
73
|
+
"findings": [ ... ],
|
|
74
|
+
"likelyFalsePositives": [ ... ],
|
|
75
|
+
"exempted": [],
|
|
76
|
+
"scannedAt": "2026-03-25T12:00:00.000Z",
|
|
77
|
+
"duration": 42
|
|
78
|
+
}
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
**Response — SARIF format**
|
|
82
|
+
|
|
83
|
+
Returns a SARIF 2.1.0 object ready to upload to GitHub code scanning.
|
|
84
|
+
|
|
85
|
+
**Errors**
|
|
86
|
+
| Status | Reason |
|
|
87
|
+
|---|---|
|
|
88
|
+
| 400 | Missing path, path traversal attempt, or invalid JSON body |
|
|
89
|
+
| 401 | Missing or invalid API key |
|
|
90
|
+
|
|
91
|
+
---
|
|
92
|
+
|
|
93
|
+
### `POST /remediate`
|
|
94
|
+
|
|
95
|
+
Queue an AI-powered remediation job. Returns immediately with a `jobId`; poll `/jobs/:id` for results.
|
|
96
|
+
|
|
97
|
+
**Request**
|
|
98
|
+
```json
|
|
99
|
+
{
|
|
100
|
+
"findings": [ ... ],
|
|
101
|
+
"provider": "anthropic",
|
|
102
|
+
"apiKey": "sk-ant-...",
|
|
103
|
+
"model": "claude-opus-4-6",
|
|
104
|
+
"severity": "HIGH"
|
|
105
|
+
}
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
| Field | Required | Description |
|
|
109
|
+
|---|---|---|
|
|
110
|
+
| `findings` | yes | Array of finding objects from `POST /scan` |
|
|
111
|
+
| `provider` | yes | `anthropic` \| `openai` \| `gemini` \| `ollama` |
|
|
112
|
+
| `apiKey` | yes | Provider API key |
|
|
113
|
+
| `model` | no | Defaults per provider (see [AI Remediation](ai-remediation.md)) |
|
|
114
|
+
| `severity` | no | Minimum severity to fix. Default: `LOW` (fix all) |
|
|
115
|
+
|
|
116
|
+
**Response**
|
|
117
|
+
```json
|
|
118
|
+
{ "jobId": "job_1_1711363200000" }
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
---
|
|
122
|
+
|
|
123
|
+
### `GET /jobs/:id`
|
|
124
|
+
|
|
125
|
+
Poll for remediation job status.
|
|
126
|
+
|
|
127
|
+
**Response — pending**
|
|
128
|
+
```json
|
|
129
|
+
{ "id": "job_1_...", "status": "pending", "createdAt": "..." }
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
**Response — done**
|
|
133
|
+
```json
|
|
134
|
+
{
|
|
135
|
+
"id": "job_1_...",
|
|
136
|
+
"status": "done",
|
|
137
|
+
"createdAt": "...",
|
|
138
|
+
"startedAt": "...",
|
|
139
|
+
"completedAt": "...",
|
|
140
|
+
"results": [
|
|
141
|
+
{
|
|
142
|
+
"finding": { ... },
|
|
143
|
+
"status": "remediated",
|
|
144
|
+
"exploitTest": { "filename": "__tests__/security/xss.test.js", "content": "..." },
|
|
145
|
+
"patch": { "filename": "src/app.js", "diff": "..." },
|
|
146
|
+
"refactorChecks": ["npm test", "npm run test:security"]
|
|
147
|
+
}
|
|
148
|
+
]
|
|
149
|
+
}
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
---
|
|
153
|
+
|
|
154
|
+
## Example: scan from curl
|
|
155
|
+
|
|
156
|
+
```bash
|
|
157
|
+
# Start server
|
|
158
|
+
npx @lhi/tdd-audit serve --port 3000 --api-key mysecret &
|
|
159
|
+
|
|
160
|
+
# Scan current directory
|
|
161
|
+
curl -s -X POST http://localhost:3000/scan \
|
|
162
|
+
-H "Authorization: Bearer mysecret" \
|
|
163
|
+
-H "Content-Type: application/json" \
|
|
164
|
+
-d '{"path": "."}' | jq '.summary'
|
|
165
|
+
|
|
166
|
+
# Get SARIF for GitHub upload
|
|
167
|
+
curl -s -X POST http://localhost:3000/scan \
|
|
168
|
+
-H "Authorization: Bearer mysecret" \
|
|
169
|
+
-H "Content-Type: application/json" \
|
|
170
|
+
-d '{"path": ".", "format": "sarif"}' > results.sarif
|
|
171
|
+
```
|
|
172
|
+
|
|
173
|
+
---
|
|
174
|
+
|
|
175
|
+
## Example: scan from Node.js
|
|
176
|
+
|
|
177
|
+
```javascript
|
|
178
|
+
const res = await fetch('http://localhost:3000/scan', {
|
|
179
|
+
method: 'POST',
|
|
180
|
+
headers: {
|
|
181
|
+
'Authorization': 'Bearer mysecret',
|
|
182
|
+
'Content-Type': 'application/json',
|
|
183
|
+
},
|
|
184
|
+
body: JSON.stringify({ path: '/path/to/project' }),
|
|
185
|
+
});
|
|
186
|
+
const { findings, summary } = await res.json();
|
|
187
|
+
console.log(`CRITICAL: ${summary.CRITICAL} HIGH: ${summary.HIGH}`);
|
|
188
|
+
```
|
package/index.js
CHANGED
|
@@ -11,13 +11,24 @@ const {
|
|
|
11
11
|
quickScan,
|
|
12
12
|
printFindings,
|
|
13
13
|
} = require('./lib/scanner');
|
|
14
|
+
const { toJson, toSarif, toText } = require('./lib/reporter');
|
|
14
15
|
|
|
15
16
|
const args = process.argv.slice(2);
|
|
16
|
-
const isLocal
|
|
17
|
-
const isClaude
|
|
17
|
+
const isLocal = args.includes('--local');
|
|
18
|
+
const isClaude = args.includes('--claude');
|
|
18
19
|
const withHooks = args.includes('--with-hooks');
|
|
19
|
-
const skipScan
|
|
20
|
-
const scanOnly
|
|
20
|
+
const skipScan = args.includes('--skip-scan');
|
|
21
|
+
const scanOnly = args.includes('--scan-only') || args.includes('--scan');
|
|
22
|
+
const isServe = args[0] === 'serve';
|
|
23
|
+
|
|
24
|
+
// --json or --format json → structured JSON output
|
|
25
|
+
// --format sarif → SARIF 2.1.0 output
|
|
26
|
+
const formatIdx = args.indexOf('--format');
|
|
27
|
+
const formatArg = formatIdx !== -1 ? args[formatIdx + 1] : null;
|
|
28
|
+
const outputFormat = args.includes('--json') ? 'json'
|
|
29
|
+
: formatArg === 'sarif' ? 'sarif'
|
|
30
|
+
: formatArg === 'json' ? 'json'
|
|
31
|
+
: 'text';
|
|
21
32
|
|
|
22
33
|
const agentBaseDir = isLocal ? process.cwd() : os.homedir();
|
|
23
34
|
const agentDirName = isClaude ? '.claude' : '.agents';
|
|
@@ -33,13 +44,30 @@ const framework = detectFramework(projectDir);
|
|
|
33
44
|
const testBaseDir = detectTestBaseDir(projectDir, framework);
|
|
34
45
|
const targetTestDir = path.join(projectDir, testBaseDir, 'security');
|
|
35
46
|
|
|
47
|
+
// ─── Serve mode early exit ────────────────────────────────────────────────────
|
|
48
|
+
|
|
49
|
+
if (isServe) {
|
|
50
|
+
require('./lib/server').start(args);
|
|
51
|
+
return; // server stays alive — do not fall through to installer
|
|
52
|
+
}
|
|
53
|
+
|
|
36
54
|
// ─── Scan-only early exit ─────────────────────────────────────────────────────
|
|
37
55
|
|
|
38
56
|
if (scanOnly) {
|
|
39
|
-
process.stdout.write('\n🔍 Scanning
|
|
57
|
+
if (outputFormat !== 'text') process.stdout.write('\n🔍 Scanning...\n');
|
|
58
|
+
else process.stdout.write('\n🔍 Scanning for vulnerability patterns...');
|
|
40
59
|
const findings = quickScan(projectDir);
|
|
41
|
-
|
|
42
|
-
|
|
60
|
+
const exempted = findings.exempted || [];
|
|
61
|
+
if (outputFormat === 'json') {
|
|
62
|
+
process.stdout.write('\n');
|
|
63
|
+
console.log(JSON.stringify(toJson(findings, exempted), null, 2));
|
|
64
|
+
} else if (outputFormat === 'sarif') {
|
|
65
|
+
process.stdout.write('\n');
|
|
66
|
+
console.log(JSON.stringify(toSarif(findings, projectDir), null, 2));
|
|
67
|
+
} else {
|
|
68
|
+
process.stdout.write('\n');
|
|
69
|
+
printFindings(findings, exempted);
|
|
70
|
+
}
|
|
43
71
|
process.exit(0);
|
|
44
72
|
}
|
|
45
73
|
|
package/lib/config.js
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const fs = require('fs');
|
|
4
|
+
const path = require('path');
|
|
5
|
+
|
|
6
|
+
// Name of the per-project config file looked up in the scan root.
const CONFIG_FILE = '.tdd-audit.json';

// Baseline configuration. File config and explicit CLI flags are layered on
// top of these values by loadConfig() — file beats defaults, CLI beats file.
const DEFAULTS = {
  port: 3000,
  output: 'text', // 'text' | 'json' | 'sarif'
  severityThreshold:'LOW', // minimum severity to include in output
  ignore: [], // path prefixes to skip
  provider: null, // 'anthropic' | 'openai' | 'gemini' | 'ollama'
  model: null,
  apiKey: null,
  apiKeyEnv: null, // env var name to read the key from
  serverApiKey: null, // key required on REST API calls
};
|
|
19
|
+
|
|
20
|
+
/**
 * Build the effective configuration for a run.
 *
 * Precedence (lowest to highest): DEFAULTS → `.tdd-audit.json` in `cwd` →
 * explicitly-provided CLI overrides. A malformed config file is reported on
 * stderr and otherwise ignored rather than aborting the run.
 *
 * @param {string} [cwd=process.cwd()] - directory searched for the config file
 * @param {object} [cliOverrides={}] - flags already parsed from argv; only
 *   keys with non-null/undefined values take effect
 * @returns {object} merged configuration object
 */
function loadConfig(cwd = process.cwd(), cliOverrides = {}) {
  const filePath = path.join(cwd, CONFIG_FILE);

  let fileConfig = {};
  if (fs.existsSync(filePath)) {
    try {
      fileConfig = JSON.parse(fs.readFileSync(filePath, 'utf8'));
    } catch (err) {
      process.stderr.write(`⚠️ Could not parse ${CONFIG_FILE}: ${err.message}\n`);
    }
  }

  const merged = { ...DEFAULTS, ...fileConfig };

  // Layer CLI flags last; skip keys the caller never actually supplied.
  for (const [key, val] of Object.entries(cliOverrides)) {
    if (val !== undefined && val !== null) merged[key] = val;
  }

  // When no key was passed directly, fall back to the named environment
  // variable so the key never has to be written to disk.
  if (!merged.apiKey && merged.apiKeyEnv) {
    merged.apiKey = process.env[merged.apiKeyEnv] || null;
  }

  return merged;
}
|
|
54
|
+
|
|
55
|
+
/**
 * Parse the subset of CLI args that map onto config keys.
 *
 * Note: `--api-key` deliberately feeds BOTH `apiKey` (provider key used for
 * AI remediation) and `serverApiKey` (key required on REST calls in serve
 * mode); which one is consumed depends on the mode the CLI runs in.
 *
 * Fix vs. previous version: `--api-key` was looked up twice with two
 * identical `get()` calls; a single lookup now populates both keys.
 *
 * @param {string[]} args - process.argv.slice(2)
 * @returns {object} overrides suitable for loadConfig()
 */
function parseCliOverrides(args) {
  // Value following `--flag`, or undefined when the flag is absent.
  const get = (flag) => {
    const i = args.indexOf(flag);
    return i !== -1 ? args[i + 1] : undefined;
  };

  const overrides = {};

  const port = get('--port');
  if (port) overrides.port = Number(port);

  const provider = get('--provider');
  if (provider) overrides.provider = provider;

  const model = get('--model');
  if (model) overrides.model = model;

  const apiKey = get('--api-key');
  if (apiKey) {
    overrides.apiKey = apiKey;
    overrides.serverApiKey = apiKey;
  }

  const format = get('--format');
  if (format) overrides.output = format;

  // `--json` is shorthand for `--format json` and wins over `--format`.
  if (args.includes('--json')) overrides.output = 'json';

  return overrides;
}
|
|
75
|
+
|
|
76
|
+
module.exports = { loadConfig, parseCliOverrides, DEFAULTS, CONFIG_FILE };
|
package/lib/github.js
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// ─── GitHub REST helpers ──────────────────────────────────────────────────────
|
|
4
|
+
|
|
5
|
+
/**
 * Minimal authenticated caller for the GitHub REST API.
 *
 * @param {string} path - API path beginning with "/"
 * @param {string} token - bearer token placed in the Authorization header
 * @param {string} [method='GET']
 * @param {object|null} [body=null] - JSON-serialised when provided
 * @returns {Promise<object|null>} parsed JSON, or null on a 204 response
 * @throws {Error} on any non-2xx status (message includes status + body head)
 */
async function ghFetch(path, token, method = 'GET', body = null) {
  const headers = {
    'Accept': 'application/vnd.github+json',
    'Authorization': `Bearer ${token}`,
    'X-GitHub-Api-Version': '2022-11-28',
    'Content-Type': 'application/json',
  };
  const opts = body ? { method, headers, body: JSON.stringify(body) } : { method, headers };

  const res = await fetch(`https://api.github.com${path}`, opts);
  if (res.ok) {
    // 204 carries no body; anything else is JSON.
    return res.status === 204 ? null : res.json();
  }

  const text = await res.text().catch(() => '');
  throw new Error(`GitHub API ${method} ${path} → ${res.status}: ${text.slice(0, 200)}`);
}
|
|
23
|
+
|
|
24
|
+
// ─── SARIF upload ─────────────────────────────────────────────────────────────
|
|
25
|
+
|
|
26
|
+
/**
 * Upload a SARIF report to GitHub code scanning so findings appear inline
 * in PRs and under the repository's Security tab.
 *
 * @param {object} opts
 * @param {string} opts.owner
 * @param {string} opts.repo
 * @param {string} opts.token - GitHub token with `security_events` write scope
 * @param {string} opts.ref - full git ref, e.g. "refs/heads/main"
 * @param {string} opts.commitSha
 * @param {object} opts.sarif - SARIF 2.1.0 object from toSarif()
 * @returns {Promise<object>} GitHub's upload-receipt response
 */
async function uploadSarif({ owner, repo, token, ref, commitSha, sarif }) {
  // The code-scanning endpoint expects the SARIF document base64-encoded.
  const payload = {
    ref,
    commit_sha: commitSha,
    sarif: Buffer.from(JSON.stringify(sarif)).toString('base64'),
    tool_name: '@lhi/tdd-audit',
  };
  return ghFetch(`/repos/${owner}/${repo}/code-scanning/sarifs`, token, 'POST', payload);
}
|
|
48
|
+
|
|
49
|
+
// ─── PR review comments ───────────────────────────────────────────────────────
|
|
50
|
+
|
|
51
|
+
/**
 * Post one inline PR review comment per real finding. When any finding is
 * CRITICAL or HIGH the review requests changes; otherwise it only comments.
 * Findings flagged as likely false positives are skipped entirely.
 *
 * NOTE(review): each comment uses `f.file`/`f.line` verbatim — GitHub rejects
 * comments on lines outside the PR diff; presumably callers pre-filter. Confirm.
 *
 * @param {object} opts
 * @param {string} opts.owner
 * @param {string} opts.repo
 * @param {number} opts.pull_number
 * @param {string} opts.token
 * @param {string} opts.commitSha - head SHA of the PR
 * @param {Array} opts.findings
 * @returns {Promise<object|null>} GitHub review object, or null when there
 *   is nothing to report
 */
async function postReviewComments({ owner, repo, pull_number, token, commitSha, findings }) {
  const real = findings.filter((f) => !f.likelyFalsePositive);
  if (real.length === 0) return null;

  const hasCritical = real.some((f) => f.severity === 'CRITICAL' || f.severity === 'HIGH');

  const comments = real.map((f) => ({
    path: f.file,
    line: f.line,
    side: 'RIGHT',
    body: `**[${f.severity}] ${f.name}**\n\`\`\`\n${f.snippet}\n\`\`\`\nRun \`/tdd-audit\` to remediate.`,
  }));

  const review = {
    commit_id: commitSha,
    body: `**@lhi/tdd-audit** found ${real.length} issue(s). ${hasCritical ? 'CRITICAL/HIGH findings require changes.' : 'See inline comments.'}`,
    event: hasCritical ? 'REQUEST_CHANGES' : 'COMMENT',
    comments,
  };
  return ghFetch(`/repos/${owner}/${repo}/pulls/${pull_number}/reviews`, token, 'POST', review);
}
|
|
84
|
+
|
|
85
|
+
// ─── Parse "owner/repo" helper ────────────────────────────────────────────────
|
|
86
|
+
|
|
87
|
+
/**
 * Split an "owner/repo" string into its two parts.
 * Segments past the second are ignored; a missing segment is an error.
 *
 * @param {string} repoStr
 * @returns {{owner: string, repo: string}}
 * @throws {Error} when either part is missing or empty
 */
function parseRepo(repoStr) {
  const parts = (repoStr || '').split('/');
  const owner = parts[0];
  const repo = parts[1];
  if (!owner || !repo) throw new Error('--repo must be in "owner/repo" format');
  return { owner, repo };
}
|
|
92
|
+
|
|
93
|
+
module.exports = { uploadSarif, postReviewComments, parseRepo };
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
// ─── Provider endpoints ───────────────────────────────────────────────────────
|
|
4
|
+
|
|
5
|
+
// Per-provider request adapters. Each entry supplies:
//   url     - endpoint string, or a function of (apiKey, model) for providers
//             that encode either in the URL
//   headers - (apiKey) => request headers
//   body    - (model, prompt) => JSON request body; applies the provider's
//             default model when none is given
//   extract - (responseJson) => reply text ('' when the shape is unexpected)
//
// Fix vs. previous version: the gemini URL hard-coded `gemini-2.0-flash`,
// silently ignoring a caller-supplied --model. The url builder now accepts an
// optional model and falls back to the same default, so one-argument callers
// behave exactly as before.
const PROVIDERS = {
  anthropic: {
    url: 'https://api.anthropic.com/v1/messages',
    headers: (apiKey) => ({
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
    }),
    body: (model, prompt) => ({
      model: model || 'claude-opus-4-6',
      max_tokens: 8192,
      messages: [{ role: 'user', content: prompt }],
    }),
    extract: (data) => data?.content?.[0]?.text || '',
  },
  openai: {
    url: 'https://api.openai.com/v1/chat/completions',
    headers: (apiKey) => ({
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    }),
    body: (model, prompt) => ({
      model: model || 'gpt-4o',
      messages: [{ role: 'user', content: prompt }],
    }),
    extract: (data) => data?.choices?.[0]?.message?.content || '',
  },
  gemini: {
    // Model lives in the URL path for Gemini; honor --model when provided.
    url: (apiKey, model) => `https://generativelanguage.googleapis.com/v1beta/models/${model || 'gemini-2.0-flash'}:generateContent?key=${apiKey}`,
    headers: () => ({ 'Content-Type': 'application/json' }),
    body: (model, prompt) => ({
      contents: [{ parts: [{ text: prompt }] }],
    }),
    extract: (data) => data?.candidates?.[0]?.content?.parts?.[0]?.text || '',
  },
  ollama: {
    url: 'http://localhost:11434/api/generate',
    headers: () => ({ 'Content-Type': 'application/json' }),
    body: (model, prompt) => ({
      model: model || 'llama3',
      prompt,
      stream: false,
    }),
    extract: (data) => data?.response || '',
  },
};
|
|
51
|
+
|
|
52
|
+
// ─── Prompt builder ───────────────────────────────────────────────────────────
|
|
53
|
+
|
|
54
|
+
/**
 * Build the Red-Green-Refactor remediation prompt for a single finding.
 * The template's exact text is part of the tool's behavior (the model is
 * instructed to reply in a specific JSON shape that parseResponse expects),
 * so it must not be reworded casually.
 *
 * @param {object} finding - scanner finding (name, severity, file, line, snippet)
 * @returns {string} prompt text to send to the provider
 */
function buildRemediationPrompt(finding) {
  return `You are a security engineer applying the Red-Green-Refactor TDD remediation protocol.

VULNERABILITY FINDING:
- Type: ${finding.name}
- Severity: ${finding.severity}
- File: ${finding.file}
- Line: ${finding.line}
- Code snippet: ${finding.snippet}

TASK:
1. Write a Jest/supertest exploit test (Red phase) that proves this vulnerability exists.
   The test must be placed in __tests__/security/ and must FAIL before the fix.
2. Write the minimum code patch (Green phase) that closes the vulnerability.
   Show it as a unified diff against the original file.
3. Confirm what regression checks to run (Refactor phase).

Respond with valid JSON in exactly this shape:
{
  "exploitTest": {
    "filename": "__tests__/security/<slug>.test.js",
    "content": "<full test file content>"
  },
  "patch": {
    "filename": "<path to file being patched>",
    "diff": "<unified diff>"
  },
  "refactorChecks": ["<check 1>", "<check 2>"]
}`;
}
|
|
84
|
+
|
|
85
|
+
// ─── HTTP call ────────────────────────────────────────────────────────────────
|
|
86
|
+
|
|
87
|
+
/**
 * Send one prompt to the selected provider and return the raw reply text.
 *
 * Fix vs. previous version: function-valued `url` entries were called with
 * only the API key, so a provider that encodes the model in its URL (gemini)
 * could never honor a caller-supplied model. The model is now passed as a
 * second argument; single-argument url builders simply ignore it, so this
 * change is backward compatible.
 *
 * @param {string} provider - key into PROVIDERS
 * @param {string} apiKey
 * @param {string|null} model - optional model override; provider default otherwise
 * @param {string} prompt
 * @returns {Promise<string>} extracted reply text
 * @throws {Error} for an unknown provider or a non-2xx HTTP response
 */
async function callProvider(provider, apiKey, model, prompt) {
  const p = PROVIDERS[provider];
  if (!p) throw new Error(`Unknown provider "${provider}". Supported: ${Object.keys(PROVIDERS).join(', ')}`);

  const url = typeof p.url === 'function' ? p.url(apiKey, model) : p.url;
  const headers = p.headers(apiKey);
  const body = JSON.stringify(p.body(model, prompt));

  const res = await fetch(url, { method: 'POST', headers, body });
  if (!res.ok) {
    const text = await res.text().catch(() => '');
    throw new Error(`Provider ${provider} returned ${res.status}: ${text.slice(0, 200)}`);
  }
  const data = await res.json();
  return p.extract(data);
}
|
|
103
|
+
|
|
104
|
+
// ─── Parse model response ─────────────────────────────────────────────────────
|
|
105
|
+
|
|
106
|
+
/**
 * Pull the JSON object out of a model reply. Models often wrap their answer
 * in markdown fences or prose, so everything from the first "{" through the
 * last "}" is treated as the payload.
 *
 * @param {string} text - raw reply text from the provider
 * @returns {object} parsed JSON
 * @throws {Error} when no object is present; JSON.parse errors propagate
 */
function parseResponse(text) {
  const open = text.indexOf('{');
  const close = text.lastIndexOf('}');
  if (open === -1 || close < open) {
    throw new Error('Model response did not contain a JSON object');
  }
  return JSON.parse(text.slice(open, close + 1));
}
|
|
112
|
+
|
|
113
|
+
// ─── Main remediate function ──────────────────────────────────────────────────
|
|
114
|
+
|
|
115
|
+
/**
 * Run AI-powered remediation over a list of scanner findings.
 *
 * Findings flagged as likely false positives, or below the requested
 * severity, are skipped. The rest are processed sequentially, most severe
 * first; a failure on one finding is recorded and does not stop the others.
 *
 * @param {object} opts
 * @param {Array} opts.findings - finding objects from quickScan
 * @param {string} opts.provider - 'anthropic' | 'openai' | 'gemini' | 'ollama'
 * @param {string} opts.apiKey
 * @param {string} [opts.model]
 * @param {string} [opts.severity='LOW'] - minimum severity to fix
 * @returns {Promise<Array>} one result per processed finding, each either
 *   { finding, status: 'remediated', exploitTest, patch, refactorChecks }
 *   or { finding, status: 'error', error }
 */
async function remediate({ findings, provider, apiKey, model, severity = 'LOW' }) {
  const ORDER = { CRITICAL: 0, HIGH: 1, MEDIUM: 2, LOW: 3 };
  const threshold = ORDER[severity.toUpperCase()] ?? 3;
  // Unknown severities rank 99 so they never pass the threshold.
  const rank = (f) => ORDER[f.severity] ?? 99;

  const targets = findings
    .filter((f) => !f.likelyFalsePositive && rank(f) <= threshold)
    .sort((a, b) => rank(a) - rank(b));

  const results = [];
  for (const finding of targets) {
    try {
      const reply = await callProvider(provider, apiKey, model, buildRemediationPrompt(finding));
      results.push({ finding, status: 'remediated', ...parseResponse(reply) });
    } catch (err) {
      results.push({ finding, status: 'error', error: err.message });
    }
  }
  return results;
}
|
|
147
|
+
|
|
148
|
+
module.exports = { remediate, callProvider, buildRemediationPrompt, PROVIDERS };
|
package/lib/reporter.js
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const { version } = require('../package.json');
|
|
4
|
+
|
|
5
|
+
// ─── JSON ─────────────────────────────────────────────────────────────────────
|
|
6
|
+
|
|
7
|
+
/**
 * Shape findings into the structured JSON report returned by --json and the
 * REST /scan endpoint. Likely-false-positive findings are split out of the
 * main list and excluded from the severity summary.
 *
 * @param {Array} findings
 * @param {string[]} [exempted=[]] - paths/patterns excluded from the scan
 * @returns {object} { version, summary, findings, likelyFalsePositives, exempted, scannedAt }
 */
function toJson(findings, exempted = []) {
  const real = [];
  const noisy = [];
  for (const f of findings) {
    (f.likelyFalsePositive ? noisy : real).push(f);
  }

  const summary = { CRITICAL: 0, HIGH: 0, MEDIUM: 0, LOW: 0 };
  for (const { severity } of real) {
    summary[severity] = (summary[severity] || 0) + 1;
  }

  return {
    version,
    summary,
    findings: real,
    likelyFalsePositives: noisy,
    exempted,
    scannedAt: new Date().toISOString(),
  };
}
|
|
29
|
+
|
|
30
|
+
// ─── SARIF ────────────────────────────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
// Map tool severities onto the SARIF result levels GitHub understands.
const SARIF_LEVEL = { CRITICAL: 'error', HIGH: 'error', MEDIUM: 'warning', LOW: 'note' };

// Maps our vuln names to CWE IDs for richer GitHub annotations
const CWE_MAP = {
  'SQL Injection': 'CWE-89',
  'Command Injection': 'CWE-78',
  'Path Traversal': 'CWE-22',
  'XSS': 'CWE-79',
  'IDOR': 'CWE-639',
  'Broken Auth': 'CWE-287',
  'Hardcoded Secret': 'CWE-798',
  'SSRF': 'CWE-918',
  'Open Redirect': 'CWE-601',
  'NoSQL Injection': 'CWE-943',
  'Mass Assignment': 'CWE-915',
  'Prototype Pollution': 'CWE-1321',
  'Weak Crypto': 'CWE-327',
  'Insecure Deserialization': 'CWE-502',
  'TLS Bypass': 'CWE-295',
  'Sensitive Storage': 'CWE-312',
  'JWT Alg None': 'CWE-347',
  'Secret Fallback': 'CWE-798',
  'eval() Injection': 'CWE-95',
  'Template Injection': 'CWE-94',
  'ReDoS': 'CWE-1333',
  'XXE': 'CWE-611',
  'CORS Wildcard': 'CWE-942',
  'Insecure Random': 'CWE-338',
  'Timing-Unsafe Comparison': 'CWE-208',
};
|
|
62
|
+
|
|
63
|
+
/**
 * Return findings as a SARIF 2.1.0 object (GitHub code scanning compatible).
 *
 * Findings flagged `likelyFalsePositive` are excluded. One SARIF rule is
 * registered per distinct finding name; each result references its rule by
 * id and index.
 *
 * @param {Array} findings - scanner findings ({ name, severity, file, line, snippet, ... })
 * @param {string} [projectDir=''] - reserved for building relative artifact URIs.
 *   NOTE(review): currently unused — file paths are emitted as-is under
 *   %SRCROOT%; confirm callers already pass repo-relative paths.
 * @returns {object} SARIF 2.1.0 document
 */
function toSarif(findings, projectDir = '') {
  const rules = [];
  const ruleIndex = {};

  const results = findings.filter(f => !f.likelyFalsePositive).map(f => {
    // Lazily register a rule the first time each finding name is seen.
    if (ruleIndex[f.name] === undefined) {
      ruleIndex[f.name] = rules.length;
      const cwe = CWE_MAP[f.name];
      rules.push({
        id: f.name.replace(/\s+/g, '-').replace(/[()]/g, '').toLowerCase(),
        name: f.name,
        shortDescription: { text: f.name },
        fullDescription: { text: `${f.name} detected — severity: ${f.severity}` },
        defaultConfiguration: { level: SARIF_LEVEL[f.severity] || 'warning' },
        // Only attach CWE metadata when a mapping exists. Previously helpUri
        // was always emitted, pointing unmapped rules at the nonexistent CWE
        // definition page ".../definitions/0.html".
        ...(cwe && {
          relationships: [{ target: { id: cwe, toolComponent: { name: 'CWE' } } }],
          helpUri: `https://cwe.mitre.org/data/definitions/${cwe.replace('CWE-', '')}.html`,
        }),
      });
    }

    return {
      ruleId: rules[ruleIndex[f.name]].id,
      ruleIndex: ruleIndex[f.name],
      level: SARIF_LEVEL[f.severity] || 'warning',
      message: { text: f.snippet || f.name },
      locations: [{
        physicalLocation: {
          artifactLocation: {
            // SARIF artifact URIs use forward slashes, even on Windows.
            uri: f.file.replace(/\\/g, '/'),
            uriBaseId: '%SRCROOT%',
          },
          region: { startLine: f.line },
        },
      }],
    };
  });

  return {
    $schema: 'https://json.schemastore.org/sarif-2.1.0.json',
    version: '2.1.0',
    runs: [{
      tool: {
        driver: {
          name: '@lhi/tdd-audit',
          version,
          informationUri: 'https://www.npmjs.com/package/@lhi/tdd-audit',
          rules,
        },
      },
      results,
    }],
  };
}
|
|
121
|
+
|
|
122
|
+
// ─── Text (existing printFindings extracted for reuse) ────────────────────────

/**
 * Render findings as a human-readable text report (returned, not printed).
 * @param {Array} findings
 * @param {string[]} [exempted=[]] - paths skipped via audit_status:safe
 * @returns {string}
 */
function toText(findings, exempted = []) {
  const out = [];

  if (findings.length === 0) {
    out.push(' ✅ No obvious vulnerability patterns detected.\n');
  } else {
    const real = [];
    const noisy = [];
    for (const f of findings) {
      (f.likelyFalsePositive ? noisy : real).push(f);
    }

    // Bucket by severity; unknown severities fall into the LOW bucket.
    const bySeverity = { CRITICAL: [], HIGH: [], MEDIUM: [], LOW: [] };
    real.forEach(f => (bySeverity[f.severity] || bySeverity.LOW).push(f));
    const icons = { CRITICAL: '🔴', HIGH: '🟠', MEDIUM: '🟡', LOW: '🔵' };

    out.push(`\n Found ${real.length} potential issue(s)${noisy.length ? ` (+${noisy.length} in test files — see below)` : ''}:\n`);

    for (const sev of Object.keys(bySeverity)) {
      for (const f of bySeverity[sev]) {
        const badge = f.inTestFile ? ' [test file]' : '';
        out.push(` ${icons[sev]} [${sev}] ${f.name} — ${f.file}:${f.line}${badge}`);
        out.push(` ${f.snippet}`);
      }
    }

    if (noisy.length) {
      out.push('\n ⚪ Likely intentional (in test files — verify manually):');
      noisy.forEach(f => out.push(` ${f.name} — ${f.file}:${f.line}`));
    }

    out.push('\n Run /tdd-audit in your agent to remediate.\n');
  }

  if (exempted.length) {
    out.push(' ⚠️ Files skipped via audit_status:safe (verify these exemptions are intentional):');
    exempted.forEach(p => out.push(` ${p}`));
    out.push('');
  }

  return out.join('\n');
}
|
|
163
|
+
|
|
164
|
+
module.exports = { toJson, toSarif, toText };
|
package/lib/server.js
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const crypto = require('crypto');
const http = require('http');
const path = require('path');
const { quickScan, scanPromptFiles } = require('./scanner');
const { toJson, toSarif, toText } = require('./reporter');
const { loadConfig, parseCliOverrides } = require('./config');
const { version } = require('../package.json');
|
|
9
|
+
|
|
10
|
+
// ─── Job store (in-memory) ────────────────────────────────────────────────────

// Registry of remediation jobs, keyed by job id. Lives only for the
// lifetime of the process.
const jobs = new Map();
let jobSeq = 0;

/**
 * Register a new pending job and return its id.
 * Ids combine a monotonic sequence number with a timestamp so they are
 * unique within the process lifetime.
 * @returns {string} the new job id
 */
function createJob() {
  jobSeq += 1;
  const id = `job_${jobSeq}_${Date.now()}`;
  jobs.set(id, { id, status: 'pending', createdAt: new Date().toISOString() });
  return id;
}

/**
 * Shallow-merge `patch` into an existing job record. Unknown ids are a no-op.
 * @param {string} id - job id returned by createJob
 * @param {object} patch - fields to overwrite on the stored record
 */
function updateJob(id, patch) {
  const existing = jobs.get(id);
  if (!existing) return;
  jobs.set(id, Object.assign({}, existing, patch));
}
|
|
25
|
+
|
|
26
|
+
// ─── Helpers ──────────────────────────────────────────────────────────────────

/**
 * Write a JSON response with an explicit Content-Length and basic
 * hardening headers (nosniff, no framing).
 * @param {http.ServerResponse} res
 * @param {number} status - HTTP status code
 * @param {object} body - JSON-serialisable payload
 */
function json(res, status, body) {
  const payload = JSON.stringify(body);
  const headers = {
    'Content-Type': 'application/json',
    'Content-Length': Buffer.byteLength(payload),
    'X-Content-Type-Options': 'nosniff',
    'X-Frame-Options': 'DENY',
  };
  res.writeHead(status, headers);
  res.end(payload);
}
|
|
38
|
+
|
|
39
|
+
/**
 * Read and parse a JSON request body, capped at 512 KiB.
 *
 * Rejects with 'Request body too large' when the cap is exceeded (and tears
 * the connection down so the client cannot keep streaming), or with
 * 'Invalid JSON body' when the payload does not parse. An empty body
 * resolves to `{}`.
 *
 * @param {http.IncomingMessage} req
 * @returns {Promise<object>} the parsed body
 */
function readBody(req) {
  return new Promise((resolve, reject) => {
    let data = '';
    let settled = false; // settle exactly once; ignore events after that

    req.on('data', chunk => {
      if (settled) return;
      data += chunk;
      if (data.length > 1024 * 512) {
        settled = true;
        data = ''; // free the buffer — we will not parse it
        // Stop the client from streaming more; previously we kept
        // accumulating chunks after rejecting.
        if (typeof req.destroy === 'function') req.destroy();
        reject(new Error('Request body too large'));
      }
    });

    req.on('end', () => {
      if (settled) return;
      settled = true;
      try { resolve(JSON.parse(data || '{}')); }
      catch { reject(new Error('Invalid JSON body')); }
    });

    req.on('error', err => {
      if (settled) return;
      settled = true;
      reject(err);
    });
  });
}
|
|
53
|
+
|
|
54
|
+
/**
 * Authenticate incoming requests.
 * If serverApiKey is set, require `Authorization: Bearer <key>`.
 *
 * The key is compared in constant time via crypto.timingSafeEqual so an
 * attacker cannot recover it byte-by-byte from response-timing differences
 * (the previous `===` comparison was timing-unsafe — the very pattern this
 * tool flags as CWE-208).
 *
 * @param {http.IncomingMessage} req
 * @param {object} cfg - loaded config; `serverApiKey` enables auth
 * @returns {boolean} true when the request may proceed
 */
function authenticate(req, cfg) {
  if (!cfg.serverApiKey) return true; // no key configured — open
  const header = req.headers['authorization'] || '';
  const token = header.startsWith('Bearer ') ? header.slice(7) : '';
  const provided = Buffer.from(token);
  const expected = Buffer.from(cfg.serverApiKey);
  // timingSafeEqual requires equal lengths; a length mismatch leaks only
  // the key's length, not its contents.
  if (provided.length !== expected.length) return false;
  return crypto.timingSafeEqual(provided, expected);
}
|
|
64
|
+
|
|
65
|
+
/**
 * Validate and sanitise the `path` field from POST /scan.
 * Only allow paths inside cwd to prevent path traversal.
 *
 * Containment is checked with path.relative on path segments: the previous
 * bare `resolved.startsWith(cwd)` prefix test wrongly accepted sibling
 * directories such as `${cwd}-evil`.
 *
 * @param {string} rawPath - untrusted path from the request body; falsy means cwd
 * @returns {string} absolute path, guaranteed to be cwd or inside it
 * @throws {Error} when the resolved path escapes the working directory
 */
function safeScanPath(rawPath) {
  const cwd = process.cwd();
  const resolved = path.resolve(cwd, rawPath || cwd);
  const rel = path.relative(cwd, resolved);
  // rel === '' → resolved is cwd itself; a leading '..' segment or an
  // absolute rel (different drive on Windows) means it escaped.
  if (rel === '..' || rel.startsWith(`..${path.sep}`) || path.isAbsolute(rel)) {
    throw new Error('Path outside working directory');
  }
  return resolved;
}
|
|
75
|
+
|
|
76
|
+
// ─── Router ───────────────────────────────────────────────────────────────────

/**
 * Route a single HTTP request.
 *
 * Routes:
 *   GET  /health     — liveness + version (no auth required)
 *   POST /scan       — run quickScan over a path inside cwd; JSON or SARIF
 *   POST /remediate  — enqueue an async AI remediation job; responds 202 + jobId
 *   GET  /jobs/:id   — poll a job's status/results
 *
 * Unknown routes get a 404. Auth (when cfg.serverApiKey is set) applies to
 * everything except /health.
 *
 * @param {http.IncomingMessage} req
 * @param {http.ServerResponse} res
 * @param {object} cfg - loaded server config (serverApiKey, output, model, ...)
 */
async function handleRequest(req, res, cfg) {
  const { method, url } = req;

  // ── GET /health ────────────────────────────────────────────────────────────
  if (method === 'GET' && url === '/health') {
    return json(res, 200, { status: 'ok', version });
  }

  // All other routes require authentication
  if (!authenticate(req, cfg)) {
    return json(res, 401, { error: 'Unauthorized' });
  }

  // ── POST /scan ─────────────────────────────────────────────────────────────
  if (method === 'POST' && url === '/scan') {
    let body;
    try { body = await readBody(req); }
    catch (e) { return json(res, 400, { error: e.message }); }

    // safeScanPath rejects anything outside the server's cwd.
    let scanPath;
    try { scanPath = safeScanPath(body.path); }
    catch (e) { return json(res, 400, { error: e.message }); }

    // Output format precedence: request body > config file > 'json'.
    const format = body.format || cfg.output || 'json';
    const t0 = Date.now();
    const findings = quickScan(scanPath);
    // NOTE(review): exempted paths appear to ride along as a property on the
    // findings array — confirm against scanner.quickScan's return shape.
    const exempted = findings.exempted || [];
    const duration = Date.now() - t0;

    // NOTE(review): `duration` and `exempted` are dropped in the SARIF
    // branch — confirm that is intentional.
    if (format === 'sarif') {
      return json(res, 200, toSarif(findings, scanPath));
    }
    return json(res, 200, { ...toJson(findings, exempted), duration });
  }

  // ── POST /remediate ────────────────────────────────────────────────────────
  if (method === 'POST' && url === '/remediate') {
    let body;
    try { body = await readBody(req); }
    catch (e) { return json(res, 400, { error: e.message }); }

    // The provider API key arrives in the request body; it is forwarded to
    // the remediator only and is never stored on the job record below.
    const { findings, provider, apiKey, model } = body;
    if (!findings || !provider || !apiKey) {
      return json(res, 400, { error: 'findings, provider, and apiKey are required' });
    }

    const jobId = createJob();

    // Kick off async remediation (non-blocking)
    setImmediate(async () => {
      try {
        updateJob(jobId, { status: 'running', startedAt: new Date().toISOString() });
        // Lazy require: the remediator (and its provider plumbing) is only
        // loaded when a remediation is actually requested.
        const { remediate } = require('./remediator');
        const results = await remediate({ findings, provider, apiKey, model: model || cfg.model });
        updateJob(jobId, { status: 'done', completedAt: new Date().toISOString(), results });
      } catch (err) {
        // Failures are recorded on the job so GET /jobs/:id can report them;
        // nothing is re-thrown from this detached task.
        updateJob(jobId, { status: 'error', error: err.message });
      }
    });

    // 202 Accepted — caller polls GET /jobs/:id for the outcome.
    return json(res, 202, { jobId });
  }

  // ── GET /jobs/:id ──────────────────────────────────────────────────────────
  // NOTE(review): job ids are sequential + timestamp (see createJob), so they
  // are guessable; job results are gated only by the bearer key above.
  const jobMatch = url.match(/^\/jobs\/([^/?]+)$/);
  if (method === 'GET' && jobMatch) {
    const job = jobs.get(jobMatch[1]);
    if (!job) return json(res, 404, { error: 'Job not found' });
    return json(res, 200, job);
  }

  return json(res, 404, { error: 'Not found' });
}
|
|
151
|
+
|
|
152
|
+
// ─── Start ────────────────────────────────────────────────────────────────────

/**
 * Start the REST API server on the configured port.
 *
 * Config comes from loadConfig merged with CLI overrides parsed from `args`.
 * Warns on stderr when no API key is configured (server runs open).
 *
 * @param {string[]} [args=[]] - raw CLI arguments (e.g. ['--port', '3000'])
 * @returns {http.Server} the server instance (returned for testing)
 */
function start(args = []) {
  const cfg = loadConfig(process.cwd(), parseCliOverrides(args));
  const port = cfg.port;

  const server = http.createServer(async (req, res) => {
    try {
      await handleRequest(req, res, cfg);
    } catch (err) {
      // Production error handler — no stack traces. If the handler already
      // started writing the response before throwing, calling writeHead again
      // would itself throw ERR_HTTP_HEADERS_SENT; in that case just make sure
      // the response is terminated.
      if (!res.headersSent) {
        json(res, 500, { error: 'Internal server error' });
      } else if (!res.writableEnded) {
        res.end();
      }
    }
  });

  server.listen(port, () => {
    process.stdout.write(`\n🔒 tdd-audit REST API listening on http://localhost:${port}\n`);
    if (!cfg.serverApiKey) {
      process.stderr.write('⚠️ No --api-key set — server is unauthenticated. Set one for production.\n');
    }
    process.stdout.write(' GET /health\n');
    process.stdout.write(' POST /scan { path, format? }\n');
    process.stdout.write(' POST /remediate { findings, provider, apiKey, model? }\n');
    process.stdout.write(' GET /jobs/:id\n\n');
  });

  return server; // returned for testing
}
|
|
180
|
+
|
|
181
|
+
module.exports = { start, jobs, createJob, updateJob, safeScanPath };
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@lhi/tdd-audit",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.9.0",
|
|
4
4
|
"description": "Security skill installer for Claude Code, Gemini CLI, Cursor, Codex, and OpenCode. Patches vulnerabilities using a Red-Green-Refactor exploit-test protocol.",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"bin": {
|