@lhi/tdd-audit 1.12.0 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +30 -5
- package/docs/rest-api.md +185 -35
- package/docs/scanner.md +13 -9
- package/index.js +5 -0
- package/lib/badge.js +94 -0
- package/lib/jobs.js +53 -0
- package/lib/plugin.js +308 -0
- package/lib/remediator.js +8 -3
- package/lib/scanner.js +1 -1
- package/lib/server.js +57 -100
- package/package.json +4 -1
- package/prompts/auto-audit.md +109 -0
- package/prompts/hardening-phase.md +91 -0
package/README.md
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
# @lhi/tdd-audit
|
|
2
|
+
[](https://www.npmjs.com/package/@lhi/tdd-audit) <!-- tdd-audit-badge -->
|
|
2
3
|
|
|
3
|
-
> **v1.
|
|
4
|
+
> **v1.15.0** — Security skill installer for **Claude Code, Gemini CLI, Cursor, Codex, and OpenCode**. Patches vulnerabilities using a Red-Green-Refactor exploit-test protocol — prove the hole exists, apply the fix, prove it's closed. Enforces ≥ 95% test coverage, README badge, and SECURITY.md on every audit.
|
|
4
5
|
|
|
5
6
|
## Install
|
|
6
7
|
|
|
@@ -80,7 +81,7 @@ npx @lhi/tdd-audit serve --config ~/configs/prod-audit.json
|
|
|
80
81
|
## REST API + AI remediation
|
|
81
82
|
|
|
82
83
|
```bash
|
|
83
|
-
# Start the API server
|
|
84
|
+
# Start the API server (now powered by Fastify)
|
|
84
85
|
npx @lhi/tdd-audit serve --port 3000 --api-key YOUR_SECRET
|
|
85
86
|
|
|
86
87
|
# Scan any path → JSON
|
|
@@ -88,6 +89,19 @@ curl -X POST http://localhost:3000/scan \
|
|
|
88
89
|
-H "Authorization: Bearer YOUR_SECRET" \
|
|
89
90
|
-d '{"path": "."}' | jq '.summary'
|
|
90
91
|
|
|
92
|
+
# Full automated pipeline: scan + remediate in one shot
|
|
93
|
+
curl -X POST http://localhost:3000/audit \
|
|
94
|
+
-H "Authorization: Bearer YOUR_SECRET" \
|
|
95
|
+
-H "Content-Type: application/json" \
|
|
96
|
+
-d '{"path": ".", "provider": "anthropic", "apiKey": "sk-ant-..."}' \
|
|
97
|
+
| jq '.jobId'
|
|
98
|
+
|
|
99
|
+
# Poll job status
|
|
100
|
+
curl http://localhost:3000/jobs/<jobId>
|
|
101
|
+
|
|
102
|
+
# Or stream real-time updates via SSE
|
|
103
|
+
curl -N http://localhost:3000/jobs/<jobId>/stream
|
|
104
|
+
|
|
91
105
|
# Use any OpenAI-compatible service (Groq, OpenRouter, Together AI, etc.)
|
|
92
106
|
npx @lhi/tdd-audit serve \
|
|
93
107
|
--provider openai \
|
|
@@ -98,6 +112,17 @@ npx @lhi/tdd-audit serve \
|
|
|
98
112
|
|
|
99
113
|
Supported providers: `anthropic` · `openai` · `gemini` · `ollama` (local) · **any OpenAI-compatible endpoint via `--base-url`**
|
|
100
114
|
|
|
115
|
+
### Endpoints
|
|
116
|
+
|
|
117
|
+
| Method | Path | Auth | Description |
|
|
118
|
+
|---|---|---|---|
|
|
119
|
+
| `GET` | `/health` | No | Version + liveness check |
|
|
120
|
+
| `POST` | `/scan` | Yes | Scan a path, return findings |
|
|
121
|
+
| `POST` | `/remediate` | Yes | AI-fix a findings list; returns `jobId` |
|
|
122
|
+
| `POST` | `/audit` | Yes | Full scan+remediate pipeline; returns `jobId` |
|
|
123
|
+
| `GET` | `/jobs/:id` | Yes | Poll job status |
|
|
124
|
+
| `GET` | `/jobs/:id/stream` | Yes | SSE stream — real-time job progress |
|
|
125
|
+
|
|
101
126
|
## Output formats
|
|
102
127
|
|
|
103
128
|
```bash
|
|
@@ -108,11 +133,11 @@ npx @lhi/tdd-audit --scan # human-readable text (default)
|
|
|
108
133
|
|
|
109
134
|
## Testing
|
|
110
135
|
|
|
111
|
-
|
|
136
|
+
586 tests across unit, E2E, and security suites:
|
|
112
137
|
|
|
113
138
|
```bash
|
|
114
139
|
npm test # full suite
|
|
115
|
-
npm run test:unit # unit tests with coverage (
|
|
140
|
+
npm run test:unit # unit tests with coverage (96.6% branch coverage)
|
|
116
141
|
npm run test:security # security regression tests only
|
|
117
142
|
npm run test:e2e # end-to-end REST API tests
|
|
118
143
|
```
|
|
@@ -126,7 +151,7 @@ Security tests cover prompt injection, path traversal, rate limiting, timing-saf
|
|
|
126
151
|
| [REST API](docs/rest-api.md) | Endpoints, auth, rate limiting, trust-proxy, request/response schema |
|
|
127
152
|
| [AI Remediation](docs/ai-remediation.md) | Provider setup, `--base-url` for compatible APIs, config file |
|
|
128
153
|
| [Scanner](docs/scanner.md) | Architecture, detection logic, false-positive handling |
|
|
129
|
-
| [Vulnerability Patterns](docs/vulnerability-patterns.md) | All
|
|
154
|
+
| [Vulnerability Patterns](docs/vulnerability-patterns.md) | All 57 patterns — descriptions, grep signatures, fix pointers |
|
|
130
155
|
| [TDD Protocol](docs/tdd-protocol.md) | Red-Green-Refactor in full, with framework templates for all 6 stacks |
|
|
131
156
|
| [Agentic AI Security](docs/agentic-ai-security.md) | ASI01–ASI10 — prompt injection, MCP supply chain, Actions injection |
|
|
132
157
|
| [Hardening](docs/hardening.md) | Phase 4 controls — Helmet, CSP, CSRF, rate limiting, gitleaks, SRI |
|
package/docs/rest-api.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# REST API
|
|
2
2
|
|
|
3
|
-
`tdd-audit serve` turns the scanner into an authenticated HTTP API
|
|
3
|
+
`tdd-audit serve` turns the scanner into an authenticated HTTP API built on **Fastify**. Use it to integrate vulnerability scanning and AI remediation into dashboards, CI pipelines, bots, or any tooling that speaks JSON.
|
|
4
4
|
|
|
5
5
|
---
|
|
6
6
|
|
|
@@ -29,7 +29,7 @@ npx @lhi/tdd-audit serve --config ~/configs/prod.json
|
|
|
29
29
|
}
|
|
30
30
|
```
|
|
31
31
|
|
|
32
|
-
If `--api-key` / `serverApiKey` is omitted the server starts unauthenticated with a warning. Always set one in production.
|
|
32
|
+
If `--api-key` / `serverApiKey` is omitted the server starts unauthenticated with a stderr warning. Always set one in production.
|
|
33
33
|
|
|
34
34
|
---
|
|
35
35
|
|
|
@@ -45,13 +45,13 @@ Authorization: Bearer YOUR_SECRET
|
|
|
45
45
|
|
|
46
46
|
Missing or wrong key → `401 Unauthorized`.
|
|
47
47
|
|
|
48
|
-
Tokens are compared using **HMAC + `crypto.timingSafeEqual`** to prevent timing-oracle attacks.
|
|
48
|
+
Tokens are compared using **HMAC + `crypto.timingSafeEqual`** to prevent timing-oracle attacks (both values are HMAC-normalised before comparison so lengths are always equal).
|
|
49
49
|
|
|
50
50
|
### Rate limiting
|
|
51
51
|
|
|
52
|
-
All endpoints are rate-limited to **60 requests / IP / minute
|
|
52
|
+
All endpoints are rate-limited to **60 requests / IP / minute**. Exceeding the limit returns `429 Too Many Requests`.
|
|
53
53
|
|
|
54
|
-
By default the rate limiter keys on the **socket IP**, not `X-Forwarded-For`, to prevent header-spoofing bypasses. Enable proxy-forwarded IPs only
|
|
54
|
+
By default the rate limiter keys on the **socket IP**, not `X-Forwarded-For`, to prevent header-spoofing bypasses. Enable proxy-forwarded IPs only when you are behind a trusted reverse proxy:
|
|
55
55
|
|
|
56
56
|
```json
|
|
57
57
|
{ "trustProxy": true }
|
|
@@ -59,15 +59,16 @@ By default the rate limiter keys on the **socket IP**, not `X-Forwarded-For`, to
|
|
|
59
59
|
|
|
60
60
|
### Path validation
|
|
61
61
|
|
|
62
|
-
`POST /scan`
|
|
62
|
+
`POST /scan` and `POST /audit` validate that the requested path is inside the server's working directory. The check is normalised with a trailing path separator to prevent sibling-directory prefix bypasses (e.g. `/app-evil` cannot escape via `/app`). Paths outside cwd return `400`.
|
|
63
63
|
|
|
64
64
|
### Security headers
|
|
65
65
|
|
|
66
66
|
Every response includes:
|
|
67
67
|
|
|
68
68
|
```
|
|
69
|
-
|
|
70
|
-
X-
|
|
69
|
+
Content-Security-Policy: default-src 'none'
|
|
70
|
+
X-Content-Type-Options: nosniff
|
|
71
|
+
X-Frame-Options: DENY
|
|
71
72
|
```
|
|
72
73
|
|
|
73
74
|
---
|
|
@@ -79,14 +80,14 @@ X-Frame-Options: DENY
|
|
|
79
80
|
No auth required. Returns server status and version.
|
|
80
81
|
|
|
81
82
|
```json
|
|
82
|
-
{ "status": "ok", "version": "1.
|
|
83
|
+
{ "status": "ok", "version": "1.15.0" }
|
|
83
84
|
```
|
|
84
85
|
|
|
85
86
|
---
|
|
86
87
|
|
|
87
88
|
### `POST /scan`
|
|
88
89
|
|
|
89
|
-
Scan a local path and return structured findings.
|
|
90
|
+
Scan a local path and return structured findings synchronously.
|
|
90
91
|
|
|
91
92
|
**Request**
|
|
92
93
|
```json
|
|
@@ -104,13 +105,13 @@ Scan a local path and return structured findings.
|
|
|
104
105
|
**Response — JSON**
|
|
105
106
|
```json
|
|
106
107
|
{
|
|
107
|
-
"version":
|
|
108
|
-
"summary":
|
|
109
|
-
"findings":
|
|
108
|
+
"version": "1.15.0",
|
|
109
|
+
"summary": { "CRITICAL": 1, "HIGH": 3, "MEDIUM": 1, "LOW": 0 },
|
|
110
|
+
"findings": [ ... ],
|
|
110
111
|
"likelyFalsePositives": [ ... ],
|
|
111
|
-
"exempted":
|
|
112
|
-
"scannedAt":
|
|
113
|
-
"duration":
|
|
112
|
+
"exempted": [],
|
|
113
|
+
"scannedAt": "2026-03-25T12:00:00.000Z",
|
|
114
|
+
"duration": 42
|
|
114
115
|
}
|
|
115
116
|
```
|
|
116
117
|
|
|
@@ -122,7 +123,7 @@ Returns a SARIF 2.1.0 object ready to upload to GitHub code scanning.
|
|
|
122
123
|
|
|
123
124
|
| Status | Reason |
|
|
124
125
|
|---|---|
|
|
125
|
-
| 400 | Path traversal attempt,
|
|
126
|
+
| 400 | Path traversal attempt, oversized body (> 512 KB), or invalid JSON |
|
|
126
127
|
| 401 | Missing or invalid API key |
|
|
127
128
|
| 429 | Rate limit exceeded |
|
|
128
129
|
|
|
@@ -130,18 +131,18 @@ Returns a SARIF 2.1.0 object ready to upload to GitHub code scanning.
|
|
|
130
131
|
|
|
131
132
|
### `POST /remediate`
|
|
132
133
|
|
|
133
|
-
Queue an AI-powered remediation job
|
|
134
|
+
Queue an AI-powered remediation job for a **provided findings list**. Returns immediately with a `jobId`; poll `GET /jobs/:id` (or stream `GET /jobs/:id/stream`) for results.
|
|
134
135
|
|
|
135
|
-
|
|
136
|
+
Use `POST /audit` instead if you want the server to run the scan itself.
|
|
136
137
|
|
|
137
138
|
**Request**
|
|
138
139
|
```json
|
|
139
140
|
{
|
|
140
141
|
"findings": [ ... ],
|
|
141
|
-
"provider": "
|
|
142
|
-
"apiKey": "sk-...",
|
|
143
|
-
"model": "
|
|
144
|
-
"baseUrl":
|
|
142
|
+
"provider": "anthropic",
|
|
143
|
+
"apiKey": "sk-ant-...",
|
|
144
|
+
"model": "claude-opus-4-6",
|
|
145
|
+
"baseUrl": null,
|
|
145
146
|
"severity": "HIGH"
|
|
146
147
|
}
|
|
147
148
|
```
|
|
@@ -155,23 +156,69 @@ The server stores up to **1 000 jobs** in memory (TTL: 1 hour). Oldest jobs are
|
|
|
155
156
|
| `baseUrl` | no | Override base URL for any OpenAI-compatible service |
|
|
156
157
|
| `severity` | no | Minimum severity to fix. Default: `LOW` (fix all) |
|
|
157
158
|
|
|
158
|
-
**Response**
|
|
159
|
+
**Response — 202 Accepted**
|
|
159
160
|
```json
|
|
160
161
|
{ "jobId": "job_1_1711363200000" }
|
|
161
162
|
```
|
|
162
163
|
|
|
164
|
+
Job lifecycle: `pending → running → done | error`
|
|
165
|
+
|
|
163
166
|
---
|
|
164
167
|
|
|
165
|
-
### `
|
|
168
|
+
### `POST /audit`
|
|
169
|
+
|
|
170
|
+
Full automated pipeline: **scan + AI remediation in one shot**. No interaction needed. Returns immediately with a `jobId`.
|
|
166
171
|
|
|
167
|
-
|
|
172
|
+
If no `provider`/`apiKey` are supplied, the server runs the scan only (no remediation) and the job transitions to `done` with just the `findings` array.
|
|
168
173
|
|
|
169
|
-
**
|
|
174
|
+
**Request**
|
|
170
175
|
```json
|
|
171
|
-
{
|
|
176
|
+
{
|
|
177
|
+
"path": ".",
|
|
178
|
+
"provider": "anthropic",
|
|
179
|
+
"apiKey": "sk-ant-...",
|
|
180
|
+
"model": "claude-opus-4-6",
|
|
181
|
+
"baseUrl": null,
|
|
182
|
+
"webhook": "https://your-server.example.com/webhook"
|
|
183
|
+
}
|
|
172
184
|
```
|
|
173
185
|
|
|
174
|
-
|
|
186
|
+
| Field | Required | Description |
|
|
187
|
+
|---|---|---|
|
|
188
|
+
| `path` | no | Path to scan. Defaults to cwd. Must be inside server cwd. |
|
|
189
|
+
| `provider` | no | If supplied with `apiKey`, AI remediation runs after the scan |
|
|
190
|
+
| `apiKey` | no | Provider API key |
|
|
191
|
+
| `model` | no | Defaults per provider |
|
|
192
|
+
| `baseUrl` | no | Override base URL for OpenAI-compatible providers |
|
|
193
|
+
| `webhook` | no | URL to POST the final job payload to when complete (fire-and-forget) |
|
|
194
|
+
|
|
195
|
+
**Response — 202 Accepted**
|
|
196
|
+
|
|
197
|
+
```
|
|
198
|
+
HTTP/1.1 202 Accepted
|
|
199
|
+
Location: /jobs/job_1_1711363200000
|
|
200
|
+
Retry-After: 2
|
|
201
|
+
```
|
|
202
|
+
```json
|
|
203
|
+
{ "jobId": "job_1_1711363200000" }
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
Job lifecycle: `pending → scanning → scanned → remediating → done | error`
|
|
207
|
+
|
|
208
|
+
Poll `GET /jobs/:id` or stream `GET /jobs/:id/stream` for progress.
|
|
209
|
+
|
|
210
|
+
**Job object during remediation**
|
|
211
|
+
```json
|
|
212
|
+
{
|
|
213
|
+
"id": "job_1_...",
|
|
214
|
+
"status": "remediating",
|
|
215
|
+
"total": 8,
|
|
216
|
+
"completed": 3,
|
|
217
|
+
"current": "SQL Injection"
|
|
218
|
+
}
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
**Job object when done**
|
|
175
222
|
```json
|
|
176
223
|
{
|
|
177
224
|
"id": "job_1_...",
|
|
@@ -179,6 +226,7 @@ Poll for remediation job status.
|
|
|
179
226
|
"createdAt": "...",
|
|
180
227
|
"startedAt": "...",
|
|
181
228
|
"completedAt": "...",
|
|
229
|
+
"findings": [ ... ],
|
|
182
230
|
"results": [
|
|
183
231
|
{
|
|
184
232
|
"finding": { ... },
|
|
@@ -193,28 +241,130 @@ Poll for remediation job status.
|
|
|
193
241
|
|
|
194
242
|
---
|
|
195
243
|
|
|
196
|
-
|
|
244
|
+
### `GET /jobs/:id`
|
|
245
|
+
|
|
246
|
+
Poll for job status. Works for jobs created by both `POST /remediate` and `POST /audit`.
|
|
247
|
+
|
|
248
|
+
**Response — pending / scanning**
|
|
249
|
+
```json
|
|
250
|
+
{ "id": "job_1_...", "status": "scanning", "createdAt": "..." }
|
|
251
|
+
```
|
|
252
|
+
|
|
253
|
+
**Response — remediating (with progress)**
|
|
254
|
+
```json
|
|
255
|
+
{
|
|
256
|
+
"id": "job_1_...",
|
|
257
|
+
"status": "remediating",
|
|
258
|
+
"total": 8,
|
|
259
|
+
"completed": 3,
|
|
260
|
+
"current": "SQL Injection"
|
|
261
|
+
}
|
|
262
|
+
```
|
|
263
|
+
|
|
264
|
+
**Response — done**
|
|
265
|
+
```json
|
|
266
|
+
{
|
|
267
|
+
"id": "job_1_...",
|
|
268
|
+
"status": "done",
|
|
269
|
+
"createdAt": "...",
|
|
270
|
+
"startedAt": "...",
|
|
271
|
+
"completedAt": "...",
|
|
272
|
+
"results": [ ... ]
|
|
273
|
+
}
|
|
274
|
+
```
|
|
275
|
+
|
|
276
|
+
**Response — error**
|
|
277
|
+
```json
|
|
278
|
+
{ "id": "job_1_...", "status": "error", "error": "Provider returned 401: ..." }
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
The job store keeps up to **1 000 jobs** in memory (TTL: 1 hour). Oldest jobs are evicted when the cap is reached.
|
|
282
|
+
|
|
283
|
+
---
|
|
284
|
+
|
|
285
|
+
### `GET /jobs/:id/stream`
|
|
197
286
|
|
|
198
|
-
|
|
287
|
+
Real-time job progress via **Server-Sent Events (SSE)**. The server pushes an event each time the job state changes, and closes the connection when the job reaches `done` or `error`.
|
|
288
|
+
|
|
289
|
+
```bash
|
|
290
|
+
curl -N http://localhost:3000/jobs/job_1_.../stream \
|
|
291
|
+
-H "Authorization: Bearer YOUR_SECRET"
|
|
292
|
+
```
|
|
293
|
+
|
|
294
|
+
**Event format**
|
|
295
|
+
```
|
|
296
|
+
data: {"id":"job_1_...","status":"scanning","createdAt":"..."}
|
|
297
|
+
|
|
298
|
+
data: {"id":"job_1_...","status":"scanned","findings":[...]}
|
|
299
|
+
|
|
300
|
+
data: {"id":"job_1_...","status":"remediating","total":8,"completed":1,"current":"SQL Injection"}
|
|
301
|
+
|
|
302
|
+
data: {"id":"job_1_...","status":"done","completedAt":"...","results":[...]}
|
|
303
|
+
```
|
|
304
|
+
|
|
305
|
+
The connection is closed automatically after the terminal state (`done` / `error`). If you connect to an already-completed job, the server pushes the current state and closes immediately.
|
|
306
|
+
|
|
307
|
+
**Node.js example using EventSource**
|
|
308
|
+
```javascript
|
|
309
|
+
const es = new EventSource(
|
|
310
|
+
'http://localhost:3000/jobs/job_1_.../stream',
|
|
311
|
+
{ headers: { Authorization: 'Bearer YOUR_SECRET' } }
|
|
312
|
+
);
|
|
313
|
+
es.onmessage = (e) => {
|
|
314
|
+
const job = JSON.parse(e.data);
|
|
315
|
+
if (job.status === 'done') { console.log(job.results); es.close(); }
|
|
316
|
+
if (job.status === 'error') { console.error(job.error); es.close(); }
|
|
317
|
+
};
|
|
318
|
+
```
|
|
319
|
+
|
|
320
|
+
---
|
|
321
|
+
|
|
322
|
+
## Full workflow examples
|
|
323
|
+
|
|
324
|
+
### curl — scan only
|
|
199
325
|
|
|
200
326
|
```bash
|
|
201
|
-
# Start server
|
|
202
327
|
npx @lhi/tdd-audit serve --port 3000 --api-key mysecret &
|
|
203
328
|
|
|
204
|
-
# Scan current directory
|
|
205
329
|
curl -s -X POST http://localhost:3000/scan \
|
|
206
330
|
-H "Authorization: Bearer mysecret" \
|
|
207
331
|
-H "Content-Type: application/json" \
|
|
208
332
|
-d '{"path": "."}' | jq '.summary'
|
|
333
|
+
```
|
|
334
|
+
|
|
335
|
+
### curl — full pipeline with polling
|
|
209
336
|
|
|
210
|
-
|
|
337
|
+
```bash
|
|
338
|
+
# Kick off audit
|
|
339
|
+
JOB=$(curl -s -X POST http://localhost:3000/audit \
|
|
340
|
+
-H "Authorization: Bearer mysecret" \
|
|
341
|
+
-H "Content-Type: application/json" \
|
|
342
|
+
-d '{
|
|
343
|
+
"path": ".",
|
|
344
|
+
"provider": "anthropic",
|
|
345
|
+
"apiKey": "sk-ant-..."
|
|
346
|
+
}' | jq -r '.jobId')
|
|
347
|
+
|
|
348
|
+
# Poll until done
|
|
349
|
+
while true; do
|
|
350
|
+
STATUS=$(curl -s http://localhost:3000/jobs/$JOB \
|
|
351
|
+
-H "Authorization: Bearer mysecret" | jq -r '.status')
|
|
352
|
+
echo "Status: $STATUS"
|
|
353
|
+
[ "$STATUS" = "done" ] || [ "$STATUS" = "error" ] && break
|
|
354
|
+
sleep 2
|
|
355
|
+
done
|
|
356
|
+
```
|
|
357
|
+
|
|
358
|
+
### curl — SARIF output for GitHub code scanning
|
|
359
|
+
|
|
360
|
+
```bash
|
|
211
361
|
curl -s -X POST http://localhost:3000/scan \
|
|
212
362
|
-H "Authorization: Bearer mysecret" \
|
|
213
363
|
-H "Content-Type: application/json" \
|
|
214
364
|
-d '{"path": ".", "format": "sarif"}' > results.sarif
|
|
215
365
|
```
|
|
216
366
|
|
|
217
|
-
### Node.js
|
|
367
|
+
### Node.js — scan
|
|
218
368
|
|
|
219
369
|
```javascript
|
|
220
370
|
const res = await fetch('http://localhost:3000/scan', {
|
package/docs/scanner.md
CHANGED
|
@@ -8,10 +8,12 @@
|
|
|
8
8
|
|
|
9
9
|
| Export | Purpose |
|
|
10
10
|
|---|---|
|
|
11
|
-
| `quickScan(projectDir)` | Walk all source files and return a findings array |
|
|
11
|
+
| `quickScan(projectDir)` | Walk all source files and return a merged findings array |
|
|
12
12
|
| `scanPromptFiles(projectDir)` | Walk all `.md` prompt/skill files and check for prompt-specific patterns |
|
|
13
13
|
| `scanAppConfig(projectDir)` | Check `app.json` / `app.config.*` for embedded secrets |
|
|
14
14
|
| `scanAndroidManifest(projectDir)` | Check `AndroidManifest.xml` for `android:debuggable="true"` |
|
|
15
|
+
| `scanPackageJson(projectDir)` | Check `package.json` lifecycle scripts for supply-chain exfiltration (postinstall curl/wget) |
|
|
16
|
+
| `scanEnvFiles(projectDir)` | Check `.env*` files for `NEXT_PUBLIC_*SECRET/KEY/TOKEN` leaking secrets to the browser |
|
|
15
17
|
| `printFindings(findings, exempted)` | Format and print a findings report to stdout |
|
|
16
18
|
| `detectFramework(dir)` | Detect the test framework (`jest`, `vitest`, `mocha`, `pytest`, `go`, `flutter`) |
|
|
17
19
|
| `detectAppFramework(dir)` | Detect the UI framework (`nextjs`, `expo`, `react-native`, `react`, `flutter`) |
|
|
@@ -23,7 +25,7 @@
|
|
|
23
25
|
|
|
24
26
|
```
|
|
25
27
|
projectDir
|
|
26
|
-
└─ walkFiles()
|
|
28
|
+
└─ walkFiles() — yields source files (see Scanned extensions below)
|
|
27
29
|
└─ for each file:
|
|
28
30
|
1. Read file content (read-first, check length after — no TOCTOU)
|
|
29
31
|
2. Skip if content.length > 512 KB
|
|
@@ -32,12 +34,14 @@ projectDir
|
|
|
32
34
|
– If pattern matches, push finding with severity / name / file / line / snippet
|
|
33
35
|
– inTestFile: true if path is under a test directory
|
|
34
36
|
– likelyFalsePositive: true if inTestFile && pattern.skipInTests
|
|
35
|
-
└─ scanAppConfig()
|
|
36
|
-
└─ scanAndroidManifest() — checks android:debuggable
|
|
37
|
-
└─ scanPromptFiles()
|
|
37
|
+
└─ scanAppConfig() — checks app.json / app.config.* for embedded secret patterns
|
|
38
|
+
└─ scanAndroidManifest() — checks android:debuggable="true"
|
|
39
|
+
└─ scanPromptFiles() — walks .md files in agent config directories for prompt-specific patterns
|
|
40
|
+
└─ scanPackageJson() — checks postinstall/preinstall lifecycle scripts for curl/wget exfiltration
|
|
41
|
+
└─ scanEnvFiles() — checks .env* files for NEXT_PUBLIC_* keys with secret-sounding names
|
|
38
42
|
```
|
|
39
43
|
|
|
40
|
-
All
|
|
44
|
+
All six result sets are merged into one array and returned to the caller.
|
|
41
45
|
|
|
42
46
|
---
|
|
43
47
|
|
|
@@ -47,7 +51,7 @@ All four result sets are merged into one array and returned to the caller.
|
|
|
47
51
|
|
|
48
52
|
Yields scannable source files (`SCAN_EXTENSIONS`). Skips:
|
|
49
53
|
|
|
50
|
-
- **`SKIP_DIRS`**: `node_modules`, `.git`, `dist`, `build`, `.next`, `out`, `__pycache__`, `venv`, `.venv`, `vendor`, `.expo`, `.dart_tool`, `.pub-cache`
|
|
54
|
+
- **`SKIP_DIRS`**: `node_modules`, `.git`, `dist`, `build`, `coverage`, `.next`, `out`, `__pycache__`, `venv`, `.venv`, `vendor`, `.expo`, `.dart_tool`, `.pub-cache`
|
|
51
55
|
- **Symlinks** — never followed, preventing escape from the project root on shared/M-series filesystems
|
|
52
56
|
|
|
53
57
|
### `walkMdFiles(dir)`
|
|
@@ -58,9 +62,9 @@ Same skip rules, yields `.md` files only. Used by `scanPromptFiles`.
|
|
|
58
62
|
|
|
59
63
|
## Scanned extensions
|
|
60
64
|
|
|
61
|
-
`.js` `.ts` `.jsx` `.tsx` `.mjs` `.py` `.go` `.dart`
|
|
65
|
+
`.js` `.ts` `.jsx` `.tsx` `.mjs` `.py` `.go` `.dart` `.yml` `.yaml`
|
|
62
66
|
|
|
63
|
-
|
|
67
|
+
JSON and XML files are not walked by the code scanner. `package.json` is handled by `scanPackageJson()` and `.env*` files by `scanEnvFiles()` — both run as separate targeted checks. CI workflow files (`.yml`/`.yaml`) **are** now scanned by `walkFiles()` for GitHub Actions expression injection and similar patterns.
|
|
64
68
|
|
|
65
69
|
---
|
|
66
70
|
|
package/index.js
CHANGED
|
@@ -13,6 +13,7 @@ const {
|
|
|
13
13
|
} = require('./lib/scanner');
|
|
14
14
|
const { toJson, toSarif, toText } = require('./lib/reporter');
|
|
15
15
|
const { writeInitConfig } = require('./lib/config');
|
|
16
|
+
const { badgeLine, injectBadge } = require('./lib/badge');
|
|
16
17
|
|
|
17
18
|
const args = process.argv.slice(2);
|
|
18
19
|
const isLocal = args.includes('--local');
|
|
@@ -87,6 +88,7 @@ if (scanOnly) {
|
|
|
87
88
|
process.stdout.write('\n');
|
|
88
89
|
printFindings(findings, exempted);
|
|
89
90
|
}
|
|
91
|
+
injectBadge(projectDir, badgeLine(findings));
|
|
90
92
|
process.exit(0);
|
|
91
93
|
}
|
|
92
94
|
|
|
@@ -243,6 +245,9 @@ if (!skipScan) {
|
|
|
243
245
|
const findings = quickScan(projectDir);
|
|
244
246
|
process.stdout.write('\n');
|
|
245
247
|
printFindings(findings);
|
|
248
|
+
const badge = badgeLine(findings);
|
|
249
|
+
injectBadge(projectDir, badge);
|
|
250
|
+
console.log('✅ README badge updated');
|
|
246
251
|
}
|
|
247
252
|
|
|
248
253
|
console.log(`\nSkill installed to ${path.relative(os.homedir(), targetSkillDir)}`);
|
package/lib/badge.js
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
'use strict';

const fs = require('fs');
const path = require('path');

// Marker embedded in the badge line — used to find and replace it on re-scan.
const BADGE_MARKER = 'tdd-audit-badge';

// npm package page the badge links to.
const NPM_URL = 'https://www.npmjs.com/package/@lhi/tdd-audit';
|
|
10
|
+
|
|
11
|
+
/**
 * Build a shields.io badge markdown line reflecting actual scan results.
 *
 * - 0 critical/high (real) findings → "passing" · brightgreen
 * - ≥1 high (no critical) → "{n} high" · orange
 * - ≥1 critical → "{n} critical" · red
 *
 * likelyFalsePositive findings (test fixtures) are excluded from the count.
 *
 * @param {Array} findings - findings array returned by quickScan()
 * @returns {string} - single-line markdown badge ending with \n
 */
function badgeLine(findings) {
  // Exclude test-file findings and likely false positives — badge reflects production code only
  const real = (findings || []).filter(f => !f.likelyFalsePositive && !f.inTestFile);
  const criticals = real.filter(f => f.severity === 'CRITICAL').length;
  const highs = real.filter(f => f.severity === 'HIGH').length;

  // %20 is the URL-encoded space shields.io expects inside a badge segment.
  let message, color;
  if (criticals > 0) {
    message = `${criticals}%20critical`;
    color = 'red';
  } else if (highs > 0) {
    message = `${highs}%20high`;
    color = 'orange';
  } else {
    message = 'passing';
    color = 'brightgreen';
  }

  const badgeUrl = `https://img.shields.io/badge/tdd--audit-${message}-${color}`;
  // FIX: badgeUrl was computed but never interpolated — the returned markdown
  // was an empty link `[](...)` that rendered no badge image. Embed the image,
  // and keep the marker as a hidden HTML comment after the badge so
  // injectBadge() can locate and replace the line on subsequent runs.
  return `[![tdd-audit](${badgeUrl})](${NPM_URL}) <!-- ${BADGE_MARKER} -->\n`;
}
|
|
46
|
+
|
|
47
|
+
/**
 * Inject or update the tdd-audit badge in the project's README.md.
 *
 * Behaviour:
 * - Searches for README.md / readme.md / Readme.md / README in the project root.
 * - If a badge line (identified by BADGE_MARKER) already exists, replaces it.
 * - Otherwise inserts the badge immediately after the first `# Heading` line.
 *   If no heading exists, prepends to the file.
 * - No-ops silently when no README is found.
 * - Idempotent: running twice with the same inputs produces the same output.
 *
 * @param {string} projectDir - absolute path to the project root
 * @param {string} badge - badge markdown line from badgeLine()
 */
function injectBadge(projectDir, badge) {
  const candidates = ['README.md', 'readme.md', 'Readme.md', 'README'];
  let readmePath = null;
  for (const name of candidates) {
    const p = path.join(projectDir, name);
    if (fs.existsSync(p)) { readmePath = p; break; }
  }
  if (!readmePath) return;

  const original = fs.readFileSync(readmePath, 'utf8');

  // Replace existing badge line (idempotent + allows re-scan update).
  // CONSISTENCY FIX: the pattern is now derived from BADGE_MARKER instead of
  // duplicating the marker text in a literal regex, so the two cannot drift
  // apart if the marker ever changes.
  if (original.includes(BADGE_MARKER)) {
    const markerLine = new RegExp(`^.*${BADGE_MARKER}.*$`, 'm');
    const updated = original.replace(markerLine, badge.trimEnd());
    fs.writeFileSync(readmePath, updated);
    return;
  }

  // Insert after the first h1 line, or prepend if no h1 exists
  const lines = original.split('\n');
  const h1Idx = lines.findIndex(l => /^#\s/.test(l));

  let updated;
  if (h1Idx !== -1) {
    lines.splice(h1Idx + 1, 0, badge.trimEnd());
    updated = lines.join('\n');
  } else {
    updated = badge.trimEnd() + '\n' + original;
  }

  fs.writeFileSync(readmePath, updated);
}
|
|
93
|
+
|
|
94
|
+
module.exports = { badgeLine, injectBadge, BADGE_MARKER };
|
package/lib/jobs.js
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
'use strict';

const { EventEmitter } = require('events');

// ─── Job store (singleton, in-memory) ────────────────────────────────────────

// Cap on stored jobs; the oldest entries are evicted once the cap is reached.
const MAX_JOBS = 1_000;
// Jobs older than this are dropped on the next eviction pass.
const JOB_TTL_MS = 60 * 60 * 1_000; // 1 hour

// id → job object. Map insertion order doubles as creation order, which the
// capacity eviction relies on.
const jobs = new Map();
// Monotonic counter folded into each generated job id.
let jobSeq = 0;

// EventEmitter used to push job updates to SSE subscribers (one event name per
// job id). The listener cap is raised from Node's default of 10 so many
// concurrent subscribers don't trigger a max-listeners warning.
const _emitter = new EventEmitter();
_emitter.setMaxListeners(500);
|
|
16
|
+
|
|
17
|
+
/**
 * Drop expired jobs (older than JOB_TTL_MS) and, if the store is still at
 * capacity, evict the oldest entries until there is room for one more job.
 */
function evictJobs() {
  const expiredBefore = Date.now() - JOB_TTL_MS;
  for (const [id, job] of jobs) {
    if (new Date(job.createdAt).getTime() < expiredBefore) {
      jobs.delete(id);
    }
  }
  // Map iteration follows insertion order, so the first key is the oldest job.
  while (jobs.size >= MAX_JOBS) {
    const oldest = jobs.keys().next().value;
    jobs.delete(oldest);
  }
}
|
|
26
|
+
|
|
27
|
+
/**
 * Register a new pending job and return its id.
 * Runs an eviction pass first so the store never exceeds its cap; ids combine
 * a monotonically increasing sequence number with a creation timestamp.
 * @returns {string} the new job id
 */
function createJob() {
  evictJobs();
  jobSeq += 1;
  const id = `job_${jobSeq}_${Date.now()}`;
  const job = { id, status: 'pending', createdAt: new Date().toISOString() };
  jobs.set(id, job);
  return id;
}
|
|
33
|
+
|
|
34
|
+
/**
 * Merge a patch into a stored job and notify any SSE subscribers.
 * Unknown ids are ignored silently.
 * @param {string} id - job id
 * @param {Object} patch - fields to overwrite on the stored job object
 */
function updateJob(id, patch) {
  const current = jobs.get(id);
  if (current === undefined) return;
  const next = Object.assign({}, current, patch);
  jobs.set(id, next);
  _emitter.emit(id, next);
}
|
|
41
|
+
|
|
42
|
+
/**
 * Subscribe to live updates for a job.
 * @param {string} id - job id
 * @param {Function} fn - called with the updated job object on every change
 * @returns {Function} - call to unsubscribe
 */
function subscribe(id, fn) {
  _emitter.addListener(id, fn);
  return function unsubscribe() {
    _emitter.removeListener(id, fn);
  };
}
|
|
52
|
+
|
|
53
|
+
module.exports = { jobs, createJob, updateJob, subscribe, evictJobs, MAX_JOBS, JOB_TTL_MS };
|