@lhi/tdd-audit 1.8.4 → 1.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +81 -2
- package/docs/ai-remediation.md +182 -0
- package/docs/rest-api.md +230 -0
- package/index.js +52 -7
- package/lib/config.js +116 -0
- package/lib/github.js +93 -0
- package/lib/remediator.js +181 -0
- package/lib/reporter.js +164 -0
- package/lib/server.js +247 -0
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# @lhi/tdd-audit
|
|
2
2
|
|
|
3
|
-
> **v1.
|
|
3
|
+
> **v1.10.0** — Security skill installer for **Claude Code, Gemini CLI, Cursor, Codex, and OpenCode**. Patches vulnerabilities using a Red-Green-Refactor exploit-test protocol — prove the hole exists, apply the fix, prove it's closed.
|
|
4
4
|
|
|
5
5
|
## Install
|
|
6
6
|
|
|
@@ -25,6 +25,9 @@ On first run the installer:
|
|
|
25
25
|
| `--with-hooks` | Add a pre-commit hook that blocks commits on failing security tests |
|
|
26
26
|
| `--skip-scan` | Skip the vulnerability scan on install |
|
|
27
27
|
| `--scan` / `--scan-only` | Scan only — no install, no code changes |
|
|
28
|
+
| `--json` | Output findings as JSON |
|
|
29
|
+
| `--format sarif` | Output findings as SARIF 2.1.0 (GitHub code scanning) |
|
|
30
|
+
| `--config <path>` | Load config from an explicit file path |
|
|
28
31
|
|
|
29
32
|
### Platform
|
|
30
33
|
|
|
@@ -41,11 +44,87 @@ On first run the installer:
|
|
|
41
44
|
|
|
42
45
|
The agent detects your stack, presents a CRITICAL → LOW findings report, waits for confirmation, then works through each vulnerability one at a time using Red-Green-Refactor. Pass `--scan` for a report-only run with no code changes.
|
|
43
46
|
|
|
47
|
+
## Config file
|
|
48
|
+
|
|
49
|
+
Scaffold a starter config with a single command:
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
npx @lhi/tdd-audit init
|
|
53
|
+
# or at a custom path:
|
|
54
|
+
npx @lhi/tdd-audit init ~/configs/my-audit.json
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
`.tdd-audit.json` — all CLI flags settable here, loaded automatically from your project root:
|
|
58
|
+
|
|
59
|
+
```json
|
|
60
|
+
{
|
|
61
|
+
"provider": "openai",
|
|
62
|
+
"model": "gpt-4o",
|
|
63
|
+
"apiKeyEnv": "OPENAI_API_KEY",
|
|
64
|
+
"baseUrl": null,
|
|
65
|
+
"output": "text",
|
|
66
|
+
"severityThreshold": "LOW",
|
|
67
|
+
"port": 3000,
|
|
68
|
+
"serverApiKey": null,
|
|
69
|
+
"trustProxy": false,
|
|
70
|
+
"ignore": ["node_modules", "dist", "build", "coverage"]
|
|
71
|
+
}
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
Point to a config anywhere with `--config`:
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
npx @lhi/tdd-audit serve --config ~/configs/prod-audit.json
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
## REST API + AI remediation
|
|
81
|
+
|
|
82
|
+
```bash
|
|
83
|
+
# Start the API server
|
|
84
|
+
npx @lhi/tdd-audit serve --port 3000 --api-key YOUR_SECRET
|
|
85
|
+
|
|
86
|
+
# Scan any path → JSON
|
|
87
|
+
curl -X POST http://localhost:3000/scan \
|
|
88
|
+
-H "Authorization: Bearer YOUR_SECRET" \
|
|
89
|
+
-d '{"path": "."}' | jq '.summary'
|
|
90
|
+
|
|
91
|
+
# Use any OpenAI-compatible service (Groq, OpenRouter, Together AI, etc.)
|
|
92
|
+
npx @lhi/tdd-audit serve \
|
|
93
|
+
--provider openai \
|
|
94
|
+
--base-url https://api.groq.com/openai/v1 \
|
|
95
|
+
--api-key $GROQ_API_KEY \
|
|
96
|
+
--model llama-3.3-70b-versatile
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
Supported providers: `anthropic` · `openai` · `gemini` · `ollama` (local) · **any OpenAI-compatible endpoint via `--base-url`**
|
|
100
|
+
|
|
101
|
+
## Output formats
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
npx @lhi/tdd-audit --scan --json # structured JSON
|
|
105
|
+
npx @lhi/tdd-audit --scan --format sarif # GitHub code scanning (inline PR annotations)
|
|
106
|
+
npx @lhi/tdd-audit --scan # human-readable text (default)
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## Testing
|
|
110
|
+
|
|
111
|
+
323 tests across unit, integration, and security suites:
|
|
112
|
+
|
|
113
|
+
```bash
|
|
114
|
+
npm test # full suite
|
|
115
|
+
npm run test:unit # unit tests with coverage
|
|
116
|
+
npm run test:security # security regression tests only
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
Security tests cover prompt injection, path traversal, rate limiting, timing-safe auth, job store bounds, SARIF schema, and more. See [`__tests__/security/`](__tests__/security/) for all 17 regression tests.
|
|
120
|
+
|
|
44
121
|
## Documentation
|
|
45
122
|
|
|
46
123
|
| | |
|
|
47
124
|
|---|---|
|
|
48
|
-
| [
|
|
125
|
+
| [REST API](docs/rest-api.md) | Endpoints, auth, rate limiting, trust-proxy, request/response schema |
|
|
126
|
+
| [AI Remediation](docs/ai-remediation.md) | Provider setup, `--base-url` for compatible APIs, config file |
|
|
127
|
+
| [Scanner](docs/scanner.md) | Architecture, detection logic, false-positive handling |
|
|
49
128
|
| [Vulnerability Patterns](docs/vulnerability-patterns.md) | All 34 patterns — descriptions, grep signatures, fix pointers |
|
|
50
129
|
| [TDD Protocol](docs/tdd-protocol.md) | Red-Green-Refactor in full, with framework templates for all 6 stacks |
|
|
51
130
|
| [Agentic AI Security](docs/agentic-ai-security.md) | ASI01–ASI10 — prompt injection, MCP supply chain, Actions injection |
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
# AI Remediation
|
|
2
|
+
|
|
3
|
+
Pass a provider and API key to have tdd-audit autonomously generate exploit tests, patches, and regression checks for each finding — no agent required.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Config file (recommended)
|
|
8
|
+
|
|
9
|
+
Scaffold once, run anywhere:
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
npx @lhi/tdd-audit init
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
Edit `.tdd-audit.json`:
|
|
16
|
+
|
|
17
|
+
```json
|
|
18
|
+
{
|
|
19
|
+
"provider": "openai",
|
|
20
|
+
"model": "gpt-4o",
|
|
21
|
+
"apiKeyEnv": "OPENAI_API_KEY"
|
|
22
|
+
}
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
`apiKeyEnv` names the environment variable to read the key from — no key ever touches disk. Then just:
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
npx @lhi/tdd-audit serve
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
Point to a config at any path:
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
npx @lhi/tdd-audit serve --config ~/configs/my-audit.json
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## CLI flags
|
|
40
|
+
|
|
41
|
+
```bash
|
|
42
|
+
# Anthropic
|
|
43
|
+
npx @lhi/tdd-audit serve \
|
|
44
|
+
--provider anthropic \
|
|
45
|
+
--api-key $ANTHROPIC_API_KEY
|
|
46
|
+
|
|
47
|
+
# OpenAI
|
|
48
|
+
npx @lhi/tdd-audit serve \
|
|
49
|
+
--provider openai \
|
|
50
|
+
--api-key $OPENAI_API_KEY \
|
|
51
|
+
--model gpt-4o-mini
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
---
|
|
55
|
+
|
|
56
|
+
## OpenAI-compatible services
|
|
57
|
+
|
|
58
|
+
Any service that exposes the OpenAI chat completions API works via `--base-url`.
|
|
59
|
+
The API key is sent in the `Authorization: Bearer` header — never in the URL.
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
# Groq (fast inference)
|
|
63
|
+
npx @lhi/tdd-audit serve \
|
|
64
|
+
--provider openai \
|
|
65
|
+
--base-url https://api.groq.com/openai/v1 \
|
|
66
|
+
--model llama-3.3-70b-versatile \
|
|
67
|
+
--api-key $GROQ_API_KEY
|
|
68
|
+
|
|
69
|
+
# OpenRouter (access 200+ models)
|
|
70
|
+
npx @lhi/tdd-audit serve \
|
|
71
|
+
--provider openai \
|
|
72
|
+
--base-url https://openrouter.ai/api/v1 \
|
|
73
|
+
--model meta-llama/llama-3.3-70b-instruct \
|
|
74
|
+
--api-key $OPENROUTER_API_KEY
|
|
75
|
+
|
|
76
|
+
# Together AI
|
|
77
|
+
npx @lhi/tdd-audit serve \
|
|
78
|
+
--provider openai \
|
|
79
|
+
--base-url https://api.together.xyz/v1 \
|
|
80
|
+
--model mistralai/Mixtral-8x7B-Instruct-v0.1 \
|
|
81
|
+
--api-key $TOGETHER_API_KEY
|
|
82
|
+
|
|
83
|
+
# LM Studio / vLLM / llama.cpp (fully local)
|
|
84
|
+
npx @lhi/tdd-audit serve \
|
|
85
|
+
--provider openai \
|
|
86
|
+
--base-url http://localhost:1234/v1 \
|
|
87
|
+
--model local-model
|
|
88
|
+
# no --api-key needed for local servers
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
In `.tdd-audit.json`:
|
|
92
|
+
|
|
93
|
+
```json
|
|
94
|
+
{
|
|
95
|
+
"provider": "openai",
|
|
96
|
+
"baseUrl": "https://api.groq.com/openai/v1",
|
|
97
|
+
"model": "llama-3.3-70b-versatile",
|
|
98
|
+
"apiKeyEnv": "GROQ_API_KEY"
|
|
99
|
+
}
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
---
|
|
103
|
+
|
|
104
|
+
## Supported providers
|
|
105
|
+
|
|
106
|
+
| Provider | `--provider` | Default model | Key env var | Notes |
|
|
107
|
+
|---|---|---|---|---|
|
|
108
|
+
| Anthropic | `anthropic` | `claude-opus-4-6` | `ANTHROPIC_API_KEY` | |
|
|
109
|
+
| OpenAI | `openai` | `gpt-4o` | `OPENAI_API_KEY` | Supports `--base-url` |
|
|
110
|
+
| Google Gemini | `gemini` | `gemini-2.0-flash` | `GEMINI_API_KEY` | Key sent via `x-goog-api-key` header |
|
|
111
|
+
| Ollama (local) | `ollama` | `llama3` | — | No key required |
|
|
112
|
+
| Any OpenAI-compat | `openai` | — | varies | Set `--base-url` |
|
|
113
|
+
|
|
114
|
+
---
|
|
115
|
+
|
|
116
|
+
## REST API usage
|
|
117
|
+
|
|
118
|
+
```bash
|
|
119
|
+
# 1. Scan and get findings
|
|
120
|
+
FINDINGS=$(curl -s -X POST http://localhost:3000/scan \
|
|
121
|
+
-H "Authorization: Bearer $SERVER_KEY" \
|
|
122
|
+
-H "Content-Type: application/json" \
|
|
123
|
+
-d '{"path": "."}' | jq '.findings')
|
|
124
|
+
|
|
125
|
+
# 2. Submit remediation job (using Groq via --base-url)
|
|
126
|
+
JOB=$(curl -s -X POST http://localhost:3000/remediate \
|
|
127
|
+
-H "Authorization: Bearer $SERVER_KEY" \
|
|
128
|
+
-H "Content-Type: application/json" \
|
|
129
|
+
-d "{
|
|
130
|
+
\"findings\": $FINDINGS,
|
|
131
|
+
\"provider\": \"openai\",
|
|
132
|
+
\"apiKey\": \"$GROQ_API_KEY\",
|
|
133
|
+
\"baseUrl\": \"https://api.groq.com/openai/v1\",
|
|
134
|
+
\"model\": \"llama-3.3-70b-versatile\",
|
|
135
|
+
\"severity\": \"HIGH\"
|
|
136
|
+
}")
|
|
137
|
+
|
|
138
|
+
JOB_ID=$(echo $JOB | jq -r '.jobId')
|
|
139
|
+
|
|
140
|
+
# 3. Poll for results
|
|
141
|
+
curl -s "http://localhost:3000/jobs/$JOB_ID" \
|
|
142
|
+
-H "Authorization: Bearer $SERVER_KEY" | jq '.status'
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
---
|
|
146
|
+
|
|
147
|
+
## What the model returns
|
|
148
|
+
|
|
149
|
+
For each finding the remediator sends a structured prompt and expects back:
|
|
150
|
+
|
|
151
|
+
```json
|
|
152
|
+
{
|
|
153
|
+
"exploitTest": {
|
|
154
|
+
"filename": "__tests__/security/xss-comments.test.js",
|
|
155
|
+
"content": "..."
|
|
156
|
+
},
|
|
157
|
+
"patch": {
|
|
158
|
+
"filename": "src/routes/comments.js",
|
|
159
|
+
"diff": "--- a/src/routes/comments.js\n+++ ..."
|
|
160
|
+
},
|
|
161
|
+
"refactorChecks": ["npm test", "npm run test:security"]
|
|
162
|
+
}
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
The result is returned as-is from the API — review and apply patches manually or pipe into your own automation.
|
|
166
|
+
|
|
167
|
+
---
|
|
168
|
+
|
|
169
|
+
## Ollama (fully local / air-gapped)
|
|
170
|
+
|
|
171
|
+
```bash
|
|
172
|
+
# Pull a code model
|
|
173
|
+
ollama pull codellama
|
|
174
|
+
ollama serve
|
|
175
|
+
|
|
176
|
+
# Run tdd-audit against it
|
|
177
|
+
npx @lhi/tdd-audit serve \
|
|
178
|
+
--provider ollama \
|
|
179
|
+
--model codellama
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
No API key required. Ollama must be running on `http://localhost:11434`.
|
package/docs/rest-api.md
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
# REST API
|
|
2
|
+
|
|
3
|
+
`tdd-audit serve` turns the scanner into an authenticated HTTP API. Use it to integrate vulnerability scanning into dashboards, CI pipelines, bots, or any tooling that speaks JSON.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Start the server
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# Minimal
|
|
11
|
+
npx @lhi/tdd-audit serve --port 3000 --api-key YOUR_SECRET
|
|
12
|
+
|
|
13
|
+
# With config file (recommended)
|
|
14
|
+
npx @lhi/tdd-audit init # scaffold .tdd-audit.json
|
|
15
|
+
npx @lhi/tdd-audit serve # reads config automatically
|
|
16
|
+
|
|
17
|
+
# Point to a config anywhere
|
|
18
|
+
npx @lhi/tdd-audit serve --config ~/configs/prod.json
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
**`.tdd-audit.json` server options:**
|
|
22
|
+
|
|
23
|
+
```json
|
|
24
|
+
{
|
|
25
|
+
"port": 3000,
|
|
26
|
+
"serverApiKey": "YOUR_SECRET",
|
|
27
|
+
"output": "json",
|
|
28
|
+
"trustProxy": false
|
|
29
|
+
}
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
If `--api-key` / `serverApiKey` is omitted the server starts unauthenticated with a warning. Always set one in production.
|
|
33
|
+
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## Security
|
|
37
|
+
|
|
38
|
+
### Authentication
|
|
39
|
+
|
|
40
|
+
All endpoints except `GET /health` require:
|
|
41
|
+
|
|
42
|
+
```
|
|
43
|
+
Authorization: Bearer YOUR_SECRET
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Missing or wrong key → `401 Unauthorized`.
|
|
47
|
+
|
|
48
|
+
Tokens are compared using **HMAC + `crypto.timingSafeEqual`** to prevent timing-oracle attacks.
|
|
49
|
+
|
|
50
|
+
### Rate limiting
|
|
51
|
+
|
|
52
|
+
All endpoints are rate-limited to **60 requests / IP / minute** (default). Exceeding the limit returns `429 Too Many Requests`.
|
|
53
|
+
|
|
54
|
+
By default the rate limiter keys on the **socket IP**, not `X-Forwarded-For`, to prevent header-spoofing bypasses. Enable proxy-forwarded IPs only if you are behind a trusted reverse proxy:
|
|
55
|
+
|
|
56
|
+
```json
|
|
57
|
+
{ "trustProxy": true }
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
### Path validation
|
|
61
|
+
|
|
62
|
+
`POST /scan` validates that the requested path is inside the server's working directory (normalised with a trailing separator to prevent sibling-directory prefix bypasses). Paths outside cwd return `400`.
|
|
63
|
+
|
|
64
|
+
### Security headers
|
|
65
|
+
|
|
66
|
+
Every response includes:
|
|
67
|
+
|
|
68
|
+
```
|
|
69
|
+
X-Content-Type-Options: nosniff
|
|
70
|
+
X-Frame-Options: DENY
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## Endpoints
|
|
76
|
+
|
|
77
|
+
### `GET /health`
|
|
78
|
+
|
|
79
|
+
No auth required. Returns server status and version.
|
|
80
|
+
|
|
81
|
+
```json
|
|
82
|
+
{ "status": "ok", "version": "1.10.0" }
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
---
|
|
86
|
+
|
|
87
|
+
### `POST /scan`
|
|
88
|
+
|
|
89
|
+
Scan a local path and return structured findings.
|
|
90
|
+
|
|
91
|
+
**Request**
|
|
92
|
+
```json
|
|
93
|
+
{
|
|
94
|
+
"path": ".",
|
|
95
|
+
"format": "json"
|
|
96
|
+
}
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
| Field | Type | Default | Description |
|
|
100
|
+
|---|---|---|---|
|
|
101
|
+
| `path` | string | cwd | Absolute or relative path to scan. Must be inside server cwd. |
|
|
102
|
+
| `format` | `"json"` \| `"sarif"` | `"json"` | Output format |
|
|
103
|
+
|
|
104
|
+
**Response — JSON**
|
|
105
|
+
```json
|
|
106
|
+
{
|
|
107
|
+
"version": "1.10.0",
|
|
108
|
+
"summary": { "CRITICAL": 1, "HIGH": 3, "MEDIUM": 1, "LOW": 0 },
|
|
109
|
+
"findings": [ ... ],
|
|
110
|
+
"likelyFalsePositives": [ ... ],
|
|
111
|
+
"exempted": [],
|
|
112
|
+
"scannedAt": "2026-03-25T12:00:00.000Z",
|
|
113
|
+
"duration": 42
|
|
114
|
+
}
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
**Response — SARIF**
|
|
118
|
+
|
|
119
|
+
Returns a SARIF 2.1.0 object ready to upload to GitHub code scanning.
|
|
120
|
+
|
|
121
|
+
**Errors**
|
|
122
|
+
|
|
123
|
+
| Status | Reason |
|
|
124
|
+
|---|---|
|
|
125
|
+
| 400 | Path traversal attempt, sibling-directory bypass, oversized body (> 512 KB), or invalid JSON |
|
|
126
|
+
| 401 | Missing or invalid API key |
|
|
127
|
+
| 429 | Rate limit exceeded |
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
### `POST /remediate`
|
|
132
|
+
|
|
133
|
+
Queue an AI-powered remediation job. Returns immediately with a `jobId`; poll `/jobs/:id` for results.
|
|
134
|
+
|
|
135
|
+
The server stores up to **1,000 jobs** in memory (TTL: 1 hour). Oldest jobs are evicted when the cap is reached.
|
|
136
|
+
|
|
137
|
+
**Request**
|
|
138
|
+
```json
|
|
139
|
+
{
|
|
140
|
+
"findings": [ ... ],
|
|
141
|
+
"provider": "openai",
|
|
142
|
+
"apiKey": "sk-...",
|
|
143
|
+
"model": "gpt-4o",
|
|
144
|
+
"baseUrl": "https://api.groq.com/openai/v1",
|
|
145
|
+
"severity": "HIGH"
|
|
146
|
+
}
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
| Field | Required | Description |
|
|
150
|
+
|---|---|---|
|
|
151
|
+
| `findings` | yes | Array of finding objects from `POST /scan` |
|
|
152
|
+
| `provider` | yes | `anthropic` \| `openai` \| `gemini` \| `ollama` |
|
|
153
|
+
| `apiKey` | yes | Provider API key |
|
|
154
|
+
| `model` | no | Defaults per provider (see [AI Remediation](ai-remediation.md)) |
|
|
155
|
+
| `baseUrl` | no | Override base URL for any OpenAI-compatible service |
|
|
156
|
+
| `severity` | no | Minimum severity to fix. Default: `LOW` (fix all) |
|
|
157
|
+
|
|
158
|
+
**Response**
|
|
159
|
+
```json
|
|
160
|
+
{ "jobId": "job_1_1711363200000" }
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
165
|
+
### `GET /jobs/:id`
|
|
166
|
+
|
|
167
|
+
Poll for remediation job status.
|
|
168
|
+
|
|
169
|
+
**Response — pending / running**
|
|
170
|
+
```json
|
|
171
|
+
{ "id": "job_1_...", "status": "pending", "createdAt": "..." }
|
|
172
|
+
```
|
|
173
|
+
|
|
174
|
+
**Response — done**
|
|
175
|
+
```json
|
|
176
|
+
{
|
|
177
|
+
"id": "job_1_...",
|
|
178
|
+
"status": "done",
|
|
179
|
+
"createdAt": "...",
|
|
180
|
+
"startedAt": "...",
|
|
181
|
+
"completedAt": "...",
|
|
182
|
+
"results": [
|
|
183
|
+
{
|
|
184
|
+
"finding": { ... },
|
|
185
|
+
"status": "remediated",
|
|
186
|
+
"exploitTest": { "filename": "__tests__/security/xss.test.js", "content": "..." },
|
|
187
|
+
"patch": { "filename": "src/app.js", "diff": "..." },
|
|
188
|
+
"refactorChecks": ["npm test", "npm run test:security"]
|
|
189
|
+
}
|
|
190
|
+
]
|
|
191
|
+
}
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
---
|
|
195
|
+
|
|
196
|
+
## Examples
|
|
197
|
+
|
|
198
|
+
### curl
|
|
199
|
+
|
|
200
|
+
```bash
|
|
201
|
+
# Start server
|
|
202
|
+
npx @lhi/tdd-audit serve --port 3000 --api-key mysecret &
|
|
203
|
+
|
|
204
|
+
# Scan current directory
|
|
205
|
+
curl -s -X POST http://localhost:3000/scan \
|
|
206
|
+
-H "Authorization: Bearer mysecret" \
|
|
207
|
+
-H "Content-Type: application/json" \
|
|
208
|
+
-d '{"path": "."}' | jq '.summary'
|
|
209
|
+
|
|
210
|
+
# SARIF output for GitHub upload
|
|
211
|
+
curl -s -X POST http://localhost:3000/scan \
|
|
212
|
+
-H "Authorization: Bearer mysecret" \
|
|
213
|
+
-H "Content-Type: application/json" \
|
|
214
|
+
-d '{"path": ".", "format": "sarif"}' > results.sarif
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
### Node.js
|
|
218
|
+
|
|
219
|
+
```javascript
|
|
220
|
+
const res = await fetch('http://localhost:3000/scan', {
|
|
221
|
+
method: 'POST',
|
|
222
|
+
headers: {
|
|
223
|
+
'Authorization': 'Bearer mysecret',
|
|
224
|
+
'Content-Type': 'application/json',
|
|
225
|
+
},
|
|
226
|
+
body: JSON.stringify({ path: '/path/to/project' }),
|
|
227
|
+
});
|
|
228
|
+
const { findings, summary } = await res.json();
|
|
229
|
+
console.log(`CRITICAL: ${summary.CRITICAL} HIGH: ${summary.HIGH}`);
|
|
230
|
+
```
|
package/index.js
CHANGED
|
@@ -11,13 +11,25 @@ const {
|
|
|
11
11
|
quickScan,
|
|
12
12
|
printFindings,
|
|
13
13
|
} = require('./lib/scanner');
|
|
14
|
+
const { toJson, toSarif, toText } = require('./lib/reporter');
|
|
15
|
+
const { writeInitConfig } = require('./lib/config');
|
|
14
16
|
|
|
15
17
|
const args = process.argv.slice(2);
|
|
16
|
-
const isLocal
|
|
17
|
-
const isClaude
|
|
18
|
+
const isLocal = args.includes('--local');
|
|
19
|
+
const isClaude = args.includes('--claude');
|
|
18
20
|
const withHooks = args.includes('--with-hooks');
|
|
19
|
-
const skipScan
|
|
20
|
-
const scanOnly
|
|
21
|
+
const skipScan = args.includes('--skip-scan');
|
|
22
|
+
const scanOnly = args.includes('--scan-only') || args.includes('--scan');
|
|
23
|
+
const isServe = args[0] === 'serve';
|
|
24
|
+
|
|
25
|
+
// --json or --format json → structured JSON output
|
|
26
|
+
// --format sarif → SARIF 2.1.0 output
|
|
27
|
+
const formatIdx = args.indexOf('--format');
|
|
28
|
+
const formatArg = formatIdx !== -1 ? args[formatIdx + 1] : null;
|
|
29
|
+
const outputFormat = args.includes('--json') ? 'json'
|
|
30
|
+
: formatArg === 'sarif' ? 'sarif'
|
|
31
|
+
: formatArg === 'json' ? 'json'
|
|
32
|
+
: 'text';
|
|
21
33
|
|
|
22
34
|
const agentBaseDir = isLocal ? process.cwd() : os.homedir();
|
|
23
35
|
const agentDirName = isClaude ? '.claude' : '.agents';
|
|
@@ -33,13 +45,46 @@ const framework = detectFramework(projectDir);
|
|
|
33
45
|
const testBaseDir = detectTestBaseDir(projectDir, framework);
|
|
34
46
|
const targetTestDir = path.join(projectDir, testBaseDir, 'security');
|
|
35
47
|
|
|
48
|
+
// ─── Init mode early exit ────────────────────────────────────────────────────
|
|
49
|
+
|
|
50
|
+
if (args[0] === 'init') {
|
|
51
|
+
const destArg = args[1] && !args[1].startsWith('-') ? args[1] : undefined;
|
|
52
|
+
const force = args.includes('--force');
|
|
53
|
+
try {
|
|
54
|
+
const written = writeInitConfig(destArg, force);
|
|
55
|
+
console.log(`✅ Created ${path.relative(process.cwd(), written)}`);
|
|
56
|
+
console.log(' Edit it, then run: node index.js serve or node index.js --scan');
|
|
57
|
+
} catch (e) {
|
|
58
|
+
console.error(`❌ ${e.message}`);
|
|
59
|
+
process.exit(1);
|
|
60
|
+
}
|
|
61
|
+
process.exit(0);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
// ─── Serve mode early exit ────────────────────────────────────────────────────
|
|
65
|
+
|
|
66
|
+
if (isServe) {
|
|
67
|
+
require('./lib/server').start(args);
|
|
68
|
+
return; // server stays alive — do not fall through to installer
|
|
69
|
+
}
|
|
70
|
+
|
|
36
71
|
// ─── Scan-only early exit ─────────────────────────────────────────────────────
|
|
37
72
|
|
|
38
73
|
if (scanOnly) {
|
|
39
|
-
process.stdout.write('\n🔍 Scanning
|
|
74
|
+
if (outputFormat !== 'text') process.stdout.write('\n🔍 Scanning...\n');
|
|
75
|
+
else process.stdout.write('\n🔍 Scanning for vulnerability patterns...');
|
|
40
76
|
const findings = quickScan(projectDir);
|
|
41
|
-
|
|
42
|
-
|
|
77
|
+
const exempted = findings.exempted || [];
|
|
78
|
+
if (outputFormat === 'json') {
|
|
79
|
+
process.stdout.write('\n');
|
|
80
|
+
console.log(JSON.stringify(toJson(findings, exempted), null, 2));
|
|
81
|
+
} else if (outputFormat === 'sarif') {
|
|
82
|
+
process.stdout.write('\n');
|
|
83
|
+
console.log(JSON.stringify(toSarif(findings, projectDir), null, 2));
|
|
84
|
+
} else {
|
|
85
|
+
process.stdout.write('\n');
|
|
86
|
+
printFindings(findings, exempted);
|
|
87
|
+
}
|
|
43
88
|
process.exit(0);
|
|
44
89
|
}
|
|
45
90
|
|