codebase-auditor 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +22 -0
- package/.claude-flow/swarm/swarm-state.json +23 -0
- package/.env.example +1 -0
- package/50) +0 -0
- package/README.md +88 -0
- package/audit.js +76 -0
- package/console.log(process.env.ANTHROPIC_API_KEY +0 -0
- package/package.json +25 -0
- package/src/agents/dependencies.js +79 -0
- package/src/agents/docs.js +54 -0
- package/src/agents/performance.js +54 -0
- package/src/agents/quality.js +53 -0
- package/src/agents/security.js +54 -0
- package/src/agents/tests.js +53 -0
- package/src/parseJson.js +24 -0
- package/src/reporter.js +240 -0
- package/src/scanner.js +46 -0
- package/{ +0 -0
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
{
|
|
2
|
+
"permissions": {
|
|
3
|
+
"allow": [
|
|
4
|
+
"mcp__claude-flow__swarm_init",
|
|
5
|
+
"Bash(npm install:*)",
|
|
6
|
+
"Bash(node audit.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\toddler-activities\")",
|
|
7
|
+
"Bash(notepad .env)",
|
|
8
|
+
"Bash(node audit.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\village\")",
|
|
9
|
+
"Bash(node audit.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\audit-test-repo\")",
|
|
10
|
+
"Bash(node audit.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\dotenv-test\")",
|
|
11
|
+
"Bash(node src/index.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\dotenv-test\")",
|
|
12
|
+
"Bash(node index.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\dotenv-test\")",
|
|
13
|
+
"Bash(ls *.js *.mjs *.ts)",
|
|
14
|
+
"Bash(python3 -c \"import sys,json; d=json.load\\(sys.stdin\\); print\\('main:', d.get\\('main'\\)\\); print\\('scripts:', d.get\\('scripts'\\)\\)\")",
|
|
15
|
+
"Bash(node audit.js \"C:\\\\Users\\\\Still Pending\\\\Projects\\\\chalk-test\")"
|
|
16
|
+
]
|
|
17
|
+
},
|
|
18
|
+
"enableAllProjectMcpServers": true,
|
|
19
|
+
"enabledMcpjsonServers": [
|
|
20
|
+
"claude-flow"
|
|
21
|
+
]
|
|
22
|
+
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
{
|
|
2
|
+
"swarms": {
|
|
3
|
+
"swarm-1775182478458-20duxg": {
|
|
4
|
+
"swarmId": "swarm-1775182478458-20duxg",
|
|
5
|
+
"topology": "hierarchical",
|
|
6
|
+
"maxAgents": 8,
|
|
7
|
+
"status": "running",
|
|
8
|
+
"agents": [],
|
|
9
|
+
"tasks": [],
|
|
10
|
+
"config": {
|
|
11
|
+
"topology": "hierarchical",
|
|
12
|
+
"maxAgents": 8,
|
|
13
|
+
"strategy": "balanced",
|
|
14
|
+
"communicationProtocol": "message-bus",
|
|
15
|
+
"autoScaling": true,
|
|
16
|
+
"consensusMechanism": "majority"
|
|
17
|
+
},
|
|
18
|
+
"createdAt": "2026-04-03T02:14:38.458Z",
|
|
19
|
+
"updatedAt": "2026-04-03T02:14:38.458Z"
|
|
20
|
+
}
|
|
21
|
+
},
|
|
22
|
+
"version": "3.0.0"
|
|
23
|
+
}
|
package/.env.example
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
ANTHROPIC_API_KEY=your_key_here
|
package/50)
ADDED
|
File without changes
|
package/README.md
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
# Codebase Auditor
|
|
2
|
+
|
|
3
|
+
An AI-powered CLI tool that audits your codebase across six dimensions using the Anthropic API. Point it at any project and get a structured Markdown report with prioritized findings and fix suggestions.
|
|
4
|
+
|
|
5
|
+
## What It Does
|
|
6
|
+
|
|
7
|
+
Codebase Auditor runs six parallel AI agents against your source files, each focused on a specific dimension:
|
|
8
|
+
|
|
9
|
+
- **Security** — Hardcoded secrets, SQL injection, unsafe `eval()`, insecure patterns
|
|
10
|
+
- **Performance** — Blocking sync operations, N+1 queries, memory leaks, inefficient loops
|
|
11
|
+
- **Test Coverage** — Untested exports, missing error-path tests, weak assertions
|
|
12
|
+
- **Documentation** — Missing JSDoc, undocumented parameters, README gaps
|
|
13
|
+
- **Dependencies** — Vulnerable packages, abandoned libraries, license conflicts
|
|
14
|
+
- **Code Quality** — Long functions, deep nesting, duplicate code, dead code
|
|
15
|
+
|
|
16
|
+
All six agents run simultaneously via `Promise.all()` and their results are merged into a single `audit-report.md` file.
|
|
17
|
+
|
|
18
|
+
## Requirements
|
|
19
|
+
|
|
20
|
+
- Node.js 18 or higher
|
|
21
|
+
- An [Anthropic API key](https://console.anthropic.com/)
|
|
22
|
+
|
|
23
|
+
## Installation
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
git clone https://github.com/your-username/codebase-auditor.git
|
|
27
|
+
cd codebase-auditor
|
|
28
|
+
npm install
|
|
29
|
+
cp .env.example .env
|
|
30
|
+
# Edit .env and add your Anthropic API key
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Usage
|
|
34
|
+
|
|
35
|
+
```bash
|
|
36
|
+
node audit.js ./your-project
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
Replace `./your-project` with the path to the directory you want to audit. Defaults to `./` if no path is given.
|
|
40
|
+
|
|
41
|
+
## What Gets Checked
|
|
42
|
+
|
|
43
|
+
| Dimension | What the Agent Looks For |
|
|
44
|
+
|-----------|--------------------------|
|
|
45
|
+
| Security | Hardcoded credentials, API keys, SQL injection, `eval()` misuse, path traversal |
|
|
46
|
+
| Performance | `readFileSync` in async functions, nested loops on large data, N+1 DB calls, memory leaks |
|
|
47
|
+
| Tests | Exported functions with no tests, missing edge-case coverage, trivial test files |
|
|
48
|
+
| Docs | Public functions without JSDoc, missing `@param`/`@returns`, README without usage section |
|
|
49
|
+
| Dependencies | CVE-prone packages, unmaintained libraries (3+ years), `devDependencies` in wrong section |
|
|
50
|
+
| Quality | Functions >60 lines, 5+ nesting levels, duplicated blocks, `console.log` in production code |
|
|
51
|
+
|
|
52
|
+
## Example Output
|
|
53
|
+
|
|
54
|
+
```
|
|
55
|
+
Codebase Auditor — scanning: /home/user/my-app
|
|
56
|
+
|
|
57
|
+
Scanning files...
|
|
58
|
+
Found 42 files to audit.
|
|
59
|
+
|
|
60
|
+
Starting Security agent...
|
|
61
|
+
Starting Performance agent...
|
|
62
|
+
Starting Tests agent...
|
|
63
|
+
Starting Docs agent...
|
|
64
|
+
Starting Dependencies agent...
|
|
65
|
+
Starting Quality agent...
|
|
66
|
+
|
|
67
|
+
Security agent complete.
|
|
68
|
+
Performance agent complete.
|
|
69
|
+
...
|
|
70
|
+
|
|
71
|
+
Audit complete. 17 total findings:
|
|
72
|
+
|
|
73
|
+
🔴 Critical: 2
|
|
74
|
+
🟠 High: 5
|
|
75
|
+
🟡 Medium: 7
|
|
76
|
+
🟢 Low: 3
|
|
77
|
+
|
|
78
|
+
Report saved to: /home/user/my-app/audit-report.md
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
The generated `audit-report.md` contains an executive summary table and a dedicated section for each audit dimension with severity labels, affected files, descriptions, and fix suggestions.
|
|
82
|
+
|
|
83
|
+
## Contributing
|
|
84
|
+
|
|
85
|
+
1. Fork the repository
|
|
86
|
+
2. Create a feature branch (`git checkout -b feature/my-feature`)
|
|
87
|
+
3. Make your changes and run `npm test`
|
|
88
|
+
4. Submit a pull request with a clear description of the change
|
package/audit.js
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { resolve, basename } from 'path';
|
|
4
|
+
import { scanDirectory } from './src/scanner.js';
|
|
5
|
+
import { runSecurityAudit } from './src/agents/security.js';
|
|
6
|
+
import { runPerformanceAudit } from './src/agents/performance.js';
|
|
7
|
+
import { runTestAudit } from './src/agents/tests.js';
|
|
8
|
+
import { runDocsAudit } from './src/agents/docs.js';
|
|
9
|
+
import { runDependencyAudit } from './src/agents/dependencies.js';
|
|
10
|
+
import { runQualityAudit } from './src/agents/quality.js';
|
|
11
|
+
import { generateReport } from './src/reporter.js';
|
|
12
|
+
|
|
13
|
+
/**
 * CLI entry point: scans the target directory, runs all six audit agents
 * in parallel (as documented in the README), and writes audit-report.md.
 *
 * Target path comes from argv[2] and defaults to the current directory.
 * Exits 0 early when no auditable files are found; exits 1 on any failure
 * (see the .catch() handler at the bottom of this file).
 *
 * @returns {Promise<void>}
 */
async function main() {
  const targetPath = process.argv[2] || './';
  const absTarget = resolve(targetPath);
  const projectName = basename(absTarget);

  console.log(`\nCodebase Auditor — scanning: ${absTarget}\n`);

  console.log('Scanning files...');
  const { files, packageJson } = await scanDirectory(absTarget);
  console.log(`Found ${files.length} files to audit.\n`);

  if (files.length === 0) {
    console.log('No auditable files found. Exiting.');
    process.exit(0);
  }

  const agentNames = ['Security', 'Performance', 'Tests', 'Docs', 'Dependencies', 'Quality'];
  agentNames.forEach(name => console.log(`Starting ${name} agent...`));
  console.log('');

  // Log each agent's completion as it finishes, without serializing the runs.
  const track = (name, promise) =>
    promise.then((result) => {
      console.log(`${name} agent complete.`);
      return result;
    });

  // All six agents are independent, so run them concurrently. This matches
  // the README ("All six agents run simultaneously via Promise.all()") and
  // replaces the previous sequential awaits, which contradicted the
  // "Starting ..." banner printed above for all six agents at once.
  const [security, performance, tests, docs, dependencies, quality] = await Promise.all([
    track('Security', runSecurityAudit(files)),
    track('Performance', runPerformanceAudit(files)),
    track('Tests', runTestAudit(files)),
    track('Docs', runDocsAudit(files)),
    track('Dependencies', runDependencyAudit(packageJson)),
    track('Quality', runQualityAudit(files)),
  ]);

  console.log('\nGenerating report...');

  const { outputPath, totals, totalFindings } = await generateReport(
    { security, performance, tests, docs, dependencies, quality },
    projectName,
    absTarget
  );

  console.log(`\nAudit complete. ${totalFindings} total findings:\n`);
  console.log(`  🔴 Critical: ${totals.critical}`);
  console.log(`  🟠 High: ${totals.high}`);
  console.log(`  🟡 Medium: ${totals.medium}`);
  console.log(`  🟢 Low: ${totals.low}`);
  console.log(`\nReport saved to: ${outputPath}\n`);
}

main().catch(err => {
  console.error('Audit failed:', err.message);
  process.exit(1);
});
|
|
File without changes
|
package/package.json
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "codebase-auditor",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "AI-powered codebase auditor. Scans your project with 6 specialized agents and generates a structured report with Risk and Quality scores.",
|
|
5
|
+
"main": "audit.js",
|
|
6
|
+
"bin": {
|
|
7
|
+
"codebase-auditor": "./audit.js"
|
|
8
|
+
},
|
|
9
|
+
"scripts": {
|
|
10
|
+
"test": "echo \"Error: no test specified\" && exit 1"
|
|
11
|
+
},
|
|
12
|
+
"keywords": ["audit", "code-quality", "security", "static-analysis", "ai", "claude", "codebase"],
|
|
13
|
+
"author": "skynetendofhumanraise-beep",
|
|
14
|
+
"license": "MIT",
|
|
15
|
+
"homepage": "https://github.com/skynetendofhumanraise-beep/codebase-auditor",
|
|
16
|
+
"repository": {
|
|
17
|
+
"type": "git",
|
|
18
|
+
"url": "https://github.com/skynetendofhumanraise-beep/codebase-auditor.git"
|
|
19
|
+
},
|
|
20
|
+
"type": "module",
|
|
21
|
+
"dependencies": {
|
|
22
|
+
"@anthropic-ai/sdk": "^0.82.0",
|
|
23
|
+
"dotenv": "^17.4.0"
|
|
24
|
+
}
|
|
25
|
+
}
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { parseJsonResponse } from '../parseJson.js';
|
|
4
|
+
|
|
5
|
+
// Module-level Anthropic client; the API key is loaded from .env by dotenv.
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

/**
 * Audits a project's package.json for dependency risks: known CVEs,
 * deprecated or abandoned packages, devDependencies used in production,
 * and license conflicts.
 *
 * @param {object|null|undefined} packageJson - Parsed package.json, or null/undefined when none was found.
 * @returns {Promise<{findings: Array<{severity: string, title: string, file: string, description: string, fix: string}>}>}
 *   Findings in the shared report shape; synthesizes a single LOW finding
 *   (without calling the API) when there is no package.json or no declared deps.
 * @throws {Error} Propagates Anthropic API errors; throws when the model
 *   response contains no text content block.
 */
export async function runDependencyAudit(packageJson) {
  if (packageJson == null) {
    return {
      findings: [{
        severity: 'low',
        title: 'No package.json found',
        file: 'package.json',
        description: 'No package.json was detected in this project. Dependency audit was skipped.',
        fix: 'If this project uses npm packages, ensure a package.json exists in the root directory.',
      }],
    };
  }

  const hasDeps = packageJson.dependencies && Object.keys(packageJson.dependencies).length > 0;
  const hasDevDeps = packageJson.devDependencies && Object.keys(packageJson.devDependencies).length > 0;

  if (!hasDeps && !hasDevDeps) {
    return {
      findings: [{
        severity: 'low',
        title: 'No dependencies declared',
        file: 'package.json',
        description: 'The package.json exists but declares no dependencies or devDependencies.',
        fix: 'If this project relies on external packages, add them to the appropriate section in package.json.',
      }],
    };
  }

  const packageSummary = JSON.stringify(packageJson, null, 2);

  const prompt = `You are a dependency security and maintenance auditor. Analyze the following package.json for genuine dependency risks.

SHARED SEVERITY RULES (apply to all findings):
- CRITICAL: Only for issues that directly enable data breach, code execution, or system compromise. Examples: hardcoded credentials, eval() with user input, SQL injection. Never use Critical for code quality, documentation, or test coverage issues.
- HIGH: Significant issues that meaningfully increase risk or maintenance burden. One HIGH per distinct problem, never multiple HIGHs for sub-parts of the same issue.
- MEDIUM: Real issues worth fixing but not urgent. Missing documentation, weak test coverage, performance patterns that could cause problems at scale.
- LOW: Suggestions and minor improvements. Style issues, optional optimizations, nice-to-have documentation.
- Never file more than one finding per function per issue type. If a function has multiple related problems, combine them into one finding.

FLAG these issues:
- Packages with known CVEs or confirmed security vulnerabilities at the version range declared
- Packages explicitly deprecated by their maintainers with a stated recommended replacement
- Packages with no releases in 3+ years that are not intentionally in maintenance-only mode
- devDependencies that are actually imported or required in production source code
- Dependencies with licenses that are restrictive or incompatible with this project's license

DO NOT flag:
- Packages that are stable and intentionally not updated frequently (e.g., dotenv — stability is a feature, not a defect)
- Minor patch or minor version differences from the absolute latest release
- TypeScript type packages (@types/*) being slightly behind the runtime package version
- Packages explicitly in maintenance mode that still receive security patches

SEVERITY for dependencies:
- Packages with known CVEs = CRITICAL if the vulnerability is critical/high severity, or HIGH if the CVE is moderate
- Packages explicitly deprecated with no security coverage = MEDIUM
- Packages outdated but still actively maintained and receiving security fixes = LOW

package.json to audit:
${packageSummary}

Respond with ONLY valid JSON in this exact shape, no markdown, no explanation:
{"findings":[{"severity":"critical|high|medium|low","title":"string","file":"package.json","description":"string","fix":"string"}]}

If no issues found, return: {"findings":[]}`;

  const message = await client.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 2000,
    messages: [{ role: 'user', content: prompt }],
  });

  // The Messages API can return non-text content blocks, so do not assume
  // content[0] is text — the old `message.content[0].text` threw an opaque
  // TypeError in that case. Find the text block explicitly.
  const textBlock = message.content.find((block) => block.type === 'text');
  if (!textBlock) {
    throw new Error('Anthropic response contained no text content block');
  }
  return parseJsonResponse(textBlock.text);
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { parseJsonResponse } from '../parseJson.js';
|
|
4
|
+
|
|
5
|
+
// Module-level Anthropic client; the API key is loaded from .env by dotenv.
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

/**
 * Audits source files for documentation gaps: missing JSDoc on exported
 * functions, READMEs without usage instructions, and unexplained complex
 * algorithms.
 *
 * @param {Array<{path: string, content: string}>} files - Scanned source files to audit.
 * @returns {Promise<{findings: Array<{severity: string, title: string, file: string, description: string, fix: string}>}>}
 * @throws {Error} Propagates Anthropic API errors; throws when the model
 *   response contains no text content block.
 */
export async function runDocsAudit(files) {
  // One concatenated blob with per-file headers so the model can attribute findings.
  const filesSummary = files.map(f => `// File: ${f.path}\n${f.content}`).join('\n\n---\n\n');

  const prompt = `You are a technical writer and documentation auditor. Analyze the following source files for meaningful documentation gaps that would hurt a developer trying to use this code.

SHARED SEVERITY RULES (apply to all findings):
- CRITICAL: Only for issues that directly enable data breach, code execution, or system compromise. Examples: hardcoded credentials, eval() with user input, SQL injection. Never use Critical for code quality, documentation, or test coverage issues.
- HIGH: Significant issues that meaningfully increase risk or maintenance burden. One HIGH per distinct problem, never multiple HIGHs for sub-parts of the same issue.
- MEDIUM: Real issues worth fixing but not urgent. Missing documentation, weak test coverage, performance patterns that could cause problems at scale.
- LOW: Suggestions and minor improvements. Style issues, optional optimizations, nice-to-have documentation.
- Never file more than one finding per function per issue type. If a function has multiple related problems, combine them into one finding.

FLAG these issues:
- Exported public functions with no JSDoc comment at all
- README files that have no usage instructions or examples
- Complex algorithms (cryptographic operations, custom parsers, non-obvious bitwise operations) with no inline explanation of what they do or why

DO NOT flag:
- Internal private functions prefixed with an underscore (_)
- Simple one-liner functions where the function name makes the behavior completely obvious
- Missing @param or @returns tags when the function signature and types are self-evident
- Getters and setters

CONSOLIDATION RULE — STRICTLY ENFORCED: If a function is missing its JSDoc entirely, file EXACTLY ONE finding that covers the missing description, @param tags, and @returns tags all together. Never file separate findings for different missing JSDoc elements on the same function. Only file separate @param or @returns findings if the function already has a JSDoc description but is specifically missing those tags.

SEVERITY for documentation:
- NEVER use CRITICAL for any documentation finding — documentation gaps are never critical
- Missing JSDoc on primary public API functions = MEDIUM
- Missing README usage section = MEDIUM
- Missing inline explanation for complex algorithms = LOW
- Missing @param or @returns on functions that already have a JSDoc description = LOW

Files to audit:
${filesSummary}

Respond with ONLY valid JSON in this exact shape, no markdown, no explanation:
{"findings":[{"severity":"critical|high|medium|low","title":"string","file":"string","description":"string","fix":"string"}]}

If no issues found, return: {"findings":[]}`;

  const message = await client.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 2000,
    messages: [{ role: 'user', content: prompt }],
  });

  // The Messages API can return non-text content blocks, so do not assume
  // content[0] is text — the old `message.content[0].text` threw an opaque
  // TypeError in that case. Find the text block explicitly.
  const textBlock = message.content.find((block) => block.type === 'text');
  if (!textBlock) {
    throw new Error('Anthropic response contained no text content block');
  }
  return parseJsonResponse(textBlock.text);
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { parseJsonResponse } from '../parseJson.js';
|
|
4
|
+
|
|
5
|
+
// Module-level Anthropic client; the API key is loaded from .env by dotenv.
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

/**
 * Audits source files for real-world performance problems: blocking sync
 * I/O on hot paths, O(n²) loops, repeated DOM queries, N+1 call patterns,
 * and leaked event listeners.
 *
 * @param {Array<{path: string, content: string}>} files - Scanned source files to audit.
 * @returns {Promise<{findings: Array<{severity: string, title: string, file: string, description: string, fix: string}>}>}
 * @throws {Error} Propagates Anthropic API errors; throws when the model
 *   response contains no text content block.
 */
export async function runPerformanceAudit(files) {
  // One concatenated blob with per-file headers so the model can attribute findings.
  const filesSummary = files.map(f => `// File: ${f.path}\n${f.content}`).join('\n\n---\n\n');

  const prompt = `You are a performance engineer. Analyze the following source files for genuine performance problems that would affect real users.

SHARED SEVERITY RULES (apply to all findings):
- CRITICAL: Only for issues that directly enable data breach, code execution, or system compromise. Examples: hardcoded credentials, eval() with user input, SQL injection. Never use Critical for code quality, documentation, or test coverage issues.
- HIGH: Significant issues that meaningfully increase risk or maintenance burden. One HIGH per distinct problem, never multiple HIGHs for sub-parts of the same issue.
- MEDIUM: Real issues worth fixing but not urgent. Missing documentation, weak test coverage, performance patterns that could cause problems at scale.
- LOW: Suggestions and minor improvements. Style issues, optional optimizations, nice-to-have documentation.
- Never file more than one finding per function per issue type. If a function has multiple related problems, combine them into one finding.

FLAG these issues:
- fs.readFileSync inside async functions, inside request handlers, or inside loops that execute repeatedly at runtime
- Inefficient O(n²) patterns — nested loops iterating the same dataset
- Repeated DOM queries (e.g., document.querySelector) inside loops or render functions that run frequently
- N+1 patterns — database or API calls made inside loops instead of batched
- Event listeners added in components that mount/unmount but never removed, causing accumulation

DO NOT flag:
- fs.readFileSync in functions named config, load, init, setup, parse, or configDotenv — these are intentional one-time initialization patterns
- Math.random() calls
- Simple Object.keys() iterations on small, bounded objects
- Memory retention in small constant arrays or static data structures like tips arrays
- One-time startup I/O that does not repeat during the application lifecycle

SEVERITY for performance:
- Blocking synchronous I/O inside request handlers or repeated runtime paths = HIGH
- O(n²) patterns in loops = MEDIUM
- Repeated DOM queries inside render or loop functions = MEDIUM
- Minor inefficiencies with limited real-world impact = LOW

Files to audit:
${filesSummary}

Respond with ONLY valid JSON in this exact shape, no markdown, no explanation:
{"findings":[{"severity":"critical|high|medium|low","title":"string","file":"string","description":"string","fix":"string"}]}

If no issues found, return: {"findings":[]}`;

  const message = await client.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 2000,
    messages: [{ role: 'user', content: prompt }],
  });

  // The Messages API can return non-text content blocks, so do not assume
  // content[0] is text — the old `message.content[0].text` threw an opaque
  // TypeError in that case. Find the text block explicitly.
  const textBlock = message.content.find((block) => block.type === 'text');
  if (!textBlock) {
    throw new Error('Anthropic response contained no text content block');
  }
  return parseJsonResponse(textBlock.text);
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { parseJsonResponse } from '../parseJson.js';
|
|
4
|
+
|
|
5
|
+
// Module-level Anthropic client; the API key is loaded from .env by dotenv.
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

/**
 * Audits source files for maintainability problems: over-long functions,
 * deep nesting, large commented-out blocks, and leftover console.log
 * debug output.
 *
 * @param {Array<{path: string, content: string}>} files - Scanned source files to audit.
 * @returns {Promise<{findings: Array<{severity: string, title: string, file: string, description: string, fix: string}>}>}
 * @throws {Error} Propagates Anthropic API errors; throws when the model
 *   response contains no text content block.
 */
export async function runQualityAudit(files) {
  // One concatenated blob with per-file headers so the model can attribute findings.
  const filesSummary = files.map(f => `// File: ${f.path}\n${f.content}`).join('\n\n---\n\n');

  const prompt = `You are a code quality reviewer. Analyze the following source files for genuine code quality issues that affect maintainability.

SHARED SEVERITY RULES (apply to all findings):
- CRITICAL: Only for issues that directly enable data breach, code execution, or system compromise. Examples: hardcoded credentials, eval() with user input, SQL injection. Never use Critical for code quality, documentation, or test coverage issues.
- HIGH: Significant issues that meaningfully increase risk or maintenance burden. One HIGH per distinct problem, never multiple HIGHs for sub-parts of the same issue.
- MEDIUM: Real issues worth fixing but not urgent. Missing documentation, weak test coverage, performance patterns that could cause problems at scale.
- LOW: Suggestions and minor improvements. Style issues, optional optimizations, nice-to-have documentation.
- Never file more than one finding per function per issue type. If a function has multiple related problems, combine them into one finding.

FLAG these issues:
- Functions genuinely over 60 lines of executable code (60 lines is the real threshold where complexity becomes a problem — not 50)
- Nesting depth of 5 or more levels (deeply nested conditionals or loops within loops)
- Large blocks of commented-out code — 10 or more consecutive lines of code that has been commented out
- console.log statements that are clearly leftover debug output: inside business logic, loops, or event handlers where there is no logging purpose

DO NOT flag:
- console.log inside functions whose sole purpose is logging — functions named log, warn, debug, error, _log, _warn, _debug, logger, print, output, or any similar logging-purpose name
- Duplicate data structures that are intentionally separate (e.g., two activity arrays maintained independently for different purposes)
- Functions that are long because they contain data definitions or configuration, not executable logic
- Nesting at 4 levels or fewer

SEVERITY for quality:
- Functions over 100 lines of executable logic = HIGH
- Functions between 60 and 100 lines of executable logic = MEDIUM
- Nesting depth of 5+ levels = MEDIUM
- Commented-out code blocks (10+ lines) = LOW
- Leftover console.log debug statements = LOW

Files to audit:
${filesSummary}

Respond with ONLY valid JSON in this exact shape, no markdown, no explanation:
{"findings":[{"severity":"critical|high|medium|low","title":"string","file":"string","description":"string","fix":"string"}]}

If no issues found, return: {"findings":[]}`;

  const message = await client.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 2000,
    messages: [{ role: 'user', content: prompt }],
  });

  // The Messages API can return non-text content blocks, so do not assume
  // content[0] is text — the old `message.content[0].text` threw an opaque
  // TypeError in that case. Find the text block explicitly.
  const textBlock = message.content.find((block) => block.type === 'text');
  if (!textBlock) {
    throw new Error('Anthropic response contained no text content block');
  }
  return parseJsonResponse(textBlock.text);
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { parseJsonResponse } from '../parseJson.js';
|
|
4
|
+
|
|
5
|
+
// Module-level Anthropic client; the API key is loaded from .env by dotenv.
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

/**
 * Audits source files for genuine security vulnerabilities: hardcoded
 * secrets, eval()/Function() on user input, SQL injection, path traversal,
 * XSS sinks, and insecure crypto usage.
 *
 * @param {Array<{path: string, content: string}>} files - Scanned source files to audit.
 * @returns {Promise<{findings: Array<{severity: string, title: string, file: string, description: string, fix: string}>}>}
 * @throws {Error} Propagates Anthropic API errors; throws when the model
 *   response contains no text content block.
 */
export async function runSecurityAudit(files) {
  // One concatenated blob with per-file headers so the model can attribute findings.
  const filesSummary = files.map(f => `// File: ${f.path}\n${f.content}`).join('\n\n---\n\n');

  const prompt = `You are a security auditor. Analyze the following source files for genuine security vulnerabilities only. Do not flag theoretical or unlikely risks.

SHARED SEVERITY RULES (apply to all findings):
- CRITICAL: Only for issues that directly enable data breach, code execution, or system compromise. Examples: hardcoded credentials, eval() with user input, SQL injection. Never use Critical for code quality, documentation, or test coverage issues.
- HIGH: Significant issues that meaningfully increase risk or maintenance burden. One HIGH per distinct problem, never multiple HIGHs for sub-parts of the same issue.
- MEDIUM: Real issues worth fixing but not urgent. Missing documentation, weak test coverage, performance patterns that could cause problems at scale.
- LOW: Suggestions and minor improvements. Style issues, optional optimizations, nice-to-have documentation.
- Never file more than one finding per function per issue type. If a function has multiple related problems, combine them into one finding.

FLAG these issues:
- Hardcoded secrets, API keys, or passwords embedded in source code
- eval() or Function() called with user-controlled input
- SQL injection via string concatenation with unsanitized user input
- Path traversal vulnerabilities where user input reaches file paths without validation
- XSS via unsanitized user input assigned to innerHTML or similar DOM sinks
- Insecure use of crypto (weak algorithms, hardcoded IVs, broken key derivation)

DO NOT flag:
- console.log or debug logging output
- Synchronous file operations used in initialization or configuration loading
- Regex complexity unless there is a proven ReDoS pattern with a specific exploit string demonstrating catastrophic backtracking
- Domain names or URLs appearing in strings
- Debug logging functions or structured loggers

SEVERITY for security:
- Hardcoded secrets and eval() with user input = CRITICAL
- SQL injection = HIGH
- Path traversal and XSS = MEDIUM unless the code is directly and trivially exploitable with no mitigating factors, then HIGH

Files to audit:
${filesSummary}

Respond with ONLY valid JSON in this exact shape, no markdown, no explanation:
{"findings":[{"severity":"critical|high|medium|low","title":"string","file":"string","description":"string","fix":"string"}]}

If no issues found, return: {"findings":[]}`;

  const message = await client.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 2000,
    messages: [{ role: 'user', content: prompt }],
  });

  // The Messages API can return non-text content blocks, so do not assume
  // content[0] is text — the old `message.content[0].text` threw an opaque
  // TypeError in that case. Find the text block explicitly.
  const textBlock = message.content.find((block) => block.type === 'text');
  if (!textBlock) {
    throw new Error('Anthropic response contained no text content block');
  }
  return parseJsonResponse(textBlock.text);
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import 'dotenv/config';
|
|
3
|
+
import { parseJsonResponse } from '../parseJson.js';
|
|
4
|
+
|
|
5
|
+
const client = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
|
|
6
|
+
|
|
7
|
+
/**
 * Run the test-coverage audit agent over a set of source files.
 *
 * Builds a single prompt embedding every file's contents, sends it to the
 * Anthropic Messages API, and parses the model's JSON reply.
 *
 * @param {Array<{path: string, content: string}>} files - Files to audit.
 * @returns {Promise<{findings: Array<object>}>} Parsed findings object.
 * @throws {SyntaxError} If the model response contains no parseable JSON
 *   (propagated from parseJsonResponse).
 */
export async function runTestAudit(files) {
  // Concatenate all files into one annotated blob, separated by "---" rules.
  const filesSummary = files.map(f => `// File: ${f.path}\n${f.content}`).join('\n\n---\n\n');

  const prompt = `You are a QA engineer. Analyze the following source files for genuine gaps in test coverage.

SHARED SEVERITY RULES (apply to all findings):
- CRITICAL: Only for issues that directly enable data breach, code execution, or system compromise. Examples: hardcoded credentials, eval() with user input, SQL injection. Never use Critical for code quality, documentation, or test coverage issues.
- HIGH: Significant issues that meaningfully increase risk or maintenance burden. One HIGH per distinct problem, never multiple HIGHs for sub-parts of the same issue.
- MEDIUM: Real issues worth fixing but not urgent. Missing documentation, weak test coverage, performance patterns that could cause problems at scale.
- LOW: Suggestions and minor improvements. Style issues, optional optimizations, nice-to-have documentation.
- Never file more than one finding per function per issue type. If a function has multiple related problems, combine them into one finding.

FLAG these issues:
- Exported functions or classes with zero test coverage anywhere in the entire test suite
- Test files that contain only trivial assertions such as expect(true).toBe(true) and test nothing real
- Missing tests for critical error paths in security-sensitive functions (e.g., auth, input validation, crypto)

DO NOT flag:
- Functions that are tested indirectly through integration tests — indirect coverage counts
- Internal helper functions that are not exported from the module
- Missing tests for simple getter functions or plain constants
- Missing edge case tests unless the function handles user input or security-sensitive data

CRITICAL ANALYSIS INSTRUCTION: Before flagging any function as untested, carefully read ALL test files in the project. Test files do not need to import functions directly to provide coverage — integration tests, end-to-end tests, and indirect calls all count as coverage. Only flag a function as untested if you are confident it is not exercised by any test in any test file.

SEVERITY for test coverage:
- No tests at all for an exported function = HIGH
- Test file exists but contains only trivial or meaningless assertions = LOW
- Missing error path tests for security-sensitive functions = MEDIUM
- Missing edge case tests for non-sensitive functions = LOW

Files to audit:
${filesSummary}

Respond with ONLY valid JSON in this exact shape, no markdown, no explanation:
{"findings":[{"severity":"critical|high|medium|low","title":"string","file":"string","description":"string","fix":"string"}]}

If no issues found, return: {"findings":[]}`;

  const message = await client.messages.create({
    model: 'claude-sonnet-4-20250514',
    max_tokens: 2000,
    messages: [{ role: 'user', content: prompt }],
  });

  // assumes the first content block is a text block — TODO confirm against
  // the SDK's response shape (tool-use blocks would break this).
  return parseJsonResponse(message.content[0].text);
}
|
package/src/parseJson.js
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
 * Extract and parse the first JSON object from an LLM response.
 * Handles: raw JSON, ```json fences, prose before/after JSON.
 *
 * @param {string} text - Raw model output.
 * @returns {any} The parsed JSON value.
 * @throws {SyntaxError} When no parseable JSON object is present.
 */
export function parseJsonResponse(text) {
  const trimmed = text.trim();

  // Strip markdown fences first
  const defenced = trimmed.replace(/^```(?:json)?\n?/, '').replace(/\n?```$/, '').trim();

  // Try direct parse
  try {
    return JSON.parse(defenced);
  } catch (_) {}

  // Extract the first {...} block from anywhere in the text
  const start = defenced.indexOf('{');
  const end = defenced.lastIndexOf('}');
  if (start !== -1 && end > start) {
    // Fix: the brace-delimited slice may still not be valid JSON (e.g. prose
    // with stray braces). Previously a raw JSON.parse SyntaxError escaped
    // here; now we fall through to the uniform, contextual error below.
    try {
      return JSON.parse(defenced.slice(start, end + 1));
    } catch (_) {}
  }

  throw new SyntaxError(`No JSON object found in response: ${trimmed.slice(0, 120)}`);
}
|
package/src/reporter.js
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
import { writeFile } from 'fs/promises';
import { join } from 'path';

// Presentation tables keyed by lowercase severity.
const SEVERITY_EMOJI = { critical: '🔴', high: '🟠', medium: '🟡', low: '🟢' };
const SEVERITY_LABEL = { critical: 'CRITICAL', high: 'HIGH', medium: 'MEDIUM', low: 'LOW' };
// Numeric ordering used when merging duplicate findings — higher rank wins.
const SEVERITY_RANK = { critical: 4, high: 3, medium: 2, low: 1 };
// Words ignored when comparing finding titles for similarity.
const STOP_WORDS = new Set(['the','a','an','in','for','of','and','or','with','without','no','missing','is','are','not','to','on','at','by','be','as','it','its']);
// Substrings that mark a finding as security-related (lowercase match).
const SECURITY_KEYWORDS = ['eval','sql injection','hardcoded','credentials','api key','password','injection','xss','sanitize','sanitization'];
|
|
9
|
+
|
|
10
|
+
// Tokenize a finding title into lowercase words, dropping punctuation,
// single-character tokens, and common stop words.
function significantWords(title) {
  const normalized = title.toLowerCase().replace(/[^a-z0-9 ]/g, ' ');
  const words = [];
  for (const word of normalized.split(/\s+/)) {
    if (word.length > 1 && !STOP_WORDS.has(word)) words.push(word);
  }
  return words;
}
|
|
13
|
+
|
|
14
|
+
// Two titles are considered duplicates when they share at least three
// significant words (repeated words in `b` each count once per occurrence).
function titlesSimilar(a, b) {
  const vocabulary = new Set(significantWords(a));
  let overlap = 0;
  for (const word of significantWords(b)) {
    if (vocabulary.has(word)) overlap += 1;
  }
  return overlap >= 3;
}
|
|
21
|
+
|
|
22
|
+
// True when the finding's title or description contains any security keyword.
function mentionsSecurityKeyword(finding) {
  const text = `${finding.title} ${finding.description}`.toLowerCase();
  for (const keyword of SECURITY_KEYWORDS) {
    if (text.includes(keyword)) return true;
  }
  return false;
}
|
|
26
|
+
|
|
27
|
+
// Decide whether `candidate` should be dropped because a more authoritative
// agent already owns the same file/package.
function suppressedByAuthority(candidate, allFindings) {
  const agent = candidate.agentName;
  const candidateFile = (candidate.file || '').toLowerCase();

  // Docs/Quality findings that mention security keywords are suppressed when
  // Security owns that file.
  if ((agent === 'docs' || agent === 'quality') && mentionsSecurityKeyword(candidate)) {
    const securityOwnsFile = allFindings.some(
      (f) => f.agentName === 'security' && (f.file || '').toLowerCase() === candidateFile
    );
    if (securityOwnsFile) return true;
  }

  // Quality findings about a package are suppressed when Dependencies owns
  // that package.
  if (agent === 'quality') {
    const depOwnsPackage = allFindings.some((f) => {
      if (f.agentName !== 'dependencies') return false;
      const depFile = (f.file || '').toLowerCase();
      // Same package when the file tokens match or one contains the other
      // (e.g. "lodash" vs "node_modules/lodash").
      return depFile && candidateFile &&
        (depFile === candidateFile ||
          candidateFile.includes(depFile) ||
          depFile.includes(candidateFile));
    });
    if (depOwnsPackage) return true;
  }

  return false;
}
|
|
53
|
+
|
|
54
|
+
// Collapse the combined agent findings: first drop findings suppressed by a
// more authoritative agent, then merge near-duplicate titles within each file.
export function deduplicateFindings(allFindings) {
  // Authority suppression pass — run before similarity grouping.
  const survivors = allFindings.filter((f) => !suppressedByAuthority(f, allFindings));

  // Bucket findings by lowercased file path.
  const groups = new Map();
  for (const finding of survivors) {
    const key = (finding.file || '').toLowerCase();
    const bucket = groups.get(key);
    if (bucket) {
      bucket.push(finding);
    } else {
      groups.set(key, [finding]);
    }
  }

  const result = [];
  for (const bucket of groups.values()) {
    const merged = [];
    for (const candidate of bucket) {
      const idx = merged.findIndex((kept) => titlesSimilar(kept.title, candidate.title));
      if (idx === -1) {
        merged.push(candidate);
        continue;
      }
      // Duplicate: keep the higher severity; on a tie, prefer the longer
      // (presumably more detailed) description.
      const current = merged[idx];
      const candidateRank = SEVERITY_RANK[(candidate.severity || 'low').toLowerCase()] || 1;
      const currentRank = SEVERITY_RANK[(current.severity || 'low').toLowerCase()] || 1;
      const candidateWins =
        candidateRank > currentRank ||
        (candidateRank === currentRank &&
          (candidate.description || '').length > (current.description || '').length);
      if (candidateWins) merged[idx] = candidate;
    }
    result.push(...merged);
  }
  return result;
}
|
|
85
|
+
|
|
86
|
+
// Tally findings into { critical, high, medium, low }. A missing severity
// counts as 'low'; unknown severity strings are ignored.
function countBySeverity(findings) {
  const counts = { critical: 0, high: 0, medium: 0, low: 0 };
  findings.forEach((finding) => {
    const severity = (finding.severity || 'low').toLowerCase();
    if (severity in counts) counts[severity] += 1;
  });
  return counts;
}
|
|
94
|
+
|
|
95
|
+
/**
 * Risk score for security/dependency findings: start at 100, subtract a
 * weighted penalty per finding, clamp at 0, and bucket into a grade.
 *
 * @param {Array<object>} findings - Risk-relevant findings.
 * @returns {{score: number, emoji: string, label: string}}
 */
function calcRiskScore(findings) {
  // Fix: tally once instead of re-scanning the findings list four times
  // (the original called countBySeverity per severity tier).
  const counts = countBySeverity(findings);
  const score = Math.max(
    0,
    100 - counts.critical * 25 - counts.high * 15 - counts.medium * 8 - counts.low * 3
  );
  if (score >= 90) return { score, emoji: '🟢', label: 'Low Risk' };
  if (score >= 70) return { score, emoji: '🟡', label: 'Moderate Risk' };
  if (score >= 40) return { score, emoji: '🟠', label: 'Elevated Risk' };
  return { score, emoji: '🔴', label: 'High Risk' };
}
|
|
108
|
+
|
|
109
|
+
/**
 * Quality score for test/docs/performance/quality findings: start at 100,
 * subtract a weighted penalty per finding, clamp at 0, and bucket into a grade.
 *
 * @param {Array<object>} findings - Quality-relevant findings.
 * @returns {{score: number, emoji: string, label: string}}
 */
function calcQualityScore(findings) {
  // Fix: tally once instead of re-scanning the findings list four times
  // (the original called countBySeverity per severity tier).
  const counts = countBySeverity(findings);
  const score = Math.max(
    0,
    100 - counts.critical * 20 - counts.high * 10 - counts.medium * 5 - counts.low * 2
  );
  if (score >= 90) return { score, emoji: '🟢', label: 'Excellent' };
  if (score >= 75) return { score, emoji: '🟡', label: 'Good' };
  if (score >= 50) return { score, emoji: '🟠', label: 'Needs Work' };
  return { score, emoji: '🔴', label: 'Needs Significant Work' };
}
|
|
122
|
+
|
|
123
|
+
// Render a list of findings as markdown "cards"; returns a placeholder line
// when the list is empty or absent.
function renderFindings(findings) {
  if (!findings || findings.length === 0) return '_No issues found._\n';
  const cards = [];
  for (const finding of findings) {
    const sev = (finding.severity || 'low').toLowerCase();
    const emoji = SEVERITY_EMOJI[sev] || '⚪';
    const label = SEVERITY_LABEL[sev] || sev.toUpperCase();
    cards.push(
      `#### ${emoji} [${label}] ${finding.title}\n` +
      `- **File:** \`${finding.file}\`\n` +
      `- **Issue:** ${finding.description}\n` +
      `- **Fix:** ${finding.fix}`
    );
  }
  return cards.join('\n\n');
}
|
|
137
|
+
|
|
138
|
+
/**
 * Assemble per-agent audit results into a markdown report and write it to
 * `<outputDir>/audit-report.md`.
 *
 * @param {object} allResults - Map of agent name -> { findings: [...] };
 *   missing agents are treated as having no findings.
 * @param {string} projectName - Heading for the report.
 * @param {string} [outputDir='.'] - Directory to write audit-report.md into.
 * @returns {Promise<{outputPath: string, totals: object, totalFindings: number}>}
 */
export async function generateReport(allResults, projectName, outputDir = '.') {
  // Fix: tag shallow COPIES of each finding with its agent source. The
  // original wrote `f.agentName = agentName` onto the caller's objects,
  // mutating caller-owned data as a side effect. Report output is unchanged.
  const sectionOf = (agentName, label) => ({
    label,
    findings: ((allResults[agentName] || { findings: [] }).findings).map(
      (f) => ({ ...f, agentName })
    ),
  });

  const sections = {
    security: sectionOf('security', 'Security'),
    performance: sectionOf('performance', 'Performance'),
    tests: sectionOf('tests', 'Test Coverage'),
    docs: sectionOf('docs', 'Documentation'),
    dependencies: sectionOf('dependencies', 'Dependencies'),
    quality: sectionOf('quality', 'Code Quality'),
  };

  const combined = Object.values(sections).flatMap(s => s.findings);
  const deduped = deduplicateFindings(combined);
  const duplicatesRemoved = combined.length - deduped.length;

  // Re-partition deduplicated findings back into their sections.
  for (const s of Object.values(sections)) s.findings = [];
  for (const f of deduped) {
    if (sections[f.agentName]) sections[f.agentName].findings.push(f);
  }

  // Risk covers security + dependencies; quality covers everything else.
  const riskFindings = [...sections.security.findings, ...sections.dependencies.findings];
  const qualityFindings = [...sections.tests.findings, ...sections.docs.findings, ...sections.performance.findings, ...sections.quality.findings];

  const totals = countBySeverity(deduped);
  const risk = calcRiskScore(riskFindings);
  const qual = calcQualityScore(qualityFindings);
  const date = new Date().toISOString().split('T')[0];

  const report = `# Codebase Audit Report: ${projectName}

**Date:** ${date}
**Total Findings:** ${deduped.length}
**Duplicates Removed:** ${duplicatesRemoved} findings merged

---

## Executive Summary

| Severity | Count |
|----------|-------|
| ${SEVERITY_EMOJI.critical} Critical | ${totals.critical} |
| ${SEVERITY_EMOJI.high} High | ${totals.high} |
| ${SEVERITY_EMOJI.medium} Medium | ${totals.medium} |
| ${SEVERITY_EMOJI.low} Low | ${totals.low} |

---

## Scores

| | Score | Grade |
|---|---|---|
| 🛡️ Risk | ${risk.score}/100 | ${risk.emoji} ${risk.label} |
| 🔧 Quality | ${qual.score}/100 | ${qual.emoji} ${qual.label} |

**Risk** measures security vulnerabilities and dependency exposure.
**Quality** measures test coverage, documentation, and code maintainability.

---

## Security

${renderFindings(sections.security.findings)}

---

## Performance

${renderFindings(sections.performance.findings)}

---

## Test Coverage

${renderFindings(sections.tests.findings)}

---

## Documentation

${renderFindings(sections.docs.findings)}

---

## Dependencies

${renderFindings(sections.dependencies.findings)}

---

## Code Quality

${renderFindings(sections.quality.findings)}
`;

  const outputPath = join(outputDir, 'audit-report.md');
  await writeFile(outputPath, report, 'utf-8');
  return { outputPath, totals, totalFindings: deduped.length };
}
|
package/src/scanner.js
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import { readdir, readFile } from 'fs/promises';
import { join, extname, resolve } from 'path';

// File extensions the scanner will read and hand to the audit agents.
const ALLOWED_EXTENSIONS = new Set(['.js', '.ts', '.jsx', '.tsx', '.py', '.css', '.html']);
// Vendored/generated directories that are never descended into.
const SKIP_DIRS = new Set(['node_modules', '.git', 'dist', 'build', '.next', 'coverage', '__pycache__']);
|
|
6
|
+
|
|
7
|
+
// Recursively gather allowed source files under `dir` into `results`
// (mutated and returned). Unreadable directories and files are skipped
// silently; vendored/generated directories are never entered.
async function collectFiles(dir, results = []) {
  let entries;
  try {
    entries = await readdir(dir, { withFileTypes: true });
  } catch {
    return results;
  }

  for (const entry of entries) {
    const fullPath = join(dir, entry.name);

    if (entry.isDirectory()) {
      if (SKIP_DIRS.has(entry.name)) continue;
      await collectFiles(fullPath, results);
      continue;
    }

    if (!entry.isFile() || !ALLOWED_EXTENSIONS.has(extname(entry.name))) continue;

    try {
      const content = await readFile(fullPath, 'utf-8');
      results.push({ path: fullPath, content });
    } catch {
      // skip unreadable files
    }
  }

  return results;
}
|
|
32
|
+
|
|
33
|
+
// Scan a project directory: collect auditable source files plus the parsed
// package.json when present (null when missing or malformed).
export async function scanDirectory(targetPath) {
  const rootDir = resolve(targetPath);
  const files = await collectFiles(rootDir);

  let packageJson = null;
  try {
    const rawPkg = await readFile(join(rootDir, 'package.json'), 'utf-8');
    packageJson = JSON.parse(rawPkg);
  } catch {
    // no package.json found
  }

  return { files, packageJson };
}
|
package/{
ADDED
|
File without changes
|