@oculum/scanner 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/formatters/cli-terminal.d.ts +27 -0
- package/dist/formatters/cli-terminal.d.ts.map +1 -0
- package/dist/formatters/cli-terminal.js +412 -0
- package/dist/formatters/cli-terminal.js.map +1 -0
- package/dist/formatters/github-comment.d.ts +41 -0
- package/dist/formatters/github-comment.d.ts.map +1 -0
- package/dist/formatters/github-comment.js +306 -0
- package/dist/formatters/github-comment.js.map +1 -0
- package/dist/formatters/grouping.d.ts +52 -0
- package/dist/formatters/grouping.d.ts.map +1 -0
- package/dist/formatters/grouping.js +152 -0
- package/dist/formatters/grouping.js.map +1 -0
- package/dist/formatters/index.d.ts +9 -0
- package/dist/formatters/index.d.ts.map +1 -0
- package/dist/formatters/index.js +35 -0
- package/dist/formatters/index.js.map +1 -0
- package/dist/formatters/vscode-diagnostic.d.ts +103 -0
- package/dist/formatters/vscode-diagnostic.d.ts.map +1 -0
- package/dist/formatters/vscode-diagnostic.js +151 -0
- package/dist/formatters/vscode-diagnostic.js.map +1 -0
- package/dist/index.d.ts +52 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +648 -0
- package/dist/index.js.map +1 -0
- package/dist/layer1/comments.d.ts +8 -0
- package/dist/layer1/comments.d.ts.map +1 -0
- package/dist/layer1/comments.js +203 -0
- package/dist/layer1/comments.js.map +1 -0
- package/dist/layer1/config-audit.d.ts +8 -0
- package/dist/layer1/config-audit.d.ts.map +1 -0
- package/dist/layer1/config-audit.js +252 -0
- package/dist/layer1/config-audit.js.map +1 -0
- package/dist/layer1/entropy.d.ts +8 -0
- package/dist/layer1/entropy.d.ts.map +1 -0
- package/dist/layer1/entropy.js +500 -0
- package/dist/layer1/entropy.js.map +1 -0
- package/dist/layer1/file-flags.d.ts +7 -0
- package/dist/layer1/file-flags.d.ts.map +1 -0
- package/dist/layer1/file-flags.js +112 -0
- package/dist/layer1/file-flags.js.map +1 -0
- package/dist/layer1/index.d.ts +36 -0
- package/dist/layer1/index.d.ts.map +1 -0
- package/dist/layer1/index.js +132 -0
- package/dist/layer1/index.js.map +1 -0
- package/dist/layer1/patterns.d.ts +8 -0
- package/dist/layer1/patterns.d.ts.map +1 -0
- package/dist/layer1/patterns.js +482 -0
- package/dist/layer1/patterns.js.map +1 -0
- package/dist/layer1/urls.d.ts +8 -0
- package/dist/layer1/urls.d.ts.map +1 -0
- package/dist/layer1/urls.js +296 -0
- package/dist/layer1/urls.js.map +1 -0
- package/dist/layer1/weak-crypto.d.ts +7 -0
- package/dist/layer1/weak-crypto.d.ts.map +1 -0
- package/dist/layer1/weak-crypto.js +291 -0
- package/dist/layer1/weak-crypto.js.map +1 -0
- package/dist/layer2/ai-agent-tools.d.ts +19 -0
- package/dist/layer2/ai-agent-tools.d.ts.map +1 -0
- package/dist/layer2/ai-agent-tools.js +528 -0
- package/dist/layer2/ai-agent-tools.js.map +1 -0
- package/dist/layer2/ai-endpoint-protection.d.ts +36 -0
- package/dist/layer2/ai-endpoint-protection.d.ts.map +1 -0
- package/dist/layer2/ai-endpoint-protection.js +332 -0
- package/dist/layer2/ai-endpoint-protection.js.map +1 -0
- package/dist/layer2/ai-execution-sinks.d.ts +18 -0
- package/dist/layer2/ai-execution-sinks.d.ts.map +1 -0
- package/dist/layer2/ai-execution-sinks.js +496 -0
- package/dist/layer2/ai-execution-sinks.js.map +1 -0
- package/dist/layer2/ai-fingerprinting.d.ts +7 -0
- package/dist/layer2/ai-fingerprinting.d.ts.map +1 -0
- package/dist/layer2/ai-fingerprinting.js +654 -0
- package/dist/layer2/ai-fingerprinting.js.map +1 -0
- package/dist/layer2/ai-prompt-hygiene.d.ts +19 -0
- package/dist/layer2/ai-prompt-hygiene.d.ts.map +1 -0
- package/dist/layer2/ai-prompt-hygiene.js +356 -0
- package/dist/layer2/ai-prompt-hygiene.js.map +1 -0
- package/dist/layer2/ai-rag-safety.d.ts +21 -0
- package/dist/layer2/ai-rag-safety.d.ts.map +1 -0
- package/dist/layer2/ai-rag-safety.js +459 -0
- package/dist/layer2/ai-rag-safety.js.map +1 -0
- package/dist/layer2/ai-schema-validation.d.ts +25 -0
- package/dist/layer2/ai-schema-validation.d.ts.map +1 -0
- package/dist/layer2/ai-schema-validation.js +375 -0
- package/dist/layer2/ai-schema-validation.js.map +1 -0
- package/dist/layer2/auth-antipatterns.d.ts +20 -0
- package/dist/layer2/auth-antipatterns.d.ts.map +1 -0
- package/dist/layer2/auth-antipatterns.js +333 -0
- package/dist/layer2/auth-antipatterns.js.map +1 -0
- package/dist/layer2/byok-patterns.d.ts +12 -0
- package/dist/layer2/byok-patterns.d.ts.map +1 -0
- package/dist/layer2/byok-patterns.js +299 -0
- package/dist/layer2/byok-patterns.js.map +1 -0
- package/dist/layer2/dangerous-functions.d.ts +7 -0
- package/dist/layer2/dangerous-functions.d.ts.map +1 -0
- package/dist/layer2/dangerous-functions.js +1375 -0
- package/dist/layer2/dangerous-functions.js.map +1 -0
- package/dist/layer2/data-exposure.d.ts +16 -0
- package/dist/layer2/data-exposure.d.ts.map +1 -0
- package/dist/layer2/data-exposure.js +279 -0
- package/dist/layer2/data-exposure.js.map +1 -0
- package/dist/layer2/framework-checks.d.ts +7 -0
- package/dist/layer2/framework-checks.d.ts.map +1 -0
- package/dist/layer2/framework-checks.js +388 -0
- package/dist/layer2/framework-checks.js.map +1 -0
- package/dist/layer2/index.d.ts +58 -0
- package/dist/layer2/index.d.ts.map +1 -0
- package/dist/layer2/index.js +380 -0
- package/dist/layer2/index.js.map +1 -0
- package/dist/layer2/logic-gates.d.ts +7 -0
- package/dist/layer2/logic-gates.d.ts.map +1 -0
- package/dist/layer2/logic-gates.js +182 -0
- package/dist/layer2/logic-gates.js.map +1 -0
- package/dist/layer2/risky-imports.d.ts +7 -0
- package/dist/layer2/risky-imports.d.ts.map +1 -0
- package/dist/layer2/risky-imports.js +161 -0
- package/dist/layer2/risky-imports.js.map +1 -0
- package/dist/layer2/variables.d.ts +8 -0
- package/dist/layer2/variables.d.ts.map +1 -0
- package/dist/layer2/variables.js +152 -0
- package/dist/layer2/variables.js.map +1 -0
- package/dist/layer3/anthropic.d.ts +83 -0
- package/dist/layer3/anthropic.d.ts.map +1 -0
- package/dist/layer3/anthropic.js +1745 -0
- package/dist/layer3/anthropic.js.map +1 -0
- package/dist/layer3/index.d.ts +24 -0
- package/dist/layer3/index.d.ts.map +1 -0
- package/dist/layer3/index.js +119 -0
- package/dist/layer3/index.js.map +1 -0
- package/dist/layer3/openai.d.ts +25 -0
- package/dist/layer3/openai.d.ts.map +1 -0
- package/dist/layer3/openai.js +238 -0
- package/dist/layer3/openai.js.map +1 -0
- package/dist/layer3/package-check.d.ts +63 -0
- package/dist/layer3/package-check.d.ts.map +1 -0
- package/dist/layer3/package-check.js +508 -0
- package/dist/layer3/package-check.js.map +1 -0
- package/dist/modes/incremental.d.ts +66 -0
- package/dist/modes/incremental.d.ts.map +1 -0
- package/dist/modes/incremental.js +200 -0
- package/dist/modes/incremental.js.map +1 -0
- package/dist/tiers.d.ts +125 -0
- package/dist/tiers.d.ts.map +1 -0
- package/dist/tiers.js +234 -0
- package/dist/tiers.js.map +1 -0
- package/dist/types.d.ts +175 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +50 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/auth-helper-detector.d.ts +56 -0
- package/dist/utils/auth-helper-detector.d.ts.map +1 -0
- package/dist/utils/auth-helper-detector.js +360 -0
- package/dist/utils/auth-helper-detector.js.map +1 -0
- package/dist/utils/context-helpers.d.ts +96 -0
- package/dist/utils/context-helpers.d.ts.map +1 -0
- package/dist/utils/context-helpers.js +493 -0
- package/dist/utils/context-helpers.js.map +1 -0
- package/dist/utils/diff-detector.d.ts +53 -0
- package/dist/utils/diff-detector.d.ts.map +1 -0
- package/dist/utils/diff-detector.js +104 -0
- package/dist/utils/diff-detector.js.map +1 -0
- package/dist/utils/diff-parser.d.ts +80 -0
- package/dist/utils/diff-parser.d.ts.map +1 -0
- package/dist/utils/diff-parser.js +202 -0
- package/dist/utils/diff-parser.js.map +1 -0
- package/dist/utils/imported-auth-detector.d.ts +37 -0
- package/dist/utils/imported-auth-detector.d.ts.map +1 -0
- package/dist/utils/imported-auth-detector.js +251 -0
- package/dist/utils/imported-auth-detector.js.map +1 -0
- package/dist/utils/middleware-detector.d.ts +55 -0
- package/dist/utils/middleware-detector.d.ts.map +1 -0
- package/dist/utils/middleware-detector.js +260 -0
- package/dist/utils/middleware-detector.js.map +1 -0
- package/dist/utils/oauth-flow-detector.d.ts +41 -0
- package/dist/utils/oauth-flow-detector.d.ts.map +1 -0
- package/dist/utils/oauth-flow-detector.js +202 -0
- package/dist/utils/oauth-flow-detector.js.map +1 -0
- package/dist/utils/path-exclusions.d.ts +55 -0
- package/dist/utils/path-exclusions.d.ts.map +1 -0
- package/dist/utils/path-exclusions.js +222 -0
- package/dist/utils/path-exclusions.js.map +1 -0
- package/dist/utils/project-context-builder.d.ts +119 -0
- package/dist/utils/project-context-builder.d.ts.map +1 -0
- package/dist/utils/project-context-builder.js +534 -0
- package/dist/utils/project-context-builder.js.map +1 -0
- package/dist/utils/registry-clients.d.ts +93 -0
- package/dist/utils/registry-clients.d.ts.map +1 -0
- package/dist/utils/registry-clients.js +273 -0
- package/dist/utils/registry-clients.js.map +1 -0
- package/dist/utils/trpc-analyzer.d.ts +78 -0
- package/dist/utils/trpc-analyzer.d.ts.map +1 -0
- package/dist/utils/trpc-analyzer.js +297 -0
- package/dist/utils/trpc-analyzer.js.map +1 -0
- package/package.json +45 -0
- package/src/__tests__/benchmark/fixtures/false-positives.ts +227 -0
- package/src/__tests__/benchmark/fixtures/index.ts +68 -0
- package/src/__tests__/benchmark/fixtures/layer1/config-audit.ts +364 -0
- package/src/__tests__/benchmark/fixtures/layer1/hardcoded-secrets.ts +173 -0
- package/src/__tests__/benchmark/fixtures/layer1/high-entropy.ts +234 -0
- package/src/__tests__/benchmark/fixtures/layer1/index.ts +31 -0
- package/src/__tests__/benchmark/fixtures/layer1/sensitive-urls.ts +90 -0
- package/src/__tests__/benchmark/fixtures/layer1/weak-crypto.ts +197 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-agent-tools.ts +170 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-endpoint-protection.ts +418 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-execution-sinks.ts +189 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-fingerprinting.ts +316 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-prompt-hygiene.ts +178 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-rag-safety.ts +184 -0
- package/src/__tests__/benchmark/fixtures/layer2/ai-schema-validation.ts +434 -0
- package/src/__tests__/benchmark/fixtures/layer2/auth-antipatterns.ts +159 -0
- package/src/__tests__/benchmark/fixtures/layer2/byok-patterns.ts +112 -0
- package/src/__tests__/benchmark/fixtures/layer2/dangerous-functions.ts +246 -0
- package/src/__tests__/benchmark/fixtures/layer2/data-exposure.ts +168 -0
- package/src/__tests__/benchmark/fixtures/layer2/framework-checks.ts +346 -0
- package/src/__tests__/benchmark/fixtures/layer2/index.ts +67 -0
- package/src/__tests__/benchmark/fixtures/layer2/injection-vulnerabilities.ts +239 -0
- package/src/__tests__/benchmark/fixtures/layer2/logic-gates.ts +246 -0
- package/src/__tests__/benchmark/fixtures/layer2/risky-imports.ts +231 -0
- package/src/__tests__/benchmark/fixtures/layer2/variables.ts +167 -0
- package/src/__tests__/benchmark/index.ts +29 -0
- package/src/__tests__/benchmark/run-benchmark.ts +144 -0
- package/src/__tests__/benchmark/run-depth-validation.ts +206 -0
- package/src/__tests__/benchmark/run-real-world-test.ts +243 -0
- package/src/__tests__/benchmark/security-benchmark-script.ts +1737 -0
- package/src/__tests__/benchmark/tier-integration-script.ts +177 -0
- package/src/__tests__/benchmark/types.ts +144 -0
- package/src/__tests__/benchmark/utils/test-runner.ts +475 -0
- package/src/__tests__/regression/known-false-positives.test.ts +467 -0
- package/src/__tests__/snapshots/__snapshots__/scan-depth.test.ts.snap +178 -0
- package/src/__tests__/snapshots/scan-depth.test.ts +258 -0
- package/src/__tests__/validation/analyze-results.ts +542 -0
- package/src/__tests__/validation/extract-for-triage.ts +146 -0
- package/src/__tests__/validation/fp-deep-analysis.ts +327 -0
- package/src/__tests__/validation/run-validation.ts +364 -0
- package/src/__tests__/validation/triage-template.md +132 -0
- package/src/formatters/cli-terminal.ts +446 -0
- package/src/formatters/github-comment.ts +382 -0
- package/src/formatters/grouping.ts +190 -0
- package/src/formatters/index.ts +47 -0
- package/src/formatters/vscode-diagnostic.ts +243 -0
- package/src/index.ts +823 -0
- package/src/layer1/comments.ts +218 -0
- package/src/layer1/config-audit.ts +289 -0
- package/src/layer1/entropy.ts +583 -0
- package/src/layer1/file-flags.ts +127 -0
- package/src/layer1/index.ts +181 -0
- package/src/layer1/patterns.ts +516 -0
- package/src/layer1/urls.ts +334 -0
- package/src/layer1/weak-crypto.ts +328 -0
- package/src/layer2/ai-agent-tools.ts +601 -0
- package/src/layer2/ai-endpoint-protection.ts +387 -0
- package/src/layer2/ai-execution-sinks.ts +580 -0
- package/src/layer2/ai-fingerprinting.ts +758 -0
- package/src/layer2/ai-prompt-hygiene.ts +411 -0
- package/src/layer2/ai-rag-safety.ts +511 -0
- package/src/layer2/ai-schema-validation.ts +421 -0
- package/src/layer2/auth-antipatterns.ts +394 -0
- package/src/layer2/byok-patterns.ts +336 -0
- package/src/layer2/dangerous-functions.ts +1563 -0
- package/src/layer2/data-exposure.ts +315 -0
- package/src/layer2/framework-checks.ts +433 -0
- package/src/layer2/index.ts +473 -0
- package/src/layer2/logic-gates.ts +206 -0
- package/src/layer2/risky-imports.ts +186 -0
- package/src/layer2/variables.ts +166 -0
- package/src/layer3/anthropic.ts +2030 -0
- package/src/layer3/index.ts +130 -0
- package/src/layer3/package-check.ts +604 -0
- package/src/modes/incremental.ts +293 -0
- package/src/tiers.ts +318 -0
- package/src/types.ts +284 -0
- package/src/utils/auth-helper-detector.ts +443 -0
- package/src/utils/context-helpers.ts +535 -0
- package/src/utils/diff-detector.ts +135 -0
- package/src/utils/diff-parser.ts +272 -0
- package/src/utils/imported-auth-detector.ts +320 -0
- package/src/utils/middleware-detector.ts +333 -0
- package/src/utils/oauth-flow-detector.ts +246 -0
- package/src/utils/path-exclusions.ts +266 -0
- package/src/utils/project-context-builder.ts +707 -0
- package/src/utils/registry-clients.ts +351 -0
- package/src/utils/trpc-analyzer.ts +382 -0
package/src/__tests__/validation/fp-deep-analysis.ts
@@ -0,0 +1,327 @@
+#!/usr/bin/env npx tsx
+/**
+ * M7 Deep FP Analysis
+ *
+ * Analyzes the gap between cheap and validated scans to identify
+ * exactly which patterns are causing false positives and where
+ * to focus tuning efforts.
+ */
+
+import * as fs from 'fs'
+import * as path from 'path'
+import type { ScanResult, Vulnerability } from '../../types'
+
+const RESULTS_DIR = path.join(__dirname, '../../../validation-results')
+const OUTPUT_PATH = path.join(__dirname, '../../../docs/FP_DEEP_ANALYSIS.md')
+
+interface RejectedFinding {
+  repo: string
+  file: string
+  line: number
+  category: string
+  title: string
+  severity: string
+  lineContent: string
+}
+
+interface CategoryAnalysis {
+  category: string
+  cheapCount: number
+  validatedCount: number
+  rejectedCount: number
+  fpRate: number
+  byPathType: Record<string, RejectedFinding[]>
+  byTitle: Record<string, number>
+  sampleFindings: RejectedFinding[]
+}
+
+function classifyPath(filePath: string): string {
+  const lower = filePath.toLowerCase()
+  if (lower.includes('/examples/') || lower.includes('/example/')) return 'examples'
+  if (lower.includes('/__tests__/') || lower.includes('/test/') || lower.includes('.test.') || lower.includes('.spec.')) return 'tests'
+  if (lower.includes('/src/') || lower.includes('/lib/') || lower.includes('/libs/')) return 'library'
+  return 'other'
+}
+
+function loadScanResult(fileName: string): ScanResult | null {
+  const filePath = path.join(RESULTS_DIR, fileName)
+  if (!fs.existsSync(filePath)) return null
+  return JSON.parse(fs.readFileSync(filePath, 'utf-8'))
+}
+
+function findRejectedFindings(cheapFile: string, validatedFile: string): RejectedFinding[] {
+  const cheap = loadScanResult(cheapFile)
+  const validated = loadScanResult(validatedFile)
+  if (!cheap || !validated) return []
+
+  // Build set of validated finding keys
+  const validatedKeys = new Set(
+    validated.vulnerabilities.map(v => `${v.filePath}:${v.lineNumber}:${v.category}`)
+  )
+
+  // Find what was rejected (in cheap but not in validated)
+  const rejected: RejectedFinding[] = []
+  for (const v of cheap.vulnerabilities) {
+    if (!['critical', 'high'].includes(v.severity)) continue
+    const key = `${v.filePath}:${v.lineNumber}:${v.category}`
+    if (!validatedKeys.has(key)) {
+      rejected.push({
+        repo: cheap.repoName,
+        file: v.filePath,
+        line: v.lineNumber,
+        category: v.category,
+        title: v.title,
+        severity: v.severity,
+        lineContent: v.lineContent?.slice(0, 100) || '',
+      })
+    }
+  }
+  return rejected
+}
+
+function analyzeCategory(category: string, findings: RejectedFinding[], cheapCount: number, validatedCount: number): CategoryAnalysis {
+  const byPathType: Record<string, RejectedFinding[]> = {}
+  const byTitle: Record<string, number> = {}
+
+  for (const f of findings) {
+    const pathType = classifyPath(f.file)
+    if (!byPathType[pathType]) byPathType[pathType] = []
+    byPathType[pathType].push(f)
+
+    byTitle[f.title] = (byTitle[f.title] || 0) + 1
+  }
+
+  return {
+    category,
+    cheapCount,
+    validatedCount,
+    rejectedCount: findings.length,
+    fpRate: cheapCount > 0 ? Math.round((findings.length / cheapCount) * 100) : 0,
+    byPathType,
+    byTitle,
+    sampleFindings: findings.slice(0, 10),
+  }
+}
+
+function generateMarkdown(analyses: CategoryAnalysis[]): string {
+  const lines: string[] = []
+
+  lines.push('# M7: False Positive Deep Analysis')
+  lines.push('')
+  lines.push('> This document analyzes exactly where the scanner is generating false positives')
+  lines.push('> to guide targeted improvements to the heuristics.')
+  lines.push('')
+  lines.push('## Executive Summary')
+  lines.push('')
+  lines.push('**Problem:** 69% of Critical+High findings in cheap scans are false positives that')
+  lines.push('require expensive AI validation to filter. We need to improve the heuristics to')
+  lines.push('reduce this noise at the source.')
+  lines.push('')
+  lines.push('## Category FP Rates (Critical+High only)')
+  lines.push('')
+  lines.push('| Category | Cheap | Validated | Rejected | FP Rate |')
+  lines.push('|----------|-------|-----------|----------|---------|')
+
+  for (const a of analyses.sort((a, b) => b.rejectedCount - a.rejectedCount)) {
+    lines.push(`| ${a.category} | ${a.cheapCount} | ${a.validatedCount} | ${a.rejectedCount} | **${a.fpRate}%** |`)
+  }
+
+  lines.push('')
+  lines.push('## Detailed Analysis by Category')
+  lines.push('')
+
+  for (const a of analyses.sort((a, b) => b.rejectedCount - a.rejectedCount)) {
+    if (a.rejectedCount === 0) continue
+
+    lines.push(`### ${a.category}`)
+    lines.push('')
+    lines.push(`- **FP Rate:** ${a.fpRate}% (${a.rejectedCount}/${a.cheapCount} rejected)`)
+    lines.push('')
+
+    // Path type breakdown
+    lines.push('**Where FPs occur:**')
+    lines.push('')
+    for (const [pathType, findings] of Object.entries(a.byPathType)) {
+      lines.push(`- ${pathType}: ${findings.length} findings (${Math.round((findings.length / a.rejectedCount) * 100)}%)`)
+    }
+    lines.push('')
+
+    // Title breakdown
+    const sortedTitles = Object.entries(a.byTitle).sort((a, b) => b[1] - a[1])
+    lines.push('**Common patterns:**')
+    lines.push('')
+    for (const [title, count] of sortedTitles.slice(0, 5)) {
+      lines.push(`- "${title}": ${count} occurrences`)
+    }
+    lines.push('')
+
+    // Sample findings
+    lines.push('**Sample rejected findings:**')
+    lines.push('')
+    lines.push('```')
+    for (const f of a.sampleFindings.slice(0, 5)) {
+      lines.push(`${f.file}:${f.line}`)
+      lines.push(`  Title: ${f.title}`)
+      lines.push(`  Code: ${f.lineContent}`)
+      lines.push('')
+    }
+    lines.push('```')
+    lines.push('')
+
+    // Recommendations
+    lines.push('**Tuning recommendations:**')
+    lines.push('')
+
+    // Category-specific recommendations
+    if (a.category === 'ai_endpoint_unprotected') {
+      const examplePct = a.byPathType['examples'] ? Math.round((a.byPathType['examples'].length / a.rejectedCount) * 100) : 0
+      if (examplePct > 50) {
+        lines.push(`- ${examplePct}% of FPs are in /examples/ directories. Add path-based severity downgrade.`)
+      }
+      lines.push('- Check for global middleware patterns more aggressively')
+      lines.push('- Recognize demo/tutorial context from surrounding code')
+    } else if (a.category === 'ai_overpermissive_tool') {
+      lines.push('- Distinguish between library definitions (intentionally flexible) and app usage')
+      lines.push('- Check if tools have sandboxing/restrictions defined elsewhere')
+      lines.push('- Look for permission checks in tool implementation')
+    } else if (a.category === 'ai_rag_exfiltration') {
+      const libPct = a.byPathType['library'] ? Math.round((a.byPathType['library'].length / a.rejectedCount) * 100) : 0
+      if (libPct > 50) {
+        lines.push(`- ${libPct}% of FPs are in library code. Library base classes are intentionally generic.`)
+        lines.push('- Downgrade library code to info severity (consumers add filters)')
+      }
+      lines.push('- Look for filter parameters in method signatures')
+    } else if (a.category === 'ai_unsafe_execution') {
+      const examplePct = a.byPathType['examples'] ? Math.round((a.byPathType['examples'].length / a.rejectedCount) * 100) : 0
+      lines.push('- Check if path comes from trusted source (config, not user input)')
+      if (examplePct > 30) {
+        lines.push(`- ${examplePct}% in examples - consider demo context`)
+      }
+    } else if (a.category === 'hardcoded_secret') {
+      lines.push('- These are likely test/fixture data - check file context')
+      lines.push('- Look for variable names containing "test", "mock", "example"')
+      lines.push('- Check entropy threshold - may be too sensitive')
+    }
+    lines.push('')
+  }
+
+  // Overall recommendations
+  lines.push('## Overall Recommendations')
+  lines.push('')
+  lines.push('### Quick Wins (High Impact, Low Effort)')
+  lines.push('')
+  lines.push('1. **Path-based severity adjustment:** Downgrade findings in `/examples/` directories to info')
+  lines.push('2. **Library code handling:** Flag library base classes as "intentionally generic" with lower severity')
+  lines.push('3. **Test file handling:** Already done, but verify it covers all patterns')
+  lines.push('')
+  lines.push('### Medium-Term Improvements')
+  lines.push('')
+  lines.push('1. **Better context detection:** Look at surrounding code for security indicators')
+  lines.push('2. **Cross-file analysis:** Check if protection exists in middleware/imports')
+  lines.push('3. **Comment analysis:** Look for "// example", "// demo", "// for testing" patterns')
+  lines.push('')
+  lines.push('### Cost Reduction Strategy')
+  lines.push('')
+  lines.push('If we can reduce the FP rate from 69% to 30% through heuristic improvements:')
+  lines.push('- AI validation calls would drop by ~57%')
+  lines.push('- $3 scan cost would become ~$1.30')
+  lines.push('- Better user experience (less noise to review)')
+  lines.push('')
+
+  return lines.join('\n')
+}
+
+async function main() {
+  console.log('Loading scan results...')
+
+  // Find rejected findings from all repos
+  const allRejected: RejectedFinding[] = [
+    ...findRejectedFindings('ai-cheap.json', 'ai-validated.json'),
+    ...findRejectedFindings('langchainjs-cheap.json', 'langchainjs-validated.json'),
+    ...findRejectedFindings('anthropic-cookbook-cheap.json', 'anthropic-cookbook-validated.json'),
+    ...findRejectedFindings('openai-cookbook-cheap.json', 'openai-cookbook-validated.json'),
+  ]
+
+  console.log(`Found ${allRejected.length} rejected findings (FPs)`)
+
+  // Load cheap scan totals by category
+  const cheapTotals: Record<string, number> = {}
+  const validatedTotals: Record<string, number> = {}
+
+  const cheapFiles = ['ai-cheap.json', 'langchainjs-cheap.json', 'anthropic-cookbook-cheap.json', 'openai-cookbook-cheap.json']
+  const validatedFiles = ['ai-validated.json', 'langchainjs-validated.json', 'anthropic-cookbook-validated.json', 'openai-cookbook-validated.json']
+
+  for (const f of cheapFiles) {
+    const result = loadScanResult(f)
+    if (!result) continue
+    for (const v of result.vulnerabilities) {
+      if (!['critical', 'high'].includes(v.severity)) continue
+      cheapTotals[v.category] = (cheapTotals[v.category] || 0) + 1
+    }
+  }
+
+  for (const f of validatedFiles) {
+    const result = loadScanResult(f)
+    if (!result) continue
+    for (const v of result.vulnerabilities) {
+      if (!['critical', 'high'].includes(v.severity)) continue
+      validatedTotals[v.category] = (validatedTotals[v.category] || 0) + 1
+    }
+  }
+
+  // Group rejected by category
+  const byCategory: Record<string, RejectedFinding[]> = {}
+  for (const f of allRejected) {
+    if (!byCategory[f.category]) byCategory[f.category] = []
+    byCategory[f.category].push(f)
+  }
+
+  // Analyze each category
+  const analyses: CategoryAnalysis[] = []
+  for (const [category, findings] of Object.entries(byCategory)) {
+    analyses.push(analyzeCategory(
+      category,
+      findings,
+      cheapTotals[category] || 0,
+      validatedTotals[category] || 0
+    ))
+  }
+
+  // Also add categories with 0% FP rate
+  for (const [category, count] of Object.entries(cheapTotals)) {
+    if (!byCategory[category]) {
+      analyses.push({
+        category,
+        cheapCount: count,
+        validatedCount: validatedTotals[category] || 0,
+        rejectedCount: 0,
+        fpRate: 0,
+        byPathType: {},
+        byTitle: {},
+        sampleFindings: [],
+      })
+    }
+  }
+
+  // Generate report
+  const markdown = generateMarkdown(analyses)
+
+  // Ensure output directory exists
+  const outputDir = path.dirname(OUTPUT_PATH)
+  if (!fs.existsSync(outputDir)) {
+    fs.mkdirSync(outputDir, { recursive: true })
+  }
+
+  fs.writeFileSync(OUTPUT_PATH, markdown)
+  console.log(`\nReport saved to: ${OUTPUT_PATH}`)
+
+  // Print summary
+  console.log('\n=== SUMMARY ===')
+  console.log('Categories with highest FP rates:')
+  for (const a of analyses.sort((a, b) => b.fpRate - a.fpRate).slice(0, 5)) {
+    if (a.cheapCount === 0) continue
+    console.log(`  ${a.category}: ${a.fpRate}% FP rate (${a.rejectedCount}/${a.cheapCount})`)
+  }
+}
+
+main().catch(console.error)
package/src/__tests__/validation/run-validation.ts
@@ -0,0 +1,364 @@
+#!/usr/bin/env npx tsx
+/**
+ * M7: Real-Repo Validation Script
+ *
+ * Runs security scans on real-world AI/LLM codebases to validate
+ * scanner effectiveness before beta launch.
+ *
+ * Target repos:
+ * - langchainjs (LangChain.js)
+ * - ai (Vercel AI SDK)
+ * - openai-cookbook
+ * - anthropic-cookbook
+ *
+ * Usage:
+ *   npx tsx packages/scanner/src/__tests__/validation/run-validation.ts
+ *   npx tsx packages/scanner/src/__tests__/validation/run-validation.ts --repo langchainjs
+ *   npx tsx packages/scanner/src/__tests__/validation/run-validation.ts --depth cheap
+ */
+
+import * as fs from 'fs'
+import * as path from 'path'
+import { glob } from 'glob'
+import { runScan, type ScanFile, type ScanResult, type ScanDepth } from '../../index'
+
+// Configuration
+const VALIDATION_DIR = path.join(__dirname, '../../../validation-repos')
+const RESULTS_DIR = path.join(__dirname, '../../../validation-results')
+
+const TARGET_REPOS = ['langchainjs', 'ai', 'openai-cookbook', 'anthropic-cookbook']
+const SCAN_DEPTHS: ScanDepth[] = ['cheap', 'validated']
+
+// File patterns to scan
+const INCLUDE_PATTERNS = [
+  '**/*.ts',
+  '**/*.tsx',
+  '**/*.js',
+  '**/*.jsx',
+  '**/*.py',
+  '**/*.json',
+  '**/*.yaml',
+  '**/*.yml',
+]
+
+// Patterns to exclude
+const EXCLUDE_PATTERNS = [
+  '**/node_modules/**',
+  '**/dist/**',
+  '**/build/**',
+  '**/.git/**',
+  '**/coverage/**',
+  '**/__pycache__/**',
+  '**/venv/**',
+  '**/.venv/**',
+  '**/vendor/**',
+  '**/*.min.js',
+  '**/*.bundle.js',
+  '**/package-lock.json',
+  '**/yarn.lock',
+  '**/pnpm-lock.yaml',
+]
+
+// Max file size (50KB as per scanner spec)
+const MAX_FILE_SIZE = 50 * 1024
+
+interface ValidationConfig {
+  repos: string[]
+  depths: ScanDepth[]
+  maxFilesPerRepo?: number
+  verbose: boolean
+}
+
+/**
+ * Collect scannable files from a repository
+ */
+async function collectFiles(repoPath: string, maxFiles?: number): Promise<ScanFile[]> {
+  const files: ScanFile[] = []
+
+  for (const pattern of INCLUDE_PATTERNS) {
+    const matches = await glob(pattern, {
+      cwd: repoPath,
+      ignore: EXCLUDE_PATTERNS,
+      nodir: true,
+      absolute: false,
+    })
+
+    for (const match of matches) {
+      if (maxFiles && files.length >= maxFiles) break
+
+      const filePath = path.join(repoPath, match)
+
+      try {
+        const stats = fs.statSync(filePath)
+        if (stats.size > MAX_FILE_SIZE) continue
+
+        const content = fs.readFileSync(filePath, 'utf-8')
+        const ext = path.extname(match).slice(1)
+
+        files.push({
+          path: match,
+          content,
+          language: getLanguage(ext),
+          size: stats.size,
+        })
+      } catch (err) {
+        // Skip files that can't be read
+        continue
+      }
+    }
+
+    if (maxFiles && files.length >= maxFiles) break
+  }
+
+  return files
+}
+
+/**
+ * Map file extension to language
+ */
+function getLanguage(ext: string): string {
+  const langMap: Record<string, string> = {
+    ts: 'typescript',
+    tsx: 'typescript',
+    js: 'javascript',
+    jsx: 'javascript',
+    py: 'python',
+    json: 'json',
+    yaml: 'yaml',
+    yml: 'yaml',
+  }
+  return langMap[ext] || ext
+}
+
+/**
+ * Run a scan on a repository
+ */
+async function scanRepo(
+  repoName: string,
+  depth: ScanDepth,
+  verbose: boolean,
+  maxFiles?: number
+): Promise<ScanResult> {
+  const repoPath = path.join(VALIDATION_DIR, repoName)
+
+  if (!fs.existsSync(repoPath)) {
+    throw new Error(`Repository not found: ${repoPath}. Run: git clone --depth 1 https://github.com/<org>/${repoName}.git ${repoPath}`)
+  }
+
+  console.log(`\n${'='.repeat(60)}`)
+  console.log(`Scanning: ${repoName} (depth: ${depth})`)
+  console.log('='.repeat(60))
+
+  // Collect files
+  const startCollect = Date.now()
+  const files = await collectFiles(repoPath, maxFiles)
+  const collectTime = Date.now() - startCollect
+
+  console.log(`Collected ${files.length} files in ${collectTime}ms`)
+
+  if (files.length === 0) {
+    throw new Error(`No scannable files found in ${repoPath}`)
+  }
+
+  // Run scan
+  const result = await runScan(
+    files,
+    {
+      name: repoName,
+      url: `https://github.com/${getRepoOrg(repoName)}/${repoName}`,
+      branch: 'main',
+    },
+    {
+      enableAI: depth !== 'cheap', // Only enable AI for validated/deep
+      scanDepth: depth,
+    },
+    verbose ? (progress) => {
+      console.log(`  [${progress.status}] ${progress.message}`)
+    } : undefined
+  )
+
+  // Print summary
+  console.log(`\nScan complete in ${result.scanDuration}ms`)
+  console.log(`  Files scanned: ${result.filesScanned}`)
+  console.log(`  Total findings: ${result.vulnerabilities.length}`)
+  console.log(`  Severity breakdown:`)
+  console.log(`    Critical: ${result.severityCounts.critical}`)
+  console.log(`    High: ${result.severityCounts.high}`)
+  console.log(`    Medium: ${result.severityCounts.medium}`)
+  console.log(`    Low: ${result.severityCounts.low}`)
+  console.log(`    Info: ${result.severityCounts.info}`)
+
+  return result
+}
+
+/**
+ * Get GitHub organization for a repo
+ */
+function getRepoOrg(repoName: string): string {
+  const orgs: Record<string, string> = {
+    langchainjs: 'langchain-ai',
+    ai: 'vercel',
+    'openai-cookbook': 'openai',
+    'anthropic-cookbook': 'anthropics',
+  }
+  return orgs[repoName] || 'unknown'
+}
+
+/**
+ * Save scan results to file
+ */
+function saveResults(repoName: string, depth: ScanDepth, result: ScanResult): string {
+  const outputPath = path.join(RESULTS_DIR, `${repoName}-${depth}.json`)
+
+  // Ensure results directory exists
+  if (!fs.existsSync(RESULTS_DIR)) {
+    fs.mkdirSync(RESULTS_DIR, { recursive: true })
+  }
+
+  fs.writeFileSync(outputPath, JSON.stringify(result, null, 2))
+  console.log(`Results saved to: ${outputPath}`)
+
+  return outputPath
+}
+
+/**
+ * Parse command line arguments
+ */
+function parseArgs(): ValidationConfig {
+  const args = process.argv.slice(2)
+  const config: ValidationConfig = {
+    repos: TARGET_REPOS,
+    depths: SCAN_DEPTHS,
+    verbose: false,
+  }
+
+  for (let i = 0; i < args.length; i++) {
+    const arg = args[i]
+
+    if (arg === '--repo' && args[i + 1]) {
+      const repo = args[i + 1]
+      if (!TARGET_REPOS.includes(repo)) {
+        console.error(`Unknown repo: ${repo}. Valid repos: ${TARGET_REPOS.join(', ')}`)
+        process.exit(1)
+      }
+      config.repos = [repo]
+      i++
+    } else if (arg === '--depth' && args[i + 1]) {
+      const depth = args[i + 1] as ScanDepth
+      if (!['cheap', 'validated', 'deep'].includes(depth)) {
+        console.error(`Invalid depth: ${depth}. Valid depths: cheap, validated, deep`)
+        process.exit(1)
+      }
+      config.depths = [depth]
+      i++
+    } else if (arg === '--max-files' && args[i + 1]) {
+      config.maxFilesPerRepo = parseInt(args[i + 1], 10)
+      i++
+    } else if (arg === '--verbose' || arg === '-v') {
+      config.verbose = true
+    } else if (arg === '--help' || arg === '-h') {
+      console.log(`
+M7: Real-Repo Validation Script
+
+Usage:
+  npx tsx run-validation.ts [options]
+
+Options:
+  --repo <name>      Scan only this repo (langchainjs, ai, openai-cookbook, anthropic-cookbook)
+  --depth <depth>    Use only this scan depth (cheap, validated, deep)
+  --max-files <n>    Limit files per repo (for faster testing)
+  --verbose, -v      Show detailed progress
+  --help, -h         Show this help
+
+Examples:
+  npx tsx run-validation.ts                            # Scan all repos at all depths
+  npx tsx run-validation.ts --repo langchainjs         # Scan only LangChain.js
+  npx tsx run-validation.ts --depth cheap              # Only cheap scans
+  npx tsx run-validation.ts --repo ai --max-files 50   # Quick test on Vercel AI SDK
+`)
+      process.exit(0)
+    }
+  }
+
+  return config
+}
+
+/**
+ * Main entry point
+ */
+async function main() {
+  const config = parseArgs()
+  const results: Map<string, ScanResult> = new Map()
+
+  console.log('\n' + '='.repeat(60))
+  console.log('M7: REAL-REPO VALIDATION')
+  console.log('='.repeat(60))
+  console.log(`Repos: ${config.repos.join(', ')}`)
+  console.log(`Depths: ${config.depths.join(', ')}`)
+  if (config.maxFilesPerRepo) {
+    console.log(`Max files per repo: ${config.maxFilesPerRepo}`)
+  }
+
+  // Check repos exist
+  for (const repo of config.repos) {
+    const repoPath = path.join(VALIDATION_DIR, repo)
+    if (!fs.existsSync(repoPath)) {
+      console.error(`\nError: Repository not found: ${repoPath}`)
+      console.error(`Please clone it first:`)
+      console.error(`  cd ${VALIDATION_DIR}`)
+      console.error(`  git clone --depth 1 https://github.com/${getRepoOrg(repo)}/${repo}.git`)
+      process.exit(1)
+    }
+  }
+
+  // Run scans
+  const totalScans = config.repos.length * config.depths.length
+  let scanCount = 0
+
+  for (const repo of config.repos) {
+    for (const depth of config.depths) {
+      scanCount++
+      console.log(`\n[${scanCount}/${totalScans}] Starting scan...`)
+
+      try {
+        const result = await scanRepo(repo, depth, config.verbose, config.maxFilesPerRepo)
+        const key = `${repo}-${depth}`
+        results.set(key, result)
+        saveResults(repo, depth, result)
+      } catch (err) {
+        console.error(`Error scanning ${repo} at ${depth}:`, err)
+      }
+    }
+  }
+
+  // Print summary
+  console.log('\n' + '='.repeat(60))
+  console.log('VALIDATION SUMMARY')
+  console.log('='.repeat(60))
+
+  for (const [key, result] of results) {
+    const [repo, depth] = key.split('-')
+    const mediumPlus = result.severityCounts.critical +
+      result.severityCounts.high +
+      result.severityCounts.medium
+
+    console.log(`\n${repo} (${depth}):`)
+    console.log(`  Files: ${result.filesScanned}`)
+    console.log(`  Total findings: ${result.vulnerabilities.length}`)
+    console.log(`  Medium+ findings: ${mediumPlus} (to triage)`)
+    console.log(`  Duration: ${result.scanDuration}ms`)
+  }
+
+  console.log('\n' + '='.repeat(60))
+  console.log('Next steps:')
+  console.log('1. Run analyze-results.ts to generate detailed metrics')
+  console.log('2. Review medium+ findings for FP triage')
+  console.log('3. Update docs/RESULTSCOMPARISON.md with findings')
+  console.log('='.repeat(60))
+}
+
+// Run
+main().catch(err => {
+  console.error('Validation failed:', err)
+  process.exit(1)
+})