@bouncesecurity/aghast 0.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +661 -0
- package/README.md +111 -0
- package/config/prompts/generic-instructions.md +56 -0
- package/config/prompts/test-cheaper-instructions.md +57 -0
- package/dist/check-library.d.ts +87 -0
- package/dist/check-library.d.ts.map +1 -0
- package/dist/check-library.js +374 -0
- package/dist/check-library.js.map +1 -0
- package/dist/claude-code-provider.d.ts +26 -0
- package/dist/claude-code-provider.d.ts.map +1 -0
- package/dist/claude-code-provider.js +247 -0
- package/dist/claude-code-provider.js.map +1 -0
- package/dist/cli.d.ts +13 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +78 -0
- package/dist/cli.js.map +1 -0
- package/dist/colors.d.ts +7 -0
- package/dist/colors.d.ts.map +1 -0
- package/dist/colors.js +18 -0
- package/dist/colors.js.map +1 -0
- package/dist/error-codes.d.ts +42 -0
- package/dist/error-codes.d.ts.map +1 -0
- package/dist/error-codes.js +60 -0
- package/dist/error-codes.js.map +1 -0
- package/dist/formatters/index.d.ts +10 -0
- package/dist/formatters/index.d.ts.map +1 -0
- package/dist/formatters/index.js +23 -0
- package/dist/formatters/index.js.map +1 -0
- package/dist/formatters/json-formatter.d.ts +11 -0
- package/dist/formatters/json-formatter.d.ts.map +1 -0
- package/dist/formatters/json-formatter.js +11 -0
- package/dist/formatters/json-formatter.js.map +1 -0
- package/dist/formatters/sarif-formatter.d.ts +18 -0
- package/dist/formatters/sarif-formatter.d.ts.map +1 -0
- package/dist/formatters/sarif-formatter.js +103 -0
- package/dist/formatters/sarif-formatter.js.map +1 -0
- package/dist/formatters/types.d.ts +11 -0
- package/dist/formatters/types.d.ts.map +1 -0
- package/dist/formatters/types.js +6 -0
- package/dist/formatters/types.js.map +1 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +406 -0
- package/dist/index.js.map +1 -0
- package/dist/logging.d.ts +26 -0
- package/dist/logging.d.ts.map +1 -0
- package/dist/logging.js +79 -0
- package/dist/logging.js.map +1 -0
- package/dist/mock-ai-provider.d.ts +18 -0
- package/dist/mock-ai-provider.d.ts.map +1 -0
- package/dist/mock-ai-provider.js +28 -0
- package/dist/mock-ai-provider.js.map +1 -0
- package/dist/new-check.d.ts +13 -0
- package/dist/new-check.d.ts.map +1 -0
- package/dist/new-check.js +405 -0
- package/dist/new-check.js.map +1 -0
- package/dist/prompt-template.d.ts +12 -0
- package/dist/prompt-template.d.ts.map +1 -0
- package/dist/prompt-template.js +35 -0
- package/dist/prompt-template.js.map +1 -0
- package/dist/provider-registry.d.ts +15 -0
- package/dist/provider-registry.d.ts.map +1 -0
- package/dist/provider-registry.js +27 -0
- package/dist/provider-registry.js.map +1 -0
- package/dist/repository-analyzer.d.ts +68 -0
- package/dist/repository-analyzer.d.ts.map +1 -0
- package/dist/repository-analyzer.js +230 -0
- package/dist/repository-analyzer.js.map +1 -0
- package/dist/response-parser.d.ts +12 -0
- package/dist/response-parser.d.ts.map +1 -0
- package/dist/response-parser.js +109 -0
- package/dist/response-parser.js.map +1 -0
- package/dist/runtime-config.d.ts +15 -0
- package/dist/runtime-config.d.ts.map +1 -0
- package/dist/runtime-config.js +73 -0
- package/dist/runtime-config.js.map +1 -0
- package/dist/sarif-parser.d.ts +20 -0
- package/dist/sarif-parser.d.ts.map +1 -0
- package/dist/sarif-parser.js +76 -0
- package/dist/sarif-parser.js.map +1 -0
- package/dist/scan-runner.d.ts +29 -0
- package/dist/scan-runner.d.ts.map +1 -0
- package/dist/scan-runner.js +559 -0
- package/dist/scan-runner.js.map +1 -0
- package/dist/semgrep-runner.d.ts +25 -0
- package/dist/semgrep-runner.d.ts.map +1 -0
- package/dist/semgrep-runner.js +100 -0
- package/dist/semgrep-runner.js.map +1 -0
- package/dist/snippet-extractor.d.ts +25 -0
- package/dist/snippet-extractor.d.ts.map +1 -0
- package/dist/snippet-extractor.js +56 -0
- package/dist/snippet-extractor.js.map +1 -0
- package/dist/types.d.ts +206 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +19 -0
- package/dist/types.js.map +1 -0
- package/package.json +55 -0
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SARIF 2.1.0 parser for Semgrep output.
|
|
3
|
+
* Extracts CheckTarget[] from SARIF results.
|
|
4
|
+
*/
|
|
5
|
+
/**
|
|
6
|
+
* Parse SARIF 2.1.0 JSON content into CheckTarget[].
|
|
7
|
+
* Skips results with missing location fields.
|
|
8
|
+
* Throws on invalid JSON or missing SARIF structure.
|
|
9
|
+
*/
|
|
10
|
+
export function parseSARIF(sarifContent) {
    // Decode the document first; a parse failure is reported as a SARIF error,
    // not a raw SyntaxError.
    let sarifDoc;
    try {
        sarifDoc = JSON.parse(sarifContent);
    }
    catch {
        throw new Error('Invalid SARIF: malformed JSON');
    }
    if (!sarifDoc.runs || !Array.isArray(sarifDoc.runs)) {
        throw new Error('Invalid SARIF: missing "runs" array');
    }
    const collected = [];
    for (const run of sarifDoc.runs) {
        // Runs without a results array contribute nothing.
        if (!run.results || !Array.isArray(run.results)) {
            continue;
        }
        for (const result of run.results) {
            // Only the first location of each result is considered.
            const physical = result.locations?.[0]?.physicalLocation;
            if (!physical)
                continue;
            const file = physical.artifactLocation?.uri;
            const startLine = physical.region?.startLine;
            // Skip results missing required location fields
            if (!file || startLine === undefined) {
                continue;
            }
            const entry = {
                file,
                startLine,
                // A missing endLine collapses to a single-line region.
                endLine: physical.region?.endLine ?? startLine,
                message: result.message?.text ?? '',
            };
            const snippetText = physical.region?.snippet?.text;
            if (snippetText) {
                entry.snippet = snippetText;
            }
            collected.push(entry);
        }
    }
    return collected;
}
|
|
55
|
+
/**
|
|
56
|
+
* Deduplicate targets by file:startLine:endLine key.
|
|
57
|
+
*/
|
|
58
|
+
export function deduplicateTargets(targets) {
    // Keep only the first target seen for each file:startLine:endLine location.
    const seenKeys = new Set();
    return targets.filter((target) => {
        const locationKey = `${target.file}:${target.startLine}:${target.endLine}`;
        if (seenKeys.has(locationKey)) {
            return false;
        }
        seenKeys.add(locationKey);
        return true;
    });
}
|
|
70
|
+
/**
|
|
71
|
+
* Limit the number of targets to a maximum.
|
|
72
|
+
*/
|
|
73
|
+
export function limitTargets(targets, maxTargets) {
    // slice() returns a fresh array, so the caller's input is never mutated.
    return targets.slice(0, maxTargets);
}
|
|
76
|
+
//# sourceMappingURL=sarif-parser.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sarif-parser.js","sourceRoot":"","sources":["../src/sarif-parser.ts"],"names":[],"mappings":"AAAA;;;GAGG;AA2BH;;;;GAIG;AACH,MAAM,UAAU,UAAU,CAAC,YAAoB;IAC7C,IAAI,GAAkB,CAAC;IACvB,IAAI,CAAC;QACH,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,YAAY,CAAkB,CAAC;IAClD,CAAC;IAAC,MAAM,CAAC;QACP,MAAM,IAAI,KAAK,CAAC,+BAA+B,CAAC,CAAC;IACnD,CAAC;IAED,IAAI,CAAC,GAAG,CAAC,IAAI,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC;QAC1C,MAAM,IAAI,KAAK,CAAC,qCAAqC,CAAC,CAAC;IACzD,CAAC;IAED,MAAM,OAAO,GAAkB,EAAE,CAAC;IAElC,KAAK,MAAM,GAAG,IAAI,GAAG,CAAC,IAAI,EAAE,CAAC;QAC3B,IAAI,CAAC,GAAG,CAAC,OAAO,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC;YAChD,SAAS;QACX,CAAC;QAED,KAAK,MAAM,MAAM,IAAI,GAAG,CAAC,OAAO,EAAE,CAAC;YACjC,IAAI,CAAC,MAAM,CAAC,SAAS,IAAI,MAAM,CAAC,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;gBACvD,SAAS;YACX,CAAC;YAED,MAAM,GAAG,GAAG,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC;YAChC,MAAM,IAAI,GAAG,GAAG,EAAE,gBAAgB,CAAC;YACnC,IAAI,CAAC,IAAI;gBAAE,SAAS;YAEpB,MAAM,IAAI,GAAG,IAAI,CAAC,gBAAgB,EAAE,GAAG,CAAC;YACxC,MAAM,SAAS,GAAG,IAAI,CAAC,MAAM,EAAE,SAAS,CAAC;YAEzC,gDAAgD;YAChD,IAAI,CAAC,IAAI,IAAI,SAAS,KAAK,SAAS,EAAE,CAAC;gBACrC,SAAS;YACX,CAAC;YAED,MAAM,OAAO,GAAG,IAAI,CAAC,MAAM,EAAE,OAAO,IAAI,SAAS,CAAC;YAElD,MAAM,MAAM,GAAgB;gBAC1B,IAAI;gBACJ,SAAS;gBACT,OAAO;gBACP,OAAO,EAAE,MAAM,CAAC,OAAO,EAAE,IAAI,IAAI,EAAE;aACpC,CAAC;YAEF,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,IAAI,EAAE,CAAC;gBAC/B,MAAM,CAAC,OAAO,GAAG,IAAI,CAAC,MAAM,CAAC,OAAO,CAAC,IAAI,CAAC;YAC5C,CAAC;YAED,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QACvB,CAAC;IACH,CAAC;IAED,OAAO,OAAO,CAAC;AACjB,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,kBAAkB,CAAC,OAAsB;IACvD,MAAM,IAAI,GAAG,IAAI,GAAG,EAAU,CAAC;IAC/B,MAAM,MAAM,GAAkB,EAAE,CAAC;IAEjC,KAAK,MAAM,MAAM,IAAI,OAAO,EAAE,CAAC;QAC7B,MAAM,GAAG,GAAG,GAAG,MAAM,CAAC,IAAI,IAAI,MAAM,CAAC,SAAS,IAAI,MAAM,CAAC,OAAO,EAAE,CAAC;QACnE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC;YACnB,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;YACd,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;QACtB,CAAC;IACH,CAAC;IAED,OAAO,MAAM,CAAC;AAChB,CAAC;AAED;;GAE
G;AACH,MAAM,UAAU,YAAY,CAAC,OAAsB,EAAE,UAAkB;IACrE,OAAO,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC;AACtC,CAAC"}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Scan runner (orchestrator).
|
|
3
|
+
* Runs security checks against a repository and produces ScanResults.
|
|
4
|
+
* Implements the core workflow from spec Section 2.2.
|
|
5
|
+
*/
|
|
6
|
+
import { type AIProvider, type RepositoryInfo, type CheckDetails, type SecurityCheck, type ScanResults } from './types.js';
|
|
7
|
+
export interface MultiScanOptions {
    /** Path of the repository to scan. */
    repositoryPath: string;
    /** Checks to execute, each paired with its loaded details (name, instruction content). */
    checks: Array<{
        check: SecurityCheck;
        details: CheckDetails;
    }>;
    /** Provider that executes AI-backed checks; checks requiring AI error without one. */
    aiProvider?: AIProvider;
    /** Model name recorded in the results' aiProvider metadata. */
    aiModelName?: string;
    /** Provider name recorded in the results (falls back to 'claude-code'). */
    aiProviderName?: string;
    /** Default target concurrency for multi-target checks; a per-check setting takes precedence. */
    concurrency?: number;
    /** Pre-analyzed repository info; when provided, repository analysis is skipped. */
    repositoryInfo?: RepositoryInfo;
    /** Config directory passed through when building check prompts. */
    configDir?: string;
    /** Generic prompt text passed through when building check prompts. */
    genericPrompt?: string;
}
|
|
21
|
+
/**
|
|
22
|
+
* Generate a scanId in the format: scan-<timestamp>-<hash>
|
|
23
|
+
*/
|
|
24
|
+
export declare function generateScanId(): string;
|
|
25
|
+
/**
|
|
26
|
+
* Run multiple security checks and return aggregated ScanResults.
|
|
27
|
+
*/
|
|
28
|
+
export declare function runMultiScan(options: MultiScanOptions): Promise<ScanResults>;
|
|
29
|
+
//# sourceMappingURL=scan-runner.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"scan-runner.d.ts","sourceRoot":"","sources":["../src/scan-runner.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAaH,OAAO,EAGL,KAAK,UAAU,EACf,KAAK,cAAc,EAInB,KAAK,YAAY,EACjB,KAAK,aAAa,EAElB,KAAK,WAAW,EAGjB,MAAM,YAAY,CAAC;AAgFpB,MAAM,WAAW,gBAAgB;IAC/B,cAAc,EAAE,MAAM,CAAC;IACvB,MAAM,EAAE,KAAK,CAAC;QAAE,KAAK,EAAE,aAAa,CAAC;QAAC,OAAO,EAAE,YAAY,CAAA;KAAE,CAAC,CAAC;IAC/D,UAAU,CAAC,EAAE,UAAU,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,cAAc,CAAC,EAAE,cAAc,CAAC;IAChC,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,aAAa,CAAC,EAAE,MAAM,CAAC;CACxB;AAED;;GAEG;AACH,wBAAgB,cAAc,IAAI,MAAM,CAKvC;AAweD;;GAEG;AACH,wBAAsB,YAAY,CAAC,OAAO,EAAE,gBAAgB,GAAG,OAAO,CAAC,WAAW,CAAC,CAkHlF"}
|
|
@@ -0,0 +1,559 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Scan runner (orchestrator).
|
|
3
|
+
* Runs security checks against a repository and produces ScanResults.
|
|
4
|
+
* Implements the core workflow from spec Section 2.2.
|
|
5
|
+
*/
|
|
6
|
+
import { readFile } from 'node:fs/promises';
|
|
7
|
+
import { randomBytes } from 'node:crypto';
|
|
8
|
+
import { resolve, dirname } from 'node:path';
|
|
9
|
+
import { fileURLToPath } from 'node:url';
|
|
10
|
+
import { buildPrompt } from './prompt-template.js';
|
|
11
|
+
import { parseAIResponse } from './response-parser.js';
|
|
12
|
+
import { extractSnippet } from './snippet-extractor.js';
|
|
13
|
+
import { analyzeRepository } from './repository-analyzer.js';
|
|
14
|
+
import { runSemgrep } from './semgrep-runner.js';
|
|
15
|
+
import { parseSARIF, deduplicateTargets, limitTargets } from './sarif-parser.js';
|
|
16
|
+
import { logProgress, logDebug, createTimer } from './logging.js';
|
|
17
|
+
import { DEFAULT_AI_MODEL, FatalProviderError, } from './types.js';
|
|
18
|
+
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
19
|
+
const TAG = 'scan';
|
|
20
|
+
const DEFAULT_CONCURRENCY = 5;
|
|
21
|
+
/**
|
|
22
|
+
* Sum multiple TokenUsage values into one aggregate.
|
|
23
|
+
* Returns undefined if no inputs have token usage.
|
|
24
|
+
*/
|
|
25
|
+
function sumTokenUsage(usages) {
    // Ignore entries with no usage data; if nothing remains, there is no total.
    const present = usages.filter((usage) => usage !== undefined);
    if (present.length === 0) {
        return undefined;
    }
    let inputTokens = 0;
    let outputTokens = 0;
    let totalTokens = 0;
    for (const usage of present) {
        inputTokens += usage.inputTokens;
        outputTokens += usage.outputTokens;
        totalTokens += usage.totalTokens;
    }
    return { inputTokens, outputTokens, totalTokens };
}
|
|
35
|
+
/**
|
|
36
|
+
* Get the version from package.json.
|
|
37
|
+
*/
|
|
38
|
+
async function getVersion() {
    // Any failure (missing file, bad JSON) falls back to a placeholder version.
    try {
        const packageJsonPath = resolve(__dirname, '..', 'package.json');
        const rawJson = await readFile(packageJsonPath, 'utf-8');
        const manifest = JSON.parse(rawJson);
        return manifest.version ?? '0.0.0';
    }
    catch {
        return '0.0.0';
    }
}
|
|
48
|
+
/**
|
|
49
|
+
* Run an async function over items with bounded concurrency.
|
|
50
|
+
* Spawns min(concurrency, items.length) workers that pull from a shared index.
|
|
51
|
+
* Results are written to a pre-allocated array to preserve input order.
|
|
52
|
+
*
|
|
53
|
+
* If abortHandle is provided, workers stop picking up new items once
|
|
54
|
+
* abortHandle.aborted is set to true. In-flight items complete naturally.
|
|
55
|
+
*/
|
|
56
|
+
async function mapWithConcurrency(items, concurrency, fn, abortHandle) {
    const results = new Array(items.length);
    let cursor = 0;
    // Each worker repeatedly claims the next unprocessed index until the list
    // is exhausted or an abort is signalled. Claiming happens synchronously
    // (no await between check and increment), so indices are never duplicated.
    const runWorker = async () => {
        while (cursor < items.length) {
            if (abortHandle?.aborted) {
                break;
            }
            const index = cursor++;
            results[index] = await fn(items[index], index);
        }
    };
    const workerCount = Math.min(concurrency, items.length);
    const workers = Array.from({ length: workerCount }, () => runWorker());
    // allSettled lets every in-flight item finish before any error is surfaced.
    const outcomes = await Promise.allSettled(workers);
    for (const outcome of outcomes) {
        // Propagate the first worker failure, matching fail-fast semantics.
        if (outcome.status === 'rejected') {
            throw outcome.reason;
        }
    }
    return results;
}
|
|
80
|
+
/**
|
|
81
|
+
* Generate a scanId in the format: scan-<timestamp>-<hash>
|
|
82
|
+
*/
|
|
83
|
+
export function generateScanId() {
    // Compact the ISO timestamp down to YYYYMMDDHHMMSS (strip separators and
    // the fractional-seconds/zone suffix).
    const timestamp = new Date()
        .toISOString()
        .replace(/[-:T]/g, '')
        .replace(/\..+/, '');
    // 3 random bytes -> 6 hex chars to disambiguate same-second scans.
    const suffix = randomBytes(3).toString('hex');
    return `scan-${timestamp}-${suffix}`;
}
|
|
89
|
+
/**
|
|
90
|
+
* Enrich a raw AI issue into a full SecurityIssue.
|
|
91
|
+
* Extracts code snippets, applies check metadata, and normalizes paths.
|
|
92
|
+
*/
|
|
93
|
+
async function enrichIssue(aiIssue, checkId, checkName, repositoryPath, checkMetadata) {
    // Normalize Windows separators so reported paths are always forward-slashed.
    const normalizePath = (p) => p.replace(/\\/g, '/');
    // Snippet extraction may yield undefined (e.g. file unavailable); the field
    // is then simply omitted from the issue.
    const snippet = await extractSnippet(repositoryPath, aiIssue.file, aiIssue.startLine, aiIssue.endLine);
    const enriched = {
        checkId,
        checkName,
        file: normalizePath(aiIssue.file),
        startLine: aiIssue.startLine,
        endLine: aiIssue.endLine,
        description: aiIssue.description,
    };
    if (snippet !== undefined) {
        enriched.codeSnippet = snippet;
    }
    // Check-level severity/confidence metadata is copied onto each issue.
    const { severity, confidence } = checkMetadata ?? {};
    if (severity !== undefined) {
        enriched.severity = severity;
    }
    if (confidence !== undefined) {
        enriched.confidence = confidence;
    }
    if (aiIssue.dataFlow !== undefined) {
        // Data-flow step paths get the same separator normalization.
        enriched.dataFlow = aiIssue.dataFlow.map((step) => ({
            ...step,
            file: normalizePath(step.file),
        }));
    }
    return enriched;
}
|
|
120
|
+
/**
|
|
121
|
+
* Execute a single check against a repository via the AI provider.
|
|
122
|
+
* Routes to multi-target execution if checkTarget is configured.
|
|
123
|
+
* Handles prompt building, AI call, response parsing, issue enrichment.
|
|
124
|
+
*/
|
|
125
|
+
async function executeSingleCheck(check, checkName, checkInstructions, repositoryPath, aiProvider, checkMetadata, concurrency, configDir, genericPrompt) {
    const checkId = check.id;
    // Route to multi-target execution if configured
    if (check.checkTarget?.type === 'semgrep') {
        if (!aiProvider) {
            throw new Error(`Check "${checkId}" requires an AI provider but none was configured`);
        }
        return executeMultiTargetCheck(check, checkName, checkInstructions, repositoryPath, aiProvider, checkMetadata, concurrency, configDir, genericPrompt);
    }
    // Route to semgrep-only execution (no AI)
    if (check.checkTarget?.type === 'semgrep-only') {
        return executeSemgrepOnlyCheck(check, checkName, repositoryPath, checkMetadata);
    }
    // Default path: single AI prompt covering the whole repository.
    if (!aiProvider) {
        throw new Error(`Check "${checkId}" requires an AI provider but none was configured`);
    }
    logProgress(TAG, `Running check: ${checkName}`);
    const prompt = await buildPrompt(checkInstructions, configDir, genericPrompt);
    logDebug(TAG, `Prompt built: ${prompt.length} chars`);
    let issues = [];
    let summary;
    const checkTimer = createTimer();
    try {
        const aiResponse = await aiProvider.executeCheck(prompt, repositoryPath);
        // Timing is captured as soon as the provider returns, before parsing.
        const executionTime = checkTimer.elapsed();
        logDebug(TAG, `AI response: ${aiResponse.raw.length} chars`);
        // Prefer the provider's pre-parsed payload; fall back to parsing raw text.
        const parsed = aiResponse.parsed ?? parseAIResponse(aiResponse.raw);
        if (!parsed) {
            // Unparseable response: surface as ERROR and keep the raw text for debugging.
            logProgress(TAG, 'Result: ERROR (malformed response)');
            summary = {
                checkId,
                checkName,
                status: 'ERROR',
                issuesFound: 0,
                executionTime,
                error: 'AI provider returned malformed response',
                rawAiResponse: aiResponse.raw,
                tokenUsage: aiResponse.tokenUsage,
            };
        }
        else if (parsed.issues.length > 0) {
            // One or more findings: enrich each (snippets, metadata) and mark FAIL.
            logProgress(TAG, `Result: FAIL (${parsed.issues.length} issues)`);
            issues = await Promise.all(parsed.issues.map((aiIssue) => enrichIssue(aiIssue, checkId, checkName, repositoryPath, checkMetadata)));
            summary = {
                checkId,
                checkName,
                status: 'FAIL',
                issuesFound: issues.length,
                executionTime,
                tokenUsage: aiResponse.tokenUsage,
            };
        }
        else if (parsed.flagged) {
            // No concrete issues, but the response was flagged for human review.
            logProgress(TAG, 'Result: FLAG (AI flagged for review)');
            summary = {
                checkId,
                checkName,
                status: 'FLAG',
                issuesFound: 0,
                executionTime,
                tokenUsage: aiResponse.tokenUsage,
            };
        }
        else {
            // No issues and no flag: clean PASS.
            logProgress(TAG, 'Result: PASS');
            summary = {
                checkId,
                checkName,
                status: 'PASS',
                issuesFound: 0,
                executionTime,
                tokenUsage: aiResponse.tokenUsage,
            };
        }
    }
    catch (err) {
        // Fatal errors must propagate up to abort the entire scan
        if (err instanceof FatalProviderError) {
            throw err;
        }
        // Any other failure is recorded as an ERROR summary so the scan continues.
        const executionTime = checkTimer.elapsed();
        const errorMsg = err instanceof Error ? err.message : String(err);
        logProgress(TAG, `Result: ERROR (${errorMsg})`);
        summary = {
            checkId,
            checkName,
            status: 'ERROR',
            issuesFound: 0,
            executionTime,
            error: errorMsg,
        };
    }
    return { summary, issues };
}
|
|
219
|
+
/**
|
|
220
|
+
* Map a SARIF CheckTarget directly to a SecurityIssue (for semgrep-only checks).
|
|
221
|
+
* Extracts code snippet from source file via extractSnippet().
|
|
222
|
+
*/
|
|
223
|
+
async function mapTargetToIssue(target, checkId, checkName, repositoryPath, checkMetadata) {
    // Snippet extraction may yield undefined (e.g. file unavailable); the
    // field is then omitted from the issue.
    const snippet = await extractSnippet(repositoryPath, target.file, target.startLine, target.endLine);
    const issue = {
        checkId,
        checkName,
        // Normalize Windows separators so reported paths are forward-slashed.
        file: target.file.replace(/\\/g, '/'),
        startLine: target.startLine,
        endLine: target.endLine,
        description: target.message || 'Semgrep finding',
    };
    if (snippet !== undefined) {
        issue.codeSnippet = snippet;
    }
    // Check-level severity/confidence metadata is copied onto the issue.
    const { severity, confidence } = checkMetadata ?? {};
    if (severity !== undefined) {
        issue.severity = severity;
    }
    if (confidence !== undefined) {
        issue.confidence = confidence;
    }
    return issue;
}
|
|
244
|
+
/**
|
|
245
|
+
* Execute a semgrep-only check: Semgrep discovers findings, mapped directly
|
|
246
|
+
* to issues with no AI involvement.
|
|
247
|
+
*/
|
|
248
|
+
async function executeSemgrepOnlyCheck(check, checkName, repositoryPath, checkMetadata) {
    const checkId = check.id;
    const checkTarget = check.checkTarget;
    logProgress(TAG, `Running semgrep-only check: ${checkName}`);
    const checkTimer = createTimer();
    try {
        // 1. Run Semgrep to discover findings
        const sarifContent = await runSemgrep({
            repositoryPath,
            rules: checkTarget.rules,
            config: checkTarget.config,
        });
        // 2. Parse, deduplicate, and limit targets
        let targets = parseSARIF(sarifContent);
        targets = deduplicateTargets(targets);
        if (checkTarget.maxTargets !== undefined) {
            targets = limitTargets(targets, checkTarget.maxTargets);
        }
        // 3. If no targets, return PASS
        if (targets.length === 0) {
            logProgress(TAG, 'Result: PASS (no findings)');
            return {
                summary: {
                    checkId,
                    checkName,
                    status: 'PASS',
                    issuesFound: 0,
                    executionTime: checkTimer.elapsed(),
                    targetsAnalyzed: 0,
                },
                issues: [],
            };
        }
        // 4. Map each target directly to a SecurityIssue (no AI)
        // Every Semgrep finding becomes an issue, so any finding at all means FAIL.
        const issues = await Promise.all(targets.map((target) => mapTargetToIssue(target, checkId, checkName, repositoryPath, checkMetadata)));
        const executionTime = checkTimer.elapsed();
        logProgress(TAG, `Result: FAIL (${issues.length} findings, ${targets.length} targets)`);
        return {
            summary: {
                checkId,
                checkName,
                status: 'FAIL',
                issuesFound: issues.length,
                executionTime,
                targetsAnalyzed: targets.length,
            },
            issues,
        };
    }
    catch (err) {
        // Any failure (Semgrep invocation, SARIF parsing, snippet extraction) is
        // recorded as an ERROR summary rather than thrown — no AI provider is
        // involved here, so there is no fatal-error path to propagate.
        const executionTime = checkTimer.elapsed();
        const errorMsg = err instanceof Error ? err.message : String(err);
        logProgress(TAG, `Result: ERROR (${errorMsg})`);
        return {
            summary: {
                checkId,
                checkName,
                status: 'ERROR',
                issuesFound: 0,
                executionTime,
                error: errorMsg,
            },
            issues: [],
        };
    }
}
|
|
314
|
+
/**
|
|
315
|
+
* Execute a multi-target check: Semgrep discovers targets, AI analyzes each.
|
|
316
|
+
*/
|
|
317
|
+
async function executeMultiTargetCheck(check, checkName, checkInstructions, repositoryPath, aiProvider, checkMetadata, optionsConcurrency, configDir, genericPrompt) {
    const checkId = check.id;
    const checkTarget = check.checkTarget;
    logProgress(TAG, `Running multi-target check: ${checkName}`);
    const checkTimer = createTimer();
    try {
        // 1. Run Semgrep to discover targets
        const sarifContent = await runSemgrep({
            repositoryPath,
            rules: checkTarget.rules,
            config: checkTarget.config,
        });
        // 2. Parse, deduplicate, and limit targets
        let targets = parseSARIF(sarifContent);
        targets = deduplicateTargets(targets);
        if (checkTarget.maxTargets !== undefined) {
            targets = limitTargets(targets, checkTarget.maxTargets);
        }
        // 3. If no targets, return PASS
        if (targets.length === 0) {
            logProgress(TAG, 'Result: PASS (no targets found)');
            return {
                summary: {
                    checkId,
                    checkName,
                    status: 'PASS',
                    issuesFound: 0,
                    executionTime: checkTimer.elapsed(),
                    targetsAnalyzed: 0,
                },
                issues: [],
            };
        }
        // 4. Resolve effective concurrency: per-check > options > default
        const effectiveConcurrency = checkTarget.concurrency ?? optionsConcurrency ?? DEFAULT_CONCURRENCY;
        logProgress(TAG, `Found ${targets.length} targets to analyze (concurrency: ${effectiveConcurrency})`);
        // 5. Analyze targets concurrently
        // The base prompt is built once; each target gets a location-specific
        // suffix instructing the AI to stay within that single location.
        const basePrompt = await buildPrompt(checkInstructions, configDir, genericPrompt);
        let completedCount = 0;
        // Shared flag that tells other workers to stop picking up new targets
        // once a fatal provider error occurs.
        const abortHandle = { aborted: false };
        const targetResults = await mapWithConcurrency(targets, effectiveConcurrency, async (target, idx) => {
            const label = `[target ${idx + 1}/${targets.length}]`;
            try {
                const prompt = basePrompt + `\n\nTARGET LOCATION:

You are analyzing a specific code location:
- File: ${target.file}
- Lines: ${target.startLine}-${target.endLine}

You MUST:
- Analyze ONLY this specific target location — do not search for or report issues at other locations
- You may read other files to understand context (e.g., imports, type definitions, data flow), but only report issues for this target
- If the code at this location is not vulnerable, return {"issues": []}
- Do NOT scan the broader repository for other instances of this vulnerability pattern
`;
                logDebug(TAG, `${label} Analyzing target: ${target.file}:${target.startLine}-${target.endLine}`);
                const aiResponse = await aiProvider.executeCheck(prompt, repositoryPath, label);
                // Prefer the provider's pre-parsed payload; fall back to raw parsing.
                const parsed = aiResponse.parsed ?? parseAIResponse(aiResponse.raw);
                if (!parsed) {
                    // Malformed per-target response is a soft error — other targets continue.
                    logDebug(TAG, `${label} Target returned malformed response`);
                    return { issues: [], error: true, flagged: false, tokenUsage: aiResponse.tokenUsage };
                }
                const issues = await Promise.all(parsed.issues.map((aiIssue) => enrichIssue(aiIssue, checkId, checkName, repositoryPath, checkMetadata)));
                return { issues, error: false, flagged: parsed.flagged === true, tokenUsage: aiResponse.tokenUsage };
            }
            catch (err) {
                // Fatal errors: signal abort and re-throw to stop other workers
                if (err instanceof FatalProviderError) {
                    abortHandle.aborted = true;
                    abortHandle.reason = err;
                    throw err;
                }
                // Non-fatal per-target failure: record as soft error, keep scanning.
                const errorMsg = err instanceof Error ? err.message : String(err);
                logDebug(TAG, `${label} Target error: ${errorMsg}`);
                return { issues: [], error: true, flagged: false, tokenUsage: undefined };
            }
            finally {
                // Progress is logged for every target, success or failure.
                completedCount++;
                logProgress(TAG, `Progress: ${completedCount}/${targets.length} targets analyzed`);
            }
        }, abortHandle);
        // 6. Aggregate results
        const allIssues = [];
        let hasErrors = false;
        let hasFlagged = false;
        const targetTokenUsages = [];
        for (const result of targetResults) {
            allIssues.push(...result.issues);
            if (result.error)
                hasErrors = true;
            if (result.flagged)
                hasFlagged = true;
            targetTokenUsages.push(result.tokenUsage);
        }
        // 7. Determine status: FAIL > FLAG > ERROR > PASS
        const executionTime = checkTimer.elapsed();
        let status;
        if (allIssues.length > 0) {
            status = 'FAIL';
        }
        else if (hasFlagged) {
            status = 'FLAG';
        }
        else if (hasErrors) {
            status = 'ERROR';
        }
        else {
            status = 'PASS';
        }
        logProgress(TAG, `Result: ${status} (${allIssues.length} issues, ${targets.length} targets)`);
        return {
            summary: {
                checkId,
                checkName,
                status,
                issuesFound: allIssues.length,
                executionTime,
                targetsAnalyzed: targets.length,
                // Token usage is summed across all per-target AI calls.
                tokenUsage: sumTokenUsage(targetTokenUsages),
            },
            issues: allIssues,
        };
    }
    catch (err) {
        // Fatal errors must propagate up to abort the entire scan
        if (err instanceof FatalProviderError) {
            throw err;
        }
        // Any other failure (Semgrep, SARIF parsing, prompt building) is
        // recorded as an ERROR summary so the overall scan continues.
        const executionTime = checkTimer.elapsed();
        const errorMsg = err instanceof Error ? err.message : String(err);
        logProgress(TAG, `Result: ERROR (${errorMsg})`);
        return {
            summary: {
                checkId,
                checkName,
                status: 'ERROR',
                issuesFound: 0,
                executionTime,
                error: errorMsg,
            },
            issues: [],
        };
    }
}
|
|
461
|
+
/**
|
|
462
|
+
* Run multiple security checks and return aggregated ScanResults.
|
|
463
|
+
*/
|
|
464
|
+
export async function runMultiScan(options) {
    const { repositoryPath, checks, aiProvider, aiModelName, aiProviderName, concurrency, configDir, genericPrompt } = options;
    const scanTimer = createTimer();
    const scanId = generateScanId();
    const startTime = new Date();
    const version = await getVersion();
    logProgress(TAG, `Starting multi-check scan ${scanId} (${checks.length} checks)`);
    logDebug(TAG, `Repository: ${repositoryPath}`);
    // Use pre-analyzed repository info if provided, otherwise analyze here
    let repositoryInfo;
    if (options.repositoryInfo) {
        repositoryInfo = options.repositoryInfo;
    }
    else {
        const repoAnalysis = await analyzeRepository(repositoryPath);
        repositoryInfo = repoAnalysis.repository;
    }
    const allCheckSummaries = [];
    const allIssues = [];
    // Execute checks sequentially
    // (individual checks may themselves fan out over targets concurrently).
    for (let ci = 0; ci < checks.length; ci++) {
        const { check, details } = checks[ci];
        // Per-check severity/confidence is copied onto every issue the check produces.
        const checkMetadata = {
            severity: check.severity,
            confidence: check.confidence,
        };
        try {
            const { summary: checkSummary, issues } = await executeSingleCheck(check, details.name, details.content, repositoryPath, aiProvider, checkMetadata, concurrency, configDir, genericPrompt);
            allCheckSummaries.push(checkSummary);
            allIssues.push(...issues);
        }
        catch (err) {
            if (err instanceof FatalProviderError) {
                // Record the failing check as ERROR
                logProgress(TAG, `Fatal error during check "${check.id}": ${err.message}`);
                allCheckSummaries.push({
                    checkId: check.id,
                    checkName: details.name,
                    status: 'ERROR',
                    issuesFound: 0,
                    executionTime: 0,
                    error: err.message,
                });
                // Record remaining checks as ERROR (aborted)
                // so the results still account for every requested check.
                for (let ri = ci + 1; ri < checks.length; ri++) {
                    const remaining = checks[ri];
                    logProgress(TAG, `Skipping check "${remaining.check.id}" due to fatal error`);
                    allCheckSummaries.push({
                        checkId: remaining.check.id,
                        checkName: remaining.details.name,
                        status: 'ERROR',
                        issuesFound: 0,
                        executionTime: 0,
                        error: `Scan aborted: ${err.message}`,
                    });
                }
                logProgress(TAG, `Scan aborted due to fatal error: ${err.message}`);
                break;
            }
            // Non-fatal errors should not reach here (executeSingleCheck catches them),
            // but handle gracefully just in case.
            throw err;
        }
    }
    const endTime = new Date();
    const executionTime = endTime.getTime() - startTime.getTime();
    // Aggregate per-status counts for the top-level summary.
    const summary = {
        totalChecks: allCheckSummaries.length,
        passedChecks: allCheckSummaries.filter((c) => c.status === 'PASS').length,
        failedChecks: allCheckSummaries.filter((c) => c.status === 'FAIL').length,
        flaggedChecks: allCheckSummaries.filter((c) => c.status === 'FLAG').length,
        errorChecks: allCheckSummaries.filter((c) => c.status === 'ERROR').length,
        totalIssues: allIssues.length,
    };
    logProgress(TAG, `Scan completed in ${scanTimer.elapsedStr()}`);
    // Aggregate token usage across all checks
    const aggregateTokenUsage = sumTokenUsage(allCheckSummaries.map((c) => c.tokenUsage));
    const results = {
        scanId,
        timestamp: startTime.toISOString(),
        version,
        repository: repositoryInfo,
        issues: allIssues,
        checks: allCheckSummaries,
        summary,
        executionTime,
        startTime: startTime.toISOString(),
        endTime: endTime.toISOString(),
        // Provider metadata reflects what was actually configured for this scan.
        aiProvider: aiProvider
            ? { name: aiProviderName ?? 'claude-code', models: aiModelName ? [aiModelName] : [DEFAULT_AI_MODEL] }
            : { name: 'none', models: [] },
        tokenUsage: aggregateTokenUsage,
    };
    return results;
}
|
|
559
|
+
//# sourceMappingURL=scan-runner.js.map
|