x-readiness-mcp 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.js +3 -0
- package/package.json +27 -0
- package/server.js +284 -0
- package/templates/master_template.json +524 -0
- package/templates/master_template.md +374 -0
- package/tools/execution.js +852 -0
- package/tools/planning.js +343 -0
- package/x-readiness-mcp-0.3.0.tgz +0 -0
|
@@ -0,0 +1,852 @@
|
|
|
1
|
+
// tools/execution.js
|
|
2
|
+
// X-API Readiness Execution Tool (STRICT, no silent skips)
|
|
3
|
+
//
|
|
4
|
+
// Guarantees:
|
|
5
|
+
// - Every checklist rule returns a row in rule_evidence[] with a status + reason
|
|
6
|
+
// - No "silent SKIPPED": unsupported categories => NOT_IMPLEMENTED
|
|
7
|
+
// - PASS requires positive evidence (not just "no violations found")
|
|
8
|
+
// - If any rule is NOT_EVALUATED / NOT_IMPLEMENTED => overall status INCOMPLETE + score "N/A" (strict_default)
|
|
9
|
+
// - 100% only possible when ALL rules are evaluated (PASS/FAIL only) and failures=0
|
|
10
|
+
//
|
|
11
|
+
// Optional behavior knobs (recommended defaults below):
|
|
12
|
+
// - strict: true => score becomes N/A if any rule not evaluated / not implemented
|
|
13
|
+
// - treatUnknownAsFail: false => if true, unknown rules count as FAIL (harsh mode)
|
|
14
|
+
|
|
15
|
+
import fs from "node:fs";
|
|
16
|
+
import fsp from "node:fs/promises";
|
|
17
|
+
import path from "node:path";
|
|
18
|
+
import crypto from "node:crypto";
|
|
19
|
+
|
|
20
|
+
/**
 * X-API readiness execution entry point (STRICT, no silent skips).
 *
 * Runs every checklist rule against the repository at `repoPath` and builds a
 * report where every rule yields a rule_evidence[] row. In strict mode the
 * percentage score is suppressed ("N/A") unless ALL rules resolved to
 * PASS/FAIL, and 100% is only possible with full coverage and zero failures.
 *
 * @param {string} repoPath - path to the repository root (must exist).
 * @param {object} checklist - checklist with rules[] or features[] (see normalizeChecklist).
 * @param {object} [options]
 * @param {boolean} [options.strict=true] - block the score if any rule is
 *   NOT_EVALUATED / NOT_IMPLEMENTED.
 * @param {boolean} [options.treatUnknownAsFail=false] - harsh mode: unknown
 *   rule statuses are converted to FAIL with a synthetic violation.
 * @returns {Promise<object>} report payload (also persisted via writeReports).
 * @throws {Error} if repoPath does not exist or the checklist has no rules.
 */
export async function executionTool(repoPath, checklist, options = {}) {
  const startTime = new Date();

  const strict = options.strict ?? true; // default strict: blocks score if anything unknown
  const treatUnknownAsFail = options.treatUnknownAsFail ?? false;

  if (!repoPath || !fs.existsSync(repoPath)) {
    throw new Error(`Repository path does not exist: ${repoPath}`);
  }

  const normalizedChecklist = normalizeChecklist(checklist);
  if (!normalizedChecklist.rules?.length) {
    throw new Error("Checklist must contain a non-empty rules[] or features[] array");
  }

  // Discover + parse files, then detect API signals so we can decide whether
  // API-surface rules can be meaningfully evaluated at all.
  const sourceFiles = await discoverSourceFiles(repoPath);
  const parsedFiles = await parseSourceFiles(sourceFiles);
  const signals = detectApiSignals(parsedFiles);

  // Rule execution + accounting.
  const violations = [];
  const ruleEvidence = [];

  let pass = 0;
  let fail = 0;
  let notEvaluated = 0;
  let notImplemented = 0;

  for (const rule of normalizedChecklist.rules) {
    const r = await executeRuleCheck(parsedFiles, rule, normalizedChecklist.scope, signals);

    // Harsh mode: convert NOT_EVALUATED / NOT_IMPLEMENTED into FAIL. No
    // concrete file/line exists here, so a synthetic violation is attached.
    // NOTE: r.reason is updated BEFORE being interpolated into the description
    // below, so the "(treated as FAIL ...)" suffix is part of the description.
    if (treatUnknownAsFail && (r.status === "NOT_EVALUATED" || r.status === "NOT_IMPLEMENTED")) {
      r.status = "FAIL";
      r.reason = `${r.reason} (treated as FAIL due to treatUnknownAsFail=true)`;
      r.violations = r.violations || [];
      r.violations.push({
        scope: normalizedChecklist.scope,
        category: rule.category,
        ruleId: rule.ruleId,
        ruleName: rule.ruleName,
        referenceUrl: rule.referenceUrl ?? null,
        normative: rule.normative ?? null,
        severity: rule.severity ?? "medium",
        filePath: null,
        lineNumber: null,
        columnNumber: null,
        description: `Rule could not be evaluated: ${r.reason}`,
        currentCode: null,
        suggestedFix: "Implement a checker for this rule/category OR provide API specs/source signals to evaluate it."
      });
    }

    // Tally statuses.
    if (r.status === "PASS") pass++;
    else if (r.status === "FAIL") fail++;
    else if (r.status === "NOT_EVALUATED") notEvaluated++;
    else if (r.status === "NOT_IMPLEMENTED") notImplemented++;

    // Accumulate violations (only real FAIL should add).
    if (r.status === "FAIL" && Array.isArray(r.violations) && r.violations.length) {
      violations.push(...r.violations);
    }

    // Every rule always produces an evidence row — no silent skips.
    ruleEvidence.push({
      scope: normalizedChecklist.scope,
      category: rule.category,
      rule_id: rule.ruleId,
      rule_name: rule.ruleName,
      normative: rule.normative ?? null,
      severity: rule.severity ?? "medium",
      reference_url: rule.referenceUrl ?? null,

      status: r.status,
      reason: r.reason,

      evidence_locations: (r.evidence_locations ?? []).slice(0, 10),
      violations_found: r.violations?.length ?? 0
    });
  }

  // Readiness scoring:
  // - Evaluated rules = PASS + FAIL
  // - If strict and any unknown/not implemented => score N/A
  const evaluated = pass + fail;
  const total = normalizedChecklist.rules.length;

  let score = null;
  let scoreText = "N/A";
  let status = "INCOMPLETE";
  let message = "Some rules were not evaluated or not implemented. Score is not available in strict mode.";

  if (!strict && evaluated > 0) {
    // Lenient mode: score over evaluated rules only.
    score = Math.round((pass / evaluated) * 100);
    scoreText = `${score}%`;
    status = score >= 80 ? "GOOD" : score >= 60 ? "FAIR" : "NEEDS_IMPROVEMENT";
    message = `Your repository is ${score}% X-API ready (based on evaluated rules only).`;
  }

  // Strict mode: a real % score is only allowed when ALL rules were evaluated.
  if (strict && evaluated === total) {
    score = Math.round((pass / evaluated) * 100);
    scoreText = `${score}%`;
    // FIX: the original had a dead leading branch (`score === 100 ? "GOOD"`)
    // that duplicated the `score >= 80` arm; behavior is unchanged.
    status = score >= 80 ? "GOOD" : score >= 60 ? "FAIR" : "NEEDS_IMPROVEMENT";
    message = `All rules evaluated. Your repository is ${score}% X-API ready.`;
  }

  // Safety net: never allow 100% unless evaluated === total and fail === 0.
  if (score === 100 && (evaluated !== total || fail !== 0)) {
    score = null;
    scoreText = "N/A";
    status = "INCOMPLETE";
    message = "100% suppressed because not all rules were fully evaluated.";
  }

  // Explicit millisecond arithmetic (the original relied on implicit
  // Date-to-number coercion via subtraction; same value).
  const executionTimeMs = Date.now() - startTime.getTime();

  // Flatten internal camelCase violations into the snake_case report shape.
  const violationsFlat = violations.map((v) => ({
    scope: v.scope,
    category: v.category,
    rule_id: v.ruleId,
    rule_name: v.ruleName,
    reference_url: v.referenceUrl,
    normative: v.normative,
    severity: v.severity,
    file_path: v.filePath,
    line_number: v.lineNumber,
    column_number: v.columnNumber ?? null,
    violation_description: v.description,
    current_code: v.currentCode,
    suggested_fix: v.suggestedFix
  }));

  const coverage = total === 0 ? 0 : Math.round((evaluated / total) * 100);

  const reportPayload = {
    readiness_summary: {
      score: scoreText,
      score_value: score,
      status,
      message,

      rules_total: total,
      rules_evaluated: evaluated,
      rules_passed: pass,
      rules_failed: fail,
      rules_not_evaluated: notEvaluated,
      rules_not_implemented: notImplemented,

      evaluation_coverage_percent: coverage,

      total_violations: violationsFlat.length,
      files_discovered: sourceFiles.length,
      files_parsed: parsedFiles.length,

      api_signals: signals // super important for debugging why rules aren't evaluatable
    },

    // Always detailed per-rule status (so no rule is missed).
    rule_evidence: ruleEvidence,

    // Only populated if failures exist.
    violations: violationsFlat,

    recommended_actions: generateRecommendations(violations),

    detailed_results: {
      checklist_id: normalizedChecklist.checklist_id || "unknown",
      scope: normalizedChecklist.scope,
      intent: normalizedChecklist.intent || "X-API readiness check",
      checked_at: startTime.toISOString(),
      repository: repoPath,
      execution_time_ms: executionTimeMs,
      options_used: { strict, treatUnknownAsFail }
    }
  };

  // Persist report files and attach their locations to the returned payload.
  const reports = await writeReports(reportPayload);
  reportPayload.reports = reports;

  return reportPayload;
}
|
|
208
|
+
|
|
209
|
+
// -----------------------------
|
|
210
|
+
// Checklist normalization
|
|
211
|
+
// -----------------------------
|
|
212
|
+
/**
 * Normalize a raw checklist into { checklist_id, intent, scope, rules[] }.
 *
 * Accepts either a top-level rules[] array or features[] each carrying their
 * own rules[] (features are only consulted when rules[] yields nothing).
 * Field names are accepted in several spellings (camelCase / snake_case);
 * rules missing both an id and a name are dropped.
 */
function normalizeChecklist(checklist) {
  const result = {
    checklist_id: checklist?.checklist_id,
    intent: checklist?.intent,
    scope: checklist?.scope || checklist?.readiness_scope || "x_api_readiness",
    rules: []
  };

  // Aliases that map onto a different canonical category key. Every other
  // input is canonicalized (trim, whitespace/hyphen -> "_", uppercase) as-is.
  const CATEGORY_ALIASES = { API_VERSIONING: "VERSIONING" };

  const toCategoryKey = (value) => {
    if (!value) return "UNKNOWN";
    const canonical = String(value).trim().replace(/[\s\-]+/g, "_").toUpperCase();
    return CATEGORY_ALIASES[canonical] || canonical;
  };

  const addRule = (rawRule, fallbackCategoryKey) => {
    if (!rawRule || typeof rawRule !== "object") return;

    const categoryKey = toCategoryKey(
      rawRule.categoryKey || rawRule.category_key || rawRule.category || fallbackCategoryKey
    );

    const ruleId = rawRule.ruleId || rawRule.rule_id || rawRule.id;
    const ruleName = rawRule.ruleName || rawRule.rule_name || rawRule.name;
    if (!ruleId || !ruleName) return; // unidentifiable rules are silently dropped

    result.rules.push({
      ruleId,
      ruleName,
      category: categoryKey,
      categoryKey,
      normative: rawRule.normative || null,
      severity: rawRule.severity || "medium",
      referenceUrl: rawRule.referenceUrl || rawRule.reference_url || rawRule.reference || rawRule.url || null
    });
  };

  if (Array.isArray(checklist?.rules)) {
    for (const rawRule of checklist.rules) {
      addRule(rawRule);
    }
  }

  // Fallback: mine features[] only when the flat rules[] produced nothing.
  if (result.rules.length === 0 && Array.isArray(checklist?.features)) {
    for (const feature of checklist.features) {
      const fallbackCategoryKey =
        feature?.category_key ||
        feature?.categoryKey ||
        feature?.category ||
        feature?.feature_id ||
        null;

      const featureRules = Array.isArray(feature?.rules) ? feature.rules : [];
      for (const rawRule of featureRules) {
        addRule(rawRule, fallbackCategoryKey);
      }
    }
  }

  return result;
}
|
|
281
|
+
|
|
282
|
+
// -----------------------------
|
|
283
|
+
// File discovery / parsing
|
|
284
|
+
// -----------------------------
|
|
285
|
+
/**
 * Recursively collect candidate source/spec files under repoPath (max depth
 * 12), skipping vendored/build directories, dot-directories, and lockfiles.
 *
 * @param {string} repoPath - repository root.
 * @returns {Promise<Array<{absolutePath, relativePath, fileName, extension, type}>>}
 */
async function discoverSourceFiles(repoPath) {
  const SOURCE_EXTENSIONS = new Set([".js", ".mjs", ".cjs", ".ts", ".tsx", ".json", ".yml", ".yaml"]);
  const SKIP_DIRS = new Set(["node_modules", ".git", "dist", "build", "coverage", ".next", "out"]);
  const SKIP_FILES = new Set([
    "package-lock.json",
    "package.json",
    "yarn.lock",
    "pnpm-lock.yaml",
    "npm-shrinkwrap.json"
  ]);
  const MAX_DEPTH = 12;

  const found = [];

  const walk = (dir, depth) => {
    // Depth cap guards against pathological nesting / symlink-free cycles.
    if (depth > MAX_DEPTH) return;
    if (!fs.existsSync(dir)) return;

    for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
      const absolutePath = path.join(dir, entry.name);

      if (entry.isDirectory()) {
        if (!SKIP_DIRS.has(entry.name) && !entry.name.startsWith(".")) {
          walk(absolutePath, depth + 1);
        }
        continue;
      }

      if (!entry.isFile()) continue;
      if (SKIP_FILES.has(entry.name)) continue;

      const extension = path.extname(entry.name);
      if (!SOURCE_EXTENSIONS.has(extension)) continue;

      found.push({
        absolutePath,
        relativePath: path.relative(repoPath, absolutePath),
        fileName: entry.name,
        extension,
        type: determineFileType(entry.name, absolutePath)
      });
    }
  };

  walk(repoPath, 0);
  return found;
}
|
|
328
|
+
|
|
329
|
+
/**
 * Classify a file by name/path heuristics. The first matching classifier
 * wins, so more specific kinds (openapi, schema) are checked before the
 * generic name-substring ones.
 *
 * @param {string} fileName - base name of the file.
 * @param {string} fullPath - full path (used only for schema-directory hints).
 * @returns {"openapi"|"schema"|"routes"|"controller"|"service"|"config"|"other"}
 */
function determineFileType(fileName, fullPath) {
  const name = fileName.toLowerCase();
  const pathLower = fullPath.toLowerCase();

  // Ordered [type, predicate] pairs; order encodes specificity.
  const classifiers = [
    ["openapi", () => name.includes("openapi") || name.includes("swagger")],
    ["schema", () => name.endsWith(".schema.json") || pathLower.includes("/schema") || pathLower.includes("\\schema")],
    ["routes", () => name.includes("route") || name.includes("router")],
    ["controller", () => name.includes("controller")],
    ["service", () => name.includes("service")],
    ["config", () => name.includes("config")]
  ];

  for (const [type, matches] of classifiers) {
    if (matches()) return type;
  }
  return "other";
}
|
|
341
|
+
|
|
342
|
+
/**
 * Read and analyze every discovered source file.
 *
 * FIX: the original used blocking fs.readFileSync inside an async function
 * even though fs/promises is already imported as `fsp`; this version awaits
 * fsp.readFile so the event loop is not blocked while scanning large repos.
 *
 * @param {Array<object>} sourceFiles - entries from discoverSourceFiles.
 * @returns {Promise<Array<object>>} each input entry extended with
 *   { content, lines, lineCount, analysis }. Unreadable files are omitted.
 */
async function parseSourceFiles(sourceFiles) {
  const parsed = [];
  for (const file of sourceFiles) {
    try {
      const content = await fsp.readFile(file.absolutePath, "utf8");
      // NOTE: split on "\n" only — CRLF files keep a trailing "\r" per line.
      const lines = content.split("\n");
      parsed.push({
        ...file,
        content,
        lines,
        lineCount: lines.length,
        analysis: analyzeFileContent(content, lines)
      });
    } catch {
      // Deliberately best-effort: unreadable files are skipped, not fatal.
    }
  }
  return parsed;
}
|
|
361
|
+
|
|
362
|
+
/**
 * Heuristic per-file analysis: Express-style routes, explicit status-code
 * calls, pagination/versioning keywords, and insecure http:// usage.
 *
 * @param {string} content - full file text.
 * @param {string[]} lines - content split on "\n" (line numbers are index+1).
 * @returns {{routes: Array, statusCodes: Array, paginationPatterns: Array,
 *            versionPatterns: Array, securityPatterns: Array}}
 */
function analyzeFileContent(content, lines) {
  const analysis = {
    routes: [],
    statusCodes: [],
    paginationPatterns: [],
    versionPatterns: [],
    securityPatterns: []
  };

  // Express-like routes: router.<verb>("path") / app.<verb>("path").
  const routePatterns = [
    /router\.(get|post|put|patch|delete)\s*\(\s*['"`]([^'"`]+)['"`]/gi,
    /app\.(get|post|put|patch|delete)\s*\(\s*['"`]([^'"`]+)['"`]/gi
  ];
  for (const pattern of routePatterns) {
    let match;
    while ((match = pattern.exec(content)) !== null) {
      analysis.routes.push({
        method: match[1].toUpperCase(),
        path: match[2],
        // Line number = newlines before the match + 1.
        lineNumber: content.substring(0, match.index).split("\n").length
      });
    }
  }

  // Explicit res.status(NNN) / response.status(NNN) calls.
  const statusCallRe = /\b(res|response)\.status\s*\(\s*(\d{3})\s*\)/;
  lines.forEach((line, index) => {
    const m = line.match(statusCallRe);
    if (m) analysis.statusCodes.push({ code: Number.parseInt(m[2], 10), lineNumber: index + 1 });
  });

  // Pagination keywords.
  // PERF FIX: regexes are compiled once here; the original constructed a new
  // RegExp per keyword per line (O(lines x keywords) allocations).
  const paginationMatchers = ["limit", "offset", "page", "cursor"].map((keyword) => ({
    keyword,
    re: new RegExp(`\\b${keyword}\\b`, "i")
  }));
  lines.forEach((line, index) => {
    for (const { keyword, re } of paginationMatchers) {
      if (re.test(line)) {
        analysis.paginationPatterns.push({ keyword, lineNumber: index + 1 });
      }
    }
  });

  // URL path versioning: /v1/, /v2/, ...
  lines.forEach((line, index) => {
    const m = line.match(/\/v(\d+)\//i);
    if (m) analysis.versionPatterns.push({ version: m[1], lineNumber: index + 1 });
  });

  // Insecure transport: any http:// that is not localhost/127.0.0.1.
  lines.forEach((line, index) => {
    if (/http:\/\//i.test(line) && !/localhost|127\.0\.0\.1/i.test(line)) {
      analysis.securityPatterns.push({ keyword: "insecure-http", lineNumber: index + 1, line: line.trim() });
    }
  });

  return analysis;
}
|
|
418
|
+
|
|
419
|
+
// -----------------------------
|
|
420
|
+
// API signal detection
|
|
421
|
+
// -----------------------------
|
|
422
|
+
/**
 * Aggregate API-surface signals across all parsed files so the rule engine
 * can decide whether API rules are evaluatable at all.
 *
 * @param {Array<object>} parsedFiles - entries from parseSourceFiles.
 * @returns {{routes_found: number, status_calls_found: number,
 *            openapi_found: boolean, route_or_controller_files_found: boolean,
 *            api_surface_detected: boolean}}
 */
function detectApiSignals(parsedFiles) {
  let routeCount = 0;
  let statusCallCount = 0;
  let hasOpenApi = false;
  let hasRouteFiles = false;

  for (const file of parsedFiles) {
    routeCount += file.analysis?.routes?.length ?? 0;
    statusCallCount += file.analysis?.statusCodes?.length ?? 0;
    if (file.type === "openapi") hasOpenApi = true;
    if (file.type === "routes" || file.type === "controller") hasRouteFiles = true;
  }

  return {
    routes_found: routeCount,
    status_calls_found: statusCallCount,
    openapi_found: hasOpenApi,
    route_or_controller_files_found: hasRouteFiles,
    // Any of the three signals is enough to call it an API surface.
    api_surface_detected: hasOpenApi || routeCount > 0 || hasRouteFiles
  };
}
|
|
436
|
+
|
|
437
|
+
// -----------------------------
|
|
438
|
+
// Rule execution: no silent skips
|
|
439
|
+
// -----------------------------
|
|
440
|
+
/**
 * Dispatch a single rule to its category checker — no silent skips.
 * Rules in API-dependent categories short-circuit to NOT_EVALUATED when no
 * API surface was detected; unknown categories yield NOT_IMPLEMENTED.
 *
 * @returns {Promise<{status, reason, evidence_locations, violations}>}
 */
async function executeRuleCheck(parsedFiles, rule, scope, signals) {
  const category = rule.categoryKey || rule.category;

  // These categories are meaningless without routes/controllers/OpenAPI;
  // SECURITY is deliberately absent (its checks can still run).
  const apiRequiredCategories = new Set([
    "PAGINATION",
    "ERROR_HANDLING",
    "VERSIONING",
    "COMMON_OPERATIONS",
    "MEDIA_TYPES",
    "NAMING_CONVENTIONS"
  ]);

  if (apiRequiredCategories.has(category) && !signals.api_surface_detected) {
    return {
      status: "NOT_EVALUATED",
      reason: "No API surface detected (no routes/controllers/OpenAPI). Cannot evaluate this rule.",
      evidence_locations: [],
      violations: []
    };
  }

  // Category -> checker dispatch table (replaces the original switch).
  const checkers = {
    PAGINATION: checkPaginationRules,
    ERROR_HANDLING: checkErrorHandlingRules,
    SECURITY: checkSecurityRules,
    VERSIONING: checkVersioningRules,
    COMMON_OPERATIONS: checkCommonOperations,
    NAMING_CONVENTIONS: checkNamingConventionsRules,
    MEDIA_TYPES: checkMediaTypesRules
  };

  const checker = checkers[category];
  if (checker) {
    return checker(parsedFiles, rule, scope);
  }

  return {
    status: "NOT_IMPLEMENTED",
    reason: `No checker implemented for category '${category}'.`,
    evidence_locations: [],
    violations: []
  };
}
|
|
493
|
+
|
|
494
|
+
// -----------------------------
|
|
495
|
+
// Checkers must produce one of: PASS/FAIL/NOT_EVALUATED
|
|
496
|
+
// PASS must include positive evidence_locations (>=1)
|
|
497
|
+
// FAIL must include violations (>=1)
|
|
498
|
+
// NOT_EVALUATED must have reason
|
|
499
|
+
// -----------------------------
|
|
500
|
+
/**
 * PAGINATION checker (heuristic).
 * PASS  — pagination keywords found in route/controller files.
 * FAIL  — MUST rule + routes present in a file with zero pagination keywords.
 * NOT_EVALUATED — nothing to inspect or no signals either way.
 */
function checkPaginationRules(parsedFiles, rule, scope) {
  const candidates = parsedFiles.filter((f) => f.type === "routes" || f.type === "controller");
  if (candidates.length === 0) {
    return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate pagination.", evidence_locations: [], violations: [] };
  }

  const evidenceRows = [];
  const found = [];

  for (const file of candidates) {
    const hits = file.analysis.paginationPatterns || [];

    // Positive evidence: up to 5 keyword hits per file.
    for (const hit of hits.slice(0, 5)) {
      evidenceRows.push({
        file_path: file.relativePath,
        line_number: hit.lineNumber,
        snippet: file.lines[hit.lineNumber - 1]?.trim() ?? ""
      });
    }

    // Heuristic: MUST rule + routes exist + no pagination keyword anywhere
    // in the file => one violation anchored at the first route.
    const routeCount = file.analysis.routes?.length ?? 0;
    if (routeCount > 0 && hits.length === 0 && rule.normative === "MUST") {
      const firstRouteLine = file.analysis.routes[0].lineNumber;
      found.push({
        scope,
        category: rule.category,
        ruleId: rule.ruleId,
        ruleName: rule.ruleName,
        referenceUrl: rule.referenceUrl ?? null,
        normative: rule.normative ?? null,
        severity: rule.severity ?? "high",
        filePath: file.relativePath,
        lineNumber: firstRouteLine,
        columnNumber: null,
        description: "Routes found but no pagination signals (limit/offset/page/cursor) detected.",
        currentCode: file.lines[firstRouteLine - 1]?.trim() ?? "",
        suggestedFix: "Add pagination support (limit/offset or cursor) for collection endpoints."
      });
    }
  }

  if (found.length > 0) {
    return { status: "FAIL", reason: "Pagination violations detected.", evidence_locations: evidenceRows, violations: found };
  }
  if (evidenceRows.length > 0) {
    return { status: "PASS", reason: "Pagination signals detected in code.", evidence_locations: evidenceRows, violations: [] };
  }
  return { status: "NOT_EVALUATED", reason: "No pagination signals detected; cannot confirm compliance.", evidence_locations: [], violations: [] };
}
|
|
531
|
+
|
|
532
|
+
/**
 * ERROR_HANDLING checker (heuristic, Express-style).
 * PASS  — explicit res.status(NNN) usage found, no violations.
 * FAIL  — send()/json() on a line with no status() call.
 * NOT_EVALUATED — no relevant files or no status usage at all.
 */
function checkErrorHandlingRules(parsedFiles, rule, scope) {
  const candidates = parsedFiles.filter((f) => f.type === "routes" || f.type === "controller");
  if (candidates.length === 0) {
    return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate error handling.", evidence_locations: [], violations: [] };
  }

  const evidenceRows = [];
  const found = [];

  // Hoisted, stateless (no /g) patterns.
  const EXPLICIT_STATUS = /\b(res|response)\.status\s*\(\s*\d{3}\s*\)/;
  const SEND_OR_JSON = /\b(res|response)\.(send|json)\s*\(/;
  const ANY_STATUS_CALL = /\b(res|response)\.status\s*\(/;

  for (const file of candidates) {
    for (let i = 0; i < file.lines.length; i++) {
      const line = file.lines[i];
      const ln = i + 1;

      // Positive evidence (capped at 10 rows total).
      if (EXPLICIT_STATUS.test(line) && evidenceRows.length < 10) {
        evidenceRows.push({ file_path: file.relativePath, line_number: ln, snippet: line.trim() });
      }

      // Violation: response emitted without an explicit status on the line.
      if (SEND_OR_JSON.test(line) && !ANY_STATUS_CALL.test(line)) {
        found.push({
          scope,
          category: rule.category,
          ruleId: rule.ruleId,
          ruleName: rule.ruleName,
          referenceUrl: rule.referenceUrl ?? null,
          normative: rule.normative ?? null,
          severity: rule.severity ?? "high",
          filePath: file.relativePath,
          lineNumber: ln,
          columnNumber: null,
          description: "Response sent without explicit HTTP status code.",
          currentCode: line.trim(),
          suggestedFix: "Use res.status(<code>).json()/send() consistently."
        });
      }
    }
  }

  if (found.length > 0) {
    return { status: "FAIL", reason: "Missing explicit status codes detected.", evidence_locations: evidenceRows, violations: found };
  }
  if (evidenceRows.length > 0) {
    return { status: "PASS", reason: "Explicit status code usage detected.", evidence_locations: evidenceRows, violations: [] };
  }
  return { status: "NOT_EVALUATED", reason: "No status code usage detected; cannot confirm error handling behavior.", evidence_locations: [], violations: [] };
}
|
|
566
|
+
|
|
567
|
+
/**
 * SECURITY checker (heuristic; currently insecure http:// usage only).
 * FAIL — any pre-detected insecure-http pattern (non-localhost http://).
 * NOT_EVALUATED otherwise — security is never PASSed merely because nothing
 * suspicious was found.
 */
function checkSecurityRules(parsedFiles, rule, scope) {
  const evidenceRows = [];
  const found = [];

  for (const file of parsedFiles) {
    for (const hit of file.analysis.securityPatterns || []) {
      evidenceRows.push({
        file_path: file.relativePath,
        line_number: hit.lineNumber,
        snippet: hit.line?.trim() ?? ""
      });
      found.push({
        scope,
        category: rule.category,
        ruleId: rule.ruleId,
        ruleName: rule.ruleName,
        referenceUrl: rule.referenceUrl ?? null,
        // Insecure transport is always treated as a critical MUST finding,
        // regardless of the rule's own normative/severity.
        normative: "MUST",
        severity: "critical",
        filePath: file.relativePath,
        lineNumber: hit.lineNumber,
        columnNumber: null,
        description: "Insecure http:// usage detected (must use https://).",
        currentCode: hit.line,
        suggestedFix: "Replace http:// with https:// or ensure TLS termination."
      });
    }
  }

  if (found.length > 0) {
    return { status: "FAIL", reason: "Security violations detected (http://).", evidence_locations: evidenceRows, violations: found };
  }
  return { status: "NOT_EVALUATED", reason: "No explicit security evidence detected; security cannot be confirmed by heuristic.", evidence_locations: [], violations: [] };
}
|
|
589
|
+
|
|
590
|
+
/**
 * VERSIONING checker (heuristic).
 * PASS  — /vN/ path segments found in route/controller files.
 * FAIL  — MUST rule + routes present in a file with zero version segments.
 * NOT_EVALUATED — nothing to inspect or no signals either way.
 */
function checkVersioningRules(parsedFiles, rule, scope) {
  const candidates = parsedFiles.filter((f) => f.type === "routes" || f.type === "controller");
  if (candidates.length === 0) {
    return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate versioning.", evidence_locations: [], violations: [] };
  }

  const evidenceRows = [];
  const found = [];

  for (const file of candidates) {
    const hits = file.analysis.versionPatterns || [];

    // Positive evidence: up to 5 version-segment hits per file.
    for (const hit of hits.slice(0, 5)) {
      evidenceRows.push({
        file_path: file.relativePath,
        line_number: hit.lineNumber,
        snippet: file.lines[hit.lineNumber - 1]?.trim() ?? ""
      });
    }

    // MUST rule + routes exist + no /vN/ anywhere => violation at first route.
    const routeCount = file.analysis.routes?.length ?? 0;
    if (routeCount > 0 && hits.length === 0 && rule.normative === "MUST") {
      const firstRouteLine = file.analysis.routes[0].lineNumber;
      found.push({
        scope,
        category: rule.category,
        ruleId: rule.ruleId,
        ruleName: rule.ruleName,
        referenceUrl: rule.referenceUrl ?? null,
        normative: rule.normative ?? null,
        severity: rule.severity ?? "medium",
        filePath: file.relativePath,
        lineNumber: firstRouteLine,
        columnNumber: null,
        description: "Routes found but no versioning signals (/v1/) detected.",
        currentCode: file.lines[firstRouteLine - 1]?.trim() ?? "",
        suggestedFix: "Prefix API routes with a version (e.g., /api/v1/...)."
      });
    }
  }

  if (found.length > 0) {
    return { status: "FAIL", reason: "Versioning violations detected.", evidence_locations: evidenceRows, violations: found };
  }
  if (evidenceRows.length > 0) {
    return { status: "PASS", reason: "Versioning signals detected (/vN/).", evidence_locations: evidenceRows, violations: [] };
  }
  return { status: "NOT_EVALUATED", reason: "No versioning signals detected; cannot confirm compliance.", evidence_locations: [], violations: [] };
}
|
|
621
|
+
|
|
622
|
+
/**
 * COMMON_OPERATIONS checker (heuristic HTTP-method hygiene).
 * FAIL  — GET route whose path looks like a creation endpoint.
 * PASS  — routes inspected and no misuse found.
 * NOT_EVALUATED — no routes/controllers or no route evidence.
 */
function checkCommonOperations(parsedFiles, rule, scope) {
  const candidates = parsedFiles.filter((f) => f.type === "routes" || f.type === "controller");
  if (candidates.length === 0) {
    return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate common operations.", evidence_locations: [], violations: [] };
  }

  const evidenceRows = [];
  const found = [];
  const CREATION_PATH = /create|add|insert/i; // stateless (no /g), safe to reuse

  for (const file of candidates) {
    for (const route of file.analysis.routes || []) {
      // Positive evidence (capped at 10 rows total).
      if (evidenceRows.length < 10) {
        evidenceRows.push({
          file_path: file.relativePath,
          line_number: route.lineNumber,
          snippet: `${route.method} ${route.path}`
        });
      }

      if (route.method === "GET" && CREATION_PATH.test(route.path)) {
        found.push({
          scope,
          category: rule.category,
          ruleId: rule.ruleId,
          ruleName: rule.ruleName,
          referenceUrl: rule.referenceUrl ?? null,
          normative: rule.normative ?? "SHOULD",
          severity: rule.severity ?? "medium",
          filePath: file.relativePath,
          lineNumber: route.lineNumber,
          columnNumber: null,
          description: "GET used for resource creation-like endpoint.",
          currentCode: `${route.method} ${route.path}`,
          suggestedFix: "Use POST for creating resources."
        });
      }
    }
  }

  if (found.length > 0) {
    return { status: "FAIL", reason: "HTTP method misuse detected.", evidence_locations: evidenceRows, violations: found };
  }
  if (evidenceRows.length > 0) {
    return { status: "PASS", reason: "Route evidence found; no method misuse detected by heuristic.", evidence_locations: evidenceRows, violations: [] };
  }
  return { status: "NOT_EVALUATED", reason: "No route evidence; cannot evaluate operations.", evidence_locations: [], violations: [] };
}
|
|
653
|
+
|
|
654
|
+
// OpenAPI-focused checks
|
|
655
|
+
// NAMING_CONVENTIONS checker: inspects schema property keys in OpenAPI/schema
// JSON files and flags keys that are not camelCase.
// PASS requires positive evidence (keys were actually inspected); FAIL
// requires >= 1 violation; otherwise NOT_EVALUATED.
// Relies on two helpers defined elsewhere in this file:
//   - extractOpenApiSchemaPropertyKeys(json): presumably returns an array of
//     schema property-name strings — TODO confirm against its definition.
//   - findKeyLine(lines, key): presumably returns a 1-based line number (the
//     `ln - 1` indexing below depends on that) — TODO confirm.
function checkNamingConventionsRules(parsedFiles, rule, scope) {
  // Only OpenAPI/schema files are considered reliable for naming rules.
  const apiSpecFiles = parsedFiles.filter(f => f.type === "openapi" || f.type === "schema");
  if (apiSpecFiles.length === 0) {
    return { status: "NOT_EVALUATED", reason: "No OpenAPI/schema files found; naming rules require API spec to evaluate reliably.", evidence_locations: [], violations: [] };
  }

  const evidence = [];
  const violations = [];

  for (const file of apiSpecFiles) {
    // NOTE: only .json specs are parsed — .yml/.yaml spec files are skipped here.
    if (file.extension !== ".json") continue;
    let json;
    // Unparseable JSON is skipped silently (best-effort scan).
    try { json = JSON.parse(file.content); } catch { continue; }

    const keys = extractOpenApiSchemaPropertyKeys(json);
    if (keys.length === 0) continue;

    // Positive evidence: first 10 inspected keys with their source lines.
    for (const key of keys.slice(0, 10)) {
      const ln = findKeyLine(file.lines, key);
      evidence.push({ file_path: file.relativePath, line_number: ln, snippet: file.lines[ln - 1]?.trim() ?? `"${key}": ...` });
    }

    // Violation: any key that is not camelCase, except the conventional
    // MongoDB "_id" and JSON-Schema "$"-prefixed keywords.
    for (const key of keys) {
      if (!/^[a-z][a-zA-Z0-9]*$/.test(key) && key !== "_id" && !key.startsWith("$")) {
        const ln = findKeyLine(file.lines, key);
        violations.push({
          scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
          referenceUrl: rule.referenceUrl ?? null, normative: rule.normative ?? null, severity: rule.severity ?? "medium",
          filePath: file.relativePath, lineNumber: ln, columnNumber: null,
          description: `Schema property '${key}' not camelCase.`,
          currentCode: file.lines[ln - 1]?.trim() ?? `"${key}": ...`,
          suggestedFix: "Rename schema properties to camelCase and update clients."
        });
      }
    }
  }

  if (violations.length) return { status: "FAIL", reason: "Naming violations found in API specs.", evidence_locations: evidence, violations };
  if (evidence.length) return { status: "PASS", reason: "API spec schema properties inspected; no naming violations detected.", evidence_locations: evidence, violations: [] };
  return { status: "NOT_EVALUATED", reason: "OpenAPI/schema present but no schema properties found.", evidence_locations: [], violations: [] };
}
|
|
696
|
+
|
|
697
|
+
// Media-type check: inspects OpenAPI JSON specs for the media types declared
// under requestBody/responses content maps. When the rule is normative "MUST",
// a spec that never declares application/json produces a violation.
// Returns a rule-result row:
//   { status, reason, evidence_locations, violations }
function checkMediaTypesRules(parsedFiles, rule, scope) {
  const openapiFiles = parsedFiles.filter(f => f.type === "openapi" && f.extension === ".json");
  if (openapiFiles.length === 0) {
    return { status: "NOT_EVALUATED", reason: "No OpenAPI JSON found; media type rules require OpenAPI spec to evaluate.", evidence_locations: [], violations: [] };
  }

  const evidence = [];
  const violations = [];

  for (const file of openapiFiles) {
    let json;
    try { json = JSON.parse(file.content); } catch { continue; } // skip unparseable spec files
    if (!looksLikeOpenApi(json)) continue;

    const mtypes = findOpenApiMediaTypes(json);
    // Sample up to 10 declared media types as positive evidence.
    for (const mt of Array.from(mtypes).slice(0, 10)) {
      evidence.push({ file_path: file.relativePath, line_number: findAnyLineContaining(file.lines, `"${mt}"`) || 1, snippet: mt });
    }

    // Only enforce the application/json requirement for MUST-level rules.
    if (!mtypes.has("application/json") && rule.normative === "MUST") {
      const ln = findAnyLineContaining(file.lines, '"content"') || 1;
      violations.push({
        scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
        referenceUrl: rule.referenceUrl ?? null, normative: rule.normative ?? null, severity: rule.severity ?? "high",
        filePath: file.relativePath, lineNumber: ln, columnNumber: null,
        description: "OpenAPI spec missing application/json content declaration.",
        currentCode: file.lines[ln - 1]?.trim() ?? "",
        suggestedFix: 'Add content: { "application/json": { schema: ... } } to requestBody/responses.'
      });
    }
  }

  if (violations.length) return { status: "FAIL", reason: "Media type violations found in OpenAPI spec.", evidence_locations: evidence, violations };
  // Bug fix: the previous PASS reason claimed "application/json present", but this
  // branch is also reached when application/json is absent and the rule is not
  // normative MUST. Report only what was actually verified.
  if (evidence.length) return { status: "PASS", reason: "OpenAPI media types inspected; no media type violations detected.", evidence_locations: evidence, violations: [] };
  return { status: "NOT_EVALUATED", reason: "OpenAPI present but no media type evidence found.", evidence_locations: [], violations: [] };
}
|
|
733
|
+
|
|
734
|
+
// -----------------------------
|
|
735
|
+
// OpenAPI helpers
|
|
736
|
+
// -----------------------------
|
|
737
|
+
/**
 * Heuristic check that a parsed JSON document is an OpenAPI/Swagger spec:
 * it declares an `openapi` or `swagger` version string, or carries a `paths`
 * object.
 *
 * Bug fix: the previous version leaked the raw `&&` operand (undefined, null,
 * etc.) instead of a boolean when `paths` was falsy; coerce so this predicate
 * always returns true/false. All call sites use it in boolean context, so the
 * change is backward-compatible.
 *
 * @param {unknown} json - Parsed JSON document (may be null/undefined).
 * @returns {boolean} true when the document looks like an OpenAPI spec.
 */
function looksLikeOpenApi(json) {
  return (
    typeof json?.openapi === "string" ||
    typeof json?.swagger === "string" ||
    Boolean(json?.paths && typeof json.paths === "object")
  );
}
|
|
740
|
+
|
|
741
|
+
// Collect every property name declared under components.schemas.*.properties
// in an OpenAPI document. Returns [] for non-OpenAPI input or specs without
// component schemas. Duplicate names across schemas are preserved.
function extractOpenApiSchemaPropertyKeys(json) {
  if (!looksLikeOpenApi(json)) return [];
  const schemas = json?.components?.schemas;
  if (!schemas || typeof schemas !== "object") return [];
  return Object.values(schemas).flatMap(schema => {
    const props = schema?.properties;
    return props && typeof props === "object" ? Object.keys(props) : [];
  });
}
|
|
754
|
+
|
|
755
|
+
// Walk every operation in an OpenAPI document and collect the set of declared
// media types: keys of each requestBody content map plus the content maps of
// every response. Returns an empty Set for non-OpenAPI input.
function findOpenApiMediaTypes(json) {
  const mediaTypes = new Set();
  if (!looksLikeOpenApi(json)) return mediaTypes;

  // Fold one `content` map's keys (e.g. "application/json") into the set.
  const addContentKeys = (content) => {
    if (content && typeof content === "object") {
      for (const mediaType of Object.keys(content)) mediaTypes.add(mediaType);
    }
  };

  for (const pathItem of Object.values(json.paths || {})) {
    for (const operation of Object.values(pathItem || {})) {
      if (!operation || typeof operation !== "object") continue;
      addContentKeys(operation.requestBody?.content);
      for (const response of Object.values(operation.responses || {})) {
        addContentKeys(response?.content);
      }
    }
  }
  return mediaTypes;
}
|
|
778
|
+
|
|
779
|
+
// -----------------------------
|
|
780
|
+
// Report persistence
|
|
781
|
+
// -----------------------------
|
|
782
|
+
// Persist the run payload as a pretty-printed JSON report under mcp/reports/.
// The file name embeds a sortable ISO timestamp (":" and "." replaced, since
// they are unsafe in file names) plus a UUID for uniqueness.
// NOTE(review): the reports directory is resolved against the process CWD,
// not the scanned repo path — confirm that is the intended location.
async function writeReports(payload) {
  const reportsDir = path.resolve("mcp", "reports");
  await fsp.mkdir(reportsDir, { recursive: true });

  const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
  const runId = `run_${timestamp}_${crypto.randomUUID()}`;
  const jsonPath = path.join(reportsDir, `${runId}.json`);

  await fsp.writeFile(jsonPath, JSON.stringify(payload, null, 2), "utf8");

  return { run_id: runId, json_path: jsonPath };
}
|
|
793
|
+
|
|
794
|
+
// -----------------------------
|
|
795
|
+
// Recommendations (same idea as before)
|
|
796
|
+
// -----------------------------
|
|
797
|
+
// Aggregate violations into a prioritized list of remediation actions.
// Violations sharing the same category + suggestedFix collapse into one
// action; its priority derives from the group's (first seen) severity
// (critical=1, high=2, everything else=3). Violations without a
// suggestedFix are skipped entirely.
function generateRecommendations(violations) {
  const groupsByKey = new Map();

  for (const violation of violations) {
    if (!violation.suggestedFix) continue;
    const groupKey = `${violation.category}::${violation.suggestedFix}`;
    let group = groupsByKey.get(groupKey);
    if (!group) {
      group = {
        fix: violation.suggestedFix,
        category: violation.category,
        severity: violation.severity, // first occurrence wins for the group
        files: [],
        ruleIds: new Set()
      };
      groupsByKey.set(groupKey, group);
    }
    if (violation.filePath) group.files.push(violation.filePath);
    group.ruleIds.add(violation.ruleId);
  }

  const severityRank = { critical: 1, high: 2 };
  const actions = [...groupsByKey.values()].map(group => ({
    priority: severityRank[group.severity] ?? 3,
    severity: group.severity,
    category: group.category,
    action: group.fix,
    affected_files: new Set(group.files).size,
    total_occurrences: group.files.length,
    reference_rules: [...group.ruleIds]
  }));

  actions.sort((a, b) => a.priority - b.priority);
  return actions;
}
|
|
833
|
+
|
|
834
|
+
// -----------------------------
|
|
835
|
+
// Generic helpers
|
|
836
|
+
// -----------------------------
|
|
837
|
+
// Return the 1-based line number of the first line containing the quoted key
// (e.g. `"name"`). Falls back to 1 when the key is not found, so callers can
// always use the result as a line number.
function findKeyLine(lines, key) {
  const quoted = `"${key}"`;
  const idx = lines.findIndex(line => line.includes(quoted));
  return idx === -1 ? 1 : idx + 1;
}
|
|
843
|
+
|
|
844
|
+
// Return the 1-based line number of the first line containing `needle`
// (stringified first, so non-string needles work), or null when nothing
// matches.
function findAnyLineContaining(lines, needle) {
  const target = String(needle);
  const idx = lines.findIndex(line => line.includes(target));
  return idx === -1 ? null : idx + 1;
}
|
|
851
|
+
|
|
852
|
+
// Default export mirrors the named export for single-import convenience.
export default executionTool;
|