x-readiness-mcp 0.3.0 → 0.5.0
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- package/README.md +325 -0
- package/USAGE_GUIDE.md +271 -0
- package/package.json +2 -2
- package/server.js +103 -213
- package/tools/autofix.js +151 -0
- package/tools/execution.js +193 -814
- package/tools/gitignore-helper.js +88 -0
- package/tools/planning.js +120 -333
- package/tools/rule-checkers.js +394 -0
- package/tools/template-parser.js +204 -0
- package/templates/master_template.json +0 -524
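
The bulk of this release is a restructuring of `package/tools/execution.js`: the built-in file scanning and per-category checkers (pagination, error handling, security, versioning, naming conventions, media types) are removed, and the new version delegates each rule to `evaluateRule` from the new `tools/rule-checkers.js`, then writes JSON and Markdown reports under `.xreadiness/reports`. As a rough sketch (not code from the package), the per-rule result the new execution loop consumes looks like this; the field names are taken from the added code in the diff below, while the concrete values are hypothetical:

```js
// Sketch of the result shape evaluateRule(repoPath, rule) appears to resolve to,
// inferred from how the new executeTool reads it. All values here are made up.
const exampleResult = {
  status: "FAIL",  // statuses observed in the diff: PASS, FAIL, SKIPPED, ERROR
  reason: "Collection endpoint exposes no pagination parameters", // hypothetical text
  violations: [
    {
      file_path: "src/routes/users.js",                 // hypothetical file
      line_number: 42,                                   // hypothetical line
      line_content: "router.get('/users', listUsers);", // hypothetical snippet
      message: "Route returns a collection without limit/offset or cursor support"
    }
  ]
};
```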
package/tools/execution.js
CHANGED
@@ -1,852 +1,231 @@
-// tools/execution.js
-
-//
-// Guarantees:
-// - Every checklist rule returns a row in rule_evidence[] with a status + reason
-// - No "silent SKIPPED": unsupported categories => NOT_IMPLEMENTED
-// - PASS requires positive evidence (not just "no violations found")
-// - If any rule is NOT_EVALUATED / NOT_IMPLEMENTED => overall status INCOMPLETE + score "N/A" (strict_default)
-// - 100% only possible when ALL rules are evaluated (PASS/FAIL only) and failures=0
-//
-// Optional behavior knobs (recommended defaults below):
-// - strict: true => score becomes N/A if any rule not evaluated / not implemented
-// - treatUnknownAsFail: false => if true, unknown rules count as FAIL (harsh mode)
-
-import fs from "node:fs";
-import fsp from "node:fs/promises";
+// mcp/tools/execution.js
+import fs from "node:fs/promises";
 import path from "node:path";
 import crypto from "node:crypto";
-
-
-
-
-
-
-
-
-
-
-
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-reason: r.reason,
-
-evidence_locations: (r.evidence_locations ?? []).slice(0, 10),
-violations_found: r.violations?.length ?? 0
-});
-}
-
-// Readiness scoring rules:
-// - Evaluated rules = PASS + FAIL
-// - If strict and any unknown/not implemented => score N/A
-const evaluated = pass + fail;
-const total = normalizedChecklist.rules.length;
-
-let score = null;
-let scoreText = "N/A";
-let status = "INCOMPLETE";
-let message = "Some rules were not evaluated or not implemented. Score is not available in strict mode.";
-
-if (!strict && evaluated > 0) {
-score = Math.round((pass / evaluated) * 100);
-scoreText = `${score}%`;
-status = score >= 80 ? "GOOD" : score >= 60 ? "FAIR" : "NEEDS_IMPROVEMENT";
-message = `Your repository is ${score}% X-API ready (based on evaluated rules only).`;
-}
-
-// Only allow a real % score in strict mode if ALL rules were evaluated
-if (strict && evaluated === total) {
-score = Math.round((pass / evaluated) * 100);
-scoreText = `${score}%`;
-status = score === 100 ? "GOOD" : score >= 80 ? "GOOD" : score >= 60 ? "FAIR" : "NEEDS_IMPROVEMENT";
-message = `All rules evaluated. Your repository is ${score}% X-API ready.`;
-}
-
-// Never allow 100% unless evaluated === total and fail === 0
-if (score === 100 && (evaluated !== total || fail !== 0)) {
-score = null;
-scoreText = "N/A";
-status = "INCOMPLETE";
-message = "100% suppressed because not all rules were fully evaluated.";
-}
-
-const executionTimeMs = new Date() - startTime;
-
-const violationsFlat = violations.map(v => ({
-scope: v.scope,
-category: v.category,
-rule_id: v.ruleId,
-rule_name: v.ruleName,
-reference_url: v.referenceUrl,
-normative: v.normative,
-severity: v.severity,
-file_path: v.filePath,
-line_number: v.lineNumber,
-column_number: v.columnNumber ?? null,
-violation_description: v.description,
-current_code: v.currentCode,
-suggested_fix: v.suggestedFix
-}));
-
-const coverage = total === 0 ? 0 : Math.round((evaluated / total) * 100);
-
-const reportPayload = {
-readiness_summary: {
-score: scoreText,
-score_value: score,
-status,
-message,
-
-rules_total: total,
-rules_evaluated: evaluated,
-rules_passed: pass,
-rules_failed: fail,
-rules_not_evaluated: notEvaluated,
-rules_not_implemented: notImplemented,
-
-evaluation_coverage_percent: coverage,
-
-total_violations: violationsFlat.length,
-files_discovered: sourceFiles.length,
-files_parsed: parsedFiles.length,
-
-api_signals: signals // super important for debugging why rules aren't evaluatable
-},
-
-// Always detailed per-rule status (so no rule is missed)
-rule_evidence: ruleEvidence,
-
-// Only populated if failures exist
-violations: violationsFlat,
-
-recommended_actions: generateRecommendations(violations),
-
-detailed_results: {
-checklist_id: normalizedChecklist.checklist_id || "unknown",
-scope: normalizedChecklist.scope,
-intent: normalizedChecklist.intent || "X-API readiness check",
-checked_at: startTime.toISOString(),
-repository: repoPath,
-execution_time_ms: executionTimeMs,
-options_used: { strict, treatUnknownAsFail }
-}
-};
-
-// Persist report files
-const reports = await writeReports(reportPayload);
-reportPayload.reports = reports;
-
-return reportPayload;
-}
-
-// -----------------------------
-// Checklist normalization
-// -----------------------------
-function normalizeChecklist(checklist) {
-const normalized = {
-checklist_id: checklist?.checklist_id,
-intent: checklist?.intent,
-scope: checklist?.scope || checklist?.readiness_scope || "x_api_readiness",
-rules: []
-};
-
-const normalizeCategoryKey = (v) => {
-if (!v) return "UNKNOWN";
-const up = String(v).trim().replace(/[\s\-]+/g, "_").toUpperCase();
-const map = {
-"API_VERSIONING": "VERSIONING",
-"VERSIONING": "VERSIONING",
-"ERROR_HANDLING": "ERROR_HANDLING",
-"MEDIA_TYPES": "MEDIA_TYPES",
-"NAMING_CONVENTIONS": "NAMING_CONVENTIONS",
-"PAGINATION": "PAGINATION",
-"SECURITY": "SECURITY",
-"COMMON_OPERATIONS": "COMMON_OPERATIONS"
-};
-return map[up] || up;
-};
-
-const pushRule = (rule, fallbackCategoryKey) => {
-if (!rule || typeof rule !== "object") return;
-
-const categoryKey = normalizeCategoryKey(
-rule.categoryKey ||
-rule.category_key ||
-rule.category ||
-fallbackCategoryKey
-);
-
-const ruleId = rule.ruleId || rule.rule_id || rule.id;
-const ruleName = rule.ruleName || rule.rule_name || rule.name;
-if (!ruleId || !ruleName) return;
-
-normalized.rules.push({
-ruleId,
-ruleName,
-category: categoryKey,
-categoryKey,
-normative: rule.normative || null,
-severity: rule.severity || "medium",
-referenceUrl: rule.referenceUrl || rule.reference_url || rule.reference || rule.url || null
-});
-};
-
-if (Array.isArray(checklist?.rules)) {
-checklist.rules.forEach((r) => pushRule(r));
-}
-
-if (normalized.rules.length === 0 && Array.isArray(checklist?.features)) {
-for (const feature of checklist.features) {
-const fallbackCategoryKey =
-feature?.category_key ||
-feature?.categoryKey ||
-feature?.category ||
-feature?.feature_id ||
-null;
-
-const featureRules = Array.isArray(feature?.rules) ? feature.rules : [];
-featureRules.forEach((r) => pushRule(r, fallbackCategoryKey));
-}
-}
-
-return normalized;
-}
-
-// -----------------------------
-// File discovery / parsing
-// -----------------------------
-async function discoverSourceFiles(repoPath) {
-const files = [];
-const extensions = [".js", ".mjs", ".cjs", ".ts", ".tsx", ".json", ".yml", ".yaml"];
-
-const excludeDirs = ["node_modules", ".git", "dist", "build", "coverage", ".next", "out"];
-const excludeFiles = new Set([
-"package-lock.json",
-"package.json",
-"yarn.lock",
-"pnpm-lock.yaml",
-"npm-shrinkwrap.json"
-]);
-
-function scanDirectory(dir, depth = 0) {
-if (depth > 12) return;
-if (!fs.existsSync(dir)) return;
-
-const entries = fs.readdirSync(dir, { withFileTypes: true });
-for (const entry of entries) {
-const fullPath = path.join(dir, entry.name);
-
-if (entry.isDirectory()) {
-if (excludeDirs.includes(entry.name) || entry.name.startsWith(".")) continue;
-scanDirectory(fullPath, depth + 1);
-} else if (entry.isFile()) {
-if (excludeFiles.has(entry.name)) continue;
-const ext = path.extname(entry.name);
-if (extensions.includes(ext)) {
-files.push({
-absolutePath: fullPath,
-relativePath: path.relative(repoPath, fullPath),
-fileName: entry.name,
-extension: ext,
-type: determineFileType(entry.name, fullPath)
-});
+import { evaluateRule } from "./rule-checkers.js";
+
+function reportsDir(repoPath) {
+return path.resolve(repoPath, ".xreadiness", "reports");
+}
+
+async function ensureDir(p) {
+await fs.mkdir(p, { recursive: true });
+}
+
+async function loadPlan(planPath) {
+const raw = await fs.readFile(planPath, "utf8");
+return JSON.parse(raw);
+}
+
+/**
+ * Build detailed markdown report with violation table
+ */
+function buildMarkdownReport({ runId, repoPath, plan, results }) {
+const { readiness_summary, violations } = results;
+
+let md = `# API X-Readiness Execution Report\n\n`;
+md += `**Generated:** ${new Date().toISOString()}\n\n`;
+md += `- **Run ID:** ${runId}\n`;
+md += `- **Repository Path:** ${repoPath}\n`;
+md += `- **Plan ID:** ${plan.plan_id}\n`;
+md += `- **Scope:** ${plan.scope}\n`;
+md += `- **Scope Description:** ${plan.scope_description}\n\n`;
+
+md += `---\n\n`;
+md += `## Summary\n\n`;
+md += `| Metric | Count |\n`;
+md += `|--------|-------|\n`;
+md += `| Rules Checked | ${readiness_summary.rules_checked} |\n`;
+md += `| ✅ Passed | ${readiness_summary.passed} |\n`;
+md += `| ❌ Failed | ${readiness_summary.failed} |\n`;
+md += `| ⏭️ Skipped | ${readiness_summary.skipped} |\n`;
+md += `| ⚠️ Errors | ${readiness_summary.errors} |\n\n`;
+
+const passRate = readiness_summary.rules_checked > 0
+? ((readiness_summary.passed / readiness_summary.rules_checked) * 100).toFixed(1)
+: 0;
+md += `**Pass Rate:** ${passRate}%\n\n`;
+
+md += `---\n\n`;
+md += `## Rule Violations\n\n`;
+
+if (!violations.length) {
+md += `✅ **No violations found!** Your API conforms to all checked X-Readiness rules.\n\n`;
+} else {
+md += `Found **${violations.length} violations** across the codebase:\n\n`;
+md += `| Rule ID | Category | Severity | File | Line | Violation Message | Guideline |\n`;
+md += `|---------|----------|----------|------|-----:|-------------------|------------|\n`;
+
+for (const v of violations) {
+const fileLink = v.file_path;
+const line = v.line_number || "-";
+const message = v.message.replace(/\|/g, "\\|").replace(/\n/g, " ");
+const guidelineLink = v.guideline_url
+? `[📖 Ref](${v.guideline_url})`
+: "-";
+
+md += `| \`${v.rule_id}\` | ${v.category} | ${v.severity} | \`${fileLink}\` | ${line} | ${message} | ${guidelineLink} |\n`;
+}
+md += `\n`;
+
+// Group violations by file
+md += `### Violations by File\n\n`;
+const byFile = {};
+for (const v of violations) {
+if (!byFile[v.file_path]) byFile[v.file_path] = [];
+byFile[v.file_path].push(v);
+}
+
+for (const [file, viols] of Object.entries(byFile)) {
+md += `#### \`${file}\` (${viols.length} violations)\n\n`;
+for (const v of viols) {
+md += `- **Line ${v.line_number || "?"}** — Rule \`${v.rule_id}\`: ${v.message}\n`;
+if (v.line_content) {
+md += ` \`\`\`\n ${v.line_content}\n \`\`\`\n`;
 }
 }
+md += `\n`;
 }
 }
 
-
-
-
-
-
-
-
-
-
-
-if (lower.includes("route") || lower.includes("router")) return "routes";
-if (lower.includes("controller")) return "controller";
-if (lower.includes("service")) return "service";
-if (lower.includes("config")) return "config";
-return "other";
-}
-
-async function parseSourceFiles(sourceFiles) {
-const parsed = [];
-for (const file of sourceFiles) {
-try {
-const content = fs.readFileSync(file.absolutePath, "utf8");
-const lines = content.split("\n");
-parsed.push({
-...file,
-content,
-lines,
-lineCount: lines.length,
-analysis: analyzeFileContent(content, lines)
-});
-} catch {
-// skip unreadable
-}
-}
-return parsed;
-}
-
-function analyzeFileContent(content, lines) {
-const analysis = {
-routes: [],
-statusCodes: [],
-paginationPatterns: [],
-versionPatterns: [],
-securityPatterns: []
-};
-
-// Express-like routes
-const routePatterns = [
-/router\.(get|post|put|patch|delete)\s*\(\s*['"`]([^'"`]+)['"`]/gi,
-/app\.(get|post|put|patch|delete)\s*\(\s*['"`]([^'"`]+)['"`]/gi
-];
-routePatterns.forEach(pattern => {
-let match;
-while ((match = pattern.exec(content)) !== null) {
-analysis.routes.push({
-method: match[1].toUpperCase(),
-path: match[2],
-lineNumber: content.substring(0, match.index).split("\n").length
-});
-}
-});
-
-// Status codes
-lines.forEach((line, index) => {
-const m = line.match(/\b(res|response)\.status\s*\(\s*(\d{3})\s*\)/);
-if (m) analysis.statusCodes.push({ code: parseInt(m[2], 10), lineNumber: index + 1 });
-});
-
-// Pagination keywords
-const paginationKeywords = ["limit", "offset", "page", "cursor"];
-lines.forEach((line, index) => {
-for (const keyword of paginationKeywords) {
-if (new RegExp(`\\b${keyword}\\b`, "i").test(line)) {
-analysis.paginationPatterns.push({ keyword, lineNumber: index + 1 });
-}
-}
-});
-
-// Versioning
-lines.forEach((line, index) => {
-const m = line.match(/\/v(\d+)\//i);
-if (m) analysis.versionPatterns.push({ version: m[1], lineNumber: index + 1 });
-});
-
-// Security (http://)
-lines.forEach((line, index) => {
-if (/http:\/\//i.test(line) && !/localhost|127\.0\.0\.1/i.test(line)) {
-analysis.securityPatterns.push({ keyword: "insecure-http", lineNumber: index + 1, line: line.trim() });
-}
-});
-
-return analysis;
-}
-
-// -----------------------------
-// API signal detection
-// -----------------------------
-function detectApiSignals(parsedFiles) {
-const routes = parsedFiles.reduce((acc, f) => acc + (f.analysis?.routes?.length ?? 0), 0);
-const statusCalls = parsedFiles.reduce((acc, f) => acc + (f.analysis?.statusCodes?.length ?? 0), 0);
-const hasOpenApi = parsedFiles.some(f => f.type === "openapi");
-const hasControllersOrRoutes = parsedFiles.some(f => f.type === "routes" || f.type === "controller");
-
-return {
-routes_found: routes,
-status_calls_found: statusCalls,
-openapi_found: hasOpenApi,
-route_or_controller_files_found: hasControllersOrRoutes,
-api_surface_detected: hasOpenApi || routes > 0 || hasControllersOrRoutes
-};
-}
-
-// -----------------------------
-// Rule execution: no silent skips
-// -----------------------------
-async function executeRuleCheck(parsedFiles, rule, scope, signals) {
-// default: NOT_EVALUATED until proven PASS/FAIL or NOT_IMPLEMENTED
-const result = {
-status: "NOT_EVALUATED",
-reason: "No positive evidence found yet.",
-evidence_locations: [],
-violations: []
-};
-
-const categoryKey = rule.categoryKey || rule.category;
-
-// If we don't even detect API surface, API rules cannot be evaluated meaningfully
-// (except some security checks that can still run)
-const apiRequiredCategories = new Set([
-"PAGINATION",
-"ERROR_HANDLING",
-"VERSIONING",
-"COMMON_OPERATIONS",
-"MEDIA_TYPES",
-"NAMING_CONVENTIONS"
-]);
-
-if (apiRequiredCategories.has(categoryKey) && !signals.api_surface_detected) {
-result.status = "NOT_EVALUATED";
-result.reason = "No API surface detected (no routes/controllers/OpenAPI). Cannot evaluate this rule.";
-return result;
+md += `---\n\n`;
+md += `## Next Steps\n\n`;
+if (violations.length > 0) {
+md += `1. **Review the violations** listed above\n`;
+md += `2. **Fix the issues** in your source code\n`;
+md += `3. **Re-run the execution** to verify fixes\n`;
+md += `4. **Or use auto-fix tool** (if available) by calling \`auto_fix\` with this run ID\n`;
+md += ` - ⚠️ Auto-fix requires your approval before applying changes\n\n`;
+} else {
+md += `🎉 **Congratulations!** Your API is X-Readiness compliant for the checked scope.\n\n`;
 }
 
|
-
|
|
469
|
-
switch (categoryKey) {
|
|
470
|
-
case "PAGINATION":
|
|
471
|
-
return checkPaginationRules(parsedFiles, rule, scope);
|
|
472
|
-
case "ERROR_HANDLING":
|
|
473
|
-
return checkErrorHandlingRules(parsedFiles, rule, scope);
|
|
474
|
-
case "SECURITY":
|
|
475
|
-
return checkSecurityRules(parsedFiles, rule, scope);
|
|
476
|
-
case "VERSIONING":
|
|
477
|
-
return checkVersioningRules(parsedFiles, rule, scope);
|
|
478
|
-
case "COMMON_OPERATIONS":
|
|
479
|
-
return checkCommonOperations(parsedFiles, rule, scope);
|
|
480
|
-
case "NAMING_CONVENTIONS":
|
|
481
|
-
return checkNamingConventionsRules(parsedFiles, rule, scope);
|
|
482
|
-
case "MEDIA_TYPES":
|
|
483
|
-
return checkMediaTypesRules(parsedFiles, rule, scope);
|
|
484
|
-
default:
|
|
485
|
-
return {
|
|
486
|
-
status: "NOT_IMPLEMENTED",
|
|
487
|
-
reason: `No checker implemented for category '${categoryKey}'.`,
|
|
488
|
-
evidence_locations: [],
|
|
489
|
-
violations: []
|
|
490
|
-
};
|
|
491
|
-
}
|
|
103
|
+
return md;
|
|
492
104
|
}
|
|
493
105
|
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
function checkPaginationRules(parsedFiles, rule, scope) {
|
|
501
|
-
const relevantFiles = parsedFiles.filter(f => f.type === "routes" || f.type === "controller");
|
|
502
|
-
if (relevantFiles.length === 0) {
|
|
503
|
-
return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate pagination.", evidence_locations: [], violations: [] };
|
|
504
|
-
}
|
|
106
|
+
/**
|
|
107
|
+
* Main execution tool - checks each rule systematically
|
|
108
|
+
*/
|
|
109
|
+
export async function executeTool({ repoPath, planPath }) {
|
|
110
|
+
console.error(`[executeTool] Loading plan from: ${planPath}`);
|
|
111
|
+
const plan = await loadPlan(planPath);
|
|
505
112
|
|
|
506
|
-
|
|
507
|
-
const
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
const ln = file.analysis.routes[0].lineNumber;
|
|
516
|
-
violations.push({
|
|
517
|
-
scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
|
|
518
|
-
referenceUrl: rule.referenceUrl ?? null, normative: rule.normative ?? null, severity: rule.severity ?? "high",
|
|
519
|
-
filePath: file.relativePath, lineNumber: ln, columnNumber: null,
|
|
520
|
-
description: "Routes found but no pagination signals (limit/offset/page/cursor) detected.",
|
|
521
|
-
currentCode: file.lines[ln - 1]?.trim() ?? "",
|
|
522
|
-
suggestedFix: "Add pagination support (limit/offset or cursor) for collection endpoints."
|
|
113
|
+
// Extract all rules from plan categories
|
|
114
|
+
const allRules = [];
|
|
115
|
+
for (const category of plan.categories ?? []) {
|
|
116
|
+
for (const rule of category.rules ?? []) {
|
|
117
|
+
allRules.push({
|
|
118
|
+
...rule,
|
|
119
|
+
category: category.name,
|
|
120
|
+
priority: category.priority,
|
|
121
|
+
guideline_url: category.guideline_url
|
|
523
122
|
});
|
|
524
123
|
}
|
|
525
124
|
}
|
|
526
125
|
|
|
527
|
-
|
|
528
|
-
if (evidence.length) return { status: "PASS", reason: "Pagination signals detected in code.", evidence_locations: evidence, violations: [] };
|
|
529
|
-
return { status: "NOT_EVALUATED", reason: "No pagination signals detected; cannot confirm compliance.", evidence_locations: [], violations: [] };
|
|
530
|
-
}
|
|
531
|
-
|
|
532
|
-
function checkErrorHandlingRules(parsedFiles, rule, scope) {
|
|
533
|
-
const relevantFiles = parsedFiles.filter(f => f.type === "routes" || f.type === "controller");
|
|
534
|
-
if (relevantFiles.length === 0) {
|
|
535
|
-
return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate error handling.", evidence_locations: [], violations: [] };
|
|
536
|
-
}
|
|
126
|
+
console.error(`[executeTool] Checking ${allRules.length} rules...`);
|
|
537
127
|
|
|
538
128
|
const evidence = [];
|
|
539
129
|
const violations = [];
|
|
130
|
+
let errorCount = 0;
|
|
540
131
|
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
132
|
+
// Execute each rule check
|
|
133
|
+
for (let idx = 0; idx < allRules.length; idx++) {
|
|
134
|
+
const rule = allRules[idx];
|
|
135
|
+
console.error(`[executeTool] [${idx + 1}/${allRules.length}] Checking rule: ${rule.rule_id}`);
|
|
544
136
|
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
}
|
|
548
|
-
|
|
549
|
-
if (/\b(res|response)\.(send|json)\s*\(/.test(line) && !/\b(res|response)\.status\s*\(/.test(line)) {
|
|
550
|
-
violations.push({
|
|
551
|
-
scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
|
|
552
|
-
referenceUrl: rule.referenceUrl ?? null, normative: rule.normative ?? null, severity: rule.severity ?? "high",
|
|
553
|
-
filePath: file.relativePath, lineNumber: ln, columnNumber: null,
|
|
554
|
-
description: "Response sent without explicit HTTP status code.",
|
|
555
|
-
currentCode: line.trim(),
|
|
556
|
-
suggestedFix: "Use res.status(<code>).json()/send() consistently."
|
|
557
|
-
});
|
|
558
|
-
}
|
|
559
|
-
});
|
|
560
|
-
}
|
|
561
|
-
|
|
562
|
-
if (violations.length) return { status: "FAIL", reason: "Missing explicit status codes detected.", evidence_locations: evidence, violations };
|
|
563
|
-
if (evidence.length) return { status: "PASS", reason: "Explicit status code usage detected.", evidence_locations: evidence, violations: [] };
|
|
564
|
-
return { status: "NOT_EVALUATED", reason: "No status code usage detected; cannot confirm error handling behavior.", evidence_locations: [], violations: [] };
|
|
565
|
-
}
|
|
566
|
-
|
|
567
|
-
function checkSecurityRules(parsedFiles, rule, scope) {
|
|
568
|
-
const evidence = [];
|
|
569
|
-
const violations = [];
|
|
570
|
-
|
|
571
|
-
for (const file of parsedFiles) {
|
|
572
|
-
for (const pattern of (file.analysis.securityPatterns || [])) {
|
|
573
|
-
evidence.push({ file_path: file.relativePath, line_number: pattern.lineNumber, snippet: pattern.line?.trim() ?? "" });
|
|
574
|
-
violations.push({
|
|
575
|
-
scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
|
|
576
|
-
referenceUrl: rule.referenceUrl ?? null, normative: "MUST", severity: "critical",
|
|
577
|
-
filePath: file.relativePath, lineNumber: pattern.lineNumber, columnNumber: null,
|
|
578
|
-
description: "Insecure http:// usage detected (must use https://).",
|
|
579
|
-
currentCode: pattern.line,
|
|
580
|
-
suggestedFix: "Replace http:// with https:// or ensure TLS termination."
|
|
581
|
-
});
|
|
582
|
-
}
|
|
583
|
-
}
|
|
584
|
-
|
|
585
|
-
if (violations.length) return { status: "FAIL", reason: "Security violations detected (http://).", evidence_locations: evidence, violations };
|
|
586
|
-
// For SECURITY, PASS needs positive evidence; otherwise NOT_EVALUATED (donāt claim secure just because nothing found)
|
|
587
|
-
return { status: "NOT_EVALUATED", reason: "No explicit security evidence detected; security cannot be confirmed by heuristic.", evidence_locations: [], violations: [] };
|
|
588
|
-
}
|
|
589
|
-
|
|
590
|
-
function checkVersioningRules(parsedFiles, rule, scope) {
|
|
591
|
-
const relevantFiles = parsedFiles.filter(f => f.type === "routes" || f.type === "controller");
|
|
592
|
-
if (relevantFiles.length === 0) {
|
|
593
|
-
return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate versioning.", evidence_locations: [], violations: [] };
|
|
594
|
-
}
|
|
595
|
-
|
|
596
|
-
const evidence = [];
|
|
597
|
-
const violations = [];
|
|
598
|
-
|
|
599
|
-
for (const file of relevantFiles) {
|
|
600
|
-
for (const hit of (file.analysis.versionPatterns || []).slice(0, 5)) {
|
|
601
|
-
evidence.push({ file_path: file.relativePath, line_number: hit.lineNumber, snippet: file.lines[hit.lineNumber - 1]?.trim() ?? "" });
|
|
602
|
-
}
|
|
137
|
+
try {
|
|
138
|
+
const result = await evaluateRule(repoPath, rule);
|
|
603
139
|
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
description: "Routes found but no versioning signals (/v1/) detected.",
|
|
611
|
-
currentCode: file.lines[ln - 1]?.trim() ?? "",
|
|
612
|
-
suggestedFix: "Prefix API routes with a version (e.g., /api/v1/...)."
|
|
140
|
+
evidence.push({
|
|
141
|
+
rule_id: rule.rule_id,
|
|
142
|
+
rule_name: rule.rule_name,
|
|
143
|
+
category: rule.category,
|
|
144
|
+
status: result.status,
|
|
145
|
+
reason: result.reason || null
|
|
613
146
|
});
|
|
614
|
-
}
|
|
615
|
-
}
|
|
616
|
-
|
|
617
|
-
if (violations.length) return { status: "FAIL", reason: "Versioning violations detected.", evidence_locations: evidence, violations };
|
|
618
|
-
if (evidence.length) return { status: "PASS", reason: "Versioning signals detected (/vN/).", evidence_locations: evidence, violations: [] };
|
|
619
|
-
return { status: "NOT_EVALUATED", reason: "No versioning signals detected; cannot confirm compliance.", evidence_locations: [], violations: [] };
|
|
620
|
-
}
|
|
621
|
-
|
|
622
|
-
function checkCommonOperations(parsedFiles, rule, scope) {
|
|
623
|
-
const relevantFiles = parsedFiles.filter(f => f.type === "routes" || f.type === "controller");
|
|
624
|
-
if (relevantFiles.length === 0) {
|
|
625
|
-
return { status: "NOT_EVALUATED", reason: "No routes/controllers found to evaluate common operations.", evidence_locations: [], violations: [] };
|
|
626
|
-
}
|
|
627
147
|
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
currentCode: `${route.method} ${route.path}`,
|
|
643
|
-
suggestedFix: "Use POST for creating resources."
|
|
644
|
-
});
|
|
148
|
+
// Collect violations
|
|
149
|
+
if (result.status === "FAIL") {
|
|
150
|
+
for (const v of result.violations ?? []) {
|
|
151
|
+
violations.push({
|
|
152
|
+
rule_id: rule.rule_id,
|
|
153
|
+
category: rule.category,
|
|
154
|
+
severity: rule.priority || "medium",
|
|
155
|
+
file_path: v.file_path,
|
|
156
|
+
line_number: v.line_number,
|
|
157
|
+
line_content: v.line_content,
|
|
158
|
+
message: v.message,
|
|
159
|
+
guideline_url: rule.guideline_url
|
|
160
|
+
});
|
|
161
|
+
}
|
|
645
162
|
}
|
|
646
|
-
}
|
|
647
|
-
}
|
|
648
|
-
|
|
649
|
-
if (violations.length) return { status: "FAIL", reason: "HTTP method misuse detected.", evidence_locations: evidence, violations };
|
|
650
|
-
if (evidence.length) return { status: "PASS", reason: "Route evidence found; no method misuse detected by heuristic.", evidence_locations: evidence, violations: [] };
|
|
651
|
-
return { status: "NOT_EVALUATED", reason: "No route evidence; cannot evaluate operations.", evidence_locations: [], violations: [] };
|
|
652
|
-
}
|
|
653
163
|
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
const apiSpecFiles = parsedFiles.filter(f => f.type === "openapi" || f.type === "schema");
|
|
657
|
-
if (apiSpecFiles.length === 0) {
|
|
658
|
-
return { status: "NOT_EVALUATED", reason: "No OpenAPI/schema files found; naming rules require API spec to evaluate reliably.", evidence_locations: [], violations: [] };
|
|
659
|
-
}
|
|
660
|
-
|
|
661
|
-
const evidence = [];
|
|
662
|
-
const violations = [];
|
|
663
|
-
|
|
664
|
-
for (const file of apiSpecFiles) {
|
|
665
|
-
if (file.extension !== ".json") continue;
|
|
666
|
-
let json;
|
|
667
|
-
try { json = JSON.parse(file.content); } catch { continue; }
|
|
668
|
-
|
|
669
|
-
const keys = extractOpenApiSchemaPropertyKeys(json);
|
|
670
|
-
if (keys.length === 0) continue;
|
|
671
|
-
|
|
672
|
-
for (const key of keys.slice(0, 10)) {
|
|
673
|
-
const ln = findKeyLine(file.lines, key);
|
|
674
|
-
evidence.push({ file_path: file.relativePath, line_number: ln, snippet: file.lines[ln - 1]?.trim() ?? `"${key}": ...` });
|
|
675
|
-
}
|
|
676
|
-
|
|
677
|
-
for (const key of keys) {
|
|
678
|
-
if (!/^[a-z][a-zA-Z0-9]*$/.test(key) && key !== "_id" && !key.startsWith("$")) {
|
|
679
|
-
const ln = findKeyLine(file.lines, key);
|
|
680
|
-
violations.push({
|
|
681
|
-
scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
|
|
682
|
-
referenceUrl: rule.referenceUrl ?? null, normative: rule.normative ?? null, severity: rule.severity ?? "medium",
|
|
683
|
-
filePath: file.relativePath, lineNumber: ln, columnNumber: null,
|
|
684
|
-
description: `Schema property '${key}' not camelCase.`,
|
|
685
|
-
currentCode: file.lines[ln - 1]?.trim() ?? `"${key}": ...`,
|
|
686
|
-
suggestedFix: "Rename schema properties to camelCase and update clients."
|
|
687
|
-
});
|
|
164
|
+
if (result.status === "ERROR") {
|
|
165
|
+
errorCount++;
|
|
688
166
|
}
|
|
689
|
-
}
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
function checkMediaTypesRules(parsedFiles, rule, scope) {
|
|
698
|
-
const openapiFiles = parsedFiles.filter(f => f.type === "openapi" && f.extension === ".json");
|
|
699
|
-
if (openapiFiles.length === 0) {
|
|
700
|
-
return { status: "NOT_EVALUATED", reason: "No OpenAPI JSON found; media type rules require OpenAPI spec to evaluate.", evidence_locations: [], violations: [] };
|
|
701
|
-
}
|
|
702
|
-
|
|
703
|
-
const evidence = [];
|
|
704
|
-
const violations = [];
|
|
705
|
-
|
|
706
|
-
for (const file of openapiFiles) {
|
|
707
|
-
let json;
|
|
708
|
-
try { json = JSON.parse(file.content); } catch { continue; }
|
|
709
|
-
if (!looksLikeOpenApi(json)) continue;
|
|
710
|
-
|
|
711
|
-
const mtypes = findOpenApiMediaTypes(json);
|
|
712
|
-
for (const mt of Array.from(mtypes).slice(0, 10)) {
|
|
713
|
-
evidence.push({ file_path: file.relativePath, line_number: findAnyLineContaining(file.lines, `"${mt}"`) || 1, snippet: mt });
|
|
714
|
-
}
|
|
715
|
-
|
|
716
|
-
if (!mtypes.has("application/json") && rule.normative === "MUST") {
|
|
717
|
-
const ln = findAnyLineContaining(file.lines, '"content"') || 1;
|
|
718
|
-
violations.push({
|
|
719
|
-
scope, category: rule.category, ruleId: rule.ruleId, ruleName: rule.ruleName,
|
|
720
|
-
referenceUrl: rule.referenceUrl ?? null, normative: rule.normative ?? null, severity: rule.severity ?? "high",
|
|
721
|
-
filePath: file.relativePath, lineNumber: ln, columnNumber: null,
|
|
722
|
-
description: "OpenAPI spec missing application/json content declaration.",
|
|
723
|
-
currentCode: file.lines[ln - 1]?.trim() ?? "",
|
|
724
|
-
suggestedFix: 'Add content: { "application/json": { schema: ... } } to requestBody/responses.'
|
|
167
|
+
} catch (error) {
|
|
168
|
+
console.error(`[executeTool] Error checking rule ${rule.rule_id}:`, error);
|
|
169
|
+
evidence.push({
|
|
170
|
+
rule_id: rule.rule_id,
|
|
171
|
+
rule_name: rule.rule_name,
|
|
172
|
+
category: rule.category,
|
|
173
|
+
status: "ERROR",
|
|
174
|
+
reason: error.message
|
|
725
175
|
});
|
|
176
|
+
errorCount++;
|
|
726
177
|
}
|
|
727
178
|
}
|
|
728
179
|
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
// -----------------------------
|
|
737
|
-
function looksLikeOpenApi(json) {
|
|
738
|
-
return typeof json?.openapi === "string" || typeof json?.swagger === "string" || (json?.paths && typeof json.paths === "object");
|
|
739
|
-
}
|
|
740
|
-
|
|
741
|
-
function extractOpenApiSchemaPropertyKeys(json) {
|
|
742
|
-
if (!looksLikeOpenApi(json)) return [];
|
|
743
|
-
const schemas = json?.components?.schemas;
|
|
744
|
-
if (!schemas || typeof schemas !== "object") return [];
|
|
745
|
-
const keys = [];
|
|
746
|
-
for (const schemaName of Object.keys(schemas)) {
|
|
747
|
-
const props = schemas[schemaName]?.properties;
|
|
748
|
-
if (props && typeof props === "object") {
|
|
749
|
-
for (const k of Object.keys(props)) keys.push(k);
|
|
750
|
-
}
|
|
751
|
-
}
|
|
752
|
-
return keys;
|
|
753
|
-
}
|
|
754
|
-
|
|
755
|
-
function findOpenApiMediaTypes(json) {
|
|
756
|
-
const set = new Set();
|
|
757
|
-
if (!looksLikeOpenApi(json)) return set;
|
|
758
|
-
|
|
759
|
-
const paths = json.paths || {};
|
|
760
|
-
for (const p of Object.keys(paths)) {
|
|
761
|
-
const item = paths[p] || {};
|
|
762
|
-
for (const method of Object.keys(item)) {
|
|
763
|
-
const op = item[method];
|
|
764
|
-
if (!op || typeof op !== "object") continue;
|
|
765
|
-
|
|
766
|
-
const rb = op.requestBody?.content;
|
|
767
|
-
if (rb && typeof rb === "object") Object.keys(rb).forEach(k => set.add(k));
|
|
768
|
-
|
|
769
|
-
const res = op.responses || {};
|
|
770
|
-
for (const code of Object.keys(res)) {
|
|
771
|
-
const content = res[code]?.content;
|
|
772
|
-
if (content && typeof content === "object") Object.keys(content).forEach(k => set.add(k));
|
|
773
|
-
}
|
|
774
|
-
}
|
|
775
|
-
}
|
|
776
|
-
return set;
|
|
777
|
-
}
|
|
778
|
-
|
|
779
|
-
// -----------------------------
|
|
780
|
-
// Report persistence
|
|
781
|
-
// -----------------------------
|
|
782
|
-
async function writeReports(payload) {
|
|
783
|
-
const reportsDir = path.resolve("mcp", "reports");
|
|
784
|
-
await fsp.mkdir(reportsDir, { recursive: true });
|
|
785
|
-
|
|
786
|
-
const runId = `run_${new Date().toISOString().replace(/[:.]/g, "-")}_${crypto.randomUUID()}`;
|
|
787
|
-
const jsonPath = path.join(reportsDir, `${runId}.json`);
|
|
788
|
-
|
|
789
|
-
await fsp.writeFile(jsonPath, JSON.stringify(payload, null, 2), "utf8");
|
|
790
|
-
|
|
791
|
-
return { run_id: runId, json_path: jsonPath };
|
|
792
|
-
}
|
|
+const readiness_summary = {
+rules_checked: allRules.length,
+passed: evidence.filter((e) => e.status === "PASS").length,
+failed: evidence.filter((e) => e.status === "FAIL").length,
+skipped: evidence.filter((e) => e.status === "SKIPPED").length,
+errors: errorCount
+};
 
-
-
-
-
-const
-
+const runId = `run_${new Date().toISOString().replace(/[:.]/g, "-")}_${crypto.randomUUID().slice(0, 8)}`;
+const dir = reportsDir(repoPath);
+await ensureDir(dir);
+
+const report = {
+status: readiness_summary.failed > 0 ? "INCOMPLETE" : "COMPLETED",
+run_id: runId,
+plan_id: plan.plan_id,
+scope: plan.scope,
+scope_description: plan.scope_description,
+executed_at: new Date().toISOString(),
+readiness_summary,
+rule_evidence: evidence,
+violations
+};
 
-
-
-const key = `${v.category}::${v.suggestedFix}`;
-if (!fixGroups[key]) {
-fixGroups[key] = {
-fix: v.suggestedFix,
-category: v.category,
-severity: v.severity,
-files: [],
-ruleIds: new Set()
-};
-}
-if (v.filePath) fixGroups[key].files.push(v.filePath);
-fixGroups[key].ruleIds.add(v.ruleId);
-}
+const reportJsonPath = path.join(dir, `${runId}.json`);
+const reportMdPath = path.join(dir, `${runId}.md`);
 
-
-
-actions.push({
-priority,
-severity: group.severity,
-category: group.category,
-action: group.fix,
-affected_files: [...new Set(group.files)].length,
-total_occurrences: group.files.length,
-reference_rules: Array.from(group.ruleIds)
-});
-}
+await fs.writeFile(reportJsonPath, JSON.stringify(report, null, 2), "utf8");
+await fs.writeFile(reportMdPath, buildMarkdownReport({ runId, repoPath, plan, results: report }), "utf8");
 
-
-return actions;
-}
+console.error(`[executeTool] Execution complete. Report saved to: ${reportMdPath}`);
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+return {
+status: report.status,
+run_id: runId,
+report_json_path: reportJsonPath,
+report_md_path: reportMdPath,
+readiness_summary,
+violations_count: violations.length,
+message: `✅ Execution completed!\n\n📊 Results:\n- ${readiness_summary.passed} rules passed\n- ${readiness_summary.failed} rules failed\n- ${readiness_summary.skipped} rules skipped\n- ${violations.length} violations found\n\n📄 Full report saved to:\n- JSON: ${reportJsonPath}\n- Markdown: ${reportMdPath}\n\n${violations.length > 0 ? "⚠️ Next step: Review violations and fix issues, or call 'auto_fix' tool (requires approval)" : "🎉 No violations found!"}`,
+next_step: violations.length > 0 ? {
+tool: "auto_fix",
+parameters: {
+repoPath: repoPath,
+runId: runId,
+mode: "propose"
+},
+note: "Auto-fix requires approval code"
+} : null
+};
 }
 
-export default executionTool;