@toolbaux/guardian 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +366 -0
- package/dist/adapters/csharp-adapter.js +149 -0
- package/dist/adapters/go-adapter.js +96 -0
- package/dist/adapters/index.js +16 -0
- package/dist/adapters/java-adapter.js +122 -0
- package/dist/adapters/python-adapter.js +183 -0
- package/dist/adapters/runner.js +69 -0
- package/dist/adapters/types.js +1 -0
- package/dist/adapters/typescript-adapter.js +179 -0
- package/dist/benchmarking/framework.js +91 -0
- package/dist/cli.js +343 -0
- package/dist/commands/analyze-depth.js +43 -0
- package/dist/commands/api-spec-extractor.js +52 -0
- package/dist/commands/breaking-change-analyzer.js +334 -0
- package/dist/commands/config-compliance.js +219 -0
- package/dist/commands/constraints.js +221 -0
- package/dist/commands/context.js +101 -0
- package/dist/commands/data-flow-tracer.js +291 -0
- package/dist/commands/dependency-impact-analyzer.js +27 -0
- package/dist/commands/diff.js +146 -0
- package/dist/commands/discrepancy.js +71 -0
- package/dist/commands/doc-generate.js +163 -0
- package/dist/commands/doc-html.js +120 -0
- package/dist/commands/drift.js +88 -0
- package/dist/commands/extract.js +16 -0
- package/dist/commands/feature-context.js +116 -0
- package/dist/commands/generate.js +339 -0
- package/dist/commands/guard.js +182 -0
- package/dist/commands/init.js +209 -0
- package/dist/commands/intel.js +20 -0
- package/dist/commands/license-dependency-auditor.js +33 -0
- package/dist/commands/performance-hotspot-profiler.js +42 -0
- package/dist/commands/search.js +314 -0
- package/dist/commands/security-boundary-auditor.js +359 -0
- package/dist/commands/simulate.js +294 -0
- package/dist/commands/summary.js +27 -0
- package/dist/commands/test-coverage-mapper.js +264 -0
- package/dist/commands/verify-drift.js +62 -0
- package/dist/config.js +441 -0
- package/dist/extract/ai-context-hints.js +107 -0
- package/dist/extract/analyzers/backend.js +1704 -0
- package/dist/extract/analyzers/depth.js +264 -0
- package/dist/extract/analyzers/frontend.js +2221 -0
- package/dist/extract/api-usage-tracker.js +19 -0
- package/dist/extract/cache.js +53 -0
- package/dist/extract/codebase-intel.js +190 -0
- package/dist/extract/compress.js +452 -0
- package/dist/extract/context-block.js +356 -0
- package/dist/extract/contracts.js +183 -0
- package/dist/extract/discrepancies.js +233 -0
- package/dist/extract/docs-loader.js +110 -0
- package/dist/extract/docs.js +2379 -0
- package/dist/extract/drift.js +1578 -0
- package/dist/extract/duplicates.js +435 -0
- package/dist/extract/feature-arcs.js +138 -0
- package/dist/extract/graph.js +76 -0
- package/dist/extract/html-doc.js +1409 -0
- package/dist/extract/ignore.js +45 -0
- package/dist/extract/index.js +455 -0
- package/dist/extract/llm-client.js +159 -0
- package/dist/extract/pattern-registry.js +141 -0
- package/dist/extract/product-doc.js +497 -0
- package/dist/extract/python.js +1202 -0
- package/dist/extract/runtime.js +193 -0
- package/dist/extract/schema-evolution-validator.js +35 -0
- package/dist/extract/test-gap-analyzer.js +20 -0
- package/dist/extract/tests.js +74 -0
- package/dist/extract/types.js +1 -0
- package/dist/extract/validate-backend.js +30 -0
- package/dist/extract/writer.js +11 -0
- package/dist/output-layout.js +37 -0
- package/dist/project-discovery.js +309 -0
- package/dist/schema/architecture.js +350 -0
- package/dist/schema/feature-spec.js +89 -0
- package/dist/schema/index.js +8 -0
- package/dist/schema/ux.js +46 -0
- package/package.json +75 -0
|
@@ -0,0 +1,359 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* FEATURE 6: SECURITY BOUNDARY AUDITOR
|
|
3
|
+
*
|
|
4
|
+
* Identify potential security violations:
|
|
5
|
+
* - Unauthorized access patterns
|
|
6
|
+
* - Missing authentication checks
|
|
7
|
+
* - Data leakage across service boundaries
|
|
8
|
+
* - Credential exposure
|
|
9
|
+
*
|
|
10
|
+
* Benchmarking: High complexity
|
|
11
|
+
* Problem Domain: Security, Access Control, Compliance
|
|
12
|
+
*/
|
|
13
|
+
import fs from "node:fs/promises";
|
|
14
|
+
import path from "node:path";
|
|
15
|
+
/**
 * Main function: Audit security boundaries
 *
 * Entry point for FEATURE 6. Scans the source tree rooted at srcRoot, runs
 * the enabled security checks, derives per-directory boundaries and risk
 * metrics, and optionally writes a markdown report.
 *
 * @param {object} options
 * @param {string} options.srcRoot  root directory to scan
 * @param {string} [options.output] path for the markdown report (skipped when absent)
 * @param {boolean} [options.checkCredentials=true]   enable credential scan
 * @param {boolean} [options.checkSqlInjection=true]  enable SQL-injection scan
 * @param {boolean} [options.checkAuthentication=true] enable missing-auth scan
 * @returns {Promise<object>} the full audit report object
 */
export async function auditSecurityBoundaries(options) {
    const { srcRoot, output, checkCredentials = true, checkSqlInjection = true, checkAuthentication = true, } = options;
    const sourceFiles = await scanSourceFiles(srcRoot);
    // Optional checks first (same order as documented: credentials, auth,
    // SQL injection), then the always-on leakage/access checks.
    const enabledChecks = [
        checkCredentials ? checkExposedCredentials : null,
        checkAuthentication ? checkMissingAuthentication : null,
        checkSqlInjection ? checkSqlInjectionRisks : null,
        checkDataLeakage,
        checkUnauthorizedAccess,
    ].filter(Boolean);
    const issues = enabledChecks.flatMap((check) => check(sourceFiles));
    // Identify security boundaries from the scanned files + findings.
    const boundaries = identifySecurityBoundaries(sourceFiles, issues);
    // Aggregate metrics.
    const severityCount = (level) => issues.filter((i) => i.severity === level).length;
    const riskScore = calculateSecurityRiskScore(issues);
    const report = {
        timestamp: new Date().toISOString(),
        totalIssues: issues.length,
        criticalIssues: severityCount("critical"),
        highIssues: severityCount("high"),
        issues,
        boundaries,
        riskScore,
        complianceStatus: determineComplianceStatus(riskScore),
        recommendations: generateSecurityRecommendations(issues),
    };
    if (output) {
        await writeAuditReport(report, output);
    }
    return report;
}
|
|
57
|
+
/**
 * Helper: Scan source files
 *
 * Recursively walks srcRoot and collects JS/TS sources into a map keyed by
 * path relative to srcRoot. VCS/build directories are skipped; directories
 * that cannot be read are silently ignored (best-effort scan). Note that a
 * read error also abandons the rest of the directory currently being walked,
 * matching the original broad catch.
 *
 * @param {string} srcRoot directory to scan
 * @returns {Promise<Map<string,string>>} relative path -> file content
 */
async function scanSourceFiles(srcRoot) {
    const skipDirs = new Set([".git", "node_modules", "dist", "build", "coverage"]);
    const sourceExts = [".ts", ".tsx", ".js", ".jsx"];
    const files = new Map();
    async function walkDir(dir) {
        try {
            for (const entry of await fs.readdir(dir, { withFileTypes: true })) {
                const fullPath = path.join(dir, entry.name);
                if (entry.isDirectory()) {
                    if (!skipDirs.has(entry.name)) {
                        await walkDir(fullPath);
                    }
                    continue;
                }
                const isSource = entry.isFile() && sourceExts.some((ext) => entry.name.endsWith(ext));
                if (isSource) {
                    files.set(path.relative(srcRoot, fullPath), await fs.readFile(fullPath, "utf8"));
                }
            }
        }
        catch {
            // Skip inaccessible directories
        }
    }
    await walkDir(srcRoot);
    return files;
}
|
|
89
|
+
/**
 * Helper: Check for exposed credentials
 *
 * Scans every line of every file against credential patterns (passwords,
 * API keys, secrets, JWT tokens) and reports each hit as a critical issue.
 *
 * Fix: the original patterns carried the /g flag while being reused with
 * RegExp.test(), which advances lastIndex between calls and silently skips
 * matches on subsequent lines and files. Only a boolean per-line test is
 * needed, so the /g flag is removed.
 *
 * @param {Map<string,string>} sourceFiles relative path -> file content
 * @returns {Array<object>} detected credential-exposure issues
 */
function checkExposedCredentials(sourceFiles) {
    const issues = [];
    // No /g flag: a sticky lastIndex across .test() calls would cause
    // alternating false negatives in this scanner.
    const patterns = [
        { regex: /password\s*[:=]\s*['"`]([^'"`]+)/i, type: "password" },
        { regex: /api[_-]?key\s*[:=]\s*['"`]([^'"`]+)/i, type: "API key" },
        { regex: /secret\s*[:=]\s*['"`]([^'"`]+)/i, type: "secret" },
        { regex: /token\s*[:=]\s*['"`](eyJ[^'"`]+)/i, type: "JWT token" },
    ];
    for (const [file, content] of sourceFiles.entries()) {
        const lines = content.split("\n");
        for (const { regex, type } of patterns) {
            lines.forEach((line, idx) => {
                if (regex.test(line)) {
                    issues.push({
                        type: "exposed_credentials",
                        severity: "critical",
                        file,
                        line: idx + 1,
                        code: line.trim(),
                        description: `Hardcoded ${type} detected in source code`,
                        remediationSteps: [
                            "Move to environment variables",
                            "Use secrets management system",
                            "Rotate compromised credentials",
                        ],
                        cwes: ["CWE-798", "CWE-321"],
                    });
                }
            });
        }
    }
    return issues;
}
|
|
125
|
+
/**
|
|
126
|
+
* Helper: Check for missing authentication
|
|
127
|
+
*/
|
|
128
|
+
function checkMissingAuthentication(sourceFiles) {
|
|
129
|
+
const issues = [];
|
|
130
|
+
for (const [file, content] of sourceFiles.entries()) {
|
|
131
|
+
const lines = content.split("\n");
|
|
132
|
+
// Look for endpoints without auth checks
|
|
133
|
+
const endpointPattern = /(?:app|router)\.(?:get|post|put|delete|patch)\s*\(\s*['"`]([^'"`]+)/gi;
|
|
134
|
+
let match;
|
|
135
|
+
while ((match = endpointPattern.exec(content)) !== null) {
|
|
136
|
+
const lineIdx = content.substring(0, match.index).split("\n").length - 1;
|
|
137
|
+
const line = lines[lineIdx];
|
|
138
|
+
// Check if there's an auth middleware or check nearby
|
|
139
|
+
const hasAuth = line.includes("auth") ||
|
|
140
|
+
line.includes("unauthorized") ||
|
|
141
|
+
line.includes("private") ||
|
|
142
|
+
(lines[lineIdx - 1] && lines[lineIdx - 1].includes("auth"));
|
|
143
|
+
if (!hasAuth && !match[1].includes("public")) {
|
|
144
|
+
issues.push({
|
|
145
|
+
type: "missing_auth",
|
|
146
|
+
severity: "high",
|
|
147
|
+
file,
|
|
148
|
+
line: lineIdx + 1,
|
|
149
|
+
code: line.trim(),
|
|
150
|
+
description: `Endpoint may lack authentication check: ${match[1]}`,
|
|
151
|
+
remediationSteps: [
|
|
152
|
+
"Add authentication middleware",
|
|
153
|
+
"Verify authorization rules",
|
|
154
|
+
"Document access requirements",
|
|
155
|
+
],
|
|
156
|
+
cwes: ["CWE-284", "CWE-287"],
|
|
157
|
+
});
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
}
|
|
161
|
+
return issues;
|
|
162
|
+
}
|
|
163
|
+
/**
 * Helper: Check for SQL injection risks
 *
 * Flags lines where template-literal interpolation appears inside a
 * query()/execute() call — a classic string-built-SQL pattern (CWE-89).
 *
 * Fix: the original pattern had the /g flag and was shared across every
 * line and file while being used with RegExp.test(); the stateful
 * lastIndex made the scanner skip matches nondeterministically. The /g
 * flag is removed — only a boolean per-line test is needed.
 *
 * @param {Map<string,string>} sourceFiles relative path -> file content
 * @returns {Array<object>} suspected SQL-injection sites
 */
function checkSqlInjectionRisks(sourceFiles) {
    const issues = [];
    // No /g flag: .test() must be stateless across lines/files.
    const sqlPattern = /query\s*\(\s*\$\{|query\s*\(\s*[`'].*\$\{|execute\s*\(\s*\$\{/i;
    for (const [file, content] of sourceFiles.entries()) {
        const lines = content.split("\n");
        lines.forEach((line, idx) => {
            if (sqlPattern.test(line)) {
                issues.push({
                    type: "sql_injection_risk",
                    severity: "critical",
                    file,
                    line: idx + 1,
                    code: line.trim(),
                    description: "Potential SQL injection: string interpolation in query",
                    remediationSteps: [
                        "Use parameterized queries",
                        "Use prepared statements",
                        "Use ORM with proper escaping",
                    ],
                    cwes: ["CWE-89"],
                });
            }
        });
    }
    return issues;
}
|
|
192
|
+
/**
 * Helper: Check for data leakage
 *
 * Flags lines that pass sensitive-looking identifiers (password/token/
 * secret/auth) to console.log or error() calls (CWE-532).
 *
 * Fix: the patterns carried the /g flag and were shared across every line
 * and file while being used with RegExp.test(); the stateful lastIndex
 * made the scanner skip matches. The /g flag is removed — only a boolean
 * per-line test is needed.
 *
 * @param {Map<string,string>} sourceFiles relative path -> file content
 * @returns {Array<object>} suspected sensitive-data logging sites
 */
function checkDataLeakage(sourceFiles) {
    const issues = [];
    // No /g flag: .test() must be stateless across lines/files.
    const leakagePatterns = [
        /console\s*\.\s*log\s*\(\s*(?:password|token|secret|auth)/i,
        /error\s*\(\s*.*(?:password|token|secret)/i,
    ];
    for (const [file, content] of sourceFiles.entries()) {
        const lines = content.split("\n");
        lines.forEach((line, idx) => {
            for (const pattern of leakagePatterns) {
                if (pattern.test(line)) {
                    issues.push({
                        type: "data_leakage",
                        severity: "high",
                        file,
                        line: idx + 1,
                        code: line.trim(),
                        description: "Sensitive data logged or exposed in error messages",
                        remediationSteps: [
                            "Remove sensitive data from logs",
                            "Use data filtering/masking",
                            "Implement proper logging practices",
                        ],
                        cwes: ["CWE-532"],
                    });
                }
            }
        });
    }
    return issues;
}
|
|
226
|
+
/**
 * Helper: Check for unauthorized access patterns
 *
 * Flags lines that mention "isAdmin" without an accompanying "check" or
 * "verify" — a heuristic for authorization logic that assigns or trusts an
 * admin flag rather than validating it.
 *
 * @param {Map<string,string>} sourceFiles relative path -> file content
 * @returns {Array<object>} suspected incomplete authorization checks
 */
function checkUnauthorizedAccess(sourceFiles) {
    const issues = [];
    for (const [file, content] of sourceFiles.entries()) {
        const fileLines = content.split("\n");
        for (let idx = 0; idx < fileLines.length; idx++) {
            const line = fileLines[idx];
            const suspicious = line.includes("isAdmin") &&
                !line.includes("check") &&
                !line.includes("verify");
            if (!suspicious) {
                continue;
            }
            issues.push({
                type: "unauthorized_access",
                severity: "medium",
                file,
                line: idx + 1,
                code: line.trim(),
                description: "Authorization check may be incomplete",
                remediationSteps: [
                    "Verify authorization logic",
                    "Add role-based access control",
                    "Audit permission matrix",
                ],
                cwes: ["CWE-639"],
            });
        }
    }
    return issues;
}
|
|
257
|
+
/**
 * Helper: Identify security boundaries
 *
 * Groups files by directory and derives a coarse boundary descriptor for
 * each (access level from naming, encryption status from credential hits).
 *
 * Fix: membership used bare startsWith()/includes() prefix tests, which
 * wrongly matched sibling directories sharing a prefix ("api" matched
 * "api-v2/...") and matched any path merely containing the dir substring.
 * Membership now requires an exact path-segment boundary.
 *
 * @param {Map<string,string>} sourceFiles relative path -> content
 * @param {Array<object>} issues previously collected security issues
 * @returns {Array<object>} one boundary descriptor per directory
 */
function identifySecurityBoundaries(sourceFiles, issues) {
    // True when relPath is `dir` itself or lives under it; the root
    // directory "." encompasses everything.
    const within = (relPath, dir) => dir === "." ||
        relPath === dir ||
        relPath.startsWith(dir + path.sep) ||
        relPath.startsWith(dir + "/");
    const boundaries = [];
    // Group files by directory as boundaries
    const dirs = new Set();
    for (const file of sourceFiles.keys()) {
        dirs.add(path.dirname(file));
    }
    for (const dir of dirs) {
        const files = Array.from(sourceFiles.keys()).filter((f) => within(f, dir));
        // Determine access control level based on issues and naming
        const accessControl = dir.includes("public") ? "public" : "protected";
        const authenticatedOnly = dir.includes("admin") || dir.includes("private");
        const hasExposed = issues.some((i) => i.type === "exposed_credentials" && within(i.file, dir));
        boundaries.push({
            name: dir,
            files,
            accessControl,
            authenticatedOnly,
            encryptionStatus: hasExposed ? "none" : "partial",
        });
    }
    return boundaries;
}
|
|
283
|
+
/**
 * Helper: Calculate security risk score
 *
 * Weighted sum of issues by severity (critical 40, high 20, medium 10,
 * low 5); unrecognized severities contribute nothing. Capped at 100.
 *
 * @param {Array<object>} issues collected security issues
 * @returns {number} risk score in [0, 100]
 */
function calculateSecurityRiskScore(issues) {
    const weights = { critical: 40, high: 20, medium: 10, low: 5 };
    const total = issues.reduce((sum, issue) => sum + (weights[issue.severity] ?? 0), 0);
    return Math.min(100, total);
}
|
|
297
|
+
/**
 * Helper: Determine compliance status
 *
 * Maps a 0-100 risk score onto a human-readable compliance band using
 * descending thresholds (>=80 critical, >=60 high, >=40 medium, >=20 low).
 *
 * @param {number} riskScore aggregated risk score
 * @returns {string} compliance status label
 */
function determineComplianceStatus(riskScore) {
    const bands = [
        [80, "CRITICAL - Immediate action required"],
        [60, "HIGH - Significant vulnerabilities"],
        [40, "MEDIUM - Notable issues to address"],
        [20, "LOW - Minor concerns"],
    ];
    for (const [threshold, label] of bands) {
        if (riskScore >= threshold) {
            return label;
        }
    }
    return "ACCEPTABLE - Security posture is good";
}
|
|
311
|
+
/**
 * Helper: Generate security recommendations
 *
 * Produces headline recommendations from issue counts: critical-severity
 * total, hardcoded-credential count, missing-auth count, or an all-clear
 * message when the issue list is empty.
 *
 * @param {Array<object>} issues collected security issues
 * @returns {string[]} recommendation strings
 */
function generateSecurityRecommendations(issues) {
    const countWhere = (pred) => issues.filter(pred).length;
    const recommendations = [];
    const critical = countWhere((i) => i.severity === "critical");
    if (critical > 0) {
        recommendations.push(`🚨 CRITICAL: ${critical} critical vulnerabilities must be fixed immediately`);
    }
    const credentials = countWhere((i) => i.type === "exposed_credentials");
    if (credentials > 0) {
        recommendations.push(`🔐 Move ${credentials} hardcoded credentials to env vars`);
    }
    const unauthenticated = countWhere((i) => i.type === "missing_auth");
    if (unauthenticated > 0) {
        recommendations.push(`🔒 Add/verify authentication on ${unauthenticated} endpoints`);
    }
    if (issues.length === 0) {
        recommendations.push(`✅ No security issues detected`);
    }
    return recommendations;
}
|
|
333
|
+
/**
 * Helper: Write audit report
 *
 * Renders the audit report as markdown (summary, issue counts by type,
 * recommendations) and writes it to outputPath, creating parent
 * directories as needed.
 *
 * @param {object} report audit report from auditSecurityBoundaries
 * @param {string} outputPath destination file path
 */
async function writeAuditReport(report, outputPath) {
    const segments = [
        `# Security Boundary Audit Report\n\n`,
        `Generated: ${report.timestamp}\n\n`,
        `## Summary\n`,
        `- **Risk Score:** ${report.riskScore}/100\n`,
        `- **Compliance:** ${report.complianceStatus}\n`,
        `- **Total Issues:** ${report.totalIssues}\n`,
        `- **Critical:** ${report.criticalIssues} | High: ${report.highIssues}\n\n`,
        `## Issues by Type\n`,
    ];
    // Tally issues per type, preserving first-seen order.
    const typeCounts = new Map();
    for (const issue of report.issues) {
        typeCounts.set(issue.type, (typeCounts.get(issue.type) ?? 0) + 1);
    }
    for (const [type, count] of typeCounts.entries()) {
        segments.push(`- ${type}: ${count}\n`);
    }
    segments.push(`\n## Recommendations\n`);
    for (const rec of report.recommendations) {
        segments.push(`- ${rec}\n`);
    }
    await fs.mkdir(path.dirname(outputPath), { recursive: true });
    await fs.writeFile(outputPath, segments.join(""), "utf8");
}
|
|
359
|
+
export default auditSecurityBoundaries;
|
|
@@ -0,0 +1,294 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import * as fsSync from "node:fs";
|
|
3
|
+
import os from "node:os";
|
|
4
|
+
import path from "node:path";
|
|
5
|
+
import { spawn } from "node:child_process";
|
|
6
|
+
import yaml from "js-yaml";
|
|
7
|
+
import { buildSnapshots } from "../extract/index.js";
|
|
8
|
+
import { buildArchitectureSummary, loadArchitectureSummary } from "../extract/compress.js";
|
|
9
|
+
import { createIgnoreMatcher } from "../extract/ignore.js";
|
|
10
|
+
import { logResolvedProjectPaths, resolveProjectPaths } from "../project-discovery.js";
|
|
11
|
+
/**
 * Run a drift "simulation": build architecture/UX snapshots for the project
 * (optionally with a git patch applied to a temporary copy of the workspace),
 * evaluate the resulting drift candidate against the stored baseline, and
 * write the evaluation to a JSON file.
 *
 * @param {object} options
 * @param {string} [options.projectRoot]     project root override
 * @param {string} [options.backendRoot]     backend root override
 * @param {string} [options.frontendRoot]    frontend root override
 * @param {string} [options.configPath]      config file override
 * @param {string} [options.patch]           path to a git patch to simulate
 * @param {string} [options.baseline]        explicit baseline drift file
 * @param {string} [options.baselineSummary] explicit baseline summary file
 * @param {string} [options.output]          output path for the evaluation JSON
 * @param {string} [options.mode]            guard mode override ("hard"/"soft")
 */
export async function runSimulate(options) {
    const resolved = await resolveProjectPaths({
        projectRoot: options.projectRoot,
        backendRoot: options.backendRoot,
        frontendRoot: options.frontendRoot,
        configPath: options.configPath
    });
    const resolvedBackendRoot = resolved.backendRoot;
    const resolvedFrontendRoot = resolved.frontendRoot;
    const originalRoot = resolved.workspaceRoot;
    const config = resolved.config;
    logResolvedProjectPaths(resolved);
    // By default the simulation runs in place; with --patch it runs in a
    // temporary copy so the real workspace is never modified.
    let simulationRoot = originalRoot;
    let simBackendRoot = resolvedBackendRoot;
    let simFrontendRoot = resolvedFrontendRoot;
    let cleanupTemp = false;
    try {
        if (options.patch) {
            const tempRoot = await createTempCopy(originalRoot, config);
            await applyPatch(tempRoot, options.patch);
            simulationRoot = tempRoot;
            // Backend/frontend roots must be remapped into the temp copy.
            simBackendRoot = remapPath(resolvedBackendRoot, originalRoot, tempRoot);
            simFrontendRoot = remapPath(resolvedFrontendRoot, originalRoot, tempRoot);
            cleanupTemp = true;
        }
        const { architecture, ux } = await buildSnapshots({
            projectRoot: simulationRoot,
            backendRoot: simBackendRoot,
            frontendRoot: simFrontendRoot,
            output: options.output,
            includeFileGraph: true,
            configPath: options.configPath
        });
        const candidate = architecture.drift;
        const candidateSummary = buildArchitectureSummary(architecture, ux);
        // Baseline artifacts are always resolved against the ORIGINAL root
        // (not the temp copy) so the comparison is against committed state.
        const baselinePath = await resolveBaselinePath({
            projectRoot: originalRoot,
            config,
            override: options.baseline
        });
        const baseline = baselinePath ? await loadDriftFromFile(baselinePath) : null;
        const baselineSummaryPath = await resolveBaselineSummaryPath({
            projectRoot: originalRoot,
            override: options.baselineSummary
        });
        const baselineSummary = baselineSummaryPath
            ? await loadSummaryFromPath(baselineSummaryPath)
            : null;
        const evaluation = evaluateCandidate({
            candidate,
            baseline,
            config,
            baselineSummary,
            candidateSummary,
            // CLI flag wins, then config, then permissive "soft" mode.
            mode: options.mode ?? config.guard?.mode ?? "soft"
        });
        const outputPath = path.resolve(options.output ?? "specs-out/drift.simulation.json");
        await fs.mkdir(path.dirname(outputPath), { recursive: true });
        await fs.writeFile(outputPath, JSON.stringify(evaluation, null, 2));
        console.log(`Simulation decision: ${evaluation.decision}`);
        if (evaluation.reasons.length > 0) {
            console.log(`Reasons: ${evaluation.reasons.join(", ")}`);
        }
        console.log(`Wrote ${outputPath}`);
    }
    finally {
        // Always remove the temp copy, even when snapshotting/evaluation threw.
        if (cleanupTemp) {
            await fs.rm(simulationRoot, { recursive: true, force: true });
        }
    }
}
|
|
82
|
+
/**
 * Resolve the baseline drift file to compare against: an explicit override
 * wins, then the configured path, then well-known defaults. Returns the
 * first candidate that exists on disk, or null when none do.
 *
 * @param {{projectRoot: string, config: object, override?: string}} params
 * @returns {Promise<string|null>} absolute path of the baseline file or null
 */
async function resolveBaselinePath(params) {
    const { projectRoot, config, override } = params;
    const candidates = [
        ...(override ? [override] : []),
        ...(config.drift?.baselinePath ? [config.drift.baselinePath] : []),
        "specs-out/machine/baseline.json",
        "specs-out/machine/drift.report.json",
        "specs-out/machine/architecture.snapshot.yaml",
    ];
    for (const candidate of candidates) {
        const absolute = path.isAbsolute(candidate)
            ? candidate
            : path.resolve(projectRoot, candidate);
        if (await fileExists(absolute)) {
            return absolute;
        }
    }
    return null;
}
|
|
104
|
+
/**
 * Resolve the baseline architecture-summary file: an explicit override wins,
 * otherwise the default specs-out/machine location. Returns the first
 * existing candidate or null.
 *
 * @param {{projectRoot: string, override?: string}} params
 * @returns {Promise<string|null>} absolute path of the summary file or null
 */
async function resolveBaselineSummaryPath(params) {
    const defaultPath = path.join(params.projectRoot, "specs-out", "machine", "architecture.summary.json");
    const candidates = params.override ? [params.override, defaultPath] : [defaultPath];
    for (const candidate of candidates) {
        const absolute = path.isAbsolute(candidate)
            ? candidate
            : path.resolve(params.projectRoot, candidate);
        if (await fileExists(absolute)) {
            return absolute;
        }
    }
    return null;
}
|
|
120
|
+
/**
 * Load a baseline summary document. When pointed at the canonical
 * architecture.summary.json, delegate to the shared loader; otherwise parse
 * the file as plain JSON. Unreadable or malformed files yield null.
 *
 * @param {string} filePath path to a summary file
 * @returns {Promise<object|null>} parsed summary or null
 */
async function loadSummaryFromPath(filePath) {
    if (path.basename(filePath) === "architecture.summary.json") {
        // Canonical summary file: use the shared loader on its directory.
        return loadArchitectureSummary(path.dirname(filePath));
    }
    try {
        return JSON.parse(await fs.readFile(filePath, "utf8"));
    }
    catch {
        return null; // unreadable or malformed: treated as "no baseline summary"
    }
}
|
|
133
|
+
/**
 * Load a drift record from a JSON or YAML file. Accepts either a wrapper
 * document with a `drift` section or a bare drift object; in both cases the
 * record must carry a numeric `delta`. Any read/parse failure yields null.
 *
 * @param {string} filePath path to the drift/baseline file
 * @returns {Promise<object|null>} drift record or null
 */
async function loadDriftFromFile(filePath) {
    try {
        const raw = await fs.readFile(filePath, "utf8");
        const ext = path.extname(filePath).toLowerCase();
        const isYaml = ext === ".yaml" || ext === ".yml";
        const parsed = isYaml ? yaml.load(raw) : JSON.parse(raw);
        if (!parsed) {
            return null;
        }
        // Prefer the nested `drift` section when present and valid.
        const nested = parsed["drift"];
        if (nested && typeof nested.delta === "number") {
            return nested;
        }
        return typeof parsed.delta === "number" ? parsed : null;
    }
    catch {
        return null;
    }
}
|
|
156
|
+
/**
 * Evaluate a candidate drift record against the baseline and gate config.
 * Collects rejection reasons (missing baseline, delta regression, edge
 * growth, drift/capacity/growth status, shape change) and decides
 * accept/warn/reject depending on the guard mode.
 *
 * @param {object} params { candidate, baseline, config, baselineSummary, candidateSummary, mode }
 * @returns {object} evaluation result with decision, reasons, and feedback
 */
function evaluateCandidate(params) {
    const { candidate, baseline, config, baselineSummary, candidateSummary, mode } = params;
    const reasons = [];
    if (!baseline) {
        reasons.push("baseline_missing");
    }
    else {
        // NOTE(review): a *lower* candidate delta is flagged as a regression
        // here, so delta appears to be a higher-is-better score — confirm
        // against the drift model before changing this comparison.
        if (candidate.delta < baseline.delta) {
            reasons.push("delta_regressed");
        }
        const previousEdges = baseline.details?.edges ?? 0;
        const currentEdges = candidate.details?.edges ?? 0;
        if (previousEdges > 0) {
            const growthRatio = (currentEdges - previousEdges) / previousEdges;
            const allowedRatio = config.drift?.growth?.maxEdgeGrowthRatio ?? 0;
            // A configured ratio of 0 disables the edge-growth gate.
            if (allowedRatio > 0 && growthRatio > allowedRatio) {
                reasons.push("edge_growth_ratio_exceeded");
            }
        }
    }
    if (candidate.status === "drift") {
        reasons.push("candidate_in_drift");
    }
    if (candidate.capacity.status === "critical") {
        reasons.push("capacity_critical");
    }
    if (candidate.growth.status === "critical") {
        reasons.push("growth_critical");
    }
    // Shape equivalence is only decidable when a baseline summary exists;
    // otherwise it stays undefined.
    let shapeEquivalent;
    if (baselineSummary) {
        shapeEquivalent = baselineSummary.shape_fingerprint === candidateSummary.shape_fingerprint;
        if (!shapeEquivalent) {
            reasons.push("shape_changed");
        }
    }
    const decision = reasons.length === 0
        ? "accept"
        : mode === "hard" ? "reject" : "warn";
    const edgeGrowthRatio = baseline && (baseline.details?.edges ?? 0) > 0
        ? ((candidate.details?.edges ?? 0) - (baseline.details?.edges ?? 0)) /
            (baseline.details?.edges ?? 1)
        : undefined;
    return {
        decision,
        reasons,
        feedback: {
            previous_delta: baseline?.delta,
            candidate_delta: candidate.delta,
            reasons,
            suggestions: buildSuggestions(reasons)
        },
        mode,
        baseline_delta: baseline?.delta,
        candidate_delta: candidate.delta,
        edge_growth_ratio: edgeGrowthRatio,
        shape_equivalent: shapeEquivalent
    };
}
|
|
215
|
+
/**
 * True when `target` exists and is a regular file; false for directories,
 * missing paths, and any stat error.
 *
 * @param {string} target path to check
 * @returns {Promise<boolean>}
 */
async function fileExists(target) {
    try {
        return (await fs.stat(target)).isFile();
    }
    catch {
        return false;
    }
}
|
|
224
|
+
/**
 * Map rejection reasons onto human-readable remediation suggestions,
 * de-duplicated and in first-seen order. Unknown reasons are ignored.
 *
 * @param {string[]} reasons reason codes from evaluateCandidate
 * @returns {string[]} unique suggestion strings
 */
function buildSuggestions(reasons) {
    const adviceByReason = {
        delta_regressed: "Reduce cross-layer coupling and refactor to lower drift.",
        edge_growth_ratio_exceeded: "Limit new dependencies; split the change into smaller steps.",
        candidate_in_drift: "Refactor to remove cycles or reduce coupling before applying.",
        capacity_critical: "Reduce edges in saturated layers or consolidate modules.",
        growth_critical: "Avoid adding new dependencies in this patch.",
        shape_changed: "Preserve the existing architecture shape; avoid new structural coupling.",
        baseline_missing: "Capture a baseline before enforcing drift gates."
    };
    const unique = new Set(reasons.map((reason) => adviceByReason[reason]).filter(Boolean));
    return [...unique];
}
|
|
243
|
+
/**
 * Copy the workspace into a fresh temp directory, honoring the project's
 * ignore rules, and return the temp root. Caller owns cleanup.
 *
 * @param {string} sourceRoot workspace to copy
 * @param {object} config project config (used for ignore rules)
 * @returns {Promise<string>} path of the temporary copy
 */
async function createTempCopy(sourceRoot, config) {
    const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "specguard-sim-"));
    const ignore = createIgnoreMatcher(config, sourceRoot);
    const filter = (src) => filterCopy(src, sourceRoot, ignore);
    await fs.cp(sourceRoot, tempRoot, { recursive: true, filter });
    return tempRoot;
}
|
|
252
|
+
/**
 * fs.cp filter: decide whether to copy `src`. The copy root is always
 * included; directories are checked with the ignore matcher's directory
 * rule, files with the path rule. Entries that cannot be stat'ed
 * (vanished/unreadable) are excluded.
 *
 * @param {string} src absolute path being considered
 * @param {string} baseRoot root of the copy operation
 * @param {object} ignore matcher with isIgnoredDir/isIgnoredPath
 * @returns {boolean} true to copy the entry
 */
function filterCopy(src, baseRoot, ignore) {
    if (src === baseRoot) {
        return true;
    }
    const relative = path.relative(baseRoot, src);
    if (relative === "") {
        return true;
    }
    let stats;
    try {
        stats = fsSync.statSync(src);
    }
    catch {
        return false; // vanished or unreadable: do not copy
    }
    const baseName = path.basename(src);
    return stats.isDirectory()
        ? !ignore.isIgnoredDir(baseName, src)
        : !ignore.isIgnoredPath(relative);
}
|
|
273
|
+
/**
 * Apply a git patch inside the given workspace via `git apply`; throws when
 * git exits non-zero, including git's output in the error message.
 *
 * @param {string} workspaceRoot directory to apply the patch in
 * @param {string} patchPath path to the patch file (resolved to absolute)
 * @throws {Error} when the patch does not apply cleanly
 */
async function applyPatch(workspaceRoot, patchPath) {
    const absolutePatch = path.resolve(patchPath);
    const { code, stdout, stderr } = await runCommand("git", ["apply", "--whitespace=nowarn", absolutePatch], workspaceRoot);
    if (code !== 0) {
        throw new Error(`Failed to apply patch: ${stderr || stdout}`);
    }
}
|
|
280
|
+
/**
 * Spawn a command and collect its exit code and output. Never rejects:
 * spawn failures resolve with code 1 and whatever output was captured.
 *
 * NOTE(review): shell: true means command/args are interpreted by the
 * shell — callers must not pass untrusted input here.
 *
 * @param {string} command executable or shell command name
 * @param {string[]} args arguments
 * @param {string} cwd working directory
 * @returns {Promise<{code: number, stdout: string, stderr: string}>}
 */
function runCommand(command, args, cwd) {
    return new Promise((resolve) => {
        const child = spawn(command, args, { cwd, shell: true });
        let capturedOut = "";
        let capturedErr = "";
        child.stdout.on("data", (chunk) => {
            capturedOut += chunk.toString();
        });
        child.stderr.on("data", (chunk) => {
            capturedErr += chunk.toString();
        });
        child.on("close", (code) => {
            resolve({ code: code ?? 1, stdout: capturedOut, stderr: capturedErr });
        });
        child.on("error", () => {
            resolve({ code: 1, stdout: capturedOut, stderr: capturedErr });
        });
    });
}
|
|
291
|
+
/**
 * Translate a path from under sourceRoot to the corresponding location
 * under targetRoot, preserving the relative portion.
 *
 * @param {string} original path under sourceRoot
 * @param {string} sourceRoot original base directory
 * @param {string} targetRoot new base directory
 * @returns {string} equivalent path under targetRoot
 */
function remapPath(original, sourceRoot, targetRoot) {
    return path.join(targetRoot, path.relative(sourceRoot, original));
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import yaml from "js-yaml";
|
|
4
|
+
import { renderExecutiveSummary } from "../extract/docs.js";
|
|
5
|
+
import { loadArchitectureSummary, loadArchitectureDiff, loadHeatmap } from "../extract/compress.js";
|
|
6
|
+
import { resolveMachineInputDir } from "../output-layout.js";
|
|
7
|
+
/**
 * Generate the executive summary document: load the architecture and UX
 * snapshots plus optional summary/diff/heatmap artifacts from the machine
 * input directory, render markdown, and write it to the requested (or
 * default docs/summary.md) location.
 *
 * @param {object} options
 * @param {string} [options.input]  machine input dir (default "specs-out")
 * @param {string} [options.output] output markdown path override
 */
export async function runSummary(options) {
    const inputDir = await resolveMachineInputDir(options.input || "specs-out");
    // Read both snapshots concurrently, then parse.
    const [architectureRaw, uxRaw] = await Promise.all([
        fs.readFile(path.join(inputDir, "architecture.snapshot.yaml"), "utf8"),
        fs.readFile(path.join(inputDir, "ux.snapshot.yaml"), "utf8")
    ]);
    const architecture = yaml.load(architectureRaw);
    const ux = yaml.load(uxRaw);
    const summary = await loadArchitectureSummary(inputDir);
    const diff = await loadArchitectureDiff(inputDir);
    const heatmap = await loadHeatmap(inputDir);
    const content = renderExecutiveSummary(architecture, ux, { summary, diff, heatmap });
    const outputPath = options.output
        ? path.resolve(options.output)
        : path.join(inputDir, "docs", "summary.md");
    await fs.mkdir(path.dirname(outputPath), { recursive: true });
    await fs.writeFile(outputPath, content);
    console.log(`Wrote ${outputPath}`);
}
|