@toolbaux/guardian 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +366 -0
- package/dist/adapters/csharp-adapter.js +149 -0
- package/dist/adapters/go-adapter.js +96 -0
- package/dist/adapters/index.js +16 -0
- package/dist/adapters/java-adapter.js +122 -0
- package/dist/adapters/python-adapter.js +183 -0
- package/dist/adapters/runner.js +69 -0
- package/dist/adapters/types.js +1 -0
- package/dist/adapters/typescript-adapter.js +179 -0
- package/dist/benchmarking/framework.js +91 -0
- package/dist/cli.js +343 -0
- package/dist/commands/analyze-depth.js +43 -0
- package/dist/commands/api-spec-extractor.js +52 -0
- package/dist/commands/breaking-change-analyzer.js +334 -0
- package/dist/commands/config-compliance.js +219 -0
- package/dist/commands/constraints.js +221 -0
- package/dist/commands/context.js +101 -0
- package/dist/commands/data-flow-tracer.js +291 -0
- package/dist/commands/dependency-impact-analyzer.js +27 -0
- package/dist/commands/diff.js +146 -0
- package/dist/commands/discrepancy.js +71 -0
- package/dist/commands/doc-generate.js +163 -0
- package/dist/commands/doc-html.js +120 -0
- package/dist/commands/drift.js +88 -0
- package/dist/commands/extract.js +16 -0
- package/dist/commands/feature-context.js +116 -0
- package/dist/commands/generate.js +339 -0
- package/dist/commands/guard.js +182 -0
- package/dist/commands/init.js +209 -0
- package/dist/commands/intel.js +20 -0
- package/dist/commands/license-dependency-auditor.js +33 -0
- package/dist/commands/performance-hotspot-profiler.js +42 -0
- package/dist/commands/search.js +314 -0
- package/dist/commands/security-boundary-auditor.js +359 -0
- package/dist/commands/simulate.js +294 -0
- package/dist/commands/summary.js +27 -0
- package/dist/commands/test-coverage-mapper.js +264 -0
- package/dist/commands/verify-drift.js +62 -0
- package/dist/config.js +441 -0
- package/dist/extract/ai-context-hints.js +107 -0
- package/dist/extract/analyzers/backend.js +1704 -0
- package/dist/extract/analyzers/depth.js +264 -0
- package/dist/extract/analyzers/frontend.js +2221 -0
- package/dist/extract/api-usage-tracker.js +19 -0
- package/dist/extract/cache.js +53 -0
- package/dist/extract/codebase-intel.js +190 -0
- package/dist/extract/compress.js +452 -0
- package/dist/extract/context-block.js +356 -0
- package/dist/extract/contracts.js +183 -0
- package/dist/extract/discrepancies.js +233 -0
- package/dist/extract/docs-loader.js +110 -0
- package/dist/extract/docs.js +2379 -0
- package/dist/extract/drift.js +1578 -0
- package/dist/extract/duplicates.js +435 -0
- package/dist/extract/feature-arcs.js +138 -0
- package/dist/extract/graph.js +76 -0
- package/dist/extract/html-doc.js +1409 -0
- package/dist/extract/ignore.js +45 -0
- package/dist/extract/index.js +455 -0
- package/dist/extract/llm-client.js +159 -0
- package/dist/extract/pattern-registry.js +141 -0
- package/dist/extract/product-doc.js +497 -0
- package/dist/extract/python.js +1202 -0
- package/dist/extract/runtime.js +193 -0
- package/dist/extract/schema-evolution-validator.js +35 -0
- package/dist/extract/test-gap-analyzer.js +20 -0
- package/dist/extract/tests.js +74 -0
- package/dist/extract/types.js +1 -0
- package/dist/extract/validate-backend.js +30 -0
- package/dist/extract/writer.js +11 -0
- package/dist/output-layout.js +37 -0
- package/dist/project-discovery.js +309 -0
- package/dist/schema/architecture.js +350 -0
- package/dist/schema/feature-spec.js +89 -0
- package/dist/schema/index.js +8 -0
- package/dist/schema/ux.js +46 -0
- package/package.json +75 -0
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* `specguard feature-context` — generate a filtered, self-contained context packet
|
|
3
|
+
* for implementing a single feature.
|
|
4
|
+
*
|
|
5
|
+
* Analogous to `chapter-context` in the book workflow: given a feature spec YAML,
|
|
6
|
+
* outputs only the endpoints, models, enums, and patterns relevant to that feature —
|
|
7
|
+
* plus one-hop neighbours (endpoints that share a model with the declared ones).
|
|
8
|
+
*
|
|
9
|
+
* Reads: feature spec YAML + codebase-intelligence.json
|
|
10
|
+
* Writes: specs-out/machine/feature-context/<spec-name>.json (or --output)
|
|
11
|
+
*/
|
|
12
|
+
import fs from "node:fs/promises";
|
|
13
|
+
import path from "node:path";
|
|
14
|
+
import yaml from "js-yaml";
|
|
15
|
+
import { parseFeatureSpec } from "../schema/feature-spec.js";
|
|
16
|
+
import { loadCodebaseIntelligence } from "../extract/codebase-intel.js";
|
|
17
|
+
import { getOutputLayout } from "../output-layout.js";
|
|
18
|
+
/**
 * Entry point for `specguard feature-context`.
 *
 * Loads and validates the feature spec YAML, loads the previously generated
 * codebase-intelligence.json, builds the filtered feature context, and writes
 * it as pretty-printed JSON (to --output or the default machine directory).
 */
export async function runFeatureContext(options) {
    const specPath = path.resolve(options.spec);
    const layout = getOutputLayout(path.resolve(options.specs));
    // Parse and validate the feature spec YAML.
    const spec = parseFeatureSpec(yaml.load(await fs.readFile(specPath, "utf8")));
    // Codebase intelligence must already exist (produced by `specguard intel`).
    const intelPath = path.join(layout.machineDir, "codebase-intelligence.json");
    let intel;
    try {
        intel = await loadCodebaseIntelligence(intelPath);
    }
    catch {
        throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`specguard intel --specs ${options.specs}\` first.`);
    }
    const context = buildFeatureContext(spec, intel);
    // Output path: explicit --output wins; otherwise derive from the spec name.
    const specName = path.basename(specPath).replace(/\.ya?ml$/, "");
    const outputPath = options.output
        ? path.resolve(options.output)
        : path.join(layout.machineDir, "feature-context", `${specName}.json`);
    await fs.mkdir(path.dirname(outputPath), { recursive: true });
    await fs.writeFile(outputPath, JSON.stringify(context, null, 2), "utf8");
    console.log(`Wrote ${outputPath}`);
    const count = (record) => Object.keys(record).length;
    console.log(` ${count(context.declared_endpoints)} declared endpoints, ` +
        `${count(context.neighbour_endpoints)} neighbours, ` +
        `${count(context.affected_models)} models, ` +
        `${count(context.affected_enums)} enums`);
}
|
|
46
|
+
/**
 * Filter the full codebase intelligence down to what one feature needs:
 * declared endpoints, the models they touch, one-hop neighbour endpoints,
 * enums referenced by affected model fields, and the declared patterns.
 */
function buildFeatureContext(spec, intel) {
    // Endpoints the spec explicitly declares (unknown keys are skipped).
    const declaredEndpoints = {};
    for (const key of spec.affected_endpoints) {
        const entry = intel.api_registry[key];
        if (entry) {
            declaredEndpoints[key] = entry;
        }
    }
    // Affected model names: declared ones plus the request/response schema
    // names of the declared endpoints (heuristic — intel carries no full
    // endpoint→model map).
    const modelNames = new Set(spec.affected_models);
    for (const endpoint of Object.values(declaredEndpoints)) {
        for (const schema of [endpoint.request_schema, endpoint.response_schema]) {
            if (schema) {
                modelNames.add(schema);
            }
        }
    }
    const affectedModels = {};
    for (const name of modelNames) {
        const model = intel.model_registry[name];
        if (model) {
            affectedModels[name] = model;
        }
    }
    // One-hop neighbours: undeclared endpoints whose request or response
    // schema overlaps the affected model set.
    const neighbourEndpoints = {};
    for (const [key, endpoint] of Object.entries(intel.api_registry)) {
        if (declaredEndpoints[key]) {
            continue;
        }
        const sharesModel = (endpoint.request_schema && modelNames.has(endpoint.request_schema)) ||
            (endpoint.response_schema && modelNames.has(endpoint.response_schema));
        if (sharesModel) {
            neighbourEndpoints[key] = endpoint;
        }
    }
    // Enums whose name appears (case-insensitively) inside any affected
    // model field string.
    const affectedEnums = {};
    const enumEntries = Object.entries(intel.enum_registry);
    for (const model of Object.values(affectedModels)) {
        for (const field of model.fields) {
            const haystack = field.toLowerCase();
            for (const [enumName, enumEntry] of enumEntries) {
                if (haystack.includes(enumName.toLowerCase())) {
                    affectedEnums[enumName] = enumEntry;
                }
            }
        }
    }
    // Definitions for the spec's declared pattern ids.
    const patternDefs = intel.pattern_registry.patterns.filter((pattern) => spec.pattern.includes(pattern.id));
    return {
        meta: {
            feature: spec.feature,
            description: spec.description,
            patterns: spec.pattern,
            tradeoff: spec.tradeoff,
            failure_risk: spec.failure_risk,
            maps_to: spec.maps_to,
            generated_at: new Date().toISOString(),
        },
        declared_endpoints: declaredEndpoints,
        neighbour_endpoints: neighbourEndpoints,
        affected_models: affectedModels,
        affected_enums: affectedEnums,
        pattern_definitions: patternDefs,
        write_guide: {
            rule: "Only use endpoints, models, and patterns listed in this file. Do not invent endpoints or models not listed here.",
            endpoint_lookup: "declared_endpoints contains the exact endpoints this feature adds or modifies.",
            model_lookup: "affected_models contains all ORM models this feature reads or writes.",
            pattern_lookup: "pattern_definitions describes the implementation pattern(s) to follow.",
        },
    };
}
|
|
@@ -0,0 +1,339 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import { buildSnapshots } from "../extract/index.js";
|
|
4
|
+
import { renderContextBlock } from "../extract/context-block.js";
|
|
5
|
+
import { getOutputLayout } from "../output-layout.js";
|
|
6
|
+
import { analyzeDepth } from "../extract/analyzers/depth.js";
|
|
7
|
+
/**
 * `specguard generate --ai-context` — emit `architecture-context.md`, a
 * compact machine-readable summary of the codebase for AI coding tools.
 *
 * Builds architecture/UX snapshots, enriches them with persisted Structural
 * Intelligence reports (plus a real-time report for `--focus`), and renders
 * the markdown into the machine output directory.
 *
 * Refactor: the inline README-parsing loop is extracted into
 * `loadProductDescription` so the entry point is pure orchestration.
 */
export async function runGenerate(options) {
    if (!options.aiContext) {
        throw new Error("`specguard generate` currently supports `--ai-context` only.");
    }
    const outputRoot = path.resolve(options.output ?? "specs-out");
    const layout = getOutputLayout(outputRoot);
    const { architecture, ux } = await buildSnapshots({
        projectRoot: options.projectRoot,
        backendRoot: options.backendRoot,
        frontendRoot: options.frontendRoot,
        output: outputRoot,
        includeFileGraph: true,
        configPath: options.configPath
    });
    // Load persisted Structural Intelligence reports emitted by `specguard extract`
    const siReports = await loadStructuralIntelligenceReports(layout.machineDir);
    // If a --focus query is provided, prepend a real-time SI report for that query
    if (options.focus) {
        try {
            const focusReport = analyzeDepth({
                query: options.focus,
                modules: architecture.modules,
                moduleGraph: architecture.dependencies.module_graph,
                fileGraph: architecture.dependencies.file_graph,
                circularDependencies: architecture.analysis.circular_dependencies
            });
            const alreadyPresent = siReports.some((r) => r.feature === focusReport.feature);
            if (!alreadyPresent) {
                siReports.unshift(focusReport);
            }
        }
        catch {
            // Non-fatal — just skip injection for this query
        }
    }
    // Inject into the architecture object so renderContextBlock can consume it
    architecture.structural_intelligence = siReports;
    const pythonImportReferences = await pickPythonImportReferences(architecture);
    const productDescription = await loadProductDescription(path.resolve(architecture.project.workspace_root));
    const content = renderAiContextMarkdown(architecture, ux, {
        focusQuery: options.focus,
        maxLines: normalizeMaxLines(options.maxLines),
        pythonImportReferences,
        structuralIntelligence: siReports,
        productDescription
    });
    const outputPath = path.join(layout.machineDir, "architecture-context.md");
    await fs.mkdir(path.dirname(outputPath), { recursive: true });
    await fs.writeFile(outputPath, content, "utf8");
    console.log(`Wrote ${outputPath}`);
}
/**
 * Best-effort product description from the workspace README: the H1 title
 * plus following non-empty lines (at most four lines total, stopping at the
 * first H2), joined and truncated to 300 characters. Returns "" when no
 * README candidate is readable.
 */
async function loadProductDescription(workspaceRoot) {
    const readmeCandidates = [
        path.join(workspaceRoot, "README.md"),
        path.join(workspaceRoot, "readme.md"),
    ];
    for (const candidate of readmeCandidates) {
        let raw;
        try {
            raw = await fs.readFile(candidate, "utf8");
        }
        catch {
            continue; // Not found — try the next candidate
        }
        const descLines = [];
        let pastH1 = false;
        for (const line of raw.split("\n")) {
            if (line.startsWith("# ")) {
                pastH1 = true;
                descLines.push(line.replace(/^# /, "").trim());
                continue;
            }
            if (!pastH1) {
                continue; // ignore preamble before the first H1
            }
            if (line.startsWith("## ")) {
                break; // stop at first H2
            }
            if (line.trim()) {
                descLines.push(line.trim());
            }
            if (descLines.length >= 4) {
                break; // max 4 lines
            }
        }
        // First readable candidate wins, even if it yielded no description.
        return descLines.join(" ").slice(0, 300);
    }
    return "";
}
|
|
91
|
+
/**
 * Read the Structural Intelligence reports persisted by `specguard extract`.
 * Absence or malformed content is not an error — an empty list is returned.
 */
async function loadStructuralIntelligenceReports(machineDir) {
    const siPath = path.join(machineDir, "structural-intelligence.json");
    let parsed;
    try {
        parsed = JSON.parse(await fs.readFile(siPath, "utf8"));
    }
    catch {
        // File doesn't exist yet — run extract first
        return [];
    }
    return Array.isArray(parsed) ? parsed : [];
}
|
|
105
|
+
/**
 * Render the AI-context markdown document: project header, key modules and
 * models, optional Python import references, the shared context block, and a
 * short usage footer — all wrapped in guardian marker comments.
 */
function renderAiContextMarkdown(architecture, ux, options) {
    const out = [];
    const emit = (...entries) => out.push(...entries);
    emit("<!-- guardian:ai-context -->", "# Architecture Context", "");
    emit("Use this file as compact architectural memory for AI coding tools. It is optimized for machine consumption and omits full docs, charts, and raw snapshots.", "");
    emit(`Project: **${architecture.project.name}**`);
    if (options?.productDescription) {
        emit(`Description: ${options.productDescription}`);
    }
    emit(`Workspace: \`${architecture.project.workspace_root}\``);
    emit(`Backend: \`${architecture.project.backend_root}\``);
    emit(`Frontend: \`${architecture.project.frontend_root}\``);
    emit("");
    const keyModules = pickKeyBackendModules(architecture);
    if (keyModules.length > 0) {
        emit(`Key backend modules: ${keyModules.join(", ")}`);
    }
    // Prefer ORM models; fall back to top Pydantic schemas when none exist.
    const coreOrmModels = pickCoreOrmModels(architecture);
    const fallbackSchemas = pickTopSchemas(architecture);
    if (coreOrmModels.length > 0) {
        emit(`Core data models: ${coreOrmModels.join(", ")}`);
    }
    else if (fallbackSchemas.length > 0) {
        emit(`Core data models: no ORM models detected; top schemas: ${fallbackSchemas.join(", ")}`);
    }
    else {
        emit("Core data models: none detected");
    }
    emit("");
    const importRefs = options?.pythonImportReferences ?? [];
    if (importRefs.length > 0) {
        emit("## Backend Import Reference", "");
        for (const reference of importRefs) {
            emit(`- ${reference.name} -> \`${reference.statement}\` (${reference.kind}, ${reference.file})`);
        }
        emit("");
    }
    emit(renderContextBlock(architecture, ux, {
        focusQuery: options?.focusQuery,
        maxLines: options?.maxLines ?? 80,
        structuralIntelligence: options?.structuralIntelligence
    }));
    emit("", "## Usage", "");
    emit("- Paste into `CLAUDE.md`, `.cursorrules`, or a coding prompt.");
    emit("- Prefer this file over the human-readable docs when minimizing AI context size.");
    emit("- For deeper lookup, run `guardian search --query \"<feature>\"`.");
    emit("- Regenerate after major structural changes.");
    emit("", "<!-- /guardian:ai-context -->");
    return out.join("\n");
}
|
|
160
|
+
/**
 * Rank backend modules by a simple weight (endpoints count triple; files and
 * imports count once) and return the top four ids, ties broken alphabetically.
 */
function pickKeyBackendModules(architecture) {
    const scored = [];
    for (const module of architecture.modules) {
        if (module.type !== "backend") {
            continue;
        }
        scored.push({
            id: module.id,
            score: module.endpoints.length * 3 + module.files.length + module.imports.length
        });
    }
    scored.sort((a, b) => b.score - a.score || a.id.localeCompare(b.id));
    return scored.slice(0, 4).map((entry) => entry.id);
}
|
|
171
|
+
/**
 * Return up to five non-Pydantic model names ranked by endpoint usage
 * (weight 4), relationship count (weight 2), and capped field count.
 */
function pickCoreOrmModels(architecture) {
    // Count how many endpoint-usage entries mention each model name.
    const usageCounts = new Map();
    for (const usage of architecture.endpoint_model_usage) {
        for (const { name } of usage.models) {
            usageCounts.set(name, (usageCounts.get(name) ?? 0) + 1);
        }
    }
    const scoreOf = (model) => (usageCounts.get(model.name) ?? 0) * 4 +
        model.relationships.length * 2 +
        Math.min(model.fields.length, 10);
    const ranked = architecture.data_models
        .filter((model) => model.framework !== "pydantic")
        .map((model) => ({ name: model.name, score: scoreOf(model) }));
    ranked.sort((a, b) => b.score - a.score || a.name.localeCompare(b.name));
    return ranked.slice(0, 5).map((entry) => entry.name);
}
|
|
190
|
+
/**
 * Return up to five Pydantic schema names ranked by how often endpoints
 * mention them as request/response schemas (weight 3) plus capped field count.
 */
function pickTopSchemas(architecture) {
    const mentions = new Map();
    const bump = (name) => {
        if (name) {
            mentions.set(name, (mentions.get(name) ?? 0) + 1);
        }
    };
    for (const endpoint of architecture.endpoints) {
        bump(endpoint.request_schema);
        bump(endpoint.response_schema);
    }
    const ranked = architecture.data_models
        .filter((model) => model.framework === "pydantic")
        .map((model) => ({
            name: model.name,
            score: (mentions.get(model.name) ?? 0) * 3 + Math.min(model.fields.length, 8)
        }));
    ranked.sort((a, b) => b.score - a.score || a.name.localeCompare(b.name));
    return ranked.slice(0, 5).map((entry) => entry.name);
}
|
|
210
|
+
/**
 * Normalize a `--max-lines` style option into a positive finite number.
 *
 * Accepts either a number or a numeric string; anything else — including
 * zero and negatives — yields `undefined` so callers can apply their own
 * default. Fix: the numeric path previously accepted 0 and negative values
 * while the string path required a value > 0; both now require > 0.
 */
function normalizeMaxLines(value) {
    if (typeof value === "number") {
        return Number.isFinite(value) && value > 0 ? value : undefined;
    }
    if (typeof value === "string" && value.trim().length > 0) {
        const parsed = Number.parseInt(value, 10);
        if (Number.isFinite(parsed) && parsed > 0) {
            return parsed;
        }
    }
    return undefined;
}
|
|
222
|
+
/**
 * Build `from X import Name` reference lines for a handful of high-signal
 * Python names (top schemas and ORM models). The import path actually used
 * in the backend wins; otherwise a module path is derived from the file
 * location. Names with no resolvable import path are dropped.
 */
async function pickPythonImportReferences(architecture) {
    const backendRoot = path.resolve(architecture.project.backend_root);
    const workspaceRoot = path.resolve(architecture.project.workspace_root);
    const candidateNames = Array.from(new Set([
        ...pickTopSchemas(architecture).slice(0, 3),
        ...pickCoreOrmModels(architecture).slice(0, 2)
    ]));
    if (candidateNames.length === 0) {
        return [];
    }
    const modelByName = new Map(architecture.data_models.map((model) => [model.name, model]));
    const importSpecifiers = await collectPythonImportSpecifiers(backendRoot, new Set(candidateNames));
    const references = [];
    for (const name of candidateNames) {
        const model = modelByName.get(name);
        if (!model) {
            continue;
        }
        // Prefer the specifier the codebase already imports this name from.
        const specifier = importSpecifiers.get(name) ?? derivePythonModulePath(model.file, workspaceRoot, backendRoot);
        if (!specifier) {
            continue;
        }
        references.push({
            name,
            statement: `from ${specifier} import ${name}`,
            file: model.file,
            kind: model.framework === "pydantic" ? "schema" : "model"
        });
    }
    return references;
}
|
|
258
|
+
/**
 * Scan every Python file under `backendRoot` and, for each target symbol,
 * return the module specifier it is most frequently imported from
 * (ties broken lexicographically).
 */
async function collectPythonImportSpecifiers(backendRoot, targetNames) {
    // symbol -> (specifier -> occurrence count)
    const counts = new Map();
    for (const file of await listPythonFiles(backendRoot)) {
        let content;
        try {
            content = await fs.readFile(file, "utf8");
        }
        catch {
            continue;
        }
        for (const { specifier, symbols } of extractPythonFromImports(content)) {
            for (const symbol of symbols) {
                if (!targetNames.has(symbol)) {
                    continue;
                }
                const perSpecifier = counts.get(symbol) ?? new Map();
                perSpecifier.set(specifier, (perSpecifier.get(specifier) ?? 0) + 1);
                counts.set(symbol, perSpecifier);
            }
        }
    }
    const winners = new Map();
    for (const [symbol, perSpecifier] of counts.entries()) {
        const ranked = [...perSpecifier.entries()]
            .sort((a, b) => b[1] - a[1] || a[0].localeCompare(b[0]));
        if (ranked.length > 0) {
            winners.set(symbol, ranked[0][0]);
        }
    }
    return winners;
}
|
|
289
|
+
/**
 * Recursively collect every `.py` file beneath `root` (depth-first,
 * directory-entry order).
 */
async function listPythonFiles(root) {
    const files = [];
    for (const entry of await fs.readdir(root, { withFileTypes: true })) {
        const fullPath = path.join(root, entry.name);
        if (entry.isDirectory()) {
            const nested = await listPythonFiles(fullPath);
            files.push(...nested);
        }
        else if (entry.isFile() && fullPath.endsWith(".py")) {
            files.push(fullPath);
        }
    }
    return files;
}
|
|
304
|
+
/**
 * Parse Python `from <module> import <names>` statements out of a source
 * string. Returns `[{ specifier, symbols }]` with `as` aliases resolved to
 * the original name, per-line `#` comments stripped, and `*` imports dropped.
 *
 * Fix: the previous single-line regex (`import (.+)$`) silently missed the
 * common multi-line parenthesized form:
 *     from app.models import (
 *         User,
 *         Item,
 *     )
 * The parenthesized alternative now spans newlines.
 */
function extractPythonFromImports(content) {
    const imports = [];
    // Branch 1 captures (possibly multi-line) parenthesized name lists into
    // match[2]; branch 2 captures a plain single-line list into match[3].
    const pattern = /^[ \t]*from[ \t]+([.\w]+)[ \t]+import[ \t]+(?:\(([^)]*)\)|(.+)$)/gm;
    for (const match of content.matchAll(pattern)) {
        const rawNames = match[2] ?? match[3];
        const symbols = rawNames
            .split("\n")
            .map((line) => line.split("#")[0] ?? "") // strip per-line comments
            .join(",")
            .replace(/[()]/g, "")
            .split(",")
            .map((name) => name.trim())
            .filter(Boolean)
            .map((name) => name.split(/\s+as\s+/i)[0]?.trim() ?? "")
            .filter((name) => name.length > 0 && name !== "*");
        if (symbols.length > 0) {
            imports.push({ specifier: match[1], symbols });
        }
    }
    return imports;
}
|
|
321
|
+
/**
 * Convert a workspace-relative file path into a dotted Python module path
 * relative to the backend root, e.g. "backend/app/models/user.py" ->
 * "app.models.user". Returns null for files outside the backend tree,
 * non-.py files, or paths with segments that are not valid identifiers.
 */
function derivePythonModulePath(relativeFile, workspaceRoot, backendRoot) {
    const absoluteFile = path.resolve(workspaceRoot, relativeFile);
    const relativeToBackend = path.relative(backendRoot, absoluteFile).replace(/\\/g, "/");
    if (!relativeToBackend || relativeToBackend.startsWith("../") || !relativeToBackend.endsWith(".py")) {
        return null;
    }
    let modulePath = relativeToBackend.slice(0, -".py".length);
    // A package's __init__.py maps to the package itself.
    if (modulePath.endsWith("/__init__")) {
        modulePath = modulePath.slice(0, -"/__init__".length);
    }
    if (!modulePath) {
        return null;
    }
    const segments = modulePath.split("/").filter(Boolean);
    const allValid = segments.every((segment) => /^[A-Za-z_][A-Za-z0-9_]*$/.test(segment));
    return allValid ? segments.join(".") : null;
}
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import { spawn } from "node:child_process";
|
|
4
|
+
import { runConstraints } from "./constraints.js";
|
|
5
|
+
import { runSimulate } from "./simulate.js";
|
|
6
|
+
import { buildSnapshots } from "../extract/index.js";
|
|
7
|
+
import { renderContextBlock } from "../extract/context-block.js";
|
|
8
|
+
import { logResolvedProjectPaths, resolveProjectPaths } from "../project-discovery.js";
|
|
9
|
+
/**
 * `specguard guard` — end-to-end guarded code-generation pipeline:
 *   1. Resolve project paths/config and regenerate constraints.
 *   2. Build an LLM prompt from the constraints + user task; write it to disk.
 *   3. If an LLM command is configured, pipe the prompt to it and save the
 *      returned output as a patch.
 *   4. Simulate the patch for drift via `runSimulate`.
 * With `options.printContext`, prints the guard context to stdout and returns
 * without invoking the LLM.
 */
export async function runGuard(options) {
    // Resolve workspace/backend/frontend roots and load config once up front.
    const resolved = await resolveProjectPaths({
        projectRoot: options.projectRoot,
        backendRoot: options.backendRoot,
        frontendRoot: options.frontendRoot,
        configPath: options.configPath
    });
    const config = resolved.config;
    logResolvedProjectPaths(resolved);
    // Regenerate constraints so the prompt reflects the current codebase.
    // NOTE(review): this path is fixed relative to the CWD, not configurable.
    const constraintsPath = path.resolve("specs-out/machine/constraints.json");
    await runConstraints({
        projectRoot: resolved.workspaceRoot,
        backendRoot: resolved.backendRoot,
        frontendRoot: resolved.frontendRoot,
        output: constraintsPath,
        configPath: options.configPath
    });
    const constraints = await loadConstraints(constraintsPath);
    // The constraints file is expected to carry a ready-made prompt string;
    // fall back to "" when missing or malformed.
    const basePrompt = constraints && typeof constraints["prompt"] === "string" ? constraints["prompt"] : "";
    if (options.printContext) {
        // --print-context: emit the combined task/constraint/architecture
        // context to stdout and stop (no LLM invocation, no patch).
        const { architecture, ux } = await buildSnapshots({
            projectRoot: resolved.workspaceRoot,
            backendRoot: resolved.backendRoot,
            frontendRoot: resolved.frontendRoot,
            output: "specs-out",
            includeFileGraph: true,
            configPath: options.configPath
        });
        const context = renderGuardContext({
            task: options.task,
            constraintPrompt: basePrompt,
            contextBlock: renderContextBlock(architecture, ux, {
                focusQuery: options.task,
                maxLines: 140
            })
        });
        console.log(context);
        return;
    }
    // Persist the full prompt so users can inspect or reuse it.
    const prompt = buildGuardPrompt(basePrompt, options.task, config.llm?.promptTemplate);
    const promptPath = path.resolve(options.promptOutput ?? "specs-out/machine/guard.prompt.txt");
    await fs.mkdir(path.dirname(promptPath), { recursive: true });
    await fs.writeFile(promptPath, prompt);
    console.log(`Wrote ${promptPath}`);
    // CLI flag wins over config; without any command this is effectively a
    // prompt-only dry run.
    const llmCommand = options.llmCommand || config.llm?.command;
    if (!llmCommand) {
        console.log("No LLM command configured. Provide llm.command in config or --llm-command.");
        return;
    }
    // Pipe the prompt to the LLM command; its stdout is treated as a patch.
    const { command, args } = resolveCommand(llmCommand, config.llm?.args ?? []);
    const patch = await runLlmCommand(command, args, prompt, config.llm?.timeoutMs ?? 120000);
    if (!patch.trim()) {
        throw new Error("LLM command returned empty output.");
    }
    const patchPath = path.resolve(options.patchOutput ?? "specs-out/machine/guard.patch");
    await fs.mkdir(path.dirname(patchPath), { recursive: true });
    await fs.writeFile(patchPath, patch);
    console.log(`Wrote ${patchPath}`);
    // Finally, simulate the saved patch against drift rules; mode precedence:
    // CLI option, then config.guard.mode, then "soft".
    const simulationPath = path.resolve(options.simulationOutput ?? "specs-out/machine/drift.simulation.json");
    await runSimulate({
        projectRoot: resolved.workspaceRoot,
        backendRoot: resolved.backendRoot,
        frontendRoot: resolved.frontendRoot,
        output: simulationPath,
        configPath: options.configPath,
        patch: patchPath,
        mode: options.mode ?? config.guard?.mode ?? "soft"
    });
}
|
|
78
|
+
/**
 * Assemble the printable guard context: the requested task, a bulleted
 * constraint summary, then the rendered architecture context block — all
 * wrapped in guardian marker comments.
 *
 * Fix: guard against a missing task. `runGuard --print-context` passes
 * `options.task` through unchecked, and the previous `params.task.trim()`
 * threw a TypeError when it was undefined (the sibling `buildGuardPrompt`
 * already guards with `task?.trim()`).
 */
function renderGuardContext(params) {
    const lines = [];
    lines.push("<!-- guardian:guard-context -->");
    lines.push("## Requested Task");
    lines.push((params.task ?? "").trim() || "(not provided)");
    lines.push("");
    lines.push("## Constraint Summary");
    const constraints = extractConstraintLines(params.constraintPrompt);
    if (constraints.length > 0) {
        lines.push(...constraints);
    }
    else {
        lines.push("- No explicit constraint summary available.");
    }
    lines.push("");
    lines.push(params.contextBlock.trim());
    lines.push("<!-- /guardian:guard-context -->");
    return lines.join("\n");
}
|
|
97
|
+
/**
 * Normalize a constraint prompt into bullet lines. If the prompt already
 * contains "- " bullets, exactly those are returned; otherwise the first six
 * non-empty lines are turned into bullets.
 */
function extractConstraintLines(prompt) {
    const nonEmpty = [];
    for (const rawLine of prompt.split(/\r?\n/)) {
        const trimmed = rawLine.trim();
        if (trimmed) {
            nonEmpty.push(trimmed);
        }
    }
    const bullets = nonEmpty.filter((line) => line.startsWith("- "));
    return bullets.length > 0
        ? bullets
        : nonEmpty.slice(0, 6).map((line) => `- ${line}`);
}
|
|
108
|
+
/**
 * Best-effort read of the constraints JSON file. A missing or malformed
 * file simply yields null; callers treat that as "no constraints".
 */
async function loadConstraints(constraintsPath) {
    try {
        return JSON.parse(await fs.readFile(constraintsPath, "utf8"));
    }
    catch {
        return null;
    }
}
|
|
117
|
+
/**
 * Combine the constraint prompt, the user task, and an optional template
 * into the final LLM prompt.
 *
 * Without a template: constraints, then the task, then the output-format
 * instruction (empty segments are dropped by filter(Boolean)). With a
 * template: `{{constraints}}` / `{{task}}` placeholders are substituted when
 * present; otherwise the corresponding section is appended.
 */
function buildGuardPrompt(basePrompt, task, template) {
    const trimmedTask = task?.trim() ?? "";
    const taskText = trimmedTask || "(not provided)";
    if (!template || template.trim().length === 0) {
        return [
            basePrompt.trim(),
            "",
            "User Task:",
            taskText,
            "",
            "Return a unified diff patch only."
        ]
            .filter(Boolean)
            .join("\n");
    }
    const withConstraints = template.includes("{{constraints}}")
        ? template.replace(/{{constraints}}/g, basePrompt.trim())
        : `${template.trim()}\n\n${basePrompt.trim()}`;
    if (withConstraints.includes("{{task}}")) {
        return withConstraints.replace(/{{task}}/g, trimmedTask);
    }
    return `${withConstraints.trim()}\n\nUser Task:\n${taskText}\n`;
}
|
|
139
|
+
/**
 * Split a configured LLM command into executable + arguments. Explicit args
 * win (the command string is taken literally); otherwise the command string
 * is split on whitespace. (Note: shell quoting is not honored.)
 */
function resolveCommand(command, args) {
    if (args.length > 0) {
        return { command, args };
    }
    const parts = command.trim().split(/\s+/).filter(Boolean);
    return parts.length <= 1
        ? { command, args }
        : { command: parts[0], args: parts.slice(1) };
}
|
|
149
|
+
/**
 * Spawn the configured LLM command, stream `prompt` to its stdin, and
 * resolve with its stdout. Rejects on spawn failure, non-zero exit, or after
 * `timeoutMs` milliseconds.
 *
 * Fixes: (1) a `settled` guard ensures the promise settles exactly once —
 * previously a timeout rejection could be followed by a second resolve/reject
 * from the 'close' handler; (2) `child.stdin` now has an error handler so an
 * EPIPE (child exiting before reading stdin) cannot surface as an unhandled
 * stream error and crash the process.
 */
function runLlmCommand(command, args, prompt, timeoutMs) {
    return new Promise((resolve, reject) => {
        const child = spawn(command, args, {
            stdio: ["pipe", "pipe", "pipe"],
            // Windows needs a shell to resolve .cmd/.bat shims.
            shell: process.platform === "win32"
        });
        let stdout = "";
        let stderr = "";
        let settled = false;
        // Settle the promise at most once and always clear the timeout.
        const finish = (settle, value) => {
            if (settled) {
                return;
            }
            settled = true;
            clearTimeout(timer);
            settle(value);
        };
        const timer = setTimeout(() => {
            child.kill("SIGTERM");
            finish(reject, new Error("LLM command timed out."));
        }, timeoutMs);
        child.stdout.on("data", (data) => {
            stdout += data.toString();
        });
        child.stderr.on("data", (data) => {
            stderr += data.toString();
        });
        child.on("error", (error) => {
            finish(reject, error);
        });
        child.on("close", (code) => {
            if (code !== 0) {
                finish(reject, new Error(`LLM command failed (${code}): ${stderr.trim()}`));
                return;
            }
            finish(resolve, stdout);
        });
        // Swallow stdin stream errors (e.g. EPIPE when the child exits early);
        // the 'close'/'error' handlers above report the real outcome.
        child.stdin.on("error", () => { });
        child.stdin.write(prompt);
        child.stdin.end();
    });
}
|