@toolbaux/guardian 0.1.3 → 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +33 -0
- package/dist/cli.js +9 -9
- package/dist/commands/analyze-depth.js +1 -1
- package/dist/commands/context.js +1 -1
- package/dist/commands/discrepancy.js +2 -2
- package/dist/commands/doc-generate.js +6 -6
- package/dist/commands/doc-html.js +2 -2
- package/dist/commands/drift.js +1 -1
- package/dist/commands/feature-context.js +2 -2
- package/dist/commands/generate.js +2 -2
- package/dist/commands/init.js +2 -2
- package/dist/commands/intel.js +2 -2
- package/dist/commands/search.js +1 -1
- package/dist/commands/simulate.js +1 -1
- package/dist/commands/verify-drift.js +1 -1
- package/dist/config.js +7 -0
- package/dist/extract/analyzers/backend.js +3 -2
- package/dist/extract/cache.js +1 -1
- package/dist/extract/docs-loader.js +1 -1
- package/dist/extract/docs.js +9 -25
- package/dist/extract/html-doc.js +1 -1
- package/dist/extract/index.js +106 -2
- package/dist/extract/llm-client.js +17 -17
- package/dist/project-discovery.js +7 -1
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -105,6 +105,39 @@ guardian doc-html
|
|
|
105
105
|
|
|
106
106
|
All extraction uses Tree-Sitter AST parsing — deterministic, no LLM involved.
|
|
107
107
|
|
|
108
|
+
## LLM Usage — Opt-In Only
|
|
109
|
+
|
|
110
|
+
> **Important:** Guardian's core commands (`extract`, `generate`, `context`, `drift`, `search`, `init`) **never call an LLM**. All extraction is deterministic AST parsing — no API keys needed, no background AI calls, no cost.
|
|
111
|
+
|
|
112
|
+
Two commands **optionally** use an LLM, and **only when you explicitly configure it**:
|
|
113
|
+
|
|
114
|
+
| Command | What the LLM does | Runs automatically? |
|
|
115
|
+
|---------|-------------------|-------------------|
|
|
116
|
+
| `guardian doc-generate` | Adds narrative summaries to product docs | **No** — manual command only |
|
|
117
|
+
| `guardian guard --task "..."` | Generates a code patch from a task description | **No** — manual command only |
|
|
118
|
+
|
|
119
|
+
**If you never set API keys, no LLM is ever called.** These commands degrade gracefully — `doc-generate` produces docs without narrative sections, and `guard` prints context instead of generating patches.
|
|
120
|
+
|
|
121
|
+
### Configuring LLM (optional)
|
|
122
|
+
|
|
123
|
+
```bash
|
|
124
|
+
# Option 1: Cloud LLM (OpenAI-compatible endpoint)
|
|
125
|
+
export GUARDIAN_LLM_ENDPOINT="https://api.openai.com/v1"
|
|
126
|
+
export GUARDIAN_LLM_API_KEY="sk-..."
|
|
127
|
+
export GUARDIAN_LLM_MODEL="gpt-4o" # optional, defaults to gpt-4o
|
|
128
|
+
|
|
129
|
+
# Option 2: Local Ollama (no API key needed, auto-detected)
|
|
130
|
+
# Just have Ollama running on localhost:11434
|
|
131
|
+
export GUARDIAN_OLLAMA_HOST="http://localhost:11434" # optional, this is the default
|
|
132
|
+
export GUARDIAN_OLLAMA_MODEL="llama3.2" # optional, this is the default
|
|
133
|
+
|
|
134
|
+
# Option 3: Shell command (for guardian guard)
|
|
135
|
+
# Set in guardian.config.json:
|
|
136
|
+
# { "llm": { "command": "ollama", "args": ["run", "llama3"] } }
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
**No pre-commit hook, VSCode extension, or automated workflow ever triggers LLM calls.** The hook only runs `extract` + `context` (pure AST).
|
|
140
|
+
|
|
108
141
|
## What Guardian Generates
|
|
109
142
|
|
|
110
143
|
**Workflow sequence diagrams** — Mermaid diagrams for your most complex endpoints, showing the full call chain from client through handler to services and data stores.
|
package/dist/cli.js
CHANGED
|
@@ -30,7 +30,7 @@ program
|
|
|
30
30
|
.argument("[projectRoot]", "Repo or project root", process.cwd())
|
|
31
31
|
.option("--backend-root <path>", "Path to backend root")
|
|
32
32
|
.option("--frontend-root <path>", "Path to frontend root")
|
|
33
|
-
.option("--config <path>", "Path to
|
|
33
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
34
34
|
.option("--output <path>", "Output directory", DEFAULT_SPECS_DIR)
|
|
35
35
|
.option("--focus <text>", "Focus the generated AI context on a feature area")
|
|
36
36
|
.option("--max-lines <count>", "Maximum lines for the generated context")
|
|
@@ -55,7 +55,7 @@ program
|
|
|
55
55
|
.option("--frontend-root <path>", "Path to frontend root")
|
|
56
56
|
.option("--output <path>", "Output directory", DEFAULT_SPECS_DIR)
|
|
57
57
|
.option("--include-file-graph", "Include file-level dependency graph", false)
|
|
58
|
-
.option("--config <path>", "Path to
|
|
58
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
59
59
|
.option("--docs-mode <mode>", "Docs mode (lean|full)")
|
|
60
60
|
.action(async (projectRoot, options) => {
|
|
61
61
|
await runExtract({
|
|
@@ -90,7 +90,7 @@ program
|
|
|
90
90
|
.option("--output <path>", "Output report path", "specs-out/machine/drift.report.json")
|
|
91
91
|
.option("--baseline [path]", "Write baseline drift file")
|
|
92
92
|
.option("--history [path]", "Append drift history entry")
|
|
93
|
-
.option("--config <path>", "Path to
|
|
93
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
94
94
|
.action(async (projectRoot, options) => {
|
|
95
95
|
await runDrift({
|
|
96
96
|
projectRoot,
|
|
@@ -108,7 +108,7 @@ program
|
|
|
108
108
|
.argument("[projectRoot]", "Repo or project root", process.cwd())
|
|
109
109
|
.option("--backend-root <path>", "Path to backend root")
|
|
110
110
|
.option("--frontend-root <path>", "Path to frontend root")
|
|
111
|
-
.option("--config <path>", "Path to
|
|
111
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
112
112
|
.option("--baseline <path>", "Path to baseline payload")
|
|
113
113
|
.option("--strict-threshold <val>", "Maximum allowed delta shift (default 0.15)")
|
|
114
114
|
.action(async (projectRoot, options) => {
|
|
@@ -128,7 +128,7 @@ program
|
|
|
128
128
|
.option("--backend-root <path>", "Path to backend root")
|
|
129
129
|
.option("--frontend-root <path>", "Path to frontend root")
|
|
130
130
|
.option("--output <path>", "Output constraints path", "specs-out/machine/constraints.json")
|
|
131
|
-
.option("--config <path>", "Path to
|
|
131
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
132
132
|
.action(async (projectRoot, options) => {
|
|
133
133
|
await runConstraints({
|
|
134
134
|
projectRoot,
|
|
@@ -149,7 +149,7 @@ program
|
|
|
149
149
|
.option("--baseline-summary <path>", "Baseline architecture summary path")
|
|
150
150
|
.option("--patch <path>", "Patch file to apply for simulation")
|
|
151
151
|
.option("--mode <mode>", "Simulation mode (soft|hard)")
|
|
152
|
-
.option("--config <path>", "Path to
|
|
152
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
153
153
|
.action(async (projectRoot, options) => {
|
|
154
154
|
await runSimulate({
|
|
155
155
|
projectRoot,
|
|
@@ -176,7 +176,7 @@ program
|
|
|
176
176
|
.option("--mode <mode>", "Simulation mode (soft|hard)")
|
|
177
177
|
.option("--llm-command <cmd>", "Override LLM command from config")
|
|
178
178
|
.option("--print-context", "Print an IDE-ready context block instead of calling an LLM", false)
|
|
179
|
-
.option("--config <path>", "Path to
|
|
179
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
180
180
|
.action(async (projectRoot, options) => {
|
|
181
181
|
await runGuard({
|
|
182
182
|
projectRoot,
|
|
@@ -240,7 +240,7 @@ program
|
|
|
240
240
|
.requiredOption("--query <text>", "Feature or area to analyze (e.g. 'stripe', 'auth')")
|
|
241
241
|
.option("--backend-root <path>", "Path to backend root")
|
|
242
242
|
.option("--frontend-root <path>", "Path to frontend root")
|
|
243
|
-
.option("--config <path>", "Path to
|
|
243
|
+
.option("--config <path>", "Path to guardian.config.json")
|
|
244
244
|
.option("--output <path>", "Write report to a file instead of stdout")
|
|
245
245
|
.option("--format <fmt>", "Output format: yaml or json (default: yaml)")
|
|
246
246
|
.option("--ci", "Exit with code 1 when HIGH complexity is detected with strong confidence", false)
|
|
@@ -323,7 +323,7 @@ program
|
|
|
323
323
|
});
|
|
324
324
|
program
|
|
325
325
|
.command("init")
|
|
326
|
-
.description("Initialize
|
|
326
|
+
.description("Initialize guardian for a project (config, .specs dir, pre-commit hook, CLAUDE.md)")
|
|
327
327
|
.argument("[projectRoot]", "Repo or project root", process.cwd())
|
|
328
328
|
.option("--backend-root <path>", "Path to backend root")
|
|
329
329
|
.option("--frontend-root <path>", "Path to frontend root")
|
|
@@ -36,7 +36,7 @@ export async function runAnalyzeDepth(options) {
|
|
|
36
36
|
if (options.ci &&
|
|
37
37
|
report.classification.compressible === "NON_COMPRESSIBLE" &&
|
|
38
38
|
report.confidence.value >= report.guardrails.enforce_if_confidence_above) {
|
|
39
|
-
console.error(`\n[
|
|
39
|
+
console.error(`\n[Guardian] CI FAIL: "${options.query}" classified as HIGH complexity (confidence ${report.confidence.value.toFixed(2)}).\n` +
|
|
40
40
|
`Recommended pattern: ${report.recommendation.primary.pattern}\n` +
|
|
41
41
|
`Avoid: ${report.recommendation.avoid.join(", ")}`);
|
|
42
42
|
process.exit(1);
|
package/dist/commands/context.js
CHANGED
|
@@ -42,7 +42,7 @@ async function loadSnapshots(inputDir) {
|
|
|
42
42
|
}
|
|
43
43
|
catch (error) {
|
|
44
44
|
if (error.code === "ENOENT") {
|
|
45
|
-
throw new Error(`Could not find snapshots in ${inputDir}. Run \`
|
|
45
|
+
throw new Error(`Could not find snapshots in ${inputDir}. Run \`guardian extract\` first.`);
|
|
46
46
|
}
|
|
47
47
|
throw error;
|
|
48
48
|
}
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* `
|
|
2
|
+
* `guardian discrepancy` — diff current codebase intelligence against a baseline.
|
|
3
3
|
*
|
|
4
4
|
* Reads:
|
|
5
5
|
* - specs-out/machine/codebase-intelligence.json
|
|
@@ -22,7 +22,7 @@ export async function runDiscrepancy(options) {
|
|
|
22
22
|
// Load codebase intelligence
|
|
23
23
|
const intelPath = path.join(layout.machineDir, "codebase-intelligence.json");
|
|
24
24
|
const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
|
|
25
|
-
throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`
|
|
25
|
+
throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
|
|
26
26
|
});
|
|
27
27
|
const baselinePath = path.join(layout.machineDir, "product-document.baseline.json");
|
|
28
28
|
const featureSpecsDir = options.featureSpecs ? path.resolve(options.featureSpecs) : null;
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* `
|
|
2
|
+
* `guardian doc-generate` — generate a human-readable, self-updating product document.
|
|
3
3
|
*
|
|
4
4
|
* Reads:
|
|
5
5
|
* - specs-out/machine/codebase-intelligence.json
|
|
@@ -12,8 +12,8 @@
|
|
|
12
12
|
* - specs-out/machine/product-document.baseline.json (if --update-baseline)
|
|
13
13
|
*
|
|
14
14
|
* LLM env vars (optional — all deterministic sections write regardless):
|
|
15
|
-
*
|
|
16
|
-
*
|
|
15
|
+
* GUARDIAN_LLM_ENDPOINT, GUARDIAN_LLM_API_KEY, GUARDIAN_LLM_MODEL
|
|
16
|
+
* GUARDIAN_OLLAMA_HOST, GUARDIAN_OLLAMA_MODEL
|
|
17
17
|
*/
|
|
18
18
|
import fs from "node:fs/promises";
|
|
19
19
|
import path from "node:path";
|
|
@@ -32,7 +32,7 @@ export async function runDocGenerate(options) {
|
|
|
32
32
|
const llmConfig = await loadLlmConfig();
|
|
33
33
|
if (!llmConfig) {
|
|
34
34
|
console.log("none (deterministic only)");
|
|
35
|
-
console.log(" Tip: set
|
|
35
|
+
console.log(" Tip: set GUARDIAN_LLM_ENDPOINT + GUARDIAN_LLM_API_KEY, or run Ollama locally, to add narrative summaries.");
|
|
36
36
|
}
|
|
37
37
|
else if (llmConfig.provider === "ollama") {
|
|
38
38
|
console.log(`Ollama (${llmConfig.model} at ${llmConfig.endpoint.replace("/api/chat", "")})`);
|
|
@@ -46,7 +46,7 @@ export async function runDocGenerate(options) {
|
|
|
46
46
|
process.stdout.write("Loading codebase intelligence... ");
|
|
47
47
|
const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
|
|
48
48
|
console.log("failed");
|
|
49
|
-
throw new Error(`Could not load ${intelPath}. Run \`
|
|
49
|
+
throw new Error(`Could not load ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
|
|
50
50
|
});
|
|
51
51
|
console.log(`${intel.meta.counts.endpoints} endpoints, ${intel.meta.counts.models} models, ` +
|
|
52
52
|
`${intel.meta.counts.enums} enums, ${intel.meta.counts.tasks} tasks`);
|
|
@@ -158,6 +158,6 @@ export async function runDocGenerate(options) {
|
|
|
158
158
|
// ── Done ──────────────────────────────────────────────────────────────────
|
|
159
159
|
if (discrepancies.summary.total_issues > 0) {
|
|
160
160
|
const critical = discrepancies.summary.has_critical ? " (critical changes detected)" : "";
|
|
161
|
-
console.log(` ⚠ ${discrepancies.summary.total_issues} discrepancy(s) found${critical}. Run \`
|
|
161
|
+
console.log(` ⚠ ${discrepancies.summary.total_issues} discrepancy(s) found${critical}. Run \`guardian discrepancy\` for details.`);
|
|
162
162
|
}
|
|
163
163
|
}
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* `
|
|
2
|
+
* `guardian doc-html` — generate a self-contained Javadoc-style HTML viewer.
|
|
3
3
|
*
|
|
4
4
|
* Same data pipeline as `doc-generate` but outputs a single index.html with:
|
|
5
5
|
* - Fixed sidebar navigation (collapsible, searchable)
|
|
@@ -23,7 +23,7 @@ export async function runDocHtml(options) {
|
|
|
23
23
|
process.stdout.write("Loading codebase intelligence... ");
|
|
24
24
|
const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
|
|
25
25
|
console.log("failed");
|
|
26
|
-
throw new Error(`Could not load ${intelPath}. Run \`
|
|
26
|
+
throw new Error(`Could not load ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
|
|
27
27
|
});
|
|
28
28
|
console.log(`${intel.meta.counts.endpoints} endpoints, ${intel.meta.counts.models} models`);
|
|
29
29
|
// ── Feature arcs (optional) ───────────────────────────────────────────────
|
package/dist/commands/drift.js
CHANGED
|
@@ -62,7 +62,7 @@ export async function runDrift(options) {
|
|
|
62
62
|
await fs.appendFile(resolvedHistory, `${JSON.stringify(entry)}\n`);
|
|
63
63
|
console.log(`Appended history ${resolvedHistory}`);
|
|
64
64
|
}
|
|
65
|
-
console.log("
|
|
65
|
+
console.log("Guardian Drift Report");
|
|
66
66
|
console.log(`Status: ${drift.status}`);
|
|
67
67
|
console.log(`D_t: ${drift.D_t.toFixed(4)}`);
|
|
68
68
|
console.log(`K_t: ${drift.K_t.toFixed(4)}`);
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* `
|
|
2
|
+
* `guardian feature-context` — generate a filtered, self-contained context packet
|
|
3
3
|
* for implementing a single feature.
|
|
4
4
|
*
|
|
5
5
|
* Analogous to `chapter-context` in the book workflow: given a feature spec YAML,
|
|
@@ -26,7 +26,7 @@ export async function runFeatureContext(options) {
|
|
|
26
26
|
// Load codebase intelligence
|
|
27
27
|
const intelPath = path.join(layout.machineDir, "codebase-intelligence.json");
|
|
28
28
|
const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
|
|
29
|
-
throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`
|
|
29
|
+
throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
|
|
30
30
|
});
|
|
31
31
|
// Build filtered context
|
|
32
32
|
const context = buildFeatureContext(spec, intel);
|
|
@@ -7,7 +7,7 @@ import { DEFAULT_SPECS_DIR } from "../config.js";
|
|
|
7
7
|
import { analyzeDepth } from "../extract/analyzers/depth.js";
|
|
8
8
|
export async function runGenerate(options) {
|
|
9
9
|
if (!options.aiContext) {
|
|
10
|
-
throw new Error("`
|
|
10
|
+
throw new Error("`guardian generate` currently supports `--ai-context` only.");
|
|
11
11
|
}
|
|
12
12
|
const outputRoot = path.resolve(options.output ?? DEFAULT_SPECS_DIR);
|
|
13
13
|
const layout = getOutputLayout(outputRoot);
|
|
@@ -19,7 +19,7 @@ export async function runGenerate(options) {
|
|
|
19
19
|
includeFileGraph: true,
|
|
20
20
|
configPath: options.configPath
|
|
21
21
|
});
|
|
22
|
-
// Load persisted Structural Intelligence reports emitted by `
|
|
22
|
+
// Load persisted Structural Intelligence reports emitted by `guardian extract`
|
|
23
23
|
const siReports = await loadStructuralIntelligenceReports(layout.machineDir);
|
|
24
24
|
// If a --focus query is provided, prepend a real-time SI report for that query
|
|
25
25
|
if (options.focus) {
|
package/dist/commands/init.js
CHANGED
|
@@ -146,7 +146,7 @@ export async function runInit(options) {
|
|
|
146
146
|
const content = [
|
|
147
147
|
`# ${projectName}`,
|
|
148
148
|
"",
|
|
149
|
-
"##
|
|
149
|
+
"## Guardian Architecture Context",
|
|
150
150
|
"",
|
|
151
151
|
"<!-- guardian:auto-context -->",
|
|
152
152
|
"<!-- This block is auto-updated by guardian. Do not edit manually. -->",
|
|
@@ -182,7 +182,7 @@ export async function runInit(options) {
|
|
|
182
182
|
input: specsDir,
|
|
183
183
|
output: claudeMdPath,
|
|
184
184
|
});
|
|
185
|
-
console.log("\n✓
|
|
185
|
+
console.log("\n✓ Guardian initialized. Architecture context is in CLAUDE.md and .specs/");
|
|
186
186
|
console.log(" Pre-commit hook will keep it fresh on every commit.");
|
|
187
187
|
}
|
|
188
188
|
catch (err) {
|
package/dist/commands/intel.js
CHANGED
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* `
|
|
2
|
+
* `guardian intel` — build codebase-intelligence.json from existing snapshots.
|
|
3
3
|
*
|
|
4
4
|
* Reads: specs-out/machine/architecture.snapshot.yaml + ux.snapshot.yaml
|
|
5
5
|
* Writes: specs-out/machine/codebase-intelligence.json
|
|
6
6
|
*
|
|
7
|
-
* Also auto-runs at the end of `
|
|
7
|
+
* Also auto-runs at the end of `guardian extract`.
|
|
8
8
|
*/
|
|
9
9
|
import path from "node:path";
|
|
10
10
|
import { writeCodebaseIntelligence } from "../extract/codebase-intel.js";
|
package/dist/commands/search.js
CHANGED
|
@@ -39,7 +39,7 @@ async function loadSnapshots(inputDir) {
|
|
|
39
39
|
}
|
|
40
40
|
catch (error) {
|
|
41
41
|
if (error.code === "ENOENT") {
|
|
42
|
-
throw new Error(`Could not find snapshots in ${inputDir}. Run \`
|
|
42
|
+
throw new Error(`Could not find snapshots in ${inputDir}. Run \`guardian extract\` first.`);
|
|
43
43
|
}
|
|
44
44
|
throw error;
|
|
45
45
|
}
|
|
@@ -242,7 +242,7 @@ function buildSuggestions(reasons) {
|
|
|
242
242
|
return Array.from(suggestions);
|
|
243
243
|
}
|
|
244
244
|
async function createTempCopy(sourceRoot, config) {
|
|
245
|
-
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "
|
|
245
|
+
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "guardian-sim-"));
|
|
246
246
|
const ignore = createIgnoreMatcher(config, sourceRoot);
|
|
247
247
|
await fs.cp(sourceRoot, tempRoot, {
|
|
248
248
|
recursive: true,
|
|
@@ -31,7 +31,7 @@ export async function runVerifyDrift(options) {
|
|
|
31
31
|
}
|
|
32
32
|
const threshold = options.strictThreshold ? parseFloat(options.strictThreshold) : 0.15;
|
|
33
33
|
console.log("=========================================");
|
|
34
|
-
console.log("
|
|
34
|
+
console.log("Guardian Drift Verification");
|
|
35
35
|
console.log("=========================================\n");
|
|
36
36
|
console.log(`Current Status: ${drift.status}`);
|
|
37
37
|
console.log(`Current Delta: ${drift.delta.toFixed(4)}`);
|
package/dist/config.js
CHANGED
|
@@ -7,6 +7,7 @@ const DEFAULT_CONFIG = {
|
|
|
7
7
|
root: "",
|
|
8
8
|
backendRoot: "",
|
|
9
9
|
frontendRoot: "",
|
|
10
|
+
roots: [],
|
|
10
11
|
discovery: {
|
|
11
12
|
enabled: true
|
|
12
13
|
},
|
|
@@ -137,6 +138,11 @@ function normalizeConfig(input, configDir) {
|
|
|
137
138
|
project.root = resolveMaybe(project.root) ?? "";
|
|
138
139
|
project.backendRoot = resolveMaybe(project.backendRoot) ?? "";
|
|
139
140
|
project.frontendRoot = resolveMaybe(project.frontendRoot) ?? "";
|
|
141
|
+
if (Array.isArray(project.roots) && configDir) {
|
|
142
|
+
project.roots = project.roots
|
|
143
|
+
.filter((r) => typeof r === "string" && r.trim().length > 0)
|
|
144
|
+
.map(r => path.resolve(configDir, r));
|
|
145
|
+
}
|
|
140
146
|
normalized.project = project;
|
|
141
147
|
}
|
|
142
148
|
if (input.python) {
|
|
@@ -271,6 +277,7 @@ function mergeConfig(base, override) {
|
|
|
271
277
|
root: override.project?.root ?? base.project?.root ?? "",
|
|
272
278
|
backendRoot: override.project?.backendRoot ?? base.project?.backendRoot ?? "",
|
|
273
279
|
frontendRoot: override.project?.frontendRoot ?? base.project?.frontendRoot ?? "",
|
|
280
|
+
roots: mergeArrays(base.project?.roots, override.project?.roots),
|
|
274
281
|
discovery: {
|
|
275
282
|
enabled: override.project?.discovery?.enabled ??
|
|
276
283
|
base.project?.discovery?.enabled ??
|
|
@@ -1041,7 +1041,7 @@ function emptyPythonFileResult() {
|
|
|
1041
1041
|
endpointModelUsage: []
|
|
1042
1042
|
};
|
|
1043
1043
|
}
|
|
1044
|
-
export async function analyzeBackend(backendRoot, config) {
|
|
1044
|
+
export async function analyzeBackend(backendRoot, config, workspaceRoot) {
|
|
1045
1045
|
const root = path.resolve(backendRoot);
|
|
1046
1046
|
const baseRoot = path.dirname(root);
|
|
1047
1047
|
const ignore = createIgnoreMatcher(config, baseRoot);
|
|
@@ -1111,8 +1111,9 @@ export async function analyzeBackend(backendRoot, config) {
|
|
|
1111
1111
|
}
|
|
1112
1112
|
codeFiles.sort((a, b) => a.localeCompare(b));
|
|
1113
1113
|
const knownFiles = new Set(codeFiles);
|
|
1114
|
+
const cacheRoot = workspaceRoot ? path.resolve(workspaceRoot) : baseRoot;
|
|
1114
1115
|
const { cachePath, cache } = await loadBackendExtractionCache({
|
|
1115
|
-
projectRoot:
|
|
1116
|
+
projectRoot: cacheRoot,
|
|
1116
1117
|
config
|
|
1117
1118
|
});
|
|
1118
1119
|
const activeAbsoluteFiles = new Set(codeFiles.map((file) => path.join(baseRoot, file)));
|
package/dist/extract/cache.js
CHANGED
|
@@ -2,7 +2,7 @@ import crypto from "node:crypto";
|
|
|
2
2
|
import fs from "node:fs/promises";
|
|
3
3
|
import path from "node:path";
|
|
4
4
|
import { DEFAULT_SPECS_DIR } from "../config.js";
|
|
5
|
-
const BACKEND_CACHE_VERSION = "
|
|
5
|
+
const BACKEND_CACHE_VERSION = "guardian-backend-cache-v4";
|
|
6
6
|
export async function loadBackendExtractionCache(params) {
|
|
7
7
|
const cachePath = path.join(params.projectRoot, DEFAULT_SPECS_DIR, ".cache", "file-hashes.json");
|
|
8
8
|
const configHash = hashObject(params.config);
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* Docs Loader — loads and parses sections from existing
|
|
2
|
+
* Docs Loader — loads and parses sections from existing guardian doc files.
|
|
3
3
|
*
|
|
4
4
|
* Reads from specs-out/machine/docs/:
|
|
5
5
|
* hld.md → systemDiagram, couplingHeatmap, backendSubsystems, apiDomainMap
|
package/dist/extract/docs.js
CHANGED
|
@@ -189,7 +189,7 @@ function renderIndex(architecture, ux, options) {
|
|
|
189
189
|
const internalFiles = options.internalFiles ?? [];
|
|
190
190
|
const internalDir = options.internalDir ?? "internal";
|
|
191
191
|
return [
|
|
192
|
-
section("
|
|
192
|
+
section("Guardian Overview"),
|
|
193
193
|
`Project: **${architecture.project.name}**\n\n`,
|
|
194
194
|
renderTable(["Metric", "Count", "Notes"], [
|
|
195
195
|
["Modules", String(architecture.modules.length), "Backend modules"],
|
|
@@ -211,7 +211,7 @@ function renderIndex(architecture, ux, options) {
|
|
|
211
211
|
}
|
|
212
212
|
function renderHumanRootReadme(architecture) {
|
|
213
213
|
return [
|
|
214
|
-
"#
|
|
214
|
+
"# Guardian Output",
|
|
215
215
|
"",
|
|
216
216
|
`Project: **${architecture.project.name}**`,
|
|
217
217
|
"",
|
|
@@ -381,7 +381,7 @@ function renderHumanDataAndFlows(architecture, ux) {
|
|
|
381
381
|
[
|
|
382
382
|
"unverified",
|
|
383
383
|
String(architecture.cross_stack_contracts.length - verified.length),
|
|
384
|
-
"
|
|
384
|
+
"Guardian could not confidently infer enough fields yet"
|
|
385
385
|
]
|
|
386
386
|
]));
|
|
387
387
|
lines.push("Next: [Change Guide](./change-guide.md)\n");
|
|
@@ -472,7 +472,7 @@ export function renderExecutiveSummary(architecture, ux, meta) {
|
|
|
472
472
|
summaryLines.push(section("Product Summary"));
|
|
473
473
|
summaryLines.push(`Project: **${architecture.project.name}**\n\n`);
|
|
474
474
|
summaryLines.push(`Snapshot date: **${formatTimestamp(generatedAt)}**\n\n`);
|
|
475
|
-
summaryLines.push("
|
|
475
|
+
summaryLines.push("Guardian produces living, machine‑verified documentation for your codebase so teams can align on architecture, detect drift, and share an accurate system spec without manual doc maintenance.\n\n");
|
|
476
476
|
summaryLines.push("## Vision\n\n");
|
|
477
477
|
summaryLines.push("Enable teams to treat architecture as a first‑class, continuously verifiable product artifact—" +
|
|
478
478
|
"not a static diagram or an outdated wiki.\n\n");
|
|
@@ -2087,7 +2087,7 @@ function renderDiff(architecture, meta) {
|
|
|
2087
2087
|
const lines = [];
|
|
2088
2088
|
lines.push(section("Snapshot Changelog"));
|
|
2089
2089
|
if (!meta?.diff) {
|
|
2090
|
-
lines.push("*No previous summary available. Run
|
|
2090
|
+
lines.push("*No previous summary available. Run Guardian twice to generate a diff.*\n\n");
|
|
2091
2091
|
return lines.join("");
|
|
2092
2092
|
}
|
|
2093
2093
|
lines.push(`Structural change: **${meta.diff.structural_change ? "yes" : "no"}** \n` +
|
|
@@ -2166,7 +2166,7 @@ function renderIntegrationGuide(snapshot) {
|
|
|
2166
2166
|
const mismatchedCount = verifiedContracts.filter((contract) => contract.status === "mismatched").length;
|
|
2167
2167
|
const unverifiedCount = snapshot.cross_stack_contracts.length - verifiedContracts.length;
|
|
2168
2168
|
lines.push(`Verified: ${verifiedContracts.length} contracts (${mismatchedCount} mismatched). Unverified: ${unverifiedCount}.`);
|
|
2169
|
-
lines.push(" Run `
|
|
2169
|
+
lines.push(" Run `guardian extract --include-file-graph` for richer caller inference.\n\n");
|
|
2170
2170
|
if (verifiedContracts.length === 0) {
|
|
2171
2171
|
lines.push("*No verified frontend/backend contracts detected yet.*\n\n");
|
|
2172
2172
|
}
|
|
@@ -2251,7 +2251,8 @@ export async function writeDocs(outputRoot, architecture, ux, options) {
|
|
|
2251
2251
|
{ name: "data.md", content: renderData(architecture, "lean") },
|
|
2252
2252
|
{ name: "tests.md", content: renderTests(architecture) }
|
|
2253
2253
|
];
|
|
2254
|
-
|
|
2254
|
+
// Internal-only files: richer versions of lean files + files that only exist in full mode
|
|
2255
|
+
const fullOnlyFiles = [
|
|
2255
2256
|
{ name: "index.md", content: renderIndex(architecture, ux, { docsFiles: FULL_INDEX_FILES }) },
|
|
2256
2257
|
{
|
|
2257
2258
|
name: "summary.md",
|
|
@@ -2273,27 +2274,10 @@ export async function writeDocs(outputRoot, architecture, ux, options) {
|
|
|
2273
2274
|
})
|
|
2274
2275
|
},
|
|
2275
2276
|
{ name: "architecture.md", content: renderArchitecture(architecture, { summary, diff, heatmap }) },
|
|
2276
|
-
{ name: "ux.md", content: renderUx(ux) },
|
|
2277
2277
|
{ name: "data.md", content: renderData(architecture, "full") },
|
|
2278
2278
|
{ name: "data_dictionary.md", content: renderDataDictionary(architecture) },
|
|
2279
|
-
{ name: "integration.md", content: renderIntegrationGuide(architecture) },
|
|
2280
|
-
{
|
|
2281
|
-
name: "diff.md",
|
|
2282
|
-
content: renderDiff(architecture, {
|
|
2283
|
-
summary,
|
|
2284
|
-
diff,
|
|
2285
|
-
previous: options?.previous?.architecture ?? null
|
|
2286
|
-
})
|
|
2287
|
-
},
|
|
2288
2279
|
{ name: "test_coverage.md", content: renderTestCoverage(architecture) },
|
|
2289
|
-
{ name: "runtime.md", content: renderRuntime(architecture) },
|
|
2290
|
-
{ name: "infra.md", content: renderInfra(architecture) },
|
|
2291
|
-
{
|
|
2292
|
-
name: "hld.md",
|
|
2293
|
-
content: renderHld(architecture, ux, driftHistory, { summary, diff, heatmap })
|
|
2294
|
-
},
|
|
2295
2280
|
{ name: "lld.md", content: renderLld(architecture, ux, "full") },
|
|
2296
|
-
{ name: "tests.md", content: renderTests(architecture) }
|
|
2297
2281
|
];
|
|
2298
2282
|
const written = [];
|
|
2299
2283
|
await fs.writeFile(path.join(layout.rootDir, "README.md"), renderHumanRootReadme(architecture));
|
|
@@ -2306,7 +2290,7 @@ export async function writeDocs(outputRoot, architecture, ux, options) {
|
|
|
2306
2290
|
if (docsMode === "full") {
|
|
2307
2291
|
const internalDir = layout.machineInternalDir;
|
|
2308
2292
|
await fs.mkdir(internalDir, { recursive: true });
|
|
2309
|
-
for (const file of
|
|
2293
|
+
for (const file of fullOnlyFiles) {
|
|
2310
2294
|
const target = path.join(internalDir, file.name);
|
|
2311
2295
|
await fs.writeFile(target, file.content);
|
|
2312
2296
|
written.push(target);
|
package/dist/extract/html-doc.js
CHANGED
|
@@ -374,7 +374,7 @@ function renderQualityPage(intel, docs) {
|
|
|
374
374
|
// Quality signals
|
|
375
375
|
parts.push(`<section id="quality-signals"><h2>Quality Signals</h2>${docs.qualitySignals
|
|
376
376
|
? renderMd(docs.qualitySignals)
|
|
377
|
-
: `<p class="muted">Run <code>
|
|
377
|
+
: `<p class="muted">Run <code>guardian extract</code> to populate quality signals.</p>`}</section>`);
|
|
378
378
|
// Pattern registry
|
|
379
379
|
const active = intel.pattern_registry.patterns.filter((p) => p.occurrences > 0);
|
|
380
380
|
const rows = active.map((p) => {
|
package/dist/extract/index.js
CHANGED
|
@@ -28,8 +28,20 @@ export async function buildSnapshots(options) {
|
|
|
28
28
|
const reportedFrontendRoot = formatOutputPath(resolvedFrontendRoot);
|
|
29
29
|
const reportedWorkspaceRoot = formatOutputPath(workspaceRoot);
|
|
30
30
|
const config = resolvedProject.config;
|
|
31
|
-
|
|
32
|
-
const
|
|
31
|
+
// Analyze all roots — run both analyzers on each root, then merge
|
|
32
|
+
const allRoots = resolvedProject.roots;
|
|
33
|
+
const backendResults = [];
|
|
34
|
+
const frontendResults = [];
|
|
35
|
+
for (const root of allRoots) {
|
|
36
|
+
const [be, fe] = await Promise.all([
|
|
37
|
+
analyzeBackend(root, config, workspaceRoot),
|
|
38
|
+
analyzeFrontend(root, config)
|
|
39
|
+
]);
|
|
40
|
+
backendResults.push(be);
|
|
41
|
+
frontendResults.push(fe);
|
|
42
|
+
}
|
|
43
|
+
const backend = mergeBackendAnalyses(backendResults, allRoots, workspaceRoot);
|
|
44
|
+
const frontend = mergeFrontendAnalyses(frontendResults, allRoots, workspaceRoot);
|
|
33
45
|
const projectRoot = workspaceRoot;
|
|
34
46
|
const runtime = await analyzeRuntime(workspaceRoot, config);
|
|
35
47
|
const projectName = deriveProjectName(resolvedBackendRoot);
|
|
@@ -82,6 +94,7 @@ export async function buildSnapshots(options) {
|
|
|
82
94
|
workspace_root: reportedWorkspaceRoot,
|
|
83
95
|
backend_root: reportedBackendRoot,
|
|
84
96
|
frontend_root: reportedFrontendRoot,
|
|
97
|
+
roots: resolvedProject.roots.map(formatOutputPath),
|
|
85
98
|
resolution_source: resolvedProject.resolutionSource,
|
|
86
99
|
entrypoints: backend.entrypoints
|
|
87
100
|
},
|
|
@@ -305,6 +318,97 @@ async function buildFunctionTestCoverage(params) {
|
|
|
305
318
|
};
|
|
306
319
|
});
|
|
307
320
|
}
|
|
321
|
+
/**
 * Merge per-root backend analyses into a single workspace-wide analysis.
 *
 * Module IDs and module paths are prefixed with each root's path relative to
 * the workspace so they stay globally unique, and every structure that
 * references module IDs (graph edges, endpoint.module, circular-dependency
 * cycles, orphan lists, usage counts) is remapped to the prefixed IDs.
 *
 * NOTE: entries of `results` are mutated in place during remapping — callers
 * must treat them as consumed after this call.
 *
 * @param {Array<object>} results - one backend analysis per root, parallel to `roots`
 * @param {string[]} roots - absolute root directories that were analyzed
 * @param {string} workspaceRoot - absolute workspace root used for relative labels
 * @returns {object} merged analysis with the same shape as a single result
 * @throws {Error} when `results` is empty (previously a confusing TypeError
 *   deep inside the testCoverage merge)
 */
function mergeBackendAnalyses(results, roots, workspaceRoot) {
    if (results.length === 0) {
        throw new Error("mergeBackendAnalyses: no analysis results to merge");
    }
    // Single root: nothing to disambiguate — return the analysis unchanged.
    if (results.length === 1) {
        return results[0];
    }
    // Prefix module IDs with the root-relative path so they're globally unique.
    for (let i = 0; i < results.length; i++) {
        // Normalize Windows separators so labels are stable across platforms.
        const rootLabel = path.relative(workspaceRoot, roots[i]).replace(/\\/g, "/");
        const idMap = new Map();
        for (const mod of results[i].modules) {
            const newId = `${rootLabel}/${mod.id}`;
            idMap.set(mod.id, newId);
            mod.id = newId;
            mod.path = `${rootLabel}/${mod.path}`;
        }
        // Remap every structure that refers to module IDs.
        for (const edge of results[i].moduleGraph) {
            edge.from = idMap.get(edge.from) ?? edge.from;
            edge.to = idMap.get(edge.to) ?? edge.to;
        }
        for (const ep of results[i].endpoints) {
            if (ep.module && idMap.has(ep.module)) {
                ep.module = idMap.get(ep.module);
            }
        }
        for (const cycle of results[i].circularDependencies) {
            for (let j = 0; j < cycle.length; j++) {
                cycle[j] = idMap.get(cycle[j]) ?? cycle[j];
            }
        }
        results[i].orphanModules = results[i].orphanModules.map(m => idMap.get(m) ?? m);
        const newUsage = {};
        for (const [key, value] of Object.entries(results[i].moduleUsage)) {
            newUsage[idMap.get(key) ?? key] = value;
        }
        results[i].moduleUsage = newUsage;
    }
    // Sum usage counts across roots. After prefixing, IDs should be unique,
    // but addition keeps the merge correct even if two roots ever collide.
    const moduleUsage = {};
    for (const r of results) {
        for (const [key, value] of Object.entries(r.moduleUsage)) {
            moduleUsage[key] = (moduleUsage[key] ?? 0) + value;
        }
    }
    // Merge testCoverage: copy the first result's arrays (so we never mutate
    // its testCoverage object) and append every other root's entries.
    const mergedCoverage = { ...results[0].testCoverage };
    mergedCoverage.untested_source_files = [...mergedCoverage.untested_source_files];
    mergedCoverage.test_files_missing_source = [...mergedCoverage.test_files_missing_source];
    mergedCoverage.coverage_map = [...mergedCoverage.coverage_map];
    for (let i = 1; i < results.length; i++) {
        const tc = results[i].testCoverage;
        mergedCoverage.untested_source_files.push(...tc.untested_source_files);
        mergedCoverage.test_files_missing_source.push(...tc.test_files_missing_source);
        mergedCoverage.coverage_map.push(...tc.coverage_map);
    }
    // NOTE(review): fileGraph, unusedExports, unusedEndpoints and
    // endpointModelUsage are concatenated without ID remapping — confirm they
    // reference file paths rather than module IDs.
    return {
        modules: results.flatMap(r => r.modules),
        moduleGraph: results.flatMap(r => r.moduleGraph),
        fileGraph: results.flatMap(r => r.fileGraph),
        endpoints: results.flatMap(r => r.endpoints),
        dataModels: results.flatMap(r => r.dataModels),
        enums: results.flatMap(r => r.enums),
        constants: results.flatMap(r => r.constants),
        endpointModelUsage: results.flatMap(r => r.endpointModelUsage),
        tasks: results.flatMap(r => r.tasks),
        circularDependencies: results.flatMap(r => r.circularDependencies),
        orphanModules: results.flatMap(r => r.orphanModules),
        orphanFiles: results.flatMap(r => r.orphanFiles),
        moduleUsage,
        unusedExports: results.flatMap(r => r.unusedExports),
        unusedEndpoints: results.flatMap(r => r.unusedEndpoints),
        entrypoints: results.flatMap(r => r.entrypoints),
        duplicateFunctions: results.flatMap(r => r.duplicateFunctions),
        similarFunctions: results.flatMap(r => r.similarFunctions),
        testCoverage: mergedCoverage,
        tests: results.flatMap(r => r.tests)
    };
}
|
|
396
|
+
/**
 * Merge per-root frontend analyses into one workspace-wide analysis.
 *
 * Frontend results carry no cross-referenced IDs that need remapping, so the
 * merge is a plain concatenation of each list field across all roots.
 *
 * @param {Array<object>} results - one frontend analysis per root
 * @param {string[]} _roots - unused; kept for signature parity with the backend merge
 * @param {string} _workspaceRoot - unused; kept for signature parity
 * @returns {object} merged analysis with the same shape as a single result
 */
function mergeFrontendAnalyses(results, _roots, _workspaceRoot) {
    if (results.length === 1) {
        return results[0];
    }
    // All fields merge the same way — concatenate in root order.
    const listFields = [
        "files",
        "pages",
        "apiCalls",
        "uxPages",
        "components",
        "componentGraph",
        "fileGraph",
        "orphanFiles",
        "unusedExports",
        "tests"
    ];
    const merged = {};
    for (const field of listFields) {
        merged[field] = results.flatMap(r => r[field]);
    }
    return merged;
}
|
|
308
412
|
function findCommonRoot(paths) {
|
|
309
413
|
if (paths.length === 0) {
|
|
310
414
|
return process.cwd();
|
|
@@ -2,16 +2,16 @@
|
|
|
2
2
|
* LLM Client — thin HTTP client for LLM-powered doc generation.
|
|
3
3
|
*
|
|
4
4
|
* Resolution order:
|
|
5
|
-
* 1. Configured LLM —
|
|
6
|
-
* 2. Ollama fallback —
|
|
5
|
+
* 1. Configured LLM — GUARDIAN_LLM_ENDPOINT + GUARDIAN_LLM_API_KEY both set
|
|
6
|
+
* 2. Ollama fallback — GUARDIAN_LLM_ENDPOINT/KEY not set, Ollama reachable at localhost
|
|
7
7
|
* 3. None — returns null from loadLlmConfig(), callers write placeholder text
|
|
8
8
|
*
|
|
9
9
|
* Env vars:
|
|
10
|
-
*
|
|
11
|
-
*
|
|
12
|
-
*
|
|
13
|
-
*
|
|
14
|
-
*
|
|
10
|
+
* GUARDIAN_LLM_ENDPOINT — full URL e.g. https://api.anthropic.com/v1/messages
|
|
11
|
+
* GUARDIAN_LLM_API_KEY — API key (not required for Ollama)
|
|
12
|
+
* GUARDIAN_LLM_MODEL — model ID (optional)
|
|
13
|
+
* GUARDIAN_OLLAMA_HOST — Ollama base URL (default: http://localhost:11434)
|
|
14
|
+
* GUARDIAN_OLLAMA_MODEL — Ollama model (default: llama3.2)
|
|
15
15
|
*
|
|
16
16
|
* Wire formats (auto-detected from endpoint URL):
|
|
17
17
|
* anthropic — POST /v1/messages { model, max_tokens, system, messages }
|
|
@@ -28,21 +28,21 @@ const DEFAULT_MAX_TOKENS = 2048;
|
|
|
28
28
|
* Load LLM config from environment variables.
|
|
29
29
|
*
|
|
30
30
|
* Resolution order:
|
|
31
|
-
* 1.
|
|
32
|
-
* 2. Ollama reachable at
|
|
31
|
+
* 1. GUARDIAN_LLM_ENDPOINT + GUARDIAN_LLM_API_KEY → configured cloud/local LLM
|
|
32
|
+
* 2. Ollama reachable at GUARDIAN_OLLAMA_HOST (or localhost:11434) → Ollama fallback
|
|
33
33
|
* 3. null → no LLM available
|
|
34
34
|
*/
|
|
35
35
|
export async function loadLlmConfig() {
|
|
36
36
|
// Priority 1: explicit endpoint + key
|
|
37
|
-
const endpoint = process.env["
|
|
38
|
-
const apiKey = process.env["
|
|
37
|
+
const endpoint = process.env["GUARDIAN_LLM_ENDPOINT"];
|
|
38
|
+
const apiKey = process.env["GUARDIAN_LLM_API_KEY"];
|
|
39
39
|
if (endpoint && apiKey) {
|
|
40
|
-
const model = process.env["
|
|
40
|
+
const model = process.env["GUARDIAN_LLM_MODEL"] ?? DEFAULT_CLOUD_MODEL;
|
|
41
41
|
return { endpoint, apiKey, model, provider: detectProvider(endpoint) };
|
|
42
42
|
}
|
|
43
43
|
// Priority 2: Ollama fallback
|
|
44
|
-
const ollamaHost = process.env["
|
|
45
|
-
const ollamaModel = process.env["
|
|
44
|
+
const ollamaHost = process.env["GUARDIAN_OLLAMA_HOST"] ?? DEFAULT_OLLAMA_HOST;
|
|
45
|
+
const ollamaModel = process.env["GUARDIAN_OLLAMA_MODEL"] ?? DEFAULT_OLLAMA_MODEL;
|
|
46
46
|
if (await isOllamaReachable(ollamaHost)) {
|
|
47
47
|
return {
|
|
48
48
|
endpoint: `${ollamaHost}/api/chat`,
|
|
@@ -58,11 +58,11 @@ export async function loadLlmConfig() {
|
|
|
58
58
|
* Use when async is not possible, or when you want to skip Ollama discovery.
|
|
59
59
|
*/
|
|
60
60
|
/**
 * Synchronous LLM config loader: only honors an explicitly configured
 * endpoint + API key. Unlike loadLlmConfig(), it performs no Ollama
 * reachability probe (that requires async I/O).
 *
 * @returns {{endpoint: string, apiKey: string, model: string, provider: string} | null}
 *   the configured LLM, or null when GUARDIAN_LLM_ENDPOINT / GUARDIAN_LLM_API_KEY
 *   are not both set
 */
export function loadLlmConfigSync() {
    const {
        GUARDIAN_LLM_ENDPOINT: endpoint,
        GUARDIAN_LLM_API_KEY: apiKey
    } = process.env;
    if (!endpoint || !apiKey) {
        return null;
    }
    return {
        endpoint,
        apiKey,
        model: process.env["GUARDIAN_LLM_MODEL"] ?? DEFAULT_CLOUD_MODEL,
        provider: detectProvider(endpoint)
    };
}
|
|
68
68
|
/**
|
|
@@ -53,16 +53,22 @@ export async function resolveProjectPaths(options) {
|
|
|
53
53
|
(await chooseBackendRoot(workspaceRoot));
|
|
54
54
|
const frontendRoot = explicitFrontend ??
|
|
55
55
|
(await chooseFrontendRoot(workspaceRoot));
|
|
56
|
+
// Build unified roots list: backendRoot + frontendRoot + any extra from config
|
|
57
|
+
const configRoots = (config.project?.roots ?? []).map(r => path.resolve(r));
|
|
58
|
+
const allRoots = [...new Set([backendRoot, frontendRoot, ...configRoots].filter(Boolean))];
|
|
56
59
|
return {
|
|
57
60
|
workspaceRoot,
|
|
58
61
|
backendRoot,
|
|
59
62
|
frontendRoot,
|
|
63
|
+
roots: allRoots,
|
|
60
64
|
resolutionSource,
|
|
61
65
|
config
|
|
62
66
|
};
|
|
63
67
|
}
|
|
64
68
|
/**
 * Print a one-line summary of the resolved project roots to stdout.
 *
 * Roots beyond the backend/frontend pair are reported only as a count
 * (e.g. " +2 roots") to keep the line short.
 *
 * @param {{workspaceRoot: string, backendRoot: string, frontendRoot: string,
 *          resolutionSource: string, roots: string[]}} resolved
 * @returns {void}
 */
export function logResolvedProjectPaths(resolved) {
    const { workspaceRoot, backendRoot, frontendRoot, resolutionSource, roots } = resolved;
    const extraCount = roots.filter((r) => r !== backendRoot && r !== frontendRoot).length;
    const suffix = extraCount > 0 ? ` +${extraCount} roots` : "";
    console.log(`Guardian roots (${resolutionSource}): workspace=${workspaceRoot} backend=${backendRoot} frontend=${frontendRoot}${suffix}`);
}
|
|
67
73
|
async function chooseBackendRoot(workspaceRoot) {
|
|
68
74
|
const candidates = await discoverBackendCandidates(workspaceRoot);
|
package/package.json
CHANGED