@toolbaux/guardian 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -105,6 +105,39 @@ guardian doc-html
105
105
 
106
106
  All extraction uses Tree-Sitter AST parsing — deterministic, no LLM involved.
107
107
 
108
+ ## LLM Usage — Opt-In Only
109
+
110
+ > **Important:** Guardian's core commands (`extract`, `generate`, `context`, `drift`, `search`, `init`) **never call an LLM**. All extraction is deterministic AST parsing — no API keys needed, no background AI calls, no cost.
111
+
112
+ Two commands **optionally** use an LLM, and **only when an LLM provider is configured — or a local Ollama instance is detected**:
113
+
114
+ | Command | What the LLM does | Runs automatically? |
115
+ |---------|-------------------|-------------------|
116
+ | `guardian doc-generate` | Adds narrative summaries to product docs | **No** — manual command only |
117
+ | `guardian guard --task "..."` | Generates a code patch from a task description | **No** — manual command only |
118
+
119
+ **If you never set API keys and no local Ollama instance is running, no LLM is ever called.** These commands degrade gracefully — `doc-generate` produces docs without narrative sections, and `guard` prints context instead of generating patches.
120
+
121
+ ### Configuring LLM (optional)
122
+
123
+ ```bash
124
+ # Option 1: Cloud LLM (OpenAI-compatible endpoint)
125
+ export GUARDIAN_LLM_ENDPOINT="https://api.openai.com/v1"
126
+ export GUARDIAN_LLM_API_KEY="sk-..."
127
+ export GUARDIAN_LLM_MODEL="gpt-4o" # optional, defaults to gpt-4o
128
+
129
+ # Option 2: Local Ollama (no API key needed, auto-detected)
130
+ # Just have Ollama running on localhost:11434
131
+ export GUARDIAN_OLLAMA_HOST="http://localhost:11434" # optional, this is the default
132
+ export GUARDIAN_OLLAMA_MODEL="llama3.2" # optional, this is the default
133
+
134
+ # Option 3: Shell command (for guardian guard)
135
+ # Set in guardian.config.json:
136
+ # { "llm": { "command": "ollama", "args": ["run", "llama3"] } }
137
+ ```
138
+
139
+ **No pre-commit hook, VSCode extension, or automated workflow ever triggers LLM calls.** The hook only runs `extract` + `context` (pure AST).
140
+
108
141
  ## What Guardian Generates
109
142
 
110
143
  **Workflow sequence diagrams** — Mermaid diagrams for your most complex endpoints, showing the full call chain from client through handler to services and data stores.
package/dist/cli.js CHANGED
@@ -18,6 +18,7 @@ import { runDocGenerate } from "./commands/doc-generate.js";
18
18
  import { runDiscrepancy } from "./commands/discrepancy.js";
19
19
  import { runDocHtml } from "./commands/doc-html.js";
20
20
  import { runInit } from "./commands/init.js";
21
+ import { DEFAULT_SPECS_DIR } from "./config.js";
21
22
  const program = new Command();
22
23
  program
23
24
  .name("guardian")
@@ -29,8 +30,8 @@ program
29
30
  .argument("[projectRoot]", "Repo or project root", process.cwd())
30
31
  .option("--backend-root <path>", "Path to backend root")
31
32
  .option("--frontend-root <path>", "Path to frontend root")
32
- .option("--config <path>", "Path to specguard.config.json")
33
- .option("--output <path>", "Output directory", "specs-out")
33
+ .option("--config <path>", "Path to guardian.config.json")
34
+ .option("--output <path>", "Output directory", DEFAULT_SPECS_DIR)
34
35
  .option("--focus <text>", "Focus the generated AI context on a feature area")
35
36
  .option("--max-lines <count>", "Maximum lines for the generated context")
36
37
  .option("--ai-context", "Generate architecture-context.md for AI tools", false)
@@ -52,16 +53,16 @@ program
52
53
  .argument("[projectRoot]", "Repo or project root", process.cwd())
53
54
  .option("--backend-root <path>", "Path to backend root")
54
55
  .option("--frontend-root <path>", "Path to frontend root")
55
- .option("--output <path>", "Output directory", "specs-out")
56
+ .option("--output <path>", "Output directory", DEFAULT_SPECS_DIR)
56
57
  .option("--include-file-graph", "Include file-level dependency graph", false)
57
- .option("--config <path>", "Path to specguard.config.json")
58
+ .option("--config <path>", "Path to guardian.config.json")
58
59
  .option("--docs-mode <mode>", "Docs mode (lean|full)")
59
60
  .action(async (projectRoot, options) => {
60
61
  await runExtract({
61
62
  projectRoot,
62
63
  backendRoot: options.backendRoot,
63
64
  frontendRoot: options.frontendRoot,
64
- output: options.output ?? "specs-out",
65
+ output: options.output ?? DEFAULT_SPECS_DIR,
65
66
  includeFileGraph: options.includeFileGraph ?? false,
66
67
  configPath: options.config,
67
68
  docsMode: options.docsMode
@@ -89,7 +90,7 @@ program
89
90
  .option("--output <path>", "Output report path", "specs-out/machine/drift.report.json")
90
91
  .option("--baseline [path]", "Write baseline drift file")
91
92
  .option("--history [path]", "Append drift history entry")
92
- .option("--config <path>", "Path to specguard.config.json")
93
+ .option("--config <path>", "Path to guardian.config.json")
93
94
  .action(async (projectRoot, options) => {
94
95
  await runDrift({
95
96
  projectRoot,
@@ -107,7 +108,7 @@ program
107
108
  .argument("[projectRoot]", "Repo or project root", process.cwd())
108
109
  .option("--backend-root <path>", "Path to backend root")
109
110
  .option("--frontend-root <path>", "Path to frontend root")
110
- .option("--config <path>", "Path to specguard.config.json")
111
+ .option("--config <path>", "Path to guardian.config.json")
111
112
  .option("--baseline <path>", "Path to baseline payload")
112
113
  .option("--strict-threshold <val>", "Maximum allowed delta shift (default 0.15)")
113
114
  .action(async (projectRoot, options) => {
@@ -127,7 +128,7 @@ program
127
128
  .option("--backend-root <path>", "Path to backend root")
128
129
  .option("--frontend-root <path>", "Path to frontend root")
129
130
  .option("--output <path>", "Output constraints path", "specs-out/machine/constraints.json")
130
- .option("--config <path>", "Path to specguard.config.json")
131
+ .option("--config <path>", "Path to guardian.config.json")
131
132
  .action(async (projectRoot, options) => {
132
133
  await runConstraints({
133
134
  projectRoot,
@@ -148,7 +149,7 @@ program
148
149
  .option("--baseline-summary <path>", "Baseline architecture summary path")
149
150
  .option("--patch <path>", "Patch file to apply for simulation")
150
151
  .option("--mode <mode>", "Simulation mode (soft|hard)")
151
- .option("--config <path>", "Path to specguard.config.json")
152
+ .option("--config <path>", "Path to guardian.config.json")
152
153
  .action(async (projectRoot, options) => {
153
154
  await runSimulate({
154
155
  projectRoot,
@@ -175,7 +176,7 @@ program
175
176
  .option("--mode <mode>", "Simulation mode (soft|hard)")
176
177
  .option("--llm-command <cmd>", "Override LLM command from config")
177
178
  .option("--print-context", "Print an IDE-ready context block instead of calling an LLM", false)
178
- .option("--config <path>", "Path to specguard.config.json")
179
+ .option("--config <path>", "Path to guardian.config.json")
179
180
  .action(async (projectRoot, options) => {
180
181
  await runGuard({
181
182
  projectRoot,
@@ -194,24 +195,24 @@ program
194
195
  program
195
196
  .command("summary")
196
197
  .description("Generate a plain-language project summary from existing snapshots")
197
- .option("--input <path>", "Snapshot output directory", "specs-out")
198
+ .option("--input <path>", "Snapshot output directory", DEFAULT_SPECS_DIR)
198
199
  .option("--output <path>", "Summary output path")
199
200
  .action(async (options) => {
200
201
  await runSummary({
201
- input: options.input ?? "specs-out",
202
+ input: options.input ?? DEFAULT_SPECS_DIR,
202
203
  output: options.output
203
204
  });
204
205
  });
205
206
  program
206
207
  .command("search")
207
208
  .description("Search existing snapshots for models, endpoints, components, modules, and tasks")
208
- .option("--input <path>", "Snapshot output directory", "specs-out")
209
+ .option("--input <path>", "Snapshot output directory", DEFAULT_SPECS_DIR)
209
210
  .requiredOption("--query <text>", "Search query")
210
211
  .option("--output <path>", "Write search results to a file")
211
212
  .option("--types <items>", "Comma-separated filters: models,endpoints,components,modules,tasks")
212
213
  .action(async (options) => {
213
214
  await runSearch({
214
- input: options.input ?? "specs-out",
215
+ input: options.input ?? DEFAULT_SPECS_DIR,
215
216
  query: options.query,
216
217
  output: options.output,
217
218
  types: options.types ? [options.types] : undefined
@@ -220,13 +221,13 @@ program
220
221
  program
221
222
  .command("context")
222
223
  .description("Render an AI-ready context block from existing snapshots")
223
- .option("--input <path>", "Snapshot output directory", "specs-out")
224
+ .option("--input <path>", "Snapshot output directory", DEFAULT_SPECS_DIR)
224
225
  .option("--output <path>", "Append the context block to a file")
225
226
  .option("--focus <text>", "Focus the context on a feature area")
226
227
  .option("--max-lines <count>", "Maximum number of lines to include")
227
228
  .action(async (options) => {
228
229
  await runContext({
229
- input: options.input ?? "specs-out",
230
+ input: options.input ?? DEFAULT_SPECS_DIR,
230
231
  output: options.output,
231
232
  focus: options.focus,
232
233
  maxLines: options.maxLines
@@ -239,7 +240,7 @@ program
239
240
  .requiredOption("--query <text>", "Feature or area to analyze (e.g. 'stripe', 'auth')")
240
241
  .option("--backend-root <path>", "Path to backend root")
241
242
  .option("--frontend-root <path>", "Path to frontend root")
242
- .option("--config <path>", "Path to specguard.config.json")
243
+ .option("--config <path>", "Path to guardian.config.json")
243
244
  .option("--output <path>", "Write report to a file instead of stdout")
244
245
  .option("--format <fmt>", "Output format: yaml or json (default: yaml)")
245
246
  .option("--ci", "Exit with code 1 when HIGH complexity is detected with strong confidence", false)
@@ -258,7 +259,7 @@ program
258
259
  program
259
260
  .command("intel")
260
261
  .description("Build codebase-intelligence.json from existing snapshots")
261
- .option("--specs <dir>", "Snapshot output directory", "specs-out")
262
+ .option("--specs <dir>", "Snapshot output directory", DEFAULT_SPECS_DIR)
262
263
  .option("--output <path>", "Output path for codebase-intelligence.json")
263
264
  .action(async (options) => {
264
265
  await runIntel({
@@ -270,7 +271,7 @@ program
270
271
  .command("feature-context")
271
272
  .description("Generate a filtered context packet for implementing a single feature")
272
273
  .requiredOption("--spec <file>", "Path to feature spec YAML")
273
- .option("--specs <dir>", "Snapshot output directory", "specs-out")
274
+ .option("--specs <dir>", "Snapshot output directory", DEFAULT_SPECS_DIR)
274
275
  .option("--output <path>", "Output path for feature context JSON")
275
276
  .action(async (options) => {
276
277
  await runFeatureContext({
@@ -282,7 +283,7 @@ program
282
283
  program
283
284
  .command("doc-generate")
284
285
  .description("Generate a human-readable product document from codebase intelligence")
285
- .option("--specs <dir>", "Snapshot output directory", "specs-out")
286
+ .option("--specs <dir>", "Snapshot output directory", DEFAULT_SPECS_DIR)
286
287
  .option("--feature-specs <dir>", "Directory of feature spec YAML files")
287
288
  .option("--output <path>", "Output path for product-document.md")
288
289
  .option("--update-baseline", "Freeze current state as new baseline for discrepancy tracking", false)
@@ -297,7 +298,7 @@ program
297
298
  program
298
299
  .command("discrepancy")
299
300
  .description("Diff current codebase intelligence against a committed baseline")
300
- .option("--specs <dir>", "Snapshot output directory", "specs-out")
301
+ .option("--specs <dir>", "Snapshot output directory", DEFAULT_SPECS_DIR)
301
302
  .option("--feature-specs <dir>", "Directory of feature spec YAML files")
302
303
  .option("--output <path>", "Output path (used when --format is json or md)")
303
304
  .option("--format <fmt>", "Output format: json, md, or both (default: both)", "both")
@@ -312,21 +313,21 @@ program
312
313
  program
313
314
  .command("doc-html")
314
315
  .description("Generate a self-contained Javadoc-style HTML viewer from codebase intelligence")
315
- .option("--specs <dir>", "Snapshot output directory", "specs-out")
316
+ .option("--specs <dir>", "Snapshot output directory", DEFAULT_SPECS_DIR)
316
317
  .option("--output <path>", "Output path for index.html")
317
318
  .action(async (options) => {
318
319
  await runDocHtml({
319
- specs: options.specs ?? "specs-out",
320
+ specs: options.specs ?? DEFAULT_SPECS_DIR,
320
321
  output: options.output,
321
322
  });
322
323
  });
323
324
  program
324
325
  .command("init")
325
- .description("Initialize specguard for a project (config, .specs dir, pre-commit hook, CLAUDE.md)")
326
+ .description("Initialize guardian for a project (config, .specs dir, pre-commit hook, CLAUDE.md)")
326
327
  .argument("[projectRoot]", "Repo or project root", process.cwd())
327
328
  .option("--backend-root <path>", "Path to backend root")
328
329
  .option("--frontend-root <path>", "Path to frontend root")
329
- .option("--output <path>", "Output directory", ".specs")
330
+ .option("--output <path>", "Output directory", DEFAULT_SPECS_DIR)
330
331
  .option("--skip-hook", "Skip pre-commit hook installation", false)
331
332
  .action(async (projectRoot, options) => {
332
333
  await runInit({
@@ -3,12 +3,13 @@ import fs from "node:fs/promises";
3
3
  import yaml from "js-yaml";
4
4
  import { buildSnapshots } from "../extract/index.js";
5
5
  import { analyzeDepth } from "../extract/analyzers/depth.js";
6
+ import { DEFAULT_SPECS_DIR } from "../config.js";
6
7
  export async function runAnalyzeDepth(options) {
7
8
  const { architecture } = await buildSnapshots({
8
9
  projectRoot: options.projectRoot,
9
10
  backendRoot: options.backendRoot,
10
11
  frontendRoot: options.frontendRoot,
11
- output: options.output ?? "specs-out",
12
+ output: options.output ?? DEFAULT_SPECS_DIR,
12
13
  includeFileGraph: true,
13
14
  configPath: options.configPath
14
15
  });
@@ -35,7 +36,7 @@ export async function runAnalyzeDepth(options) {
35
36
  if (options.ci &&
36
37
  report.classification.compressible === "NON_COMPRESSIBLE" &&
37
38
  report.confidence.value >= report.guardrails.enforce_if_confidence_above) {
38
- console.error(`\n[SpecGuard] CI FAIL: "${options.query}" classified as HIGH complexity (confidence ${report.confidence.value.toFixed(2)}).\n` +
39
+ console.error(`\n[Guardian] CI FAIL: "${options.query}" classified as HIGH complexity (confidence ${report.confidence.value.toFixed(2)}).\n` +
39
40
  `Recommended pattern: ${report.recommendation.primary.pattern}\n` +
40
41
  `Avoid: ${report.recommendation.avoid.join(", ")}`);
41
42
  process.exit(1);
@@ -4,8 +4,9 @@ import yaml from "js-yaml";
4
4
  import { loadArchitectureDiff, loadHeatmap } from "../extract/compress.js";
5
5
  import { renderContextBlock } from "../extract/context-block.js";
6
6
  import { resolveMachineInputDir } from "../output-layout.js";
7
+ import { DEFAULT_SPECS_DIR } from "../config.js";
7
8
  export async function runContext(options) {
8
- const inputDir = await resolveMachineInputDir(options.input || "specs-out");
9
+ const inputDir = await resolveMachineInputDir(options.input || DEFAULT_SPECS_DIR);
9
10
  const { architecture, ux } = await loadSnapshots(inputDir);
10
11
  const [diff, heatmap] = await Promise.all([
11
12
  loadArchitectureDiff(inputDir),
@@ -41,7 +42,7 @@ async function loadSnapshots(inputDir) {
41
42
  }
42
43
  catch (error) {
43
44
  if (error.code === "ENOENT") {
44
- throw new Error(`Could not find snapshots in ${inputDir}. Run \`specguard extract\` first.`);
45
+ throw new Error(`Could not find snapshots in ${inputDir}. Run \`guardian extract\` first.`);
45
46
  }
46
47
  throw error;
47
48
  }
@@ -1,5 +1,5 @@
1
1
  /**
2
- * `specguard discrepancy` — diff current codebase intelligence against a baseline.
2
+ * `guardian discrepancy` — diff current codebase intelligence against a baseline.
3
3
  *
4
4
  * Reads:
5
5
  * - specs-out/machine/codebase-intelligence.json
@@ -22,7 +22,7 @@ export async function runDiscrepancy(options) {
22
22
  // Load codebase intelligence
23
23
  const intelPath = path.join(layout.machineDir, "codebase-intelligence.json");
24
24
  const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
25
- throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`specguard intel --specs ${options.specs}\` first.`);
25
+ throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
26
26
  });
27
27
  const baselinePath = path.join(layout.machineDir, "product-document.baseline.json");
28
28
  const featureSpecsDir = options.featureSpecs ? path.resolve(options.featureSpecs) : null;
@@ -1,5 +1,5 @@
1
1
  /**
2
- * `specguard doc-generate` — generate a human-readable, self-updating product document.
2
+ * `guardian doc-generate` — generate a human-readable, self-updating product document.
3
3
  *
4
4
  * Reads:
5
5
  * - specs-out/machine/codebase-intelligence.json
@@ -12,8 +12,8 @@
12
12
  * - specs-out/machine/product-document.baseline.json (if --update-baseline)
13
13
  *
14
14
  * LLM env vars (optional — all deterministic sections write regardless):
15
- * SPECGUARD_LLM_ENDPOINT, SPECGUARD_LLM_API_KEY, SPECGUARD_LLM_MODEL
16
- * SPECGUARD_OLLAMA_HOST, SPECGUARD_OLLAMA_MODEL
15
+ * GUARDIAN_LLM_ENDPOINT, GUARDIAN_LLM_API_KEY, GUARDIAN_LLM_MODEL
16
+ * GUARDIAN_OLLAMA_HOST, GUARDIAN_OLLAMA_MODEL
17
17
  */
18
18
  import fs from "node:fs/promises";
19
19
  import path from "node:path";
@@ -32,7 +32,7 @@ export async function runDocGenerate(options) {
32
32
  const llmConfig = await loadLlmConfig();
33
33
  if (!llmConfig) {
34
34
  console.log("none (deterministic only)");
35
- console.log(" Tip: set SPECGUARD_LLM_ENDPOINT + SPECGUARD_LLM_API_KEY, or run Ollama locally, to add narrative summaries.");
35
+ console.log(" Tip: set GUARDIAN_LLM_ENDPOINT + GUARDIAN_LLM_API_KEY, or run Ollama locally, to add narrative summaries.");
36
36
  }
37
37
  else if (llmConfig.provider === "ollama") {
38
38
  console.log(`Ollama (${llmConfig.model} at ${llmConfig.endpoint.replace("/api/chat", "")})`);
@@ -46,7 +46,7 @@ export async function runDocGenerate(options) {
46
46
  process.stdout.write("Loading codebase intelligence... ");
47
47
  const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
48
48
  console.log("failed");
49
- throw new Error(`Could not load ${intelPath}. Run \`specguard intel --specs ${options.specs}\` first.`);
49
+ throw new Error(`Could not load ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
50
50
  });
51
51
  console.log(`${intel.meta.counts.endpoints} endpoints, ${intel.meta.counts.models} models, ` +
52
52
  `${intel.meta.counts.enums} enums, ${intel.meta.counts.tasks} tasks`);
@@ -158,6 +158,6 @@ export async function runDocGenerate(options) {
158
158
  // ── Done ──────────────────────────────────────────────────────────────────
159
159
  if (discrepancies.summary.total_issues > 0) {
160
160
  const critical = discrepancies.summary.has_critical ? " (critical changes detected)" : "";
161
- console.log(` ⚠ ${discrepancies.summary.total_issues} discrepancy(s) found${critical}. Run \`specguard discrepancy\` for details.`);
161
+ console.log(` ⚠ ${discrepancies.summary.total_issues} discrepancy(s) found${critical}. Run \`guardian discrepancy\` for details.`);
162
162
  }
163
163
  }
@@ -1,5 +1,5 @@
1
1
  /**
2
- * `specguard doc-html` — generate a self-contained Javadoc-style HTML viewer.
2
+ * `guardian doc-html` — generate a self-contained Javadoc-style HTML viewer.
3
3
  *
4
4
  * Same data pipeline as `doc-generate` but outputs a single index.html with:
5
5
  * - Fixed sidebar navigation (collapsible, searchable)
@@ -23,7 +23,7 @@ export async function runDocHtml(options) {
23
23
  process.stdout.write("Loading codebase intelligence... ");
24
24
  const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
25
25
  console.log("failed");
26
- throw new Error(`Could not load ${intelPath}. Run \`specguard intel --specs ${options.specs}\` first.`);
26
+ throw new Error(`Could not load ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
27
27
  });
28
28
  console.log(`${intel.meta.counts.endpoints} endpoints, ${intel.meta.counts.models} models`);
29
29
  // ── Feature arcs (optional) ───────────────────────────────────────────────
@@ -62,7 +62,7 @@ export async function runDrift(options) {
62
62
  await fs.appendFile(resolvedHistory, `${JSON.stringify(entry)}\n`);
63
63
  console.log(`Appended history ${resolvedHistory}`);
64
64
  }
65
- console.log("SpecGuard Drift Report");
65
+ console.log("Guardian Drift Report");
66
66
  console.log(`Status: ${drift.status}`);
67
67
  console.log(`D_t: ${drift.D_t.toFixed(4)}`);
68
68
  console.log(`K_t: ${drift.K_t.toFixed(4)}`);
@@ -1,6 +1,9 @@
1
+ import fs from "node:fs/promises";
1
2
  import path from "node:path";
2
3
  import { extractProject } from "../extract/index.js";
3
4
  import { runIntel } from "./intel.js";
5
+ import { runGenerate } from "./generate.js";
6
+ import { runContext } from "./context.js";
4
7
  export async function runExtract(options) {
5
8
  const { architecturePath, uxPath } = await extractProject(options);
6
9
  console.log(`Wrote ${architecturePath}`);
@@ -13,4 +16,26 @@ export async function runExtract(options) {
13
16
  catch {
14
17
  // Non-fatal — intel build failure should not break extract
15
18
  }
19
+ // Auto-generate AI context + inject into CLAUDE.md
20
+ const projectRoot = path.resolve(options.projectRoot || process.cwd());
21
+ try {
22
+ await runGenerate({
23
+ projectRoot,
24
+ backendRoot: options.backendRoot,
25
+ frontendRoot: options.frontendRoot,
26
+ output: specsDir,
27
+ aiContext: true,
28
+ });
29
+ const claudeMdPath = path.join(projectRoot, "CLAUDE.md");
30
+ try {
31
+ await fs.stat(claudeMdPath);
32
+ await runContext({ input: specsDir, output: claudeMdPath });
33
+ }
34
+ catch {
35
+ // No CLAUDE.md — skip context injection
36
+ }
37
+ }
38
+ catch {
39
+ // Non-fatal — context generation failure should not break extract
40
+ }
16
41
  }
@@ -1,5 +1,5 @@
1
1
  /**
2
- * `specguard feature-context` — generate a filtered, self-contained context packet
2
+ * `guardian feature-context` — generate a filtered, self-contained context packet
3
3
  * for implementing a single feature.
4
4
  *
5
5
  * Analogous to `chapter-context` in the book workflow: given a feature spec YAML,
@@ -26,7 +26,7 @@ export async function runFeatureContext(options) {
26
26
  // Load codebase intelligence
27
27
  const intelPath = path.join(layout.machineDir, "codebase-intelligence.json");
28
28
  const intel = await loadCodebaseIntelligence(intelPath).catch(() => {
29
- throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`specguard intel --specs ${options.specs}\` first.`);
29
+ throw new Error(`Could not load codebase-intelligence.json from ${intelPath}. Run \`guardian intel --specs ${options.specs}\` first.`);
30
30
  });
31
31
  // Build filtered context
32
32
  const context = buildFeatureContext(spec, intel);
@@ -3,12 +3,13 @@ import path from "node:path";
3
3
  import { buildSnapshots } from "../extract/index.js";
4
4
  import { renderContextBlock } from "../extract/context-block.js";
5
5
  import { getOutputLayout } from "../output-layout.js";
6
+ import { DEFAULT_SPECS_DIR } from "../config.js";
6
7
  import { analyzeDepth } from "../extract/analyzers/depth.js";
7
8
  export async function runGenerate(options) {
8
9
  if (!options.aiContext) {
9
- throw new Error("`specguard generate` currently supports `--ai-context` only.");
10
+ throw new Error("`guardian generate` currently supports `--ai-context` only.");
10
11
  }
11
- const outputRoot = path.resolve(options.output ?? "specs-out");
12
+ const outputRoot = path.resolve(options.output ?? DEFAULT_SPECS_DIR);
12
13
  const layout = getOutputLayout(outputRoot);
13
14
  const { architecture, ux } = await buildSnapshots({
14
15
  projectRoot: options.projectRoot,
@@ -18,7 +19,7 @@ export async function runGenerate(options) {
18
19
  includeFileGraph: true,
19
20
  configPath: options.configPath
20
21
  });
21
- // Load persisted Structural Intelligence reports emitted by `specguard extract`
22
+ // Load persisted Structural Intelligence reports emitted by `guardian extract`
22
23
  const siReports = await loadStructuralIntelligenceReports(layout.machineDir);
23
24
  // If a --focus query is provided, prepend a real-time SI report for that query
24
25
  if (options.focus) {
@@ -6,6 +6,7 @@ import { runSimulate } from "./simulate.js";
6
6
  import { buildSnapshots } from "../extract/index.js";
7
7
  import { renderContextBlock } from "../extract/context-block.js";
8
8
  import { logResolvedProjectPaths, resolveProjectPaths } from "../project-discovery.js";
9
+ import { DEFAULT_SPECS_DIR } from "../config.js";
9
10
  export async function runGuard(options) {
10
11
  const resolved = await resolveProjectPaths({
11
12
  projectRoot: options.projectRoot,
@@ -30,7 +31,7 @@ export async function runGuard(options) {
30
31
  projectRoot: resolved.workspaceRoot,
31
32
  backendRoot: resolved.backendRoot,
32
33
  frontendRoot: resolved.frontendRoot,
33
- output: "specs-out",
34
+ output: DEFAULT_SPECS_DIR,
34
35
  includeFileGraph: true,
35
36
  configPath: options.configPath
36
37
  });
@@ -12,6 +12,7 @@
12
12
  */
13
13
  import fs from "node:fs/promises";
14
14
  import path from "node:path";
15
+ import { DEFAULT_SPECS_DIR } from "../config.js";
15
16
  const DEFAULT_CONFIG = {
16
17
  project: {
17
18
  backendRoot: "./backend",
@@ -47,7 +48,7 @@ exit 0
47
48
  `;
48
49
  export async function runInit(options) {
49
50
  const root = path.resolve(options.projectRoot || process.cwd());
50
- const specsDir = path.join(root, options.output || ".specs");
51
+ const specsDir = path.join(root, options.output || DEFAULT_SPECS_DIR);
51
52
  console.log(`Initializing Guardian in ${root}\n`);
52
53
  // 1. Create .specs/ directory
53
54
  await fs.mkdir(path.join(specsDir, "machine", "docs"), { recursive: true });
@@ -145,7 +146,7 @@ export async function runInit(options) {
145
146
  const content = [
146
147
  `# ${projectName}`,
147
148
  "",
148
- "## SpecGuard Architecture Context",
149
+ "## Guardian Architecture Context",
149
150
  "",
150
151
  "<!-- guardian:auto-context -->",
151
152
  "<!-- This block is auto-updated by guardian. Do not edit manually. -->",
@@ -181,7 +182,7 @@ export async function runInit(options) {
181
182
  input: specsDir,
182
183
  output: claudeMdPath,
183
184
  });
184
- console.log("\n✓ SpecGuard initialized. Architecture context is in CLAUDE.md and .specs/");
185
+ console.log("\n✓ Guardian initialized. Architecture context is in CLAUDE.md and .specs/");
185
186
  console.log(" Pre-commit hook will keep it fresh on every commit.");
186
187
  }
187
188
  catch (err) {
@@ -1,10 +1,10 @@
1
1
  /**
2
- * `specguard intel` — build codebase-intelligence.json from existing snapshots.
2
+ * `guardian intel` — build codebase-intelligence.json from existing snapshots.
3
3
  *
4
4
  * Reads: specs-out/machine/architecture.snapshot.yaml + ux.snapshot.yaml
5
5
  * Writes: specs-out/machine/codebase-intelligence.json
6
6
  *
7
- * Also auto-runs at the end of `specguard extract`.
7
+ * Also auto-runs at the end of `guardian extract`.
8
8
  */
9
9
  import path from "node:path";
10
10
  import { writeCodebaseIntelligence } from "../extract/codebase-intel.js";
@@ -3,8 +3,9 @@ import path from "node:path";
3
3
  import yaml from "js-yaml";
4
4
  import { loadHeatmap } from "../extract/compress.js";
5
5
  import { resolveMachineInputDir } from "../output-layout.js";
6
+ import { DEFAULT_SPECS_DIR } from "../config.js";
6
7
  export async function runSearch(options) {
7
- const inputDir = await resolveMachineInputDir(options.input || "specs-out");
8
+ const inputDir = await resolveMachineInputDir(options.input || DEFAULT_SPECS_DIR);
8
9
  const { architecture, ux } = await loadSnapshots(inputDir);
9
10
  const heatmap = await loadHeatmap(inputDir);
10
11
  const types = normalizeTypes(options.types);
@@ -38,7 +39,7 @@ async function loadSnapshots(inputDir) {
38
39
  }
39
40
  catch (error) {
40
41
  if (error.code === "ENOENT") {
41
- throw new Error(`Could not find snapshots in ${inputDir}. Run \`specguard extract\` first.`);
42
+ throw new Error(`Could not find snapshots in ${inputDir}. Run \`guardian extract\` first.`);
42
43
  }
43
44
  throw error;
44
45
  }
@@ -6,6 +6,7 @@ import { spawn } from "node:child_process";
6
6
  import yaml from "js-yaml";
7
7
  import { buildSnapshots } from "../extract/index.js";
8
8
  import { buildArchitectureSummary, loadArchitectureSummary } from "../extract/compress.js";
9
+ import { DEFAULT_SPECS_DIR } from "../config.js";
9
10
  import { createIgnoreMatcher } from "../extract/ignore.js";
10
11
  import { logResolvedProjectPaths, resolveProjectPaths } from "../project-discovery.js";
11
12
  export async function runSimulate(options) {
@@ -106,7 +107,7 @@ async function resolveBaselineSummaryPath(params) {
106
107
  if (params.override) {
107
108
  candidates.push(params.override);
108
109
  }
109
- candidates.push(path.join(params.projectRoot, "specs-out", "machine", "architecture.summary.json"));
110
+ candidates.push(path.join(params.projectRoot, DEFAULT_SPECS_DIR, "machine", "architecture.summary.json"));
110
111
  for (const candidate of candidates) {
111
112
  const resolved = path.isAbsolute(candidate)
112
113
  ? candidate
@@ -241,7 +242,7 @@ function buildSuggestions(reasons) {
241
242
  return Array.from(suggestions);
242
243
  }
243
244
  async function createTempCopy(sourceRoot, config) {
244
- const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "specguard-sim-"));
245
+ const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "guardian-sim-"));
245
246
  const ignore = createIgnoreMatcher(config, sourceRoot);
246
247
  await fs.cp(sourceRoot, tempRoot, {
247
248
  recursive: true,
@@ -4,8 +4,9 @@ import yaml from "js-yaml";
4
4
  import { renderExecutiveSummary } from "../extract/docs.js";
5
5
  import { loadArchitectureSummary, loadArchitectureDiff, loadHeatmap } from "../extract/compress.js";
6
6
  import { resolveMachineInputDir } from "../output-layout.js";
7
+ import { DEFAULT_SPECS_DIR } from "../config.js";
7
8
  export async function runSummary(options) {
8
- const inputDir = await resolveMachineInputDir(options.input || "specs-out");
9
+ const inputDir = await resolveMachineInputDir(options.input || DEFAULT_SPECS_DIR);
9
10
  const architecturePath = path.join(inputDir, "architecture.snapshot.yaml");
10
11
  const uxPath = path.join(inputDir, "ux.snapshot.yaml");
11
12
  const [architectureRaw, uxRaw] = await Promise.all([
@@ -31,7 +31,7 @@ export async function runVerifyDrift(options) {
31
31
  }
32
32
  const threshold = options.strictThreshold ? parseFloat(options.strictThreshold) : 0.15;
33
33
  console.log("=========================================");
34
- console.log("SpecGuard Drift Verification");
34
+ console.log("Guardian Drift Verification");
35
35
  console.log("=========================================\n");
36
36
  console.log(`Current Status: ${drift.status}`);
37
37
  console.log(`Current Delta: ${drift.delta.toFixed(4)}`);
package/dist/config.js CHANGED
@@ -1,5 +1,7 @@
1
1
  import fs from "node:fs/promises";
2
2
  import path from "node:path";
3
+ /** Single source of truth for the default specs output directory */
4
+ export const DEFAULT_SPECS_DIR = ".specs";
3
5
  const DEFAULT_CONFIG = {
4
6
  project: {
5
7
  root: "",
@@ -29,8 +31,13 @@ const DEFAULT_CONFIG = {
29
31
  "log",
30
32
  "tmp",
31
33
  "cache",
32
- "specs-out",
33
- "ghost-out"
34
+ ".specs",
35
+ "ghost-out",
36
+ "ios",
37
+ "android",
38
+ ".expo",
39
+ ".turbo",
40
+ "web-build"
34
41
  ],
35
42
  paths: []
36
43
  },
@@ -77,6 +84,9 @@ const DEFAULT_CONFIG = {
77
84
  timeoutMs: 120000,
78
85
  promptTemplate: ""
79
86
  },
87
+ output: {
88
+ specsDir: DEFAULT_SPECS_DIR
89
+ },
80
90
  docs: {
81
91
  mode: "lean",
82
92
  internalDir: "internal"
@@ -338,6 +348,9 @@ function mergeConfig(base, override) {
338
348
  timeoutMs: override.llm?.timeoutMs ?? base.llm?.timeoutMs ?? 120000,
339
349
  promptTemplate: override.llm?.promptTemplate ?? base.llm?.promptTemplate ?? ""
340
350
  },
351
+ output: {
352
+ specsDir: override.output?.specsDir ?? base.output?.specsDir ?? DEFAULT_SPECS_DIR
353
+ },
341
354
  docs: {
342
355
  mode: override.docs?.mode ?? base.docs?.mode ?? "lean",
343
356
  internalDir: override.docs?.internalDir ?? base.docs?.internalDir ?? "internal"
@@ -1041,7 +1041,7 @@ function emptyPythonFileResult() {
1041
1041
  endpointModelUsage: []
1042
1042
  };
1043
1043
  }
1044
- export async function analyzeBackend(backendRoot, config) {
1044
+ export async function analyzeBackend(backendRoot, config, workspaceRoot) {
1045
1045
  const root = path.resolve(backendRoot);
1046
1046
  const baseRoot = path.dirname(root);
1047
1047
  const ignore = createIgnoreMatcher(config, baseRoot);
@@ -1111,8 +1111,9 @@ export async function analyzeBackend(backendRoot, config) {
1111
1111
  }
1112
1112
  codeFiles.sort((a, b) => a.localeCompare(b));
1113
1113
  const knownFiles = new Set(codeFiles);
1114
+ const cacheRoot = workspaceRoot ? path.resolve(workspaceRoot) : baseRoot;
1114
1115
  const { cachePath, cache } = await loadBackendExtractionCache({
1115
- projectRoot: baseRoot,
1116
+ projectRoot: cacheRoot,
1116
1117
  config
1117
1118
  });
1118
1119
  const activeAbsoluteFiles = new Set(codeFiles.map((file) => path.join(baseRoot, file)));
@@ -1,9 +1,10 @@
1
1
  import crypto from "node:crypto";
2
2
  import fs from "node:fs/promises";
3
3
  import path from "node:path";
4
- const BACKEND_CACHE_VERSION = "specguard-backend-cache-v4";
4
+ import { DEFAULT_SPECS_DIR } from "../config.js";
5
+ const BACKEND_CACHE_VERSION = "guardian-backend-cache-v4";
5
6
  export async function loadBackendExtractionCache(params) {
6
- const cachePath = path.join(params.projectRoot, "specs-out", ".cache", "file-hashes.json");
7
+ const cachePath = path.join(params.projectRoot, DEFAULT_SPECS_DIR, ".cache", "file-hashes.json");
7
8
  const configHash = hashObject(params.config);
8
9
  try {
9
10
  const raw = await fs.readFile(cachePath, "utf8");
@@ -1,5 +1,5 @@
1
1
  /**
2
- * Docs Loader — loads and parses sections from existing specguard doc files.
2
+ * Docs Loader — loads and parses sections from existing guardian doc files.
3
3
  *
4
4
  * Reads from specs-out/machine/docs/:
5
5
  * hld.md → systemDiagram, couplingHeatmap, backendSubsystems, apiDomainMap
@@ -189,7 +189,7 @@ function renderIndex(architecture, ux, options) {
189
189
  const internalFiles = options.internalFiles ?? [];
190
190
  const internalDir = options.internalDir ?? "internal";
191
191
  return [
192
- section("SpecGuard Overview"),
192
+ section("Guardian Overview"),
193
193
  `Project: **${architecture.project.name}**\n\n`,
194
194
  renderTable(["Metric", "Count", "Notes"], [
195
195
  ["Modules", String(architecture.modules.length), "Backend modules"],
@@ -211,7 +211,7 @@ function renderIndex(architecture, ux, options) {
211
211
  }
212
212
  function renderHumanRootReadme(architecture) {
213
213
  return [
214
- "# SpecGuard Output",
214
+ "# Guardian Output",
215
215
  "",
216
216
  `Project: **${architecture.project.name}**`,
217
217
  "",
@@ -381,7 +381,7 @@ function renderHumanDataAndFlows(architecture, ux) {
381
381
  [
382
382
  "unverified",
383
383
  String(architecture.cross_stack_contracts.length - verified.length),
384
- "SpecGuard could not confidently infer enough fields yet"
384
+ "Guardian could not confidently infer enough fields yet"
385
385
  ]
386
386
  ]));
387
387
  lines.push("Next: [Change Guide](./change-guide.md)\n");
@@ -472,7 +472,7 @@ export function renderExecutiveSummary(architecture, ux, meta) {
472
472
  summaryLines.push(section("Product Summary"));
473
473
  summaryLines.push(`Project: **${architecture.project.name}**\n\n`);
474
474
  summaryLines.push(`Snapshot date: **${formatTimestamp(generatedAt)}**\n\n`);
475
- summaryLines.push("SpecGuard produces living, machine‑verified documentation for your codebase so teams can align on architecture, detect drift, and share an accurate system spec without manual doc maintenance.\n\n");
475
+ summaryLines.push("Guardian produces living, machine‑verified documentation for your codebase so teams can align on architecture, detect drift, and share an accurate system spec without manual doc maintenance.\n\n");
476
476
  summaryLines.push("## Vision\n\n");
477
477
  summaryLines.push("Enable teams to treat architecture as a first‑class, continuously verifiable product artifact—" +
478
478
  "not a static diagram or an outdated wiki.\n\n");
@@ -2087,7 +2087,7 @@ function renderDiff(architecture, meta) {
2087
2087
  const lines = [];
2088
2088
  lines.push(section("Snapshot Changelog"));
2089
2089
  if (!meta?.diff) {
2090
- lines.push("*No previous summary available. Run SpecGuard twice to generate a diff.*\n\n");
2090
+ lines.push("*No previous summary available. Run Guardian twice to generate a diff.*\n\n");
2091
2091
  return lines.join("");
2092
2092
  }
2093
2093
  lines.push(`Structural change: **${meta.diff.structural_change ? "yes" : "no"}** \n` +
@@ -2166,7 +2166,7 @@ function renderIntegrationGuide(snapshot) {
2166
2166
  const mismatchedCount = verifiedContracts.filter((contract) => contract.status === "mismatched").length;
2167
2167
  const unverifiedCount = snapshot.cross_stack_contracts.length - verifiedContracts.length;
2168
2168
  lines.push(`Verified: ${verifiedContracts.length} contracts (${mismatchedCount} mismatched). Unverified: ${unverifiedCount}.`);
2169
- lines.push(" Run `specguard extract --include-file-graph` for richer caller inference.\n\n");
2169
+ lines.push(" Run `guardian extract --include-file-graph` for richer caller inference.\n\n");
2170
2170
  if (verifiedContracts.length === 0) {
2171
2171
  lines.push("*No verified frontend/backend contracts detected yet.*\n\n");
2172
2172
  }
@@ -2251,7 +2251,8 @@ export async function writeDocs(outputRoot, architecture, ux, options) {
2251
2251
  { name: "data.md", content: renderData(architecture, "lean") },
2252
2252
  { name: "tests.md", content: renderTests(architecture) }
2253
2253
  ];
2254
- const fullFiles = [
2254
+ // Internal-only files: richer versions of lean files + files that only exist in full mode
2255
+ const fullOnlyFiles = [
2255
2256
  { name: "index.md", content: renderIndex(architecture, ux, { docsFiles: FULL_INDEX_FILES }) },
2256
2257
  {
2257
2258
  name: "summary.md",
@@ -2273,27 +2274,10 @@ export async function writeDocs(outputRoot, architecture, ux, options) {
2273
2274
  })
2274
2275
  },
2275
2276
  { name: "architecture.md", content: renderArchitecture(architecture, { summary, diff, heatmap }) },
2276
- { name: "ux.md", content: renderUx(ux) },
2277
2277
  { name: "data.md", content: renderData(architecture, "full") },
2278
2278
  { name: "data_dictionary.md", content: renderDataDictionary(architecture) },
2279
- { name: "integration.md", content: renderIntegrationGuide(architecture) },
2280
- {
2281
- name: "diff.md",
2282
- content: renderDiff(architecture, {
2283
- summary,
2284
- diff,
2285
- previous: options?.previous?.architecture ?? null
2286
- })
2287
- },
2288
2279
  { name: "test_coverage.md", content: renderTestCoverage(architecture) },
2289
- { name: "runtime.md", content: renderRuntime(architecture) },
2290
- { name: "infra.md", content: renderInfra(architecture) },
2291
- {
2292
- name: "hld.md",
2293
- content: renderHld(architecture, ux, driftHistory, { summary, diff, heatmap })
2294
- },
2295
2280
  { name: "lld.md", content: renderLld(architecture, ux, "full") },
2296
- { name: "tests.md", content: renderTests(architecture) }
2297
2281
  ];
2298
2282
  const written = [];
2299
2283
  await fs.writeFile(path.join(layout.rootDir, "README.md"), renderHumanRootReadme(architecture));
@@ -2306,7 +2290,7 @@ export async function writeDocs(outputRoot, architecture, ux, options) {
2306
2290
  if (docsMode === "full") {
2307
2291
  const internalDir = layout.machineInternalDir;
2308
2292
  await fs.mkdir(internalDir, { recursive: true });
2309
- for (const file of fullFiles) {
2293
+ for (const file of fullOnlyFiles) {
2310
2294
  const target = path.join(internalDir, file.name);
2311
2295
  await fs.writeFile(target, file.content);
2312
2296
  written.push(target);
@@ -374,7 +374,7 @@ function renderQualityPage(intel, docs) {
374
374
  // Quality signals
375
375
  parts.push(`<section id="quality-signals"><h2>Quality Signals</h2>${docs.qualitySignals
376
376
  ? renderMd(docs.qualitySignals)
377
- : `<p class="muted">Run <code>specguard extract</code> to populate quality signals.</p>`}</section>`);
377
+ : `<p class="muted">Run <code>guardian extract</code> to populate quality signals.</p>`}</section>`);
378
378
  // Pattern registry
379
379
  const active = intel.pattern_registry.patterns.filter((p) => p.occurrences > 0);
380
380
  const rows = active.map((p) => {
@@ -28,7 +28,7 @@ export async function buildSnapshots(options) {
28
28
  const reportedFrontendRoot = formatOutputPath(resolvedFrontendRoot);
29
29
  const reportedWorkspaceRoot = formatOutputPath(workspaceRoot);
30
30
  const config = resolvedProject.config;
31
- const backend = await analyzeBackend(resolvedBackendRoot, config);
31
+ const backend = await analyzeBackend(resolvedBackendRoot, config, workspaceRoot);
32
32
  const frontend = await analyzeFrontend(resolvedFrontendRoot, config);
33
33
  const projectRoot = workspaceRoot;
34
34
  const runtime = await analyzeRuntime(workspaceRoot, config);
@@ -2,16 +2,16 @@
2
2
  * LLM Client — thin HTTP client for LLM-powered doc generation.
3
3
  *
4
4
  * Resolution order:
5
- * 1. Configured LLM — SPECGUARD_LLM_ENDPOINT + SPECGUARD_LLM_API_KEY both set
6
- * 2. Ollama fallback — SPECGUARD_LLM_ENDPOINT/KEY not set, Ollama reachable at localhost
5
+ * 1. Configured LLM — GUARDIAN_LLM_ENDPOINT + GUARDIAN_LLM_API_KEY both set
6
+ * 2. Ollama fallback — GUARDIAN_LLM_ENDPOINT/KEY not set, Ollama reachable at localhost
7
7
  * 3. None — returns null from loadLlmConfig(), callers write placeholder text
8
8
  *
9
9
  * Env vars:
10
- * SPECGUARD_LLM_ENDPOINT — full URL e.g. https://api.anthropic.com/v1/messages
11
- * SPECGUARD_LLM_API_KEY — API key (not required for Ollama)
12
- * SPECGUARD_LLM_MODEL — model ID (optional)
13
- * SPECGUARD_OLLAMA_HOST — Ollama base URL (default: http://localhost:11434)
14
- * SPECGUARD_OLLAMA_MODEL — Ollama model (default: llama3.2)
10
+ * GUARDIAN_LLM_ENDPOINT — full URL e.g. https://api.anthropic.com/v1/messages
11
+ * GUARDIAN_LLM_API_KEY — API key (not required for Ollama)
12
+ * GUARDIAN_LLM_MODEL — model ID (optional)
13
+ * GUARDIAN_OLLAMA_HOST — Ollama base URL (default: http://localhost:11434)
14
+ * GUARDIAN_OLLAMA_MODEL — Ollama model (default: llama3.2)
15
15
  *
16
16
  * Wire formats (auto-detected from endpoint URL):
17
17
  * anthropic — POST /v1/messages { model, max_tokens, system, messages }
@@ -28,21 +28,21 @@ const DEFAULT_MAX_TOKENS = 2048;
28
28
  * Load LLM config from environment variables.
29
29
  *
30
30
  * Resolution order:
31
- * 1. SPECGUARD_LLM_ENDPOINT + SPECGUARD_LLM_API_KEY → configured cloud/local LLM
32
- * 2. Ollama reachable at SPECGUARD_OLLAMA_HOST (or localhost:11434) → Ollama fallback
31
+ * 1. GUARDIAN_LLM_ENDPOINT + GUARDIAN_LLM_API_KEY → configured cloud/local LLM
32
+ * 2. Ollama reachable at GUARDIAN_OLLAMA_HOST (or localhost:11434) → Ollama fallback
33
33
  * 3. null → no LLM available
34
34
  */
35
35
  export async function loadLlmConfig() {
36
36
  // Priority 1: explicit endpoint + key
37
- const endpoint = process.env["SPECGUARD_LLM_ENDPOINT"];
38
- const apiKey = process.env["SPECGUARD_LLM_API_KEY"];
37
+ const endpoint = process.env["GUARDIAN_LLM_ENDPOINT"];
38
+ const apiKey = process.env["GUARDIAN_LLM_API_KEY"];
39
39
  if (endpoint && apiKey) {
40
- const model = process.env["SPECGUARD_LLM_MODEL"] ?? DEFAULT_CLOUD_MODEL;
40
+ const model = process.env["GUARDIAN_LLM_MODEL"] ?? DEFAULT_CLOUD_MODEL;
41
41
  return { endpoint, apiKey, model, provider: detectProvider(endpoint) };
42
42
  }
43
43
  // Priority 2: Ollama fallback
44
- const ollamaHost = process.env["SPECGUARD_OLLAMA_HOST"] ?? DEFAULT_OLLAMA_HOST;
45
- const ollamaModel = process.env["SPECGUARD_OLLAMA_MODEL"] ?? DEFAULT_OLLAMA_MODEL;
44
+ const ollamaHost = process.env["GUARDIAN_OLLAMA_HOST"] ?? DEFAULT_OLLAMA_HOST;
45
+ const ollamaModel = process.env["GUARDIAN_OLLAMA_MODEL"] ?? DEFAULT_OLLAMA_MODEL;
46
46
  if (await isOllamaReachable(ollamaHost)) {
47
47
  return {
48
48
  endpoint: `${ollamaHost}/api/chat`,
@@ -58,11 +58,11 @@ export async function loadLlmConfig() {
58
58
  * Use when async is not possible, or when you want to skip Ollama discovery.
59
59
  */
60
60
  export function loadLlmConfigSync() {
61
- const endpoint = process.env["SPECGUARD_LLM_ENDPOINT"];
62
- const apiKey = process.env["SPECGUARD_LLM_API_KEY"];
61
+ const endpoint = process.env["GUARDIAN_LLM_ENDPOINT"];
62
+ const apiKey = process.env["GUARDIAN_LLM_API_KEY"];
63
63
  if (!endpoint || !apiKey)
64
64
  return null;
65
- const model = process.env["SPECGUARD_LLM_MODEL"] ?? DEFAULT_CLOUD_MODEL;
65
+ const model = process.env["GUARDIAN_LLM_MODEL"] ?? DEFAULT_CLOUD_MODEL;
66
66
  return { endpoint, apiKey, model, provider: detectProvider(endpoint) };
67
67
  }
68
68
  /**
@@ -1,5 +1,6 @@
1
1
  import path from "node:path";
2
2
  import fs from "node:fs/promises";
3
+ import { DEFAULT_SPECS_DIR } from "./config.js";
3
4
  export function getOutputLayout(outputRoot, internalDir = "internal") {
4
5
  const rootDir = path.resolve(outputRoot);
5
6
  const machineDir = path.join(rootDir, "machine");
@@ -12,7 +13,7 @@ export function getOutputLayout(outputRoot, internalDir = "internal") {
12
13
  };
13
14
  }
14
15
  export async function resolveMachineInputDir(input) {
15
- const resolved = path.resolve(input || "specs-out");
16
+ const resolved = path.resolve(input || DEFAULT_SPECS_DIR);
16
17
  const directSnapshot = await hasMachineSnapshots(resolved);
17
18
  if (directSnapshot) {
18
19
  return resolved;
@@ -1,6 +1,6 @@
1
1
  import fs from "node:fs/promises";
2
2
  import path from "node:path";
3
- import { loadSpecGuardConfig } from "./config.js";
3
+ import { DEFAULT_SPECS_DIR, loadSpecGuardConfig } from "./config.js";
4
4
  const IGNORE_DIRS = new Set([
5
5
  ".git",
6
6
  "node_modules",
@@ -11,7 +11,7 @@ const IGNORE_DIRS = new Set([
11
11
  "__pycache__",
12
12
  ".venv",
13
13
  "venv",
14
- "specs-out",
14
+ DEFAULT_SPECS_DIR,
15
15
  ".pytest_cache",
16
16
  ".mypy_cache",
17
17
  ".turbo"
@@ -62,7 +62,7 @@ export async function resolveProjectPaths(options) {
62
62
  };
63
63
  }
64
64
  export function logResolvedProjectPaths(resolved) {
65
- console.log(`SpecGuard roots (${resolved.resolutionSource}): workspace=${resolved.workspaceRoot} backend=${resolved.backendRoot} frontend=${resolved.frontendRoot}`);
65
+ console.log(`Guardian roots (${resolved.resolutionSource}): workspace=${resolved.workspaceRoot} backend=${resolved.backendRoot} frontend=${resolved.frontendRoot}`);
66
66
  }
67
67
  async function chooseBackendRoot(workspaceRoot) {
68
68
  const candidates = await discoverBackendCandidates(workspaceRoot);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@toolbaux/guardian",
3
- "version": "0.1.2",
3
+ "version": "0.1.4",
4
4
  "type": "module",
5
5
  "description": "Architectural intelligence for codebases. Verify that AI-generated code matches your architectural intent.",
6
6
  "keywords": [