wicked-brain 0.9.2 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +50 -19
- package/package.json +1 -1
- package/server/bin/onboard-wiki.mjs +36 -0
- package/server/bin/wicked-brain-server.mjs +82 -2
- package/server/lib/brain-walker.mjs +78 -0
- package/server/lib/canonical-registry.mjs +128 -0
- package/server/lib/detect-mode.mjs +233 -0
- package/server/lib/frontmatter.mjs +204 -0
- package/server/lib/gen-contract-api.mjs +178 -0
- package/server/lib/gen-contract-schema.mjs +200 -0
- package/server/lib/gen-file-map.mjs +121 -0
- package/server/lib/lint-wiki.mjs +168 -0
- package/server/lib/mode-config.mjs +120 -0
- package/server/lib/mode.schema.json +53 -0
- package/server/lib/onboard-wiki.mjs +97 -0
- package/server/lib/sqlite-search.mjs +334 -7
- package/server/lib/stamp-pointer.mjs +103 -0
- package/server/lib/viewer-page.mjs +1096 -0
- package/server/package.json +8 -3
- package/skills/wicked-brain-agent/agents/onboard.md +161 -50
- package/skills/wicked-brain-compile/SKILL.md +42 -0
- package/skills/wicked-brain-ui/SKILL.md +137 -0
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
/**
 * Contract schema generator.
 *
 * Reads `sqlite-search.mjs` and extracts:
 * - Every `CREATE TABLE IF NOT EXISTS <name>(...)` → tables + columns.
 * - The migration ladder from `#migrate()` — each `if (currentVersion < N)`
 *   block becomes a migration entry with its notes and the columns/tables
 *   it adds.
 *
 * Pure functions — no I/O. CLI glue writes the outputs.
 */

// "IF NOT EXISTS" is optional so we match CREATE TABLEs in migrations too.
// NOTE: these /g regexes carry lastIndex state; scanners reset it before use.
const CREATE_TABLE_RE =
  /CREATE\s+TABLE(?:\s+IF\s+NOT\s+EXISTS)?\s+(\w+)\s*\(([\s\S]*?)\)\s*;/g;

// Captures: (1) table, (2) column, (3) declared type words.
const ALTER_ADD_COLUMN_RE =
  /ALTER\s+TABLE\s+(\w+)\s+ADD\s+COLUMN\s+(\w+)\s+([A-Z]+(?:\s+[A-Z]+)*)/gi;

// Match `if (currentVersion < N) { ... }`. Body is balanced-brace-captured
// heuristically by scanning outward from the match until depth zero.
const MIGRATION_OPEN_RE = /if\s*\(\s*currentVersion\s*<\s*(\d+)\s*\)\s*\{/g;
|
|
23
|
+
|
|
24
|
+
/**
 * Extract schema + migrations from sqlite-search.mjs source text.
 *
 * @param {string} source - Full text of sqlite-search.mjs.
 * @returns {{tables: Array, migrations: Array}} parsed schema description.
 */
export function extractSchema(source) {
  return {
    tables: extractTables(source),
    migrations: extractMigrations(source),
  };
}
|
|
32
|
+
|
|
33
|
+
/**
 * Render contract-schema.md.
 *
 * @param {{tables: Array, migrations: Array, generatedAt: string, sourcePath: string}} input
 * @returns {string} markdown document, newline-terminated.
 */
export function renderContractSchema({ tables, migrations, generatedAt, sourcePath }) {
  // Frontmatter first; `generated: true` marks the page as machine-owned.
  const out = [
    "---",
    "status: published",
    "canonical_for: [CONTRACT-SCHEMA]",
    "references: [INV-MIGRATION-REQUIRED]",
    "owner: core",
    `last_reviewed: ${generatedAt}`,
    "generated: true",
    `source: ${sourcePath}`,
    "---",
    "",
    "# Contract: SQLite schema",
    "",
    "Generated from `" + sourcePath + "`. Do not hand-edit — regenerate with " +
      "`npm run gen:wiki`. Changes to the schema require a numbered migration " +
      "per `INV-MIGRATION-REQUIRED`.",
    "",
    "## Tables",
    "",
  ];

  // One markdown table per SQL table.
  for (const table of tables) {
    out.push(`### \`${table.name}\``, "", "| Column | Type | Notes |", "|---|---|---|");
    for (const col of table.columns) {
      out.push(`| \`${col.name}\` | \`${col.type}\` | ${col.notes || ""} |`);
    }
    out.push("");
  }

  out.push("## Migration ladder", "", "| Version | Summary | Operations |", "|---|---|---|");
  for (const migration of migrations) {
    const opCell = migration.ops.length
      ? migration.ops.map((op) => `\`${op}\``).join(", ")
      : "—";
    out.push(`| ${migration.version} | ${migration.summary || "—"} | ${opCell} |`);
  }
  // Migrations arrive sorted ascending, so the last entry is the head.
  out.push("", `Current head: **v${migrations.at(-1)?.version ?? "?"}**.`, "");

  return out.join("\n") + "\n";
}
|
|
82
|
+
|
|
83
|
+
/**
 * JSON manifest counterpart to renderContractSchema — same inputs, machine
 * shape for agents/tools instead of markdown.
 */
export function renderSchemaJson({ tables, migrations, generatedAt, sourcePath }) {
  const latest = migrations.at(-1);
  return {
    generated_at: generatedAt,
    source: sourcePath,
    canonical_id: "CONTRACT-SCHEMA",
    // Null when the migration ladder is empty.
    head_version: latest?.version ?? null,
    tables,
    migrations,
  };
}
|
|
93
|
+
|
|
94
|
+
// --- internals ---
|
|
95
|
+
|
|
96
|
+
/**
 * Collect every CREATE TABLE definition in the source, keyed by table name.
 * First definition wins: #initSchema's full column list beats any stripped
 * CREATE TABLE copy inside a migration block.
 */
function extractTables(source) {
  const byName = new Map();
  CREATE_TABLE_RE.lastIndex = 0;
  for (const match of source.matchAll(CREATE_TABLE_RE)) {
    const [, name, columnBody] = match;
    // CREATE VIRTUAL TABLE (FTS) uses a different grammar and never matches;
    // bodies that parse to zero columns are noise and get dropped here.
    const columns = parseColumns(columnBody);
    if (columns.length === 0) continue;
    if (!byName.has(name)) {
      byName.set(name, { name, columns });
    }
  }
  return [...byName.values()];
}
|
|
113
|
+
|
|
114
|
+
/**
 * Split a CREATE TABLE body into column records.
 *
 * Commas are split only at paren depth zero so parenthesized pieces like
 * `CHECK(x > 0)` stay attached to their definition. Table-level constraints
 * (PRIMARY/FOREIGN/UNIQUE/CHECK ...) are skipped — they are not columns.
 */
function parseColumns(body) {
  const pieces = [];
  let nesting = 0;
  let current = "";
  for (const ch of body) {
    if (ch === "," && nesting === 0) {
      pieces.push(current);
      current = "";
      continue;
    }
    if (ch === "(") nesting++;
    else if (ch === ")") nesting--;
    current += ch;
  }
  if (current.trim().length > 0) pieces.push(current);

  const columns = [];
  for (const piece of pieces) {
    const def = piece.trim();
    if (!def) continue;
    if (/^(PRIMARY|FOREIGN|UNIQUE|CHECK)\b/i.test(def)) continue;
    // name, then greedy run of type-ish words, then whatever remains as notes.
    const parsed = def.match(/^(\w+)\s+([A-Z]+(?:\s+[A-Z0-9_]+)*)?(.*)$/i);
    if (!parsed) continue;
    columns.push({
      name: parsed[1],
      // SQLite treats an untyped column as having no affinity; default the
      // display to TEXT when no type words were captured.
      type: (parsed[2] || "").trim() || "TEXT",
      notes: (parsed[3] || "").trim().replace(/\s+/g, " "),
    });
  }
  return columns;
}
|
|
145
|
+
|
|
146
|
+
/**
 * Pull the migration ladder out of #migrate(): every `if (currentVersion < N)`
 * block becomes one entry, sorted ascending by version. The summary comes
 * from a `// Migration N: ...` comment within 400 chars before the block.
 */
function extractMigrations(source) {
  const found = [];
  MIGRATION_OPEN_RE.lastIndex = 0;
  let match;
  while ((match = MIGRATION_OPEN_RE.exec(source)) !== null) {
    const version = Number(match[1]);
    const bodyStart = match.index + match[0].length; // index just past the `{`
    const closeIdx = findMatchingBrace(source, bodyStart - 1);
    if (closeIdx < 0) continue; // unbalanced braces — skip this candidate
    const lookBehind = source.slice(Math.max(0, match.index - 400), match.index);
    const noteMatch = lookBehind.match(new RegExp(`//\\s*Migration\\s+${version}:\\s*(.+)`));
    found.push({
      version,
      summary: noteMatch ? noteMatch[1].trim() : "",
      ops: extractMigrationOps(source.slice(bodyStart, closeIdx)),
    });
  }
  return found.sort((a, b) => a.version - b.version);
}
|
|
169
|
+
|
|
170
|
+
/**
 * List the schema-changing statements inside one migration block.
 * Recognizes ADD COLUMN, CREATE TABLE, and CREATE INDEX; the returned list
 * is deduplicated (Set) in category order: alters, tables, indexes.
 */
function extractMigrationOps(body) {
  const ops = new Set();
  ALTER_ADD_COLUMN_RE.lastIndex = 0;
  for (const m of body.matchAll(ALTER_ADD_COLUMN_RE)) {
    ops.add(`ADD COLUMN ${m[1]}.${m[2]}`);
  }
  for (const m of body.matchAll(/CREATE\s+TABLE(?:\s+IF\s+NOT\s+EXISTS)?\s+(\w+)\s*\(/gi)) {
    ops.add(`CREATE TABLE ${m[1]}`);
  }
  for (const m of body.matchAll(/CREATE\s+INDEX(?:\s+IF\s+NOT\s+EXISTS)?\s+(\w+)\s+ON\s+(\w+)/gi)) {
    ops.add(`CREATE INDEX ${m[1]} ON ${m[2]}`);
  }
  return [...ops];
}
|
|
187
|
+
|
|
188
|
+
/**
 * Index of the `}` matching the `{` at openIdx, or -1 when unbalanced.
 *
 * Plain character scan — it does not understand strings or comments, which
 * is acceptable for the migration-ladder heuristic this serves.
 */
function findMatchingBrace(source, openIdx) {
  let depth = 0;
  let i = openIdx;
  while (i < source.length) {
    const ch = source[i];
    if (ch === "{") {
      depth += 1;
    } else if (ch === "}") {
      depth -= 1;
      if (depth === 0) return i;
    }
    i += 1;
  }
  return -1;
}
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
/**
 * File-map generator.
 *
 * Walks `server/lib/` and `server/bin/`, produces a per-file record with:
 * - Purpose (first paragraph of the earliest JSDoc block, if any)
 * - Exports (`export function/class/const` plus named re-exports)
 * - Local imports (relative `./...` imports — other server modules)
 *
 * Output is a markdown page (`map-files.md`) plus a JSON manifest
 * (`_generated/files.json`). Pure functions — CLI glue walks the disk.
 */

// First /** ... */ block in a file; its opening paragraph becomes `purpose`.
const JSDOC_RE = /\/\*\*([\s\S]*?)\*\//;
// `export [async] function|class|const|let|var NAME` declarations.
const EXPORT_NAMED_RE =
  /^\s*export\s+(?:async\s+)?(function|class|const|let|var)\s+([a-zA-Z_][a-zA-Z0-9_]*)/gm;
// `export { a, b as c } [from '...']` lists; local names are what we record.
const EXPORT_LIST_RE = /^\s*export\s*\{\s*([^}]+)\}\s*(?:from\s*['"][^'"]+['"])?/gm;
// Relative `./` or `../` import specifiers — i.e. other server modules.
const IMPORT_LOCAL_RE =
  /^\s*import\s+[^'"]*?from\s+['"](\.\.?\/[^'"]+)['"]/gm;
|
|
19
|
+
|
|
20
|
+
/**
 * Build a file-map record from a file's source text.
 *
 * @param {{relPath: string, source: string}} input - repo-relative path + contents.
 * @returns {{path: string, purpose: string, exports: string[], imports: string[]}}
 */
export function buildFileRecord({ relPath, source }) {
  const purpose = extractPurpose(source);
  const exports = extractExports(source);
  const imports = extractLocalImports(source);
  return { path: relPath, purpose, exports, imports };
}
|
|
31
|
+
|
|
32
|
+
/**
 * Render map-files.md from a list of file records (already ordered).
 *
 * @param {{files: Array, generatedAt: string, sourceRoots: string[]}} input
 * @returns {string} markdown document, newline-terminated.
 */
export function renderFileMap({ files, generatedAt, sourceRoots }) {
  const out = [
    "---",
    "status: published",
    "canonical_for: [MAP-FILES]",
    "references: []",
    "owner: core",
    `last_reviewed: ${generatedAt}`,
    "generated: true",
    `source_roots: [${sourceRoots.join(", ")}]`,
    "---",
    "",
    "# Map: files",
    "",
    "Generated walk of `" + sourceRoots.join("`, `") + "`. Do not hand-edit — " +
      "regenerate with `npm run gen:wiki`. Purpose strings come from the " +
      "first JSDoc block in each file; files without a JSDoc header have " +
      "empty purpose and are candidates for docstring work.",
    "",
    "## Files",
    "",
    "| Path | Purpose | Exports | Local imports |",
    "|---|---|---|---|",
  ];
  for (const record of files) {
    const exportCell = record.exports.length
      ? record.exports.map((e) => `\`${e}\``).join(", ")
      : "—";
    const importCell = record.imports.length
      ? record.imports.map((i) => `\`${i}\``).join(", ")
      : "—";
    // Escape pipes so free-text purposes can't break the table row.
    const purposeCell = (record.purpose || "").replace(/\|/g, "\\|") || "—";
    out.push(`| \`${record.path}\` | ${purposeCell} | ${exportCell} | ${importCell} |`);
  }
  out.push("");
  return out.join("\n") + "\n";
}
|
|
69
|
+
|
|
70
|
+
/**
 * JSON manifest counterpart to renderFileMap — same inputs, machine shape
 * written to _generated/files.json.
 */
export function renderFileMapJson({ files, generatedAt, sourceRoots }) {
  const manifest = {
    generated_at: generatedAt,
    source_roots: sourceRoots,
    canonical_id: "MAP-FILES",
  };
  manifest.count = files.length;
  manifest.files = files;
  return manifest;
}
|
|
79
|
+
|
|
80
|
+
// --- internals ---
|
|
81
|
+
|
|
82
|
+
/**
 * First paragraph of the file's first JSDoc block, whitespace-collapsed to a
 * single line. Empty string when the file has no JSDoc header.
 */
function extractPurpose(source) {
  const jsdoc = JSDOC_RE.exec(source);
  if (!jsdoc) return "";
  // Drop the leading `* ` gutter from every line, then take the text up to
  // the first blank line as the purpose paragraph.
  const stripped = jsdoc[1]
    .split("\n")
    .map((line) => line.replace(/^\s*\*\s?/, "").trimEnd())
    .join("\n")
    .trim();
  const [firstParagraph = ""] = stripped.split(/\n\s*\n/);
  return firstParagraph.replace(/\s+/g, " ").trim();
}
|
|
94
|
+
|
|
95
|
+
/**
 * Sorted list of exported names: declarations (`export function foo`) plus
 * export lists (`export { a, b as c }` — local names are recorded).
 */
function extractExports(source) {
  const names = new Set();
  EXPORT_NAMED_RE.lastIndex = 0;
  for (const m of source.matchAll(EXPORT_NAMED_RE)) {
    names.add(m[2]);
  }
  EXPORT_LIST_RE.lastIndex = 0;
  for (const m of source.matchAll(EXPORT_LIST_RE)) {
    for (const entry of m[1].split(",")) {
      // `local as exported` → keep the local identifier.
      const local = entry.trim().replace(/\s+as\s+.+$/, "").trim();
      if (local) names.add(local);
    }
  }
  return [...names].sort();
}
|
|
112
|
+
|
|
113
|
+
/**
 * Sorted, deduplicated list of relative import specifiers (`./x`, `../y`).
 * Bare and `node:` specifiers are excluded by the regex.
 */
function extractLocalImports(source) {
  const specifiers = new Set();
  IMPORT_LOCAL_RE.lastIndex = 0;
  for (const m of source.matchAll(IMPORT_LOCAL_RE)) {
    specifiers.add(m[1]);
  }
  return [...specifiers].sort();
}
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Wiki linter.
|
|
3
|
+
*
|
|
4
|
+
* Pure functions that run rules against a built canonical registry plus the
|
|
5
|
+
* raw file contents of each page. I/O (walking the disk, stat'ing referenced
|
|
6
|
+
* paths) lives in the CLI wrapper — this module is testable with synthetic
|
|
7
|
+
* inputs.
|
|
8
|
+
*
|
|
9
|
+
* Rule levels are "error" (fails the lint) or "warn" (informational, but
|
|
10
|
+
* `--strict` mode promotes to error).
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
const DEFAULT_LONG_PAGE_LINES = 80;
|
|
14
|
+
const DEFAULT_LONG_PAGE_MIN_REFS = 3;
|
|
15
|
+
// Pages that own multiple canonical IDs are foundational (e.g. invariants,
|
|
16
|
+
// glossary) — their line count reflects the content they own, not a
|
|
17
|
+
// pointer-page that grew. Skip the restating heuristic for them.
|
|
18
|
+
const CANONICAL_ANCHOR_THRESHOLD = 2;
|
|
19
|
+
|
|
20
|
+
/**
 * Run every rule and return a flat list of findings.
 *
 * Inputs:
 *   registry   — output of buildRegistry
 *   pages      — [{ path, data, body, lineCount }]
 *   knownPaths — Set of repo-relative paths that exist (for ref resolution)
 *   options    — threshold overrides forwarded to individual rules
 *
 * Returns: [{ rule, level, page, message, extra }]
 */
export function runLintRules({ registry, pages, knownPaths = new Set(), options = {} }) {
  return [
    ...ruleDuplicateCanonicalFor(registry),
    ...ruleBrokenReference(registry, knownPaths),
    ...ruleLongPageLowRefs(pages, options),
    ...ruleMissingCanonicalPurpose(pages),
  ];
}
|
|
38
|
+
|
|
39
|
+
/**
 * ERROR if any canonical_for ID is claimed by two or more pages.
 * One finding per duplicated ID, attributed to the first claiming page.
 */
export function ruleDuplicateCanonicalFor(registry) {
  return registry.duplicates.map((dup) => ({
    rule: "duplicate_canonical_for",
    level: "error",
    page: dup.paths[0],
    message: `canonical_for: ${dup.id} claimed by multiple pages`,
    extra: { id: dup.id, paths: dup.paths },
  }));
}
|
|
55
|
+
|
|
56
|
+
/**
 * ERROR if a `references` entry doesn't resolve to a canonical ID or a
 * known path. External URLs (http/https) are skipped.
 */
export function ruleBrokenReference(registry, knownPaths) {
  const canonicalIds = new Set(registry.byId.keys());
  const findings = [];
  for (const page of registry.pages) {
    for (const ref of page.references) {
      // External links are out of scope for the wiki linter.
      if (/^https?:/i.test(ref)) continue;
      if (isResolvable(ref, canonicalIds, knownPaths)) continue;
      findings.push({
        rule: "broken_reference",
        level: "error",
        page: page.path,
        message: `unresolved reference: ${ref}`,
        extra: { ref },
      });
    }
  }
  return findings;
}
|
|
78
|
+
|
|
79
|
+
/**
 * WARN if a non-generated page has a body over N lines and fewer than M
 * outbound references. Heuristic for "probably restating instead of linking."
 * Pages owning CANONICAL_ANCHOR_THRESHOLD or more canonical IDs are exempt.
 */
export function ruleLongPageLowRefs(pages, options = {}) {
  const maxLines = options.longPageLines ?? DEFAULT_LONG_PAGE_LINES;
  const minRefs = options.longPageMinRefs ?? DEFAULT_LONG_PAGE_MIN_REFS;
  const findings = [];
  for (const page of pages) {
    if (page.data?.generated === true) continue; // machine-owned pages are exempt
    // Foundational pages (own several canonical IDs) legitimately run long.
    if (normalizeList(page.data?.canonical_for).length >= CANONICAL_ANCHOR_THRESHOLD) continue;
    const refs = normalizeList(page.data?.references).length;
    if (page.lineCount <= maxLines || refs >= minRefs) continue;
    findings.push({
      rule: "long_page_low_refs",
      level: "warn",
      page: page.path,
      message: `${page.lineCount} lines with only ${refs} references — probably restating`,
      extra: { lines: page.lineCount, refs },
    });
  }
  return findings;
}
|
|
104
|
+
|
|
105
|
+
/**
 * WARN if a page declares neither canonical_for nor references. Such a page
 * owns nothing and cites nothing — it's probably an orphan or a restated copy.
 */
export function ruleMissingCanonicalPurpose(pages) {
  const findings = [];
  for (const page of pages) {
    if (page.data?.generated === true) continue;
    const ownsNothing = normalizeList(page.data?.canonical_for).length === 0;
    const citesNothing = normalizeList(page.data?.references).length === 0;
    if (!ownsNothing || !citesNothing) continue;
    findings.push({
      rule: "missing_canonical_purpose",
      level: "warn",
      page: page.path,
      message: "page has no canonical_for and no references",
    });
  }
  return findings;
}
|
|
126
|
+
|
|
127
|
+
/**
 * Exit-code mapping. Non-strict: any error → 1. Strict: any finding → 1.
 * A clean run is always 0.
 */
export function lintExitCode(findings, { strict = false } = {}) {
  if (findings.length === 0) return 0;
  const hasError = findings.some((f) => f.level === "error");
  return strict || hasError ? 1 : 0;
}
|
|
135
|
+
|
|
136
|
+
/**
 * Human-readable text report: one `[level] page: rule — message` line per
 * finding, or a fixed "clean" sentence for an empty list.
 */
export function formatFindings(findings) {
  if (findings.length === 0) return "wiki lint: clean.";
  return findings
    .map((f) => `[${f.level}] ${f.page}: ${f.rule} — ${f.message}`)
    .join("\n");
}
|
|
147
|
+
|
|
148
|
+
// --- internals ---
|
|
149
|
+
|
|
150
|
+
/**
 * Coerce a frontmatter value to a list of non-empty strings: arrays are
 * stringified and filtered, a lone non-empty string becomes a singleton,
 * anything else (null, undefined, numbers, "") becomes [].
 */
function normalizeList(value) {
  if (Array.isArray(value)) {
    return value.map(String).filter(Boolean);
  }
  if (typeof value === "string" && value.length > 0) {
    return [value];
  }
  return [];
}
|
|
156
|
+
|
|
157
|
+
/**
 * A reference resolves when it names a canonical ID, a known path, or a
 * `path#anchor` whose anchor is a canonical ID or whose path part exists.
 */
function isResolvable(ref, canonicalIds, knownPaths) {
  if (canonicalIds.has(ref) || knownPaths.has(ref)) return true;
  const hash = ref.indexOf("#");
  if (hash < 0) return false;
  if (canonicalIds.has(ref.slice(hash + 1))) return true;
  const pathPart = ref.slice(0, hash);
  return pathPart.length > 0 && knownPaths.has(pathPart);
}
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
import path from "node:path";

// Repo-relative location of the mode file; also exported as MODE_FILE_PATH.
const MODE_FILE_REL = ".wicked-brain/mode.json";
// Current schema_version; bump on incompatible field changes (mode.schema.json).
const SCHEMA_VERSION = 1;
// Allowed `mode` values — mirrors the enum in mode.schema.json.
const VALID_MODES = new Set(["code", "content", "mixed", "unknown"]);
// `detected_at` format: ISO date, YYYY-MM-DD.
const DATE_RE = /^\d{4}-\d{2}-\d{2}$/;
|
|
8
|
+
|
|
9
|
+
/**
 * Validate a mode.json body. Returns { ok, errors } — does not throw.
 * Kept in lockstep with mode.schema.json. The schema is the canonical
 * documentation; this is the runtime enforcement. Per the schema's
 * `required` list, `content_root`, `score` and `reasons` are optional;
 * everything else must be present and well-formed.
 *
 * @param {unknown} body - Parsed mode.json content.
 * @returns {{ok: boolean, errors: string[]}} all violations, not just the first.
 */
export function validateMode(body) {
  // Schema constants mirrored locally so this check is self-contained
  // (must stay in sync with mode.schema.json and the module constants).
  const SCHEMA_VERSION = 1;
  const VALID_MODES = new Set(["code", "content", "mixed", "unknown"]);
  const DATE_RE = /^\d{4}-\d{2}-\d{2}$/;

  const errors = [];
  if (!body || typeof body !== "object") {
    return { ok: false, errors: ["body is not an object"] };
  }
  if (body.schema_version !== SCHEMA_VERSION) {
    errors.push(`schema_version must be ${SCHEMA_VERSION}, got ${body.schema_version}`);
  }
  if (!VALID_MODES.has(body.mode)) {
    errors.push(`mode must be one of ${[...VALID_MODES].join(", ")}, got ${body.mode}`);
  }
  if (typeof body.wiki_root !== "string" || body.wiki_root.length === 0) {
    errors.push("wiki_root must be a non-empty string");
  }
  // FIX: content_root is optional per mode.schema.json (not in `required`),
  // so a missing key is valid; when present it must be a string or null.
  // Previously `undefined` was rejected, which drifted from the schema.
  if (body.content_root !== undefined && body.content_root !== null
      && typeof body.content_root !== "string") {
    errors.push("content_root must be string or null");
  }
  if (typeof body.detected_at !== "string" || !DATE_RE.test(body.detected_at)) {
    errors.push("detected_at must be YYYY-MM-DD");
  }
  if (typeof body.override !== "boolean") {
    errors.push("override must be boolean");
  }
  if (body.score !== undefined) {
    if (!body.score || typeof body.score !== "object") {
      errors.push("score must be an object");
    } else {
      if (typeof body.score.code !== "number") errors.push("score.code must be a number");
      if (typeof body.score.content !== "number") errors.push("score.content must be a number");
    }
  }
  if (body.reasons !== undefined && !Array.isArray(body.reasons)) {
    errors.push("reasons must be an array");
  }
  // NOTE(review): the schema also declares additionalProperties:false;
  // unknown keys are not rejected here — confirm whether runtime should match.
  return { ok: errors.length === 0, errors };
}
|
|
49
|
+
|
|
50
|
+
/**
 * Read .wicked-brain/mode.json for a repo. Resolves to null when the file
 * does not exist; any other fs error is rethrown. Malformed JSON throws
 * from JSON.parse so callers can surface the corruption rather than
 * silently re-detecting over a corrupt file.
 */
export async function readModeFile(repoRoot) {
  const modePath = path.join(repoRoot, MODE_FILE_REL);
  try {
    const raw = await fs.readFile(modePath, "utf8");
    return JSON.parse(raw);
  } catch (err) {
    // SyntaxError from JSON.parse has no `code` and falls through to throw.
    if (err.code === "ENOENT") return null;
    throw err;
  }
}
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* Write .wicked-brain/mode.json for a repo.
|
|
69
|
+
*
|
|
70
|
+
* Honors override:true on any existing file — will not overwrite a
|
|
71
|
+
* human-managed mode.json. Detection-managed writes pass through.
|
|
72
|
+
*
|
|
73
|
+
* Returns { written: boolean, reason?: string, path: string }.
|
|
74
|
+
*/
|
|
75
|
+
export async function writeModeFile(repoRoot, detection, { override = false } = {}) {
|
|
76
|
+
const p = path.join(repoRoot, MODE_FILE_REL);
|
|
77
|
+
const existing = await readModeFile(repoRoot);
|
|
78
|
+
if (existing && existing.override === true && override === false) {
|
|
79
|
+
return { written: false, reason: "override:true present — not overwriting", path: p };
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
const now = new Date().toISOString().slice(0, 10);
|
|
83
|
+
const body = {
|
|
84
|
+
schema_version: SCHEMA_VERSION,
|
|
85
|
+
mode: detection.mode,
|
|
86
|
+
wiki_root: detection.wiki_root,
|
|
87
|
+
content_root: detection.content_root ?? null,
|
|
88
|
+
detected_at: now,
|
|
89
|
+
override,
|
|
90
|
+
score: detection.score,
|
|
91
|
+
reasons: detection.reasons,
|
|
92
|
+
};
|
|
93
|
+
|
|
94
|
+
const { ok, errors } = validateMode(body);
|
|
95
|
+
if (!ok) {
|
|
96
|
+
throw new Error(`invalid mode.json body: ${errors.join("; ")}`);
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
await fs.mkdir(path.dirname(p), { recursive: true });
|
|
100
|
+
await fs.writeFile(p, JSON.stringify(body, null, 2) + "\n", "utf8");
|
|
101
|
+
return { written: true, path: p };
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
 * Compare an existing mode.json against a fresh detection.
 * Returns { changed: boolean, fields: string[] } — callers decide whether
 * to warn the user before writing.
 */
export function diffMode(existing, detection) {
  if (!existing) {
    return { changed: true, fields: ["(no prior mode.json)"] };
  }
  const fields = [];
  if (detection.mode !== existing.mode) fields.push("mode");
  if (detection.wiki_root !== existing.wiki_root) fields.push("wiki_root");
  // Treat an absent content_root as null on both sides before comparing.
  const priorContent = existing.content_root ?? null;
  const nextContent = detection.content_root ?? null;
  if (priorContent !== nextContent) fields.push("content_root");
  return { changed: fields.length > 0, fields };
}
|
|
119
|
+
|
|
120
|
+
// Repo-relative location of mode.json, exported for CLI messages and tests.
export const MODE_FILE_PATH = MODE_FILE_REL;
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
{
|
|
2
|
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
|
3
|
+
"$id": "https://wicked-brain.dev/schemas/mode.schema.json",
|
|
4
|
+
"title": "wicked-brain mode.json",
|
|
5
|
+
"description": "Repo-mode detection result. Written to .wicked-brain/mode.json at the repo root. Drives wiki location, lint rules, and ingest scoring.",
|
|
6
|
+
"type": "object",
|
|
7
|
+
"required": ["schema_version", "mode", "wiki_root", "detected_at", "override"],
|
|
8
|
+
"additionalProperties": false,
|
|
9
|
+
"properties": {
|
|
10
|
+
"schema_version": {
|
|
11
|
+
"type": "integer",
|
|
12
|
+
"const": 1,
|
|
13
|
+
"description": "Schema version. Bumped when incompatible fields change."
|
|
14
|
+
},
|
|
15
|
+
"mode": {
|
|
16
|
+
"type": "string",
|
|
17
|
+
"enum": ["code", "content", "mixed", "unknown"],
|
|
18
|
+
"description": "Repo classification. Drives which page set, lint rules, and ingest scoring apply."
|
|
19
|
+
},
|
|
20
|
+
"wiki_root": {
|
|
21
|
+
"type": "string",
|
|
22
|
+
"description": "Repo-relative path to the contributor wiki (default: 'wiki'). Always set, even in content mode — meta pages like taxonomy.md live here."
|
|
23
|
+
},
|
|
24
|
+
"content_root": {
|
|
25
|
+
"type": ["string", "null"],
|
|
26
|
+
"description": "Repo-relative path to the content corpus. Null in code/unknown modes. Defaults to 'docs' if present, else 'content'."
|
|
27
|
+
},
|
|
28
|
+
"detected_at": {
|
|
29
|
+
"type": "string",
|
|
30
|
+
"pattern": "^\\d{4}-\\d{2}-\\d{2}$",
|
|
31
|
+
"description": "ISO date (YYYY-MM-DD) the mode was last written."
|
|
32
|
+
},
|
|
33
|
+
"override": {
|
|
34
|
+
"type": "boolean",
|
|
35
|
+
"description": "If true, the file was set by a human. Detection must not overwrite without an explicit override:true write."
|
|
36
|
+
},
|
|
37
|
+
"score": {
|
|
38
|
+
"type": "object",
|
|
39
|
+
"required": ["code", "content"],
|
|
40
|
+
"additionalProperties": false,
|
|
41
|
+
"properties": {
|
|
42
|
+
"code": { "type": "number" },
|
|
43
|
+
"content": { "type": "number" }
|
|
44
|
+
},
|
|
45
|
+
"description": "Scores that led to the classification. Informational; inspectable by agents."
|
|
46
|
+
},
|
|
47
|
+
"reasons": {
|
|
48
|
+
"type": "array",
|
|
49
|
+
"items": { "type": "string" },
|
|
50
|
+
"description": "Human-readable list of signals that produced the score. Informational."
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
}
|