wicked-brain 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,260 @@
1
+ import Database from "better-sqlite3";
2
+ import { parseWikilinks } from "./wikilinks.mjs";
3
+ import { statSync } from "node:fs";
4
+
5
/**
 * Convert a free-form user query into a safe FTS5 MATCH expression.
 *
 * Each whitespace-separated term is wrapped in double quotes (with any
 * embedded double quotes doubled, per SQLite string-escaping rules) so the
 * terms are treated as literal phrases rather than FTS5 query syntax.
 *
 * @param {string} query - Raw search text.
 * @returns {string} Quoted terms joined by spaces; "" for a blank query.
 */
function escapeFtsQuery(query) {
  const terms = query.trim().split(/\s+/).filter(Boolean);
  const quoted = terms.map((term) => '"' + term.replaceAll('"', '""') + '"');
  return quoted.join(" ");
}
13
+
14
/**
 * Full-text search index for a single "brain", backed by SQLite FTS5 via
 * better-sqlite3. Stores documents, a parallel FTS5 mirror for ranked
 * search, and a link graph extracted from [[wikilinks]] in document content.
 */
export class SqliteSearch {
  // better-sqlite3 connection handle.
  #db;
  // Identifier of the brain this index serves; stamped onto every document.
  #brainId;
  // Construction time, used by health() to report uptime.
  #startTime;

  /**
   * Open (or create) the search database and ensure the schema exists.
   *
   * @param {string} dbPath - File path for the SQLite database.
   * @param {string} brainId - Brain identifier stored with each document.
   */
  constructor(dbPath, brainId) {
    this.#brainId = brainId;
    this.#startTime = Date.now();
    this.#db = new Database(dbPath);
    // WAL lets readers proceed while a write transaction is open.
    this.#db.pragma("journal_mode = WAL");
    this.#initSchema();
  }

  // Create the document store, FTS5 mirror, and link-graph tables (idempotent).
  #initSchema() {
    this.#db.exec(`
      CREATE TABLE IF NOT EXISTS documents (
        id TEXT PRIMARY KEY,
        path TEXT NOT NULL,
        content TEXT NOT NULL,
        frontmatter TEXT,
        brain_id TEXT NOT NULL,
        indexed_at INTEGER NOT NULL
      );

      CREATE VIRTUAL TABLE IF NOT EXISTS documents_fts USING fts5(
        id,
        path,
        content,
        brain_id,
        tokenize='porter unicode61'
      );

      CREATE TABLE IF NOT EXISTS links (
        source_id TEXT NOT NULL,
        source_brain TEXT NOT NULL,
        target_path TEXT NOT NULL,
        target_brain TEXT,
        link_text TEXT
      );

      CREATE INDEX IF NOT EXISTS idx_links_source ON links(source_id);
      CREATE INDEX IF NOT EXISTS idx_links_target ON links(target_path);
    `);
  }

  /**
   * Insert or replace a document, refresh its FTS row, and rebuild its
   * outgoing wikilinks — all in one transaction.
   *
   * @param {{id: string, path: string, content: string, frontmatter?: ?string}} doc
   */
  index(doc) {
    const { id, path, content, frontmatter = null } = doc;
    const brainId = this.#brainId;
    const indexedAt = Date.now();

    const upsertDoc = this.#db.prepare(`
      INSERT INTO documents (id, path, content, frontmatter, brain_id, indexed_at)
      VALUES (?, ?, ?, ?, ?, ?)
      ON CONFLICT(id) DO UPDATE SET
        path = excluded.path,
        content = excluded.content,
        frontmatter = excluded.frontmatter,
        brain_id = excluded.brain_id,
        indexed_at = excluded.indexed_at
    `);

    // FTS5 tables have no UNIQUE constraints, so upsert is delete + insert.
    const deleteFts = this.#db.prepare(`DELETE FROM documents_fts WHERE id = ?`);
    const insertFts = this.#db.prepare(`
      INSERT INTO documents_fts (id, path, content, brain_id)
      VALUES (?, ?, ?, ?)
    `);

    const deleteLinks = this.#db.prepare(`DELETE FROM links WHERE source_id = ?`);
    const insertLink = this.#db.prepare(`
      INSERT INTO links (source_id, source_brain, target_path, target_brain, link_text)
      VALUES (?, ?, ?, ?, ?)
    `);

    const run = this.#db.transaction(() => {
      upsertDoc.run(id, path, content, frontmatter, brainId, indexedAt);
      deleteFts.run(id);
      insertFts.run(id, path, content, brainId);
      deleteLinks.run(id);
      for (const link of parseWikilinks(content)) {
        insertLink.run(id, brainId, link.path, link.brain, link.raw);
      }
    });

    run();
  }

  /**
   * Delete a document, its FTS row, and its outgoing links atomically.
   * @param {string} id - Document id.
   */
  remove(id) {
    const run = this.#db.transaction(() => {
      this.#db.prepare(`DELETE FROM documents WHERE id = ?`).run(id);
      this.#db.prepare(`DELETE FROM documents_fts WHERE id = ?`).run(id);
      this.#db.prepare(`DELETE FROM links WHERE source_id = ?`).run(id);
    });
    run();
  }

  /**
   * Drop everything and rebuild the index from `docs`.
   *
   * Runs in a transaction; the nested transaction started by each index()
   * call becomes a savepoint under better-sqlite3, so a failure rolls back
   * the whole rebuild.
   *
   * @param {Iterable<object>} docs - Documents in the shape accepted by index().
   */
  reindex(docs) {
    const run = this.#db.transaction(() => {
      this.#db.exec(`DELETE FROM documents`);
      this.#db.exec(`DELETE FROM documents_fts`);
      this.#db.exec(`DELETE FROM links`);
      for (const doc of docs) {
        this.index(doc);
      }
    });
    run();
  }

  /**
   * Ranked full-text search over the local brain.
   *
   * @param {{query: string, limit?: number, offset?: number}} opts
   * @returns {{results: Array<object>, total_matches: number, showing: number}}
   *   Each result row carries id, path, brain_id, rank (FTS5 bm25; smaller
   *   is more relevant) and a highlighted snippet of the content column.
   */
  search({ query, limit = 10, offset = 0 }) {
    const escaped = escapeFtsQuery(query);
    if (!escaped) return { results: [], total_matches: 0, showing: 0 };

    // Column 2 of documents_fts is `content`; snippet() highlights it.
    // `f.rank AS rank` is exposed so callers (e.g. federatedSearch) can
    // merge-sort results by relevance — previously rank was never selected.
    const rows = this.#db
      .prepare(`
        SELECT
          d.id,
          d.path,
          d.brain_id,
          f.rank AS rank,
          snippet(documents_fts, 2, '<b>', '</b>', '…', 32) AS snippet
        FROM documents_fts f
        JOIN documents d ON d.id = f.id
        WHERE documents_fts MATCH ?
        ORDER BY rank
        LIMIT ? OFFSET ?
      `)
      .all(escaped, limit, offset);

    const countRow = this.#db
      .prepare(`SELECT COUNT(*) as cnt FROM documents_fts WHERE documents_fts MATCH ?`)
      .get(escaped);

    const total_matches = countRow ? countRow.cnt : 0;

    return {
      results: rows,
      total_matches,
      showing: rows.length,
    };
  }

  /**
   * Search the local brain plus a set of peer brain databases.
   *
   * Peer databases are ATTACHed one at a time, queried, and detached. A
   * brain whose database cannot be attached or queried is reported in
   * `unreachable` rather than failing the whole search.
   *
   * @param {{query: string, brains?: Array<{brainId: string, dbPath: string}>, limit?: number}} opts
   * @returns {{results: Array<object>, total_matches: number, showing: number, unreachable: string[]}}
   */
  federatedSearch({ query, brains = [], limit = 10 }) {
    const localResults = this.search({ query, limit });
    const allResults = [...localResults.results];
    const unreachable = [];

    // Hoisted out of the loop: the escaped MATCH expression is identical for
    // every peer. An empty expression would make MATCH throw and wrongly mark
    // every peer unreachable, so skip peers entirely for a blank query.
    const escaped = escapeFtsQuery(query);

    if (escaped) {
      for (const { brainId, dbPath } of brains) {
        try {
          // Schema names cannot be bound as parameters; sanitize the brain id
          // before interpolating it into ATTACH/DETACH and the query below.
          const attached = `brain_${brainId.replace(/[^a-zA-Z0-9_]/g, "_")}`;
          this.#db.prepare(`ATTACH DATABASE ? AS ${attached}`).run(dbPath);
          try {
            const rows = this.#db
              .prepare(`
                SELECT
                  d.id,
                  d.path,
                  d.brain_id,
                  f.rank AS rank,
                  snippet(${attached}.documents_fts, 2, '<b>', '</b>', '…', 32) AS snippet
                FROM ${attached}.documents_fts f
                JOIN ${attached}.documents d ON d.id = f.id
                WHERE ${attached}.documents_fts MATCH ?
                ORDER BY rank
                LIMIT ?
              `)
              .all(escaped, limit);
            allResults.push(...rows);
          } finally {
            this.#db.prepare(`DETACH DATABASE ${attached}`).run();
          }
        } catch {
          unreachable.push(brainId);
        }
      }
    }

    // FTS5 rank is bm25: smaller (more negative) means more relevant, so an
    // ascending sort puts the best matches first. Both SELECTs now expose
    // `rank`; the original never selected it, so this sort compared 0 to 0
    // and left the merged list in arbitrary order.
    allResults.sort((a, b) => (a.rank ?? 0) - (b.rank ?? 0));
    const trimmed = allResults.slice(0, limit);

    return {
      results: trimmed,
      // NOTE(review): total_matches counts only the local brain; remote
      // totals are not aggregated here.
      total_matches: localResults.total_matches,
      showing: trimmed.length,
      unreachable,
    };
  }

  /**
   * Documents that link TO the given path.
   * @param {string} id - Target path as written in wikilinks.
   * @returns {Array<{source_id: string, source_brain: string, link_text: string}>}
   */
  backlinks(id) {
    return this.#db
      .prepare(`
        SELECT source_id, source_brain, link_text
        FROM links
        WHERE target_path = ?
      `)
      .all(id);
  }

  /**
   * Paths this document links out to.
   * @param {string} id - Source document id.
   * @returns {string[]} Target paths (any cross-brain qualifier is dropped).
   */
  forwardLinks(id) {
    const rows = this.#db
      .prepare(`
        SELECT target_path, target_brain
        FROM links
        WHERE source_id = ?
      `)
      .all(id);
    return rows.map((r) => r.target_path);
  }

  /**
   * Summary statistics for the index.
   * @returns {{total: number, chunks: number, wiki: number, last_indexed: ?number, db_size: ?number}}
   */
  stats() {
    const total = this.#db
      .prepare(`SELECT COUNT(*) as cnt FROM documents`)
      .get().cnt;

    const chunks = this.#db
      .prepare(`SELECT COUNT(*) as cnt FROM documents WHERE path LIKE 'chunks/%'`)
      .get().cnt;

    const wiki = this.#db
      .prepare(`SELECT COUNT(*) as cnt FROM documents WHERE path LIKE 'wiki/%'`)
      .get().cnt;

    // MAX() over an empty table still yields one row, with a NULL value,
    // so last_indexed is null until something has been indexed.
    const lastRow = this.#db
      .prepare(`SELECT MAX(indexed_at) as last FROM documents`)
      .get();
    const last_indexed = lastRow ? lastRow.last : null;

    const dbFile = this.#db.name;
    let db_size = null;
    try {
      db_size = statSync(dbFile).size;
    } catch {
      // In-memory database or inaccessible file — size is simply unknown.
    }

    return { total, chunks, wiki, last_indexed, db_size };
  }

  /**
   * Liveness report for the server layer.
   * @returns {{status: string, uptime: number, brain_id: string}} uptime in ms.
   */
  health() {
    return {
      status: "ok",
      uptime: Date.now() - this.#startTime,
      brain_id: this.#brainId,
    };
  }

  /** Close the underlying database connection. */
  close() {
    this.#db.close();
  }
}
@@ -0,0 +1,19 @@
1
// Matches [[...]]; the capture group is everything between the brackets
// (no closing bracket allowed inside, so nesting is not supported).
const WIKILINK_RE = /\[\[([^\]]+)\]\]/g;

/**
 * Extract wikilinks from markdown text.
 *
 * Plain links `[[path]]` yield `{ brain: null, path, raw }`. Cross-brain
 * links `[[brain::path]]` yield `{ brain, path, raw }`; if either side of
 * the `::` is blank after trimming, the link is dropped. Blank links are
 * skipped entirely.
 *
 * @param {string} text - Document content to scan.
 * @returns {Array<{brain: ?string, path: string, raw: string}>}
 */
export function parseWikilinks(text) {
  const links = [];
  for (const [raw, captured] of text.matchAll(WIKILINK_RE)) {
    const inner = captured.trim();
    if (inner === "") continue;
    const sep = inner.indexOf("::");
    if (sep === -1) {
      links.push({ brain: null, path: inner, raw });
      continue;
    }
    const brain = inner.slice(0, sep).trim();
    const path = inner.slice(sep + 2).trim();
    if (brain !== "" && path !== "") {
      links.push({ brain, path, raw });
    }
  }
  return links;
}
@@ -0,0 +1,38 @@
1
+ {
2
+ "name": "wicked-brain-server",
3
+ "version": "0.1.0",
4
+ "type": "module",
5
+ "description": "SQLite FTS5 search server for wicked-brain digital knowledge bases",
6
+ "keywords": [
7
+ "wicked-brain",
8
+ "sqlite",
9
+ "fts5",
10
+ "search",
11
+ "knowledge-base"
12
+ ],
13
+ "author": "Mike Parcewski",
14
+ "license": "MIT",
15
+ "repository": {
16
+ "type": "git",
17
+ "url": "https://github.com/mikeparcewski/wicked-brain.git",
18
+ "directory": "server"
19
+ },
20
+ "bin": {
21
+ "wicked-brain-server": "./bin/wicked-brain-server.mjs"
22
+ },
23
+ "files": [
24
+ "bin/",
25
+ "lib/",
26
+ "package.json"
27
+ ],
28
+ "scripts": {
29
+ "test": "node --test test/*.test.mjs",
30
+ "start": "node bin/wicked-brain-server.mjs"
31
+ },
32
+ "dependencies": {
33
+ "better-sqlite3": "^12.0.0"
34
+ },
35
+ "engines": {
36
+ "node": ">=18.0.0"
37
+ }
38
+ }
@@ -0,0 +1,112 @@
1
+ ---
2
+ name: wicked-brain:batch
3
+ description: |
4
+ Pattern for batch operations that would otherwise fill context with repetitive
5
+ tool calls. Detects the available runtime (Node, Python, shell), writes a
6
+ script, runs it, and reports results. Used internally by other skills.
7
+
8
+ Use when: any brain operation needs to process more than 5 files, run more
9
+ than 10 API calls, or would otherwise burn context on repetitive operations.
10
+ ---
11
+
12
+ # wicked-brain:batch
13
+
14
+ You handle batch operations efficiently by generating and running scripts instead
15
+ of executing repetitive tool calls inline.
16
+
17
+ ## When to use
18
+
19
+ - Ingesting a directory of files (wicked-brain:ingest calls you)
20
+ - Reindexing all content (wicked-brain:lint or rebuild)
21
+ - Bulk search across many terms
22
+ - Any operation touching more than 5 files
23
+
24
+ ## Why scripts over tool calls
25
+
26
+ | Approach | Context cost | Speed | Reliability |
27
+ |---|---|---|---|
28
+ | 50 Read + 50 Write + 50 Bash (curl) | ~150 tool calls, floods context | Slow (round-trips) | Error-prone (partial failures) |
29
+ | Write 1 script + Run 1 script + Read output | ~3 tool calls | Fast (single process) | Script handles errors internally |
30
+
31
+ ## Process
32
+
33
+ ### Step 1: Detect runtime
34
+
35
+ Check what's available, in preference order:
36
+
37
+ ```bash
38
+ if command -v node >/dev/null 2>&1; then echo "node"; elif command -v python3 >/dev/null 2>&1; then echo "python3"; elif command -v python >/dev/null 2>&1; then echo "python"; else echo "shell"; fi
39
+ ```
40
+
41
+ Prefer Node.js (since wicked-brain-server requires it, it's always available).
42
+
43
+ ### Step 2: Write the script
44
+
45
+ Write to `{brain_path}/_meta/batch-{operation}.mjs` (or `.py` or `.sh`).
46
+
47
+ The script must:
48
+ 1. Accept the brain path, server port, and operation-specific params
49
+ 2. Do all the work (walk dirs, read files, write chunks, curl APIs)
50
+ 3. Log progress to stdout (one line per file processed)
51
+ 4. Handle errors per-file (don't stop on one failure)
52
+ 5. Print a summary at the end
53
+
54
+ ### Step 3: Run the script
55
+
56
+ ```bash
57
+ node {brain_path}/_meta/batch-{operation}.mjs
58
+ ```
59
+
60
+ ### Step 4: Read output and report
61
+
62
+ Read the script's stdout. Summarize results to the user.
63
+
64
+ ### Step 5: Clean up
65
+
66
+ Optionally delete the script after successful completion:
67
+ ```bash
68
+ rm {brain_path}/_meta/batch-{operation}.mjs
69
+ ```
70
+
71
+ Or keep it for re-runs — the user can run it manually too.
72
+
73
+ ## Cross-Platform Notes
74
+
75
+ - Node.js scripts are fully cross-platform (same code on macOS/Linux/Windows)
76
+ - Python scripts are fully cross-platform
77
+ - Shell scripts need macOS/Linux + Windows variants — avoid if Node or Python available
78
+ - Use `fetch()` (Node 18+) instead of `curl` in scripts — it's native and cross-platform
79
+ - Use `node:fs` and `node:path` — they handle platform differences
80
+
81
+ ## Template: Node.js batch script
82
+
83
+ See wicked-brain:ingest for a complete example. The key structure:
84
+
85
+ ```javascript
86
+ #!/usr/bin/env node
87
+ import { ... } from "node:fs";
88
+ import { ... } from "node:path";
89
+
90
+ const BRAIN = "{brain_path}";
91
+ const PORT = {port};
92
+
93
+ // Walk, process, index, report
94
+ ```
95
+
96
+ ## Template: Python batch script
97
+
98
+ ```python
99
+ #!/usr/bin/env python3
100
+ import os, json, hashlib, urllib.request
101
+
102
+ BRAIN = "{brain_path}"
103
+ PORT = {port}
104
+
105
+ def api(action, params):
106
+ data = json.dumps({"action": action, "params": params}).encode()
107
+ req = urllib.request.Request(f"http://localhost:{PORT}/api",
108
+ data=data, headers={"Content-Type": "application/json"})
109
+ return json.loads(urllib.request.urlopen(req).read())
110
+
111
+ # Walk, process, index, report
112
+ ```
@@ -0,0 +1,124 @@
1
+ ---
2
+ name: wicked-brain:compile
3
+ description: |
4
+ Synthesize wiki articles from brain chunks. Dispatches a compile subagent
5
+ that identifies concept clusters in chunks and writes structured wiki
6
+ articles with backlinks and source attribution.
7
+
8
+ Use when: "compile the brain", "write wiki articles", "synthesize knowledge",
9
+ "brain compile".
10
+ ---
11
+
12
+ # wicked-brain:compile
13
+
14
+ You compile wiki articles from the brain's chunks by dispatching a compile subagent.
15
+
16
+ ## Cross-Platform Notes
17
+
18
+ Commands in this skill work on macOS, Linux, and Windows. When a command has
19
+ platform differences, alternatives are shown. Your native tools (Read, Write,
20
+ Grep, Glob) work everywhere — prefer them over shell commands when possible.
21
+
22
+ For the brain path default:
23
+ - macOS/Linux: ~/.wicked-brain
24
+ - Windows: %USERPROFILE%\.wicked-brain
25
+
26
+ ## Config
27
+
28
+ Read `_meta/config.json` for brain path and server port.
29
+ If it doesn't exist, trigger wicked-brain:init.
30
+
31
+ ## Process
32
+
33
+ Dispatch a compile subagent with these instructions:
34
+
35
+ ```
36
+ You are a compile agent for the digital brain at {brain_path}.
37
+ Server: http://localhost:{port}/api
38
+
39
+ ## Your task
40
+
41
+ Read chunks and synthesize wiki articles that capture key concepts.
42
+
43
+ ## Step 1: Orient
44
+
45
+ Get brain stats:
46
+ ```bash
47
+ curl -s -X POST http://localhost:{port}/api \
48
+ -H "Content-Type: application/json" \
49
+ -d '{"action":"stats","params":{}}'
50
+ ```
51
+
52
+ List existing wiki articles using your Glob tool on `{brain_path}/wiki/**/*.md`.
53
+ Shell fallback:
54
+ - macOS/Linux: `find {brain_path}/wiki -name "*.md" -type f 2>/dev/null`
55
+ - Windows: `Get-ChildItem -Recurse -Filter "*.md" "{brain_path}\wiki" 2>$null`
56
+
57
+ List chunks using your Glob tool on `{brain_path}/chunks/extracted/**/*.md`.
58
+ Shell fallback:
59
+ - macOS/Linux: `find {brain_path}/chunks/extracted -name "*.md" -type f 2>/dev/null`
60
+ - Windows: `Get-ChildItem -Recurse -Filter "*.md" "{brain_path}\chunks\extracted" 2>$null`
61
+
62
+ ## Step 2: Find uncovered chunks
63
+
64
+ For each chunk directory, check if a wiki article references those chunks.
65
+ Use your Grep tool to find which chunks are already cited in wiki articles.
66
+ Shell fallback:
67
+ - macOS/Linux: `grep -rl "chunk-" {brain_path}/wiki/ 2>/dev/null`
68
+ - Windows: `findstr /s /m "chunk-" "{brain_path}\wiki\*.md" 2>nul`
69
+
70
+ Focus on chunks NOT referenced by any wiki article.
71
+
72
+ ## Step 3: Read uncovered chunks
73
+
74
+ Read uncovered chunks (frontmatter + body) to understand their content.
75
+ Group them by topic/concept.
76
+
77
+ ## Step 4: Write wiki articles
78
+
79
+ For each concept cluster, write a wiki article to `{brain_path}/wiki/concepts/{concept-name}.md`
80
+ or `{brain_path}/wiki/topics/{topic-name}.md`:
81
+
82
+ ```
83
+ ---
84
+ authored_by: llm
85
+ authored_at: {ISO timestamp}
86
+ source_chunks:
87
+ - {chunk-path-1}
88
+ - {chunk-path-2}
89
+ contains:
90
+ - {topic tags}
91
+ ---
92
+
93
+ # {Concept Name}
94
+
95
+ {Article body with [[backlinks]] to source chunks.}
96
+
97
+ Every factual claim should link to its source: [[chunks/extracted/{source}/chunk-NNN]].
98
+
99
+ ## Related
100
+
101
+ - [[other-concept]]
102
+ - [[brain-id::cross-brain-concept]] (if applicable)
103
+ ```
104
+
105
+ ## Step 5: Index new articles
106
+
107
+ For each article written:
108
+ ```bash
109
+ curl -s -X POST http://localhost:{port}/api \
110
+ -H "Content-Type: application/json" \
111
+ -d '{"action":"index","params":{"id":"{path}","path":"{path}","content":"{content}","brain_id":"{brain_id}"}}'
112
+ ```
113
+
114
+ ## Step 6: Log
115
+
116
+ Append to `{brain_path}/_meta/log.jsonl` for each article:
117
+ ```json
118
+ {"ts":"{ISO}","op":"write","path":"{article_path}","author":"llm:compile","content_hash":"{hash}"}
119
+ ```
120
+
121
+ ## Step 7: Report
122
+
123
+ State how many articles were created/updated and what concepts they cover.
124
+ ```
@@ -0,0 +1,103 @@
1
+ ---
2
+ name: wicked-brain:enhance
3
+ description: |
4
+ Fill gaps in brain knowledge. Dispatches an enhance subagent that identifies
5
+ thin areas and writes inferred chunks to expand coverage.
6
+
7
+ Use when: "enhance the brain", "fill gaps", "brain enhance",
8
+ "what's missing in the brain".
9
+ ---
10
+
11
+ # wicked-brain:enhance
12
+
13
+ You enhance the brain by dispatching a subagent that fills knowledge gaps.
14
+
15
+ ## Cross-Platform Notes
16
+
17
+ Commands in this skill work on macOS, Linux, and Windows. When a command has
18
+ platform differences, alternatives are shown. Your native tools (Read, Write,
19
+ Grep, Glob) work everywhere — prefer them over shell commands when possible.
20
+
21
+ For the brain path default:
22
+ - macOS/Linux: ~/.wicked-brain
23
+ - Windows: %USERPROFILE%\.wicked-brain
24
+
25
+ ## Config
26
+
27
+ Read `_meta/config.json` for brain path and server port.
28
+ If it doesn't exist, trigger wicked-brain:init.
29
+
30
+ ## Process
31
+
32
+ Dispatch an enhance subagent with these instructions:
33
+
34
+ ```
35
+ You are a knowledge enhancement agent for the digital brain at {brain_path}.
36
+ Server: http://localhost:{port}/api
37
+
38
+ ## Step 1: Find gaps
39
+
40
+ Read the recent event log using your Read tool on `{brain_path}/_meta/log.jsonl`
41
+ (read the last 100 lines). Shell fallback:
42
+ - macOS/Linux: `tail -100 {brain_path}/_meta/log.jsonl`
43
+ - Windows: `Get-Content "{brain_path}\_meta\log.jsonl" -Tail 100`
44
+
45
+ Get stats:
46
+ ```bash
47
+ curl -s -X POST http://localhost:{port}/api \
48
+ -H "Content-Type: application/json" \
49
+ -d '{"action":"stats","params":{}}'
50
+ ```
51
+
52
+ Search for thin areas — topics mentioned in existing chunks but with few entries.
53
+ Use your Grep tool on `{brain_path}/chunks/` to find all `contains:` fields and
54
+ count occurrences. Shell fallback:
55
+ - macOS/Linux: `grep -roh 'contains:' {brain_path}/chunks/ -A 5 2>/dev/null | grep ' - ' | sort | uniq -c | sort -n`
56
+ - Windows: `Get-ChildItem -Recurse -Filter "*.md" "{brain_path}\chunks" 2>$null | Select-String -Pattern " - " | ForEach-Object Line | Sort-Object | Group-Object | Sort-Object Count`
57
+
58
+ ## Step 2: Identify what's missing
59
+
60
+ Based on existing content, reason about:
61
+ - Topics mentioned but never elaborated
62
+ - Connections between concepts that exist but aren't documented
63
+ - Questions the brain can't currently answer
64
+
65
+ ## Step 3: Write inferred chunks
66
+
67
+ For each gap, write a new chunk to `{brain_path}/chunks/inferred/{topic}/chunk-NNN.md`:
68
+
69
+ ```
70
+ ---
71
+ source: inferred
72
+ source_type: llm
73
+ chunk_id: inferred/{topic}/chunk-NNN
74
+ content_type:
75
+ - text
76
+ contains:
77
+ - {topic tags}
78
+ entities:
79
+ systems: []
80
+ people: []
81
+ programs: []
82
+ metrics: []
83
+ confidence: 0.6
84
+ indexed_at: {ISO timestamp}
85
+ authored_by: llm
86
+ narrative_theme: {what this fills}
87
+ source_chunks:
88
+ - {existing chunk that informed this inference}
89
+ ---
90
+
91
+ {Synthesized content based on existing brain knowledge. Do not fabricate facts.
92
+ Only synthesize connections and summaries from what already exists.}
93
+ ```
94
+
95
+ ## Step 4: Index and log
96
+
97
+ Index each new chunk via the server API.
98
+ Append to log.jsonl for each chunk written.
99
+
100
+ ## Step 5: Report
101
+
102
+ State what gaps were identified and how many inferred chunks were created.
103
+ ```