@context-vault/core 2.14.0 → 2.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@context-vault/core",
3
- "version": "2.14.0",
3
+ "version": "2.17.0",
4
4
  "type": "module",
5
5
  "description": "Shared core: capture, index, retrieve, tools, and utilities for context-vault",
6
6
  "main": "src/index.js",
@@ -27,6 +27,8 @@ export function writeEntry(
27
27
  identity_key,
28
28
  expires_at,
29
29
  supersedes,
30
+ source_files,
31
+ tier,
30
32
  userId,
31
33
  },
32
34
  ) {
@@ -103,6 +105,8 @@ export function writeEntry(
103
105
  identity_key,
104
106
  expires_at,
105
107
  supersedes,
108
+ source_files: source_files || null,
109
+ tier: tier || null,
106
110
  userId: userId || null,
107
111
  };
108
112
  }
@@ -134,6 +138,12 @@ export function updateEntryFile(ctx, existing, updates) {
134
138
  updates.supersedes !== undefined
135
139
  ? updates.supersedes
136
140
  : fmMeta.supersedes || null;
141
+ const source_files =
142
+ updates.source_files !== undefined
143
+ ? updates.source_files
144
+ : existing.source_files
145
+ ? JSON.parse(existing.source_files)
146
+ : null;
137
147
 
138
148
  let mergedMeta;
139
149
  if (updates.meta !== undefined) {
@@ -179,6 +189,7 @@ export function updateEntryFile(ctx, existing, updates) {
179
189
  identity_key: existing.identity_key,
180
190
  expires_at,
181
191
  supersedes,
192
+ source_files: source_files || null,
182
193
  userId: existing.user_id || null,
183
194
  };
184
195
  }
@@ -0,0 +1,112 @@
1
+ /**
2
+ * Consolidation utilities — identifies tags and entries that warrant maintenance.
3
+ *
4
+ * These are pure DB queries with no LLM calls. The caller decides what to do
5
+ * with the results (e.g. run create_snapshot, archive entries, report to user).
6
+ */
7
+
8
/**
 * Identifies tags that have accumulated enough entries to warrant consolidation.
 *
 * A tag is "hot" when it has >= tagThreshold non-superseded entries AND no
 * brief/snapshot was saved for it within the last maxSnapshotAgeDays days.
 *
 * @param {import('node:sqlite').DatabaseSync} db
 * @param {{ tagThreshold?: number, maxSnapshotAgeDays?: number }} [opts]
 * @returns {{ tag: string, entryCount: number, lastSnapshotAge: number | null }[]}
 *   Hot tags sorted by entry count, descending.
 */
export function findHotTags(
  db,
  { tagThreshold = 10, maxSnapshotAgeDays = 7 } = {},
) {
  const rows = db
    .prepare(
      `SELECT id, tags, kind FROM vault
       WHERE superseded_by IS NULL
       AND tags IS NOT NULL
       AND tags != '[]'`,
    )
    .all();

  // Count non-superseded entries per tag; rows with malformed tag JSON are
  // skipped rather than aborting the whole scan.
  const tagCounts = new Map();
  for (const row of rows) {
    let tags;
    try {
      tags = JSON.parse(row.tags);
    } catch {
      continue;
    }
    if (!Array.isArray(tags)) continue;
    for (const tag of tags) {
      if (typeof tag !== "string" || !tag) continue;
      tagCounts.set(tag, (tagCounts.get(tag) ?? 0) + 1);
    }
  }

  // Prepared once and reused for every hot tag, instead of re-preparing the
  // same SQL on each loop iteration.
  const recentSnapshotStmt = db.prepare(
    `SELECT created_at FROM vault
     WHERE kind = 'brief'
     AND tags LIKE ? ESCAPE '\\'
     AND created_at > datetime('now', '-' || ? || ' days')
     ORDER BY created_at DESC
     LIMIT 1`,
  );
  const latestSnapshotStmt = db.prepare(
    `SELECT created_at FROM vault
     WHERE kind = 'brief'
     AND tags LIKE ? ESCAPE '\\'
     ORDER BY created_at DESC
     LIMIT 1`,
  );

  const hotTags = [];

  for (const [tag, count] of tagCounts) {
    if (count < tagThreshold) continue;

    // Escape LIKE wildcards so tags containing % or _ cannot match unrelated
    // rows. Tags containing '"' still can't be matched reliably against the
    // JSON-serialized tags column — NOTE(review): confirm tags are validated
    // upstream to exclude double quotes.
    const likePattern = `%"${tag.replace(/[\\%_]/g, (c) => `\\${c}`)}"%`;

    // A recent-enough brief already exists — nothing to consolidate.
    if (recentSnapshotStmt.get(likePattern, String(maxSnapshotAgeDays))) {
      continue;
    }

    const lastSnapshotAny = latestSnapshotStmt.get(likePattern);

    let lastSnapshotAge = null;
    if (lastSnapshotAny) {
      // SQLite's datetime('now') stores UTC as "YYYY-MM-DD HH:MM:SS" with no
      // zone marker; normalize to ISO-8601 UTC so Date parsing doesn't fall
      // back to engine-dependent local-time interpretation.
      const raw = String(lastSnapshotAny.created_at);
      const iso = raw.includes("T") ? raw : `${raw.replace(" ", "T")}Z`;
      const ms = Date.now() - new Date(iso).getTime();
      lastSnapshotAge = Math.floor(ms / (1000 * 60 * 60 * 24));
    }

    hotTags.push({ tag, entryCount: count, lastSnapshotAge });
  }

  hotTags.sort((a, b) => b.entryCount - a.entryCount);

  return hotTags;
}
89
+
90
/**
 * Identifies cold entries (old, never or rarely accessed) that can be archived.
 *
 * Returns IDs of entries that are old enough, have low hit counts, are not
 * superseded, and are not in permanent kinds (decision, architecture, brief).
 *
 * @param {import('node:sqlite').DatabaseSync} db
 * @param {{ maxAgeDays?: number, maxHitCount?: number }} [opts]
 * @returns {string[]} Entry IDs eligible for archiving
 */
export function findColdEntries(db, { maxAgeDays = 90, maxHitCount = 0 } = {}) {
  // Permanent kinds are excluded directly in SQL; the age window is passed
  // as a bound parameter into SQLite's datetime() modifier string.
  const stmt = db.prepare(
    `SELECT id FROM vault
     WHERE hit_count <= ?
     AND created_at < datetime('now', '-' || ? || ' days')
     AND superseded_by IS NULL
     AND kind NOT IN ('decision', 'architecture', 'brief')`,
  );
  const ids = [];
  for (const { id } of stmt.all(maxHitCount, String(maxAgeDays))) {
    ids.push(id);
  }
  return ids;
}
package/src/constants.js CHANGED
@@ -14,8 +14,13 @@ export const MAX_SOURCE_LENGTH = 200;
14
14
  export const MAX_IDENTITY_KEY_LENGTH = 200;
15
15
 
16
16
  export const DEFAULT_GROWTH_THRESHOLDS = {
17
- totalEntries: { warn: 1000, critical: 5000 },
18
- eventEntries: { warn: 500, critical: 2000 },
17
+ totalEntries: { warn: 2000, critical: 5000 },
18
+ eventEntries: { warn: 1000, critical: 3000 },
19
19
  vaultSizeBytes: { warn: 50 * 1024 * 1024, critical: 200 * 1024 * 1024 },
20
20
  eventsWithoutTtl: { warn: 200 },
21
21
  };
22
+
23
+ export const DEFAULT_LIFECYCLE = {
24
+ event: { archiveAfterDays: 90 },
25
+ ephemeral: { archiveAfterDays: 30 },
26
+ };
@@ -21,6 +21,7 @@ const KIND_CATEGORY = {
21
21
  project: "entity",
22
22
  tool: "entity",
23
23
  source: "entity",
24
+ bucket: "entity",
24
25
  // Event — append-only, decaying
25
26
  conversation: "event",
26
27
  message: "event",
@@ -51,10 +52,19 @@ export const KIND_STALENESS_DAYS = {
51
52
  reference: 90,
52
53
  };
53
54
 
55
// Kinds whose entries should persist long-term by default.
const DURABLE_KINDS = new Set(["decision", "architecture", "pattern"]);
// Kinds whose entries are short-lived by default.
const EPHEMERAL_KINDS = new Set(["session", "observation"]);

/** Returns the category for a kind (e.g. "entity", "event"), defaulting to "knowledge". */
export function categoryFor(kind) {
  return KIND_CATEGORY[kind] || "knowledge";
}

/**
 * Returns the default memory tier for a kind: "durable" for long-lived
 * decision-style kinds, "ephemeral" for transient ones, "working" otherwise.
 */
export function defaultTierFor(kind) {
  let tier = "working";
  if (EPHEMERAL_KINDS.has(kind)) {
    tier = "ephemeral";
  } else if (DURABLE_KINDS.has(kind)) {
    tier = "durable";
  }
  return tier;
}
67
+
58
68
  /** Returns the category directory name for a given kind (e.g. "insight" → "knowledge") */
59
69
  export function categoryDirFor(kind) {
60
70
  const cat = categoryFor(kind);
@@ -1,7 +1,7 @@
1
1
  import { existsSync, readFileSync } from "node:fs";
2
2
  import { join, resolve } from "node:path";
3
3
  import { homedir } from "node:os";
4
- import { DEFAULT_GROWTH_THRESHOLDS } from "../constants.js";
4
+ import { DEFAULT_GROWTH_THRESHOLDS, DEFAULT_LIFECYCLE } from "../constants.js";
5
5
 
6
6
  export function parseArgs(argv) {
7
7
  const args = {};
@@ -27,7 +27,7 @@ export function resolveConfig() {
27
27
  join(HOME, ".context-mcp"),
28
28
  );
29
29
  const config = {
30
- vaultDir: join(HOME, "vault"),
30
+ vaultDir: join(HOME, ".vault"),
31
31
  dataDir,
32
32
  dbPath: join(dataDir, "vault.db"),
33
33
  devDir: join(HOME, "dev"),
@@ -35,6 +35,20 @@ export function resolveConfig() {
35
35
  thresholds: { ...DEFAULT_GROWTH_THRESHOLDS },
36
36
  telemetry: false,
37
37
  resolvedFrom: "defaults",
38
+ recall: {
39
+ maxResults: 5,
40
+ maxOutputBytes: 2000,
41
+ minRelevanceScore: 0.3,
42
+ excludeKinds: [],
43
+ excludeCategories: ["event"],
44
+ bodyTruncateChars: 400,
45
+ },
46
+ consolidation: {
47
+ tagThreshold: 10,
48
+ maxAgeDays: 7,
49
+ autoConsolidate: false,
50
+ },
51
+ lifecycle: structuredClone(DEFAULT_LIFECYCLE),
38
52
  };
39
53
 
40
54
  const configPath = join(dataDir, "config.json");
@@ -49,6 +63,12 @@ export function resolveConfig() {
49
63
  if (fc.dbPath) config.dbPath = fc.dbPath;
50
64
  if (fc.devDir) config.devDir = fc.devDir;
51
65
  if (fc.eventDecayDays != null) config.eventDecayDays = fc.eventDecayDays;
66
+ if (fc.growthWarningThreshold != null) {
67
+ config.thresholds.totalEntries = {
68
+ ...config.thresholds.totalEntries,
69
+ warn: Number(fc.growthWarningThreshold),
70
+ };
71
+ }
52
72
  if (fc.thresholds) {
53
73
  const t = fc.thresholds;
54
74
  if (t.totalEntries)
@@ -73,6 +93,30 @@ export function resolveConfig() {
73
93
  };
74
94
  }
75
95
  if (fc.telemetry != null) config.telemetry = fc.telemetry === true;
96
+ if (fc.recall && typeof fc.recall === "object") {
97
+ const r = fc.recall;
98
+ if (r.maxResults != null)
99
+ config.recall.maxResults = Number(r.maxResults);
100
+ if (r.maxOutputBytes != null)
101
+ config.recall.maxOutputBytes = Number(r.maxOutputBytes);
102
+ if (r.minRelevanceScore != null)
103
+ config.recall.minRelevanceScore = Number(r.minRelevanceScore);
104
+ if (Array.isArray(r.excludeKinds))
105
+ config.recall.excludeKinds = r.excludeKinds;
106
+ if (Array.isArray(r.excludeCategories))
107
+ config.recall.excludeCategories = r.excludeCategories;
108
+ if (r.bodyTruncateChars != null)
109
+ config.recall.bodyTruncateChars = Number(r.bodyTruncateChars);
110
+ }
111
+ if (fc.consolidation && typeof fc.consolidation === "object") {
112
+ const c = fc.consolidation;
113
+ if (c.tagThreshold != null)
114
+ config.consolidation.tagThreshold = Number(c.tagThreshold);
115
+ if (c.maxAgeDays != null)
116
+ config.consolidation.maxAgeDays = Number(c.maxAgeDays);
117
+ if (c.autoConsolidate != null)
118
+ config.consolidation.autoConsolidate = c.autoConsolidate === true;
119
+ }
76
120
  // Hosted account linking (Phase 4)
77
121
  if (fc.hostedUrl) config.hostedUrl = fc.hostedUrl;
78
122
  if (fc.apiKey) config.apiKey = fc.apiKey;
@@ -212,7 +212,7 @@ export function gatherVaultStatus(ctx, opts = {}) {
212
212
  *
213
213
  * @param {object} status — result of gatherVaultStatus()
214
214
  * @param {object} thresholds — from config.thresholds
215
- * @returns {{ warnings: Array, hasCritical: boolean, hasWarnings: boolean, actions: string[] }}
215
+ * @returns {{ warnings: Array, hasCritical: boolean, hasWarnings: boolean, actions: string[], kindBreakdown: Array }}
216
216
  */
217
217
  export function computeGrowthWarnings(status, thresholds) {
218
218
  if (!thresholds)
@@ -221,6 +221,7 @@ export function computeGrowthWarnings(status, thresholds) {
221
221
  hasCritical: false,
222
222
  hasWarnings: false,
223
223
  actions: [],
224
+ kindBreakdown: [],
224
225
  };
225
226
 
226
227
  const t = thresholds;
@@ -235,12 +236,16 @@ export function computeGrowthWarnings(status, thresholds) {
235
236
  dbSizeBytes = 0,
236
237
  } = status;
237
238
 
239
+ let totalExceeded = false;
240
+
238
241
  if (t.totalEntries?.critical != null && total >= t.totalEntries.critical) {
242
+ totalExceeded = true;
239
243
  warnings.push({
240
244
  level: "critical",
241
245
  message: `Total entries: ${total.toLocaleString()} (exceeds critical limit of ${t.totalEntries.critical.toLocaleString()})`,
242
246
  });
243
247
  } else if (t.totalEntries?.warn != null && total >= t.totalEntries.warn) {
248
+ totalExceeded = true;
244
249
  warnings.push({
245
250
  level: "warn",
246
251
  message: `Total entries: ${total.toLocaleString()} (exceeds recommended ${t.totalEntries.warn.toLocaleString()})`,
@@ -320,5 +325,26 @@ export function computeGrowthWarnings(status, thresholds) {
320
325
  actions.push("Consider archiving events older than 90 days");
321
326
  }
322
327
 
323
- return { warnings, hasCritical, hasWarnings: warnings.length > 0, actions };
328
+ const kindBreakdown = totalExceeded
329
+ ? buildKindBreakdown(status.kindCounts, total)
330
+ : [];
331
+
332
+ return {
333
+ warnings,
334
+ hasCritical,
335
+ hasWarnings: warnings.length > 0,
336
+ actions,
337
+ kindBreakdown,
338
+ };
339
+ }
340
+
341
+ function buildKindBreakdown(kindCounts, total) {
342
+ if (!kindCounts?.length || total === 0) return [];
343
+ return [...kindCounts]
344
+ .sort((a, b) => b.c - a.c)
345
+ .map(({ kind, c }) => ({
346
+ kind,
347
+ count: c,
348
+ pct: Math.round((c / total) * 100),
349
+ }));
324
350
  }
package/src/index/db.js CHANGED
@@ -63,7 +63,11 @@ export const SCHEMA_DDL = `
63
63
  body_encrypted BLOB,
64
64
  title_encrypted BLOB,
65
65
  meta_encrypted BLOB,
66
- iv BLOB
66
+ iv BLOB,
67
+ hit_count INTEGER DEFAULT 0,
68
+ last_accessed_at TEXT,
69
+ source_files TEXT,
70
+ tier TEXT DEFAULT 'working' CHECK(tier IN ('ephemeral', 'working', 'durable'))
67
71
  );
68
72
 
69
73
  CREATE INDEX IF NOT EXISTS idx_vault_kind ON vault(kind);
@@ -74,6 +78,7 @@ export const SCHEMA_DDL = `
74
78
  CREATE INDEX IF NOT EXISTS idx_vault_team ON vault(team_id);
75
79
  CREATE UNIQUE INDEX IF NOT EXISTS idx_vault_identity ON vault(user_id, kind, identity_key) WHERE identity_key IS NOT NULL;
76
80
  CREATE INDEX IF NOT EXISTS idx_vault_superseded ON vault(superseded_by) WHERE superseded_by IS NOT NULL;
81
+ CREATE INDEX IF NOT EXISTS idx_vault_tier ON vault(tier);
77
82
 
78
83
  -- Single FTS5 table
79
84
  CREATE VIRTUAL TABLE IF NOT EXISTS vault_fts USING fts5(
@@ -151,13 +156,13 @@ export async function initDatabase(dbPath) {
151
156
 
152
157
  const freshDb = createDb(dbPath);
153
158
  freshDb.exec(SCHEMA_DDL);
154
- freshDb.exec("PRAGMA user_version = 9");
159
+ freshDb.exec("PRAGMA user_version = 12");
155
160
  return freshDb;
156
161
  }
157
162
 
158
163
  if (version < 5) {
159
164
  db.exec(SCHEMA_DDL);
160
- db.exec("PRAGMA user_version = 9");
165
+ db.exec("PRAGMA user_version = 12");
161
166
  } else if (version === 5) {
162
167
  // v5 -> v6 migration: add multi-tenancy + encryption columns
163
168
  // Wrapped in transaction with duplicate-column guards for idempotent retry
@@ -192,7 +197,9 @@ export async function initDatabase(dbPath) {
192
197
  db.exec(
193
198
  `CREATE INDEX IF NOT EXISTS idx_vault_superseded ON vault(superseded_by) WHERE superseded_by IS NOT NULL`,
194
199
  );
195
- db.exec("PRAGMA user_version = 9");
200
+ addColumnSafe(`ALTER TABLE vault ADD COLUMN hit_count INTEGER DEFAULT 0`);
201
+ addColumnSafe(`ALTER TABLE vault ADD COLUMN last_accessed_at TEXT`);
202
+ db.exec("PRAGMA user_version = 10");
196
203
  });
197
204
  } else if (version === 6) {
198
205
  // v6 -> v7+v8+v9 migration: add team_id, updated_at, superseded_by columns
@@ -222,7 +229,17 @@ export async function initDatabase(dbPath) {
222
229
  db.exec(
223
230
  `CREATE INDEX IF NOT EXISTS idx_vault_superseded ON vault(superseded_by) WHERE superseded_by IS NOT NULL`,
224
231
  );
225
- db.exec("PRAGMA user_version = 9");
232
+ try {
233
+ db.exec(`ALTER TABLE vault ADD COLUMN hit_count INTEGER DEFAULT 0`);
234
+ } catch (e) {
235
+ if (!e.message.includes("duplicate column")) throw e;
236
+ }
237
+ try {
238
+ db.exec(`ALTER TABLE vault ADD COLUMN last_accessed_at TEXT`);
239
+ } catch (e) {
240
+ if (!e.message.includes("duplicate column")) throw e;
241
+ }
242
+ db.exec("PRAGMA user_version = 10");
226
243
  });
227
244
  } else if (version === 7) {
228
245
  // v7 -> v8+v9 migration: add updated_at, superseded_by columns
@@ -246,7 +263,17 @@ export async function initDatabase(dbPath) {
246
263
  db.exec(
247
264
  `CREATE INDEX IF NOT EXISTS idx_vault_superseded ON vault(superseded_by) WHERE superseded_by IS NOT NULL`,
248
265
  );
249
- db.exec("PRAGMA user_version = 9");
266
+ try {
267
+ db.exec(`ALTER TABLE vault ADD COLUMN hit_count INTEGER DEFAULT 0`);
268
+ } catch (e) {
269
+ if (!e.message.includes("duplicate column")) throw e;
270
+ }
271
+ try {
272
+ db.exec(`ALTER TABLE vault ADD COLUMN last_accessed_at TEXT`);
273
+ } catch (e) {
274
+ if (!e.message.includes("duplicate column")) throw e;
275
+ }
276
+ db.exec("PRAGMA user_version = 10");
250
277
  });
251
278
  } else if (version === 8) {
252
279
  // v8 -> v9 migration: add superseded_by column
@@ -261,6 +288,60 @@ export async function initDatabase(dbPath) {
261
288
  );
262
289
  db.exec("PRAGMA user_version = 9");
263
290
  });
291
+ // fall through to v9 migration
292
+ runTransaction(db, () => {
293
+ const addColumnSafe = (sql) => {
294
+ try {
295
+ db.exec(sql);
296
+ } catch (e) {
297
+ if (!e.message.includes("duplicate column")) throw e;
298
+ }
299
+ };
300
+ addColumnSafe(`ALTER TABLE vault ADD COLUMN hit_count INTEGER DEFAULT 0`);
301
+ addColumnSafe(`ALTER TABLE vault ADD COLUMN last_accessed_at TEXT`);
302
+ db.exec("PRAGMA user_version = 10");
303
+ });
304
+ } else if (version === 9) {
305
+ // v9 -> v10 migration: add hit_count + last_accessed_at columns
306
+ runTransaction(db, () => {
307
+ const addColumnSafe = (sql) => {
308
+ try {
309
+ db.exec(sql);
310
+ } catch (e) {
311
+ if (!e.message.includes("duplicate column")) throw e;
312
+ }
313
+ };
314
+ addColumnSafe(`ALTER TABLE vault ADD COLUMN hit_count INTEGER DEFAULT 0`);
315
+ addColumnSafe(`ALTER TABLE vault ADD COLUMN last_accessed_at TEXT`);
316
+ db.exec("PRAGMA user_version = 10");
317
+ });
318
+ }
319
+
320
+ if (version >= 5 && version <= 10) {
321
+ // v10 -> v11 migration: add source_files column for stale-linking
322
+ runTransaction(db, () => {
323
+ try {
324
+ db.exec(`ALTER TABLE vault ADD COLUMN source_files TEXT`);
325
+ } catch (e) {
326
+ if (!e.message.includes("duplicate column")) throw e;
327
+ }
328
+ db.exec("PRAGMA user_version = 11");
329
+ });
330
+ }
331
+
332
+ if (version >= 5 && version <= 11) {
333
+ // v11 -> v12 migration: add tier column for memory tiers
334
+ runTransaction(db, () => {
335
+ try {
336
+ db.exec(
337
+ `ALTER TABLE vault ADD COLUMN tier TEXT DEFAULT 'working' CHECK(tier IN ('ephemeral', 'working', 'durable'))`,
338
+ );
339
+ } catch (e) {
340
+ if (!e.message.includes("duplicate column")) throw e;
341
+ }
342
+ db.exec(`CREATE INDEX IF NOT EXISTS idx_vault_tier ON vault(tier)`);
343
+ db.exec("PRAGMA user_version = 12");
344
+ });
264
345
  }
265
346
 
266
347
  return db;
@@ -270,10 +351,10 @@ export function prepareStatements(db) {
270
351
  try {
271
352
  return {
272
353
  insertEntry: db.prepare(
273
- `INSERT INTO vault (id, user_id, kind, category, title, body, meta, tags, source, file_path, identity_key, expires_at, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
354
+ `INSERT INTO vault (id, user_id, kind, category, title, body, meta, tags, source, file_path, identity_key, expires_at, created_at, updated_at, source_files, tier) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
274
355
  ),
275
356
  insertEntryEncrypted: db.prepare(
276
- `INSERT INTO vault (id, user_id, kind, category, title, body, meta, tags, source, file_path, identity_key, expires_at, created_at, updated_at, body_encrypted, title_encrypted, meta_encrypted, iv) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
357
+ `INSERT INTO vault (id, user_id, kind, category, title, body, meta, tags, source, file_path, identity_key, expires_at, created_at, updated_at, body_encrypted, title_encrypted, meta_encrypted, iv, source_files, tier) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
277
358
  ),
278
359
  updateEntry: db.prepare(
279
360
  `UPDATE vault SET title = ?, body = ?, meta = ?, tags = ?, source = ?, category = ?, identity_key = ?, expires_at = ?, updated_at = datetime('now') WHERE file_path = ?`,
@@ -286,7 +367,10 @@ export function prepareStatements(db) {
286
367
  `SELECT * FROM vault WHERE kind = ? AND identity_key = ? AND user_id IS ?`,
287
368
  ),
288
369
  upsertByIdentityKey: db.prepare(
289
- `UPDATE vault SET title = ?, body = ?, meta = ?, tags = ?, source = ?, category = ?, file_path = ?, expires_at = ?, updated_at = datetime('now') WHERE kind = ? AND identity_key = ? AND user_id IS ?`,
370
+ `UPDATE vault SET title = ?, body = ?, meta = ?, tags = ?, source = ?, category = ?, file_path = ?, expires_at = ?, source_files = ?, updated_at = datetime('now') WHERE kind = ? AND identity_key = ? AND user_id IS ?`,
371
+ ),
372
+ updateSourceFiles: db.prepare(
373
+ `UPDATE vault SET source_files = ? WHERE id = ?`,
290
374
  ),
291
375
  insertVecStmt: db.prepare(
292
376
  `INSERT INTO vault_vec (rowid, embedding) VALUES (?, ?)`,
@@ -321,3 +405,12 @@ export function deleteVec(stmts, rowid) {
321
405
  if (safeRowid < 1n) throw new Error(`Invalid rowid: ${rowid}`);
322
406
  stmts.deleteVecStmt.run(safeRowid);
323
407
  }
408
+
409
/**
 * Checks whether the database handle can execute a trivial query.
 *
 * @param {import('node:sqlite').DatabaseSync} db
 * @returns {boolean} true if `SELECT 1` can be prepared and executed
 */
export function testConnection(db) {
  let usable;
  try {
    db.prepare("SELECT 1").get();
    usable = true;
  } catch {
    // Any failure (closed handle, corrupt file, …) means the connection
    // is not usable; the specific error is intentionally discarded.
    usable = false;
  }
  return usable;
}
@@ -10,7 +10,11 @@
10
10
  import { readFileSync, readdirSync, existsSync, unlinkSync } from "node:fs";
11
11
  import { join, basename } from "node:path";
12
12
  import { dirToKind, walkDir, ulid } from "../core/files.js";
13
- import { categoryFor, CATEGORY_DIRS } from "../core/categories.js";
13
+ import {
14
+ categoryFor,
15
+ defaultTierFor,
16
+ CATEGORY_DIRS,
17
+ } from "../core/categories.js";
14
18
  import {
15
19
  parseFrontmatter,
16
20
  parseEntryFromMarkdown,
@@ -46,6 +50,8 @@ export async function indexEntry(
46
50
  createdAt,
47
51
  identity_key,
48
52
  expires_at,
53
+ source_files,
54
+ tier,
49
55
  userId,
50
56
  },
51
57
  ) {
@@ -56,7 +62,9 @@ export async function indexEntry(
56
62
 
57
63
  const tagsJson = tags ? JSON.stringify(tags) : null;
58
64
  const metaJson = meta ? JSON.stringify(meta) : null;
65
+ const sourceFilesJson = source_files ? JSON.stringify(source_files) : null;
59
66
  const cat = category || categoryFor(kind);
67
+ const effectiveTier = tier || defaultTierFor(kind);
60
68
  const userIdVal = userId || null;
61
69
 
62
70
  let wasUpdate = false;
@@ -78,6 +86,7 @@ export async function indexEntry(
78
86
  cat,
79
87
  filePath,
80
88
  expires_at || null,
89
+ sourceFilesJson,
81
90
  kind,
82
91
  identity_key,
83
92
  userIdVal,
@@ -116,6 +125,8 @@ export async function indexEntry(
116
125
  encrypted.title_encrypted,
117
126
  encrypted.meta_encrypted,
118
127
  encrypted.iv,
128
+ sourceFilesJson,
129
+ effectiveTier,
119
130
  );
120
131
  } else {
121
132
  ctx.stmts.insertEntry.run(
@@ -133,6 +144,8 @@ export async function indexEntry(
133
144
  expires_at || null,
134
145
  createdAt,
135
146
  createdAt,
147
+ sourceFilesJson,
148
+ effectiveTier,
136
149
  );
137
150
  }
138
151
  } catch (e) {
@@ -148,6 +161,16 @@ export async function indexEntry(
148
161
  expires_at || null,
149
162
  filePath,
150
163
  );
164
+ if (sourceFilesJson !== null && ctx.stmts.updateSourceFiles) {
165
+ const entryRow = ctx.stmts.getRowidByPath.get(filePath);
166
+ if (entryRow) {
167
+ const idRow = ctx.db
168
+ .prepare("SELECT id FROM vault WHERE file_path = ?")
169
+ .get(filePath);
170
+ if (idRow)
171
+ ctx.stmts.updateSourceFiles.run(sourceFilesJson, idRow.id);
172
+ }
173
+ }
151
174
  wasUpdate = true;
152
175
  } else {
153
176
  throw e;
@@ -173,18 +196,20 @@ export async function indexEntry(
173
196
  );
174
197
  }
175
198
 
176
- // Embeddings are always generated from plaintext (before encryption)
177
- const embeddingText = [title, body].filter(Boolean).join(" ");
178
- const embedding = await ctx.embed(embeddingText);
199
+ // Skip embedding generation for event entries — they are excluded from
200
+ // default semantic search and don't need vector representations
201
+ if (cat !== "event") {
202
+ const embeddingText = [title, body].filter(Boolean).join(" ");
203
+ const embedding = await ctx.embed(embeddingText);
179
204
 
180
- // Upsert vec: delete old if exists, then insert new (skip if embedding unavailable)
181
- if (embedding) {
182
- try {
183
- ctx.deleteVec(rowid);
184
- } catch {
185
- /* no-op if not found */
205
+ if (embedding) {
206
+ try {
207
+ ctx.deleteVec(rowid);
208
+ } catch {
209
+ /* no-op if not found */
210
+ }
211
+ ctx.insertVec(rowid, embedding);
186
212
  }
187
- ctx.insertVec(rowid, embedding);
188
213
  }
189
214
  }
190
215
 
@@ -347,15 +372,17 @@ export async function reindex(ctx, opts = {}) {
347
372
  fmMeta.updated || created,
348
373
  );
349
374
  if (result.changes > 0) {
350
- const rowidResult = ctx.stmts.getRowid.get(id);
351
- if (rowidResult?.rowid) {
352
- const embeddingText = [parsed.title, parsed.body]
353
- .filter(Boolean)
354
- .join(" ");
355
- pendingEmbeds.push({
356
- rowid: rowidResult.rowid,
357
- text: embeddingText,
358
- });
375
+ if (category !== "event") {
376
+ const rowidResult = ctx.stmts.getRowid.get(id);
377
+ if (rowidResult?.rowid) {
378
+ const embeddingText = [parsed.title, parsed.body]
379
+ .filter(Boolean)
380
+ .join(" ");
381
+ pendingEmbeds.push({
382
+ rowid: rowidResult.rowid,
383
+ text: embeddingText,
384
+ });
385
+ }
359
386
  }
360
387
  stats.added++;
361
388
  } else {
@@ -384,7 +411,7 @@ export async function reindex(ctx, opts = {}) {
384
411
  );
385
412
 
386
413
  // Queue re-embed if title or body changed (vector ops deferred to Phase 2)
387
- if (bodyChanged || titleChanged) {
414
+ if ((bodyChanged || titleChanged) && category !== "event") {
388
415
  const rowid = ctx.stmts.getRowid.get(existing.id)?.rowid;
389
416
  if (rowid) {
390
417
  const embeddingText = [parsed.title, parsed.body]
package/src/index.js CHANGED
@@ -8,6 +8,7 @@
8
8
  export {
9
9
  categoryFor,
10
10
  categoryDirFor,
11
+ defaultTierFor,
11
12
  CATEGORY_DIRS,
12
13
  } from "./core/categories.js";
13
14
  export { parseArgs, resolveConfig } from "./core/config.js";
@@ -52,6 +53,9 @@ export { indexEntry, reindex, pruneExpired } from "./index/index.js";
52
53
  // Retrieve layer
53
54
  export { hybridSearch } from "./retrieve/index.js";
54
55
 
56
+ // Consolidation utilities
57
+ export { findHotTags, findColdEntries } from "./consolidation/index.js";
58
+
55
59
  // Server tools & helpers
56
60
  export { registerTools } from "./server/tools.js";
57
61
  export {