akm-cli 0.5.0 → 0.6.0-rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/CHANGELOG.md +53 -5
  2. package/README.md +9 -9
  3. package/dist/cli.js +379 -1448
  4. package/dist/{completions.js → commands/completions.js} +1 -1
  5. package/dist/{config-cli.js → commands/config-cli.js} +109 -11
  6. package/dist/commands/curate.js +263 -0
  7. package/dist/{info.js → commands/info.js} +17 -11
  8. package/dist/{init.js → commands/init.js} +4 -4
  9. package/dist/{install-audit.js → commands/install-audit.js} +14 -2
  10. package/dist/{installed-kits.js → commands/installed-stashes.js} +122 -50
  11. package/dist/commands/migration-help.js +141 -0
  12. package/dist/{registry-search.js → commands/registry-search.js} +68 -9
  13. package/dist/commands/remember.js +178 -0
  14. package/dist/{stash-search.js → commands/search.js} +28 -69
  15. package/dist/{self-update.js → commands/self-update.js} +3 -3
  16. package/dist/{stash-show.js → commands/show.js} +106 -81
  17. package/dist/{stash-add.js → commands/source-add.js} +133 -67
  18. package/dist/{stash-clone.js → commands/source-clone.js} +15 -13
  19. package/dist/{stash-source-manage.js → commands/source-manage.js} +24 -24
  20. package/dist/{vault.js → commands/vault.js} +43 -0
  21. package/dist/{stash-ref.js → core/asset-ref.js} +4 -4
  22. package/dist/{asset-registry.js → core/asset-registry.js} +30 -6
  23. package/dist/{asset-spec.js → core/asset-spec.js} +13 -6
  24. package/dist/{common.js → core/common.js} +147 -50
  25. package/dist/{config.js → core/config.js} +288 -29
  26. package/dist/core/errors.js +90 -0
  27. package/dist/{frontmatter.js → core/frontmatter.js} +64 -8
  28. package/dist/{paths.js → core/paths.js} +4 -4
  29. package/dist/core/write-source.js +280 -0
  30. package/dist/{local-search.js → indexer/db-search.js} +49 -32
  31. package/dist/{db.js → indexer/db.js} +210 -81
  32. package/dist/{file-context.js → indexer/file-context.js} +3 -3
  33. package/dist/{indexer.js → indexer/indexer.js} +153 -30
  34. package/dist/{manifest.js → indexer/manifest.js} +10 -10
  35. package/dist/{matchers.js → indexer/matchers.js} +4 -7
  36. package/dist/{metadata.js → indexer/metadata.js} +9 -5
  37. package/dist/{search-source.js → indexer/search-source.js} +97 -55
  38. package/dist/{semantic-status.js → indexer/semantic-status.js} +2 -2
  39. package/dist/{walker.js → indexer/walker.js} +1 -1
  40. package/dist/{lockfile.js → integrations/lockfile.js} +29 -2
  41. package/dist/{llm.js → llm/client.js} +12 -48
  42. package/dist/llm/embedder.js +127 -0
  43. package/dist/llm/embedders/cache.js +47 -0
  44. package/dist/llm/embedders/local.js +152 -0
  45. package/dist/llm/embedders/remote.js +121 -0
  46. package/dist/llm/embedders/types.js +39 -0
  47. package/dist/llm/metadata-enhance.js +53 -0
  48. package/dist/output/cli-hints.js +301 -0
  49. package/dist/output/context.js +95 -0
  50. package/dist/{renderers.js → output/renderers.js} +57 -61
  51. package/dist/output/shapes.js +212 -0
  52. package/dist/output/text.js +520 -0
  53. package/dist/{registry-build-index.js → registry/build-index.js} +48 -32
  54. package/dist/{create-provider-registry.js → registry/create-provider-registry.js} +6 -2
  55. package/dist/registry/factory.js +33 -0
  56. package/dist/{origin-resolve.js → registry/origin-resolve.js} +1 -1
  57. package/dist/registry/providers/index.js +11 -0
  58. package/dist/{providers → registry/providers}/skills-sh.js +60 -4
  59. package/dist/{providers → registry/providers}/static-index.js +126 -56
  60. package/dist/registry/providers/types.js +25 -0
  61. package/dist/{registry-resolve.js → registry/resolve.js} +10 -6
  62. package/dist/{detect.js → setup/detect.js} +0 -27
  63. package/dist/{ripgrep-install.js → setup/ripgrep-install.js} +1 -1
  64. package/dist/{ripgrep-resolve.js → setup/ripgrep-resolve.js} +2 -2
  65. package/dist/{setup.js → setup/setup.js} +162 -129
  66. package/dist/setup/steps.js +45 -0
  67. package/dist/{kit-include.js → sources/include.js} +1 -1
  68. package/dist/sources/provider-factory.js +36 -0
  69. package/dist/sources/provider.js +21 -0
  70. package/dist/sources/providers/filesystem.js +35 -0
  71. package/dist/{stash-providers → sources/providers}/git.js +218 -28
  72. package/dist/{stash-providers → sources/providers}/index.js +4 -4
  73. package/dist/sources/providers/install-types.js +14 -0
  74. package/dist/sources/providers/npm.js +160 -0
  75. package/dist/sources/providers/provider-utils.js +173 -0
  76. package/dist/sources/providers/sync-from-ref.js +45 -0
  77. package/dist/sources/providers/tar-utils.js +154 -0
  78. package/dist/{stash-providers → sources/providers}/website.js +60 -20
  79. package/dist/{stash-resolve.js → sources/resolve.js} +13 -12
  80. package/dist/{wiki.js → wiki/wiki.js} +18 -17
  81. package/dist/{workflow-authoring.js → workflows/authoring.js} +48 -17
  82. package/dist/{workflow-cli.js → workflows/cli.js} +2 -1
  83. package/dist/{workflow-db.js → workflows/db.js} +1 -1
  84. package/dist/workflows/document-cache.js +20 -0
  85. package/dist/workflows/parser.js +379 -0
  86. package/dist/workflows/renderer.js +78 -0
  87. package/dist/{workflow-runs.js → workflows/runs.js} +84 -30
  88. package/dist/workflows/schema.js +11 -0
  89. package/dist/workflows/validator.js +48 -0
  90. package/docs/README.md +30 -0
  91. package/docs/migration/release-notes/0.0.13.md +4 -0
  92. package/docs/migration/release-notes/0.1.0.md +6 -0
  93. package/docs/migration/release-notes/0.2.0.md +6 -0
  94. package/docs/migration/release-notes/0.3.0.md +5 -0
  95. package/docs/migration/release-notes/0.5.0.md +6 -0
  96. package/docs/migration/release-notes/0.6.0.md +75 -0
  97. package/docs/migration/release-notes/README.md +21 -0
  98. package/package.json +3 -2
  99. package/dist/embedder.js +0 -351
  100. package/dist/errors.js +0 -34
  101. package/dist/migration-help.js +0 -110
  102. package/dist/registry-factory.js +0 -19
  103. package/dist/registry-install.js +0 -532
  104. package/dist/ripgrep.js +0 -2
  105. package/dist/stash-provider-factory.js +0 -35
  106. package/dist/stash-provider.js +0 -1
  107. package/dist/stash-providers/filesystem.js +0 -41
  108. package/dist/stash-providers/openviking.js +0 -348
  109. package/dist/stash-providers/provider-utils.js +0 -11
  110. package/dist/stash-types.js +0 -1
  111. package/dist/workflow-markdown.js +0 -251
  112. package/dist/{markdown.js → core/markdown.js} +0 -0
  113. package/dist/{warn.js → core/warn.js} +0 -0
  114. package/dist/{search-fields.js → indexer/search-fields.js} +0 -0
  115. package/dist/{usage-events.js → indexer/usage-events.js} +0 -0
  116. package/dist/{github.js → integrations/github.js} +0 -0
  117. package/dist/{registry-provider.js → registry/types.js} +0 -0
  118. package/dist/{registry-types.js → sources/types.js} +0 -0
@@ -2,13 +2,13 @@ import { Database } from "bun:sqlite";
2
2
  import fs from "node:fs";
3
3
  import { createRequire } from "node:module";
4
4
  import path from "node:path";
5
- import { cosineSimilarity } from "./embedder";
6
- import { getDbPath } from "./paths";
5
+ import { getDbPath } from "../core/paths";
6
+ import { warn } from "../core/warn";
7
+ import { cosineSimilarity } from "../llm/embedders/types";
7
8
  import { buildSearchFields } from "./search-fields";
8
9
  import { ensureUsageEventsSchema } from "./usage-events";
9
- import { warn } from "./warn";
10
10
  // ── Constants ───────────────────────────────────────────────────────────────
11
- export const DB_VERSION = 8;
11
+ export const DB_VERSION = 9;
12
12
  export const EMBEDDING_DIM = 384;
13
13
  // ── Database lifecycle ──────────────────────────────────────────────────────
14
14
  export function openDatabase(dbPath, options) {
@@ -19,6 +19,7 @@ export function openDatabase(dbPath, options) {
19
19
  }
20
20
  const db = new Database(resolvedPath);
21
21
  db.exec("PRAGMA journal_mode = WAL");
22
+ db.exec("PRAGMA busy_timeout = 5000");
22
23
  db.exec("PRAGMA foreign_keys = ON");
23
24
  // Try to load sqlite-vec extension
24
25
  loadVecExtension(db);
@@ -73,7 +74,6 @@ export function warnIfVecMissing(db, { once } = { once: false }) {
73
74
  /* embeddings table may not exist yet during init */
74
75
  }
75
76
  }
76
- // ── Schema ──────────────────────────────────────────────────────────────────
77
77
  function ensureSchema(db, embeddingDim) {
78
78
  // Create meta table first so we can check version
79
79
  db.exec(`
@@ -84,30 +84,11 @@ function ensureSchema(db, embeddingDim) {
84
84
  `);
85
85
  // Check stored version — if it differs from DB_VERSION, drop and recreate all tables.
86
86
  // Usage events are preserved across version upgrades so that utility score
87
- // history is not silently lost.
88
- const storedVersion = getMeta(db, "version");
89
- if (storedVersion && storedVersion !== String(DB_VERSION)) {
90
- // Back up usage_events before dropping tables
91
- let usageBackup = [];
92
- try {
93
- usageBackup = db.prepare("SELECT * FROM usage_events").all();
94
- }
95
- catch {
96
- /* table may not exist in older versions */
97
- }
98
- db.exec("DROP TABLE IF EXISTS utility_scores");
99
- db.exec("DROP TABLE IF EXISTS usage_events");
100
- db.exec("DROP TABLE IF EXISTS embeddings");
101
- db.exec("DROP TABLE IF EXISTS entries_vec");
102
- db.exec("DROP TABLE IF EXISTS entries_fts");
103
- db.exec("DROP INDEX IF EXISTS idx_entries_dir");
104
- db.exec("DROP INDEX IF EXISTS idx_entries_type");
105
- db.exec("DROP TABLE IF EXISTS entries");
106
- db.exec("DELETE FROM index_meta");
107
- // Store backup for restoration after ensureUsageEventsSchema runs
108
- db.__usageBackup = usageBackup;
109
- console.warn("[akm] Index rebuilt due to version upgrade. Run 'akm index' to repopulate.");
110
- }
87
+ // history is not silently lost. The backup is captured here and threaded
88
+ // explicitly to `restoreUsageEventsBackup` below — the previous version
89
+ // attached `__usageBackup` to the Database instance via a typeless property
90
+ // injection, which was a source of fragile coupling.
91
+ const usageBackup = handleVersionUpgrade(db);
111
92
  db.exec(`
112
93
  CREATE TABLE IF NOT EXISTS entries (
113
94
  id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -122,6 +103,22 @@ function ensureSchema(db, embeddingDim) {
122
103
 
123
104
  CREATE INDEX IF NOT EXISTS idx_entries_dir ON entries(dir_path);
124
105
  CREATE INDEX IF NOT EXISTS idx_entries_type ON entries(entry_type);
106
+ `);
107
+ // Validated WorkflowDocument JSON, one row per indexed workflow entry.
108
+ // Pure index data — fully rebuilt on each `akm index`. ON DELETE CASCADE
109
+ // means clearing entries (full rebuild or per-dir delete) drops these too.
110
+ db.exec(`
111
+ CREATE TABLE IF NOT EXISTS workflow_documents (
112
+ entry_id INTEGER PRIMARY KEY REFERENCES entries(id) ON DELETE CASCADE,
113
+ schema_version INTEGER NOT NULL,
114
+ document_json TEXT NOT NULL,
115
+ source_path TEXT NOT NULL,
116
+ source_hash TEXT NOT NULL,
117
+ updated_at TEXT NOT NULL
118
+ );
119
+
120
+ CREATE INDEX IF NOT EXISTS idx_workflow_documents_source_path
121
+ ON workflow_documents(source_path);
125
122
  `);
126
123
  // Set version immediately after table creation so a crash before the end of
127
124
  // ensureSchema() does not leave the database in a versionless state on next open.
@@ -165,6 +162,15 @@ function ensureSchema(db, embeddingDim) {
165
162
  updated_at TEXT NOT NULL DEFAULT (datetime('now')),
166
163
  FOREIGN KEY (entry_id) REFERENCES entries(id) ON DELETE CASCADE
167
164
  );
165
+ `);
166
+ // FTS-dirty queue. Created here (not lazily on first upsert) so the
167
+ // per-entry write path doesn't issue a CREATE TABLE IF NOT EXISTS on
168
+ // every call — that DDL would fire thousands of times during a full
169
+ // index. See `markFtsDirty` and `rebuildFts({ incremental: true })`.
170
+ db.exec(`
171
+ CREATE TABLE IF NOT EXISTS entries_fts_dirty (
172
+ entry_id INTEGER PRIMARY KEY
173
+ );
168
174
  `);
169
175
  // sqlite-vec table
170
176
  if (isVecAvailable(db)) {
@@ -220,31 +226,67 @@ function ensureSchema(db, embeddingDim) {
220
226
  }
221
227
  // Usage telemetry table
222
228
  ensureUsageEventsSchema(db);
223
- // Restore usage_events that were backed up during a version upgrade.
224
- // Wrapped in outer try/catch because schema changes across versions may
225
- // make the backup incompatible with the new table definition.
226
- const dbAny = db;
227
- const backup = dbAny.__usageBackup;
228
- if (backup && backup.length > 0) {
229
- try {
230
- db.transaction(() => {
231
- const cols = Object.keys(backup[0]);
232
- const placeholders = cols.map(() => "?").join(", ");
233
- const insert = db.prepare(`INSERT INTO usage_events (${cols.join(", ")}) VALUES (${placeholders})`);
234
- for (const row of backup) {
235
- try {
236
- insert.run(...cols.map((c) => row[c]));
237
- }
238
- catch {
239
- /* skip rows that fail */
240
- }
229
+ // Restore usage_events backed up by the version-upgrade path above.
230
+ restoreUsageEventsBackup(db, usageBackup);
231
+ }
232
+ /**
233
+ * Detect a stored DB version that differs from {@link DB_VERSION}, drop the
234
+ * old schema, and return a backup of the previous `usage_events` rows so the
235
+ * rest of `ensureSchema()` can restore them once the new table exists.
236
+ *
237
+ * Returns an empty array when no upgrade is needed or when the previous
238
+ * `usage_events` table is unreadable.
239
+ */
240
+ function handleVersionUpgrade(db) {
241
+ const storedVersion = getMeta(db, "version");
242
+ if (!storedVersion || storedVersion === String(DB_VERSION))
243
+ return [];
244
+ let usageBackup = [];
245
+ try {
246
+ usageBackup = db.prepare("SELECT * FROM usage_events").all();
247
+ }
248
+ catch {
249
+ /* table may not exist in older versions */
250
+ }
251
+ db.exec("DROP TABLE IF EXISTS utility_scores");
252
+ db.exec("DROP TABLE IF EXISTS usage_events");
253
+ db.exec("DROP TABLE IF EXISTS embeddings");
254
+ db.exec("DROP TABLE IF EXISTS entries_vec");
255
+ db.exec("DROP TABLE IF EXISTS entries_fts");
256
+ db.exec("DROP INDEX IF EXISTS idx_entries_dir");
257
+ db.exec("DROP INDEX IF EXISTS idx_entries_type");
258
+ db.exec("DROP TABLE IF EXISTS entries");
259
+ db.exec("DELETE FROM index_meta");
260
+ console.warn("[akm] Index rebuilt due to version upgrade. Run 'akm index' to repopulate.");
261
+ return usageBackup;
262
+ }
263
+ /**
264
+ * Re-insert backed-up `usage_events` rows into the freshly-created table.
265
+ *
266
+ * Wrapped in an outer try/catch because schema changes across versions may
267
+ * make the backup incompatible with the new table definition; in that case
268
+ * the backup is discarded silently rather than blocking startup.
269
+ */
270
+ function restoreUsageEventsBackup(db, backup) {
271
+ if (backup.length === 0)
272
+ return;
273
+ try {
274
+ db.transaction(() => {
275
+ const cols = Object.keys(backup[0]);
276
+ const placeholders = cols.map(() => "?").join(", ");
277
+ const insert = db.prepare(`INSERT INTO usage_events (${cols.join(", ")}) VALUES (${placeholders})`);
278
+ for (const row of backup) {
279
+ try {
280
+ insert.run(...cols.map((c) => row[c]));
241
281
  }
242
- })();
243
- }
244
- catch {
245
- /* schema changed too much — discard backup gracefully */
246
- }
247
- delete dbAny.__usageBackup;
282
+ catch {
283
+ /* skip rows that fail */
284
+ }
285
+ }
286
+ })();
287
+ }
288
+ catch {
289
+ /* schema changed too much — discard backup gracefully */
248
290
  }
249
291
  }
250
292
  // ── Meta helpers ────────────────────────────────────────────────────────────
@@ -264,23 +306,45 @@ export function setMeta(db, key, value) {
264
306
  * reflect the changes.
265
307
  */
266
308
  export function upsertEntry(db, entryKey, dirPath, filePath, stashDir, entry, searchText) {
267
- const stmt = db.prepare(`
268
- INSERT INTO entries (entry_key, dir_path, file_path, stash_dir, entry_json, search_text, entry_type)
269
- VALUES (?, ?, ?, ?, ?, ?, ?)
270
- ON CONFLICT(entry_key) DO UPDATE SET
271
- dir_path = excluded.dir_path,
272
- file_path = excluded.file_path,
273
- stash_dir = excluded.stash_dir,
274
- entry_json = excluded.entry_json,
275
- search_text = excluded.search_text,
276
- entry_type = excluded.entry_type
277
- `);
278
- stmt.run(entryKey, dirPath, filePath, stashDir, JSON.stringify(entry), searchText, entry.type);
279
- // Fetch the row id explicitly since last_insert_rowid() is unreliable for ON CONFLICT DO UPDATE
280
- const row = db.prepare("SELECT id FROM entries WHERE entry_key = ?").get(entryKey);
281
- if (!row)
309
+ // Hot path during indexing — cache the two prepared statements per
310
+ // database connection so we don't pay the SQL parse/compile cost on
311
+ // every call. The dirty-mark INSERT and the upsert-with-RETURNING
312
+ // share the same WeakMap so they live and die with the connection.
313
+ const stmts = getUpsertStmts(db);
314
+ const result = stmts.upsert.get(entryKey, dirPath, filePath, stashDir, JSON.stringify(entry), searchText, entry.type);
315
+ if (!result)
282
316
  throw new Error("upsertEntry: entry_key not found after upsert");
283
- return row.id;
317
+ // Mark this entry as FTS-dirty so `rebuildFts({ incremental: true })`
318
+ // only revisits entries that actually changed. INSERT OR IGNORE is
319
+ // idempotent across multiple upserts of the same row.
320
+ stmts.markDirty.run(result.id);
321
+ return result.id;
322
+ }
323
+ const upsertStmtsByDb = new WeakMap();
324
+ function getUpsertStmts(db) {
325
+ const existing = upsertStmtsByDb.get(db);
326
+ if (existing)
327
+ return existing;
328
+ const stmts = {
329
+ // RETURNING id handles ON CONFLICT DO UPDATE correctly — no second
330
+ // SELECT round-trip needed (last_insert_rowid() is unreliable for
331
+ // ON CONFLICT). Use `.get()` so a single row comes back.
332
+ upsert: db.prepare(`
333
+ INSERT INTO entries (entry_key, dir_path, file_path, stash_dir, entry_json, search_text, entry_type)
334
+ VALUES (?, ?, ?, ?, ?, ?, ?)
335
+ ON CONFLICT(entry_key) DO UPDATE SET
336
+ dir_path = excluded.dir_path,
337
+ file_path = excluded.file_path,
338
+ stash_dir = excluded.stash_dir,
339
+ entry_json = excluded.entry_json,
340
+ search_text = excluded.search_text,
341
+ entry_type = excluded.entry_type
342
+ RETURNING id
343
+ `),
344
+ markDirty: db.prepare("INSERT OR IGNORE INTO entries_fts_dirty (entry_id) VALUES (?)"),
345
+ };
346
+ upsertStmtsByDb.set(db, stmts);
347
+ return stmts;
284
348
  }
285
349
  export function deleteEntriesByDir(db, dirPath) {
286
350
  db.transaction(() => {
@@ -302,6 +366,25 @@ function deleteRelatedRows(db, ids) {
302
366
  return;
303
367
  const numericIds = ids.map((r) => r.id);
304
368
  const vecAvail = isVecAvailable(db);
369
+ // Drop matching FTS rows + dirty markers immediately so an incremental
370
+ // rebuild after a deletion doesn't try to re-index entries that no longer
371
+ // exist (and so a full scan after deletion sees a consistent FTS).
372
+ for (let i = 0; i < numericIds.length; i += SQLITE_CHUNK_SIZE) {
373
+ const chunk = numericIds.slice(i, i + SQLITE_CHUNK_SIZE);
374
+ const placeholders = chunk.map(() => "?").join(",");
375
+ try {
376
+ db.prepare(`DELETE FROM entries_fts WHERE entry_id IN (${placeholders})`).run(...chunk);
377
+ }
378
+ catch {
379
+ /* fts table may not exist on a brand-new db */
380
+ }
381
+ try {
382
+ db.prepare(`DELETE FROM entries_fts_dirty WHERE entry_id IN (${placeholders})`).run(...chunk);
383
+ }
384
+ catch {
385
+ /* dirty table is created lazily by upsertEntry */
386
+ }
387
+ }
305
388
  // Process in chunks to stay within SQLITE_MAX_VARIABLE_NUMBER
306
389
  for (let i = 0; i < numericIds.length; i += SQLITE_CHUNK_SIZE) {
307
390
  const chunk = numericIds.slice(i, i + SQLITE_CHUNK_SIZE);
@@ -343,19 +426,51 @@ function deleteRelatedRows(db, ids) {
343
426
  }
344
427
  }
345
428
  }
346
- export function rebuildFts(db) {
347
- // Wrap DELETE + INSERT in a single transaction so the FTS table is
348
- // never left empty between the two statements if a crash occurs.
349
- // Store the integer id directly (FTS5 stores all content as text
350
- // internally; the join in searchFts compares numerically without CAST).
351
- //
352
- // Insert into separate FTS5 columns by extracting per-field text from
353
- // the entry_json using buildSearchFields(). The entries.search_text column
354
- // is kept as a concatenated fallback for embedding generation.
429
+ /**
430
+ * Rebuild the FTS5 search index.
431
+ *
432
+ * `incremental` (default `false`): when true, only rebuild rows that
433
+ * `upsertEntry` marked dirty since the last `rebuildFts` call. The full path
434
+ * (default) wipes `entries_fts` and re-inserts every row from `entries` —
435
+ * appropriate for `akm index --full` and version-upgrade rebuilds.
436
+ *
437
+ * Both paths are wrapped in a single transaction so the FTS table is never
438
+ * left in a half-rebuilt state.
439
+ *
440
+ * Skipped corrupt-JSON rows are aggregated into one warning instead of
441
+ * spamming stderr per-entry.
442
+ */
443
+ export function rebuildFts(db, options) {
444
+ const incremental = options?.incremental === true;
355
445
  db.transaction(() => {
356
- db.exec("DELETE FROM entries_fts");
357
- const rows = db.prepare("SELECT id, entry_json FROM entries").all();
446
+ let rows;
447
+ if (incremental) {
448
+ // Read the dirty queue and join against entries to get the JSON.
449
+ // Then drop the matching rows from entries_fts so the INSERT below
450
+ // doesn't double-up. The dirty list is drained at the end.
451
+ rows = db
452
+ .prepare(`SELECT e.id AS id, e.entry_json AS entry_json
453
+ FROM entries_fts_dirty d
454
+ JOIN entries e ON e.id = d.entry_id`)
455
+ .all();
456
+ if (rows.length === 0)
457
+ return;
458
+ const ids = rows.map((r) => r.id);
459
+ // Delete only the dirty FTS rows — chunk to stay under
460
+ // SQLITE_MAX_VARIABLE_NUMBER on large dirty queues.
461
+ for (let i = 0; i < ids.length; i += SQLITE_CHUNK_SIZE) {
462
+ const chunk = ids.slice(i, i + SQLITE_CHUNK_SIZE);
463
+ const placeholders = chunk.map(() => "?").join(",");
464
+ db.prepare(`DELETE FROM entries_fts WHERE entry_id IN (${placeholders})`).run(...chunk);
465
+ }
466
+ }
467
+ else {
468
+ // Full path: wipe and re-read every row.
469
+ db.exec("DELETE FROM entries_fts");
470
+ rows = db.prepare("SELECT id, entry_json FROM entries").all();
471
+ }
358
472
  const insertStmt = db.prepare("INSERT INTO entries_fts (entry_id, name, description, tags, hints, content) VALUES (?, ?, ?, ?, ?, ?)");
473
+ let skipped = 0;
359
474
  for (const row of rows) {
360
475
  let entry;
361
476
  let fields;
@@ -364,11 +479,25 @@ export function rebuildFts(db) {
364
479
  fields = buildSearchFields(entry);
365
480
  }
366
481
  catch {
367
- warn(`[db] rebuildFts: skipping entry id=${row.id} — invalid entry_json`);
482
+ skipped++;
368
483
  continue;
369
484
  }
370
485
  insertStmt.run(row.id, fields.name, fields.description, fields.tags, fields.hints, fields.content);
371
486
  }
487
+ if (skipped > 0) {
488
+ warn(`[db] rebuildFts: skipped ${skipped} entr${skipped === 1 ? "y" : "ies"} with invalid entry_json`);
489
+ }
490
+ // Always drain the dirty queue — if it exists. A full rebuild also
491
+ // clears it because the full path covers everything the dirty list
492
+ // tracks.
493
+ if (incremental) {
494
+ db.exec("DELETE FROM entries_fts_dirty");
495
+ }
496
+ else {
497
+ // Full path: drain the dirty queue too. The table is created by
498
+ // ensureSchema(), so it always exists at this point.
499
+ db.exec("DELETE FROM entries_fts_dirty");
500
+ }
372
501
  })();
373
502
  }
374
503
  // ── Vector operations ───────────────────────────────────────────────────────
@@ -6,8 +6,8 @@
6
6
  */
7
7
  import fs from "node:fs";
8
8
  import path from "node:path";
9
- import { toPosix } from "./common";
10
- import { parseFrontmatter } from "./frontmatter";
9
+ import { toPosix } from "../core/common";
10
+ import { parseFrontmatter } from "../core/frontmatter";
11
11
  /**
12
12
  * Build a FileContext from a stash root and an absolute file path.
13
13
  *
@@ -81,7 +81,7 @@ async function ensureBuiltinsRegistered() {
81
81
  if (!builtinsPromise) {
82
82
  builtinsPromise = (async () => {
83
83
  const { registerBuiltinMatchers } = await import("./matchers.js");
84
- const { registerBuiltinRenderers } = await import("./renderers.js");
84
+ const { registerBuiltinRenderers } = await import("../output/renderers.js");
85
85
  registerBuiltinMatchers();
86
86
  registerBuiltinRenderers();
87
87
  })();