akm-cli 0.5.0 → 0.6.0-rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/CHANGELOG.md +32 -5
  2. package/dist/asset-registry.js +29 -5
  3. package/dist/asset-spec.js +12 -5
  4. package/dist/cli-hints.js +300 -0
  5. package/dist/cli.js +218 -1357
  6. package/dist/common.js +147 -50
  7. package/dist/config.js +224 -13
  8. package/dist/create-provider-registry.js +1 -1
  9. package/dist/curate.js +258 -0
  10. package/dist/{local-search.js → db-search.js} +30 -19
  11. package/dist/db.js +168 -62
  12. package/dist/embedder.js +49 -273
  13. package/dist/embedders/cache.js +47 -0
  14. package/dist/embedders/local.js +152 -0
  15. package/dist/embedders/remote.js +121 -0
  16. package/dist/embedders/types.js +39 -0
  17. package/dist/errors.js +14 -3
  18. package/dist/frontmatter.js +61 -7
  19. package/dist/indexer.js +38 -7
  20. package/dist/info.js +2 -2
  21. package/dist/install-audit.js +16 -1
  22. package/dist/{installed-kits.js → installed-stashes.js} +48 -22
  23. package/dist/llm-client.js +92 -0
  24. package/dist/llm.js +14 -126
  25. package/dist/lockfile.js +28 -1
  26. package/dist/matchers.js +1 -1
  27. package/dist/metadata-enhance.js +53 -0
  28. package/dist/migration-help.js +75 -44
  29. package/dist/output-context.js +77 -0
  30. package/dist/output-shapes.js +198 -0
  31. package/dist/output-text.js +520 -0
  32. package/dist/paths.js +4 -4
  33. package/dist/providers/index.js +11 -0
  34. package/dist/providers/skills-sh.js +1 -1
  35. package/dist/providers/static-index.js +47 -45
  36. package/dist/registry-build-index.js +36 -29
  37. package/dist/registry-factory.js +2 -2
  38. package/dist/registry-resolve.js +8 -4
  39. package/dist/registry-search.js +62 -5
  40. package/dist/remember.js +172 -0
  41. package/dist/renderers.js +52 -0
  42. package/dist/search-source.js +73 -42
  43. package/dist/setup-steps.js +45 -0
  44. package/dist/setup.js +149 -76
  45. package/dist/stash-add.js +94 -38
  46. package/dist/stash-clone.js +4 -4
  47. package/dist/stash-provider-factory.js +2 -2
  48. package/dist/stash-provider.js +3 -1
  49. package/dist/stash-providers/filesystem.js +31 -1
  50. package/dist/stash-providers/git.js +209 -8
  51. package/dist/stash-providers/index.js +1 -0
  52. package/dist/stash-providers/npm.js +159 -0
  53. package/dist/stash-providers/provider-utils.js +162 -0
  54. package/dist/stash-providers/sync-from-ref.js +45 -0
  55. package/dist/stash-providers/tar-utils.js +151 -0
  56. package/dist/stash-providers/website.js +80 -4
  57. package/dist/stash-resolve.js +5 -5
  58. package/dist/stash-search.js +4 -4
  59. package/dist/stash-show.js +3 -3
  60. package/dist/wiki.js +6 -6
  61. package/dist/workflow-authoring.js +12 -4
  62. package/dist/workflow-markdown.js +9 -0
  63. package/dist/workflow-runs.js +12 -2
  64. package/docs/README.md +30 -0
  65. package/docs/migration/release-notes/0.0.13.md +4 -0
  66. package/docs/migration/release-notes/0.1.0.md +6 -0
  67. package/docs/migration/release-notes/0.2.0.md +6 -0
  68. package/docs/migration/release-notes/0.3.0.md +5 -0
  69. package/docs/migration/release-notes/0.5.0.md +6 -0
  70. package/docs/migration/release-notes/0.6.0.md +29 -0
  71. package/docs/migration/release-notes/README.md +21 -0
  72. package/package.json +3 -2
  73. package/dist/registry-install.js +0 -532
  74. package/dist/{kit-include.js → stash-include.js} +0 -0
package/dist/db.js CHANGED
@@ -2,7 +2,7 @@ import { Database } from "bun:sqlite";
2
2
  import fs from "node:fs";
3
3
  import { createRequire } from "node:module";
4
4
  import path from "node:path";
5
- import { cosineSimilarity } from "./embedder";
5
+ import { cosineSimilarity } from "./embedders/types";
6
6
  import { getDbPath } from "./paths";
7
7
  import { buildSearchFields } from "./search-fields";
8
8
  import { ensureUsageEventsSchema } from "./usage-events";
@@ -73,7 +73,6 @@ export function warnIfVecMissing(db, { once } = { once: false }) {
73
73
  /* embeddings table may not exist yet during init */
74
74
  }
75
75
  }
76
- // ── Schema ──────────────────────────────────────────────────────────────────
77
76
  function ensureSchema(db, embeddingDim) {
78
77
  // Create meta table first so we can check version
79
78
  db.exec(`
@@ -84,30 +83,11 @@ function ensureSchema(db, embeddingDim) {
84
83
  `);
85
84
  // Check stored version — if it differs from DB_VERSION, drop and recreate all tables.
86
85
  // Usage events are preserved across version upgrades so that utility score
87
- // history is not silently lost.
88
- const storedVersion = getMeta(db, "version");
89
- if (storedVersion && storedVersion !== String(DB_VERSION)) {
90
- // Back up usage_events before dropping tables
91
- let usageBackup = [];
92
- try {
93
- usageBackup = db.prepare("SELECT * FROM usage_events").all();
94
- }
95
- catch {
96
- /* table may not exist in older versions */
97
- }
98
- db.exec("DROP TABLE IF EXISTS utility_scores");
99
- db.exec("DROP TABLE IF EXISTS usage_events");
100
- db.exec("DROP TABLE IF EXISTS embeddings");
101
- db.exec("DROP TABLE IF EXISTS entries_vec");
102
- db.exec("DROP TABLE IF EXISTS entries_fts");
103
- db.exec("DROP INDEX IF EXISTS idx_entries_dir");
104
- db.exec("DROP INDEX IF EXISTS idx_entries_type");
105
- db.exec("DROP TABLE IF EXISTS entries");
106
- db.exec("DELETE FROM index_meta");
107
- // Store backup for restoration after ensureUsageEventsSchema runs
108
- db.__usageBackup = usageBackup;
109
- console.warn("[akm] Index rebuilt due to version upgrade. Run 'akm index' to repopulate.");
110
- }
86
+ // history is not silently lost. The backup is captured here and threaded
87
+ // explicitly to `restoreUsageEventsBackup` below — the previous version
88
+ // attached `__usageBackup` to the Database instance via a typeless property
89
+ // injection, which was a source of fragile coupling.
90
+ const usageBackup = handleVersionUpgrade(db);
111
91
  db.exec(`
112
92
  CREATE TABLE IF NOT EXISTS entries (
113
93
  id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -220,31 +200,67 @@ function ensureSchema(db, embeddingDim) {
220
200
  }
221
201
  // Usage telemetry table
222
202
  ensureUsageEventsSchema(db);
223
- // Restore usage_events that were backed up during a version upgrade.
224
- // Wrapped in outer try/catch because schema changes across versions may
225
- // make the backup incompatible with the new table definition.
226
- const dbAny = db;
227
- const backup = dbAny.__usageBackup;
228
- if (backup && backup.length > 0) {
229
- try {
230
- db.transaction(() => {
231
- const cols = Object.keys(backup[0]);
232
- const placeholders = cols.map(() => "?").join(", ");
233
- const insert = db.prepare(`INSERT INTO usage_events (${cols.join(", ")}) VALUES (${placeholders})`);
234
- for (const row of backup) {
235
- try {
236
- insert.run(...cols.map((c) => row[c]));
237
- }
238
- catch {
239
- /* skip rows that fail */
240
- }
203
+ // Restore usage_events backed up by the version-upgrade path above.
204
+ restoreUsageEventsBackup(db, usageBackup);
205
+ }
206
+ /**
207
+ * Detect a stored DB version that differs from {@link DB_VERSION}, drop the
208
+ * old schema, and return a backup of the previous `usage_events` rows so the
209
+ * rest of `ensureSchema()` can restore them once the new table exists.
210
+ *
211
+ * Returns an empty array when no upgrade is needed or when the previous
212
+ * `usage_events` table is unreadable.
213
+ */
214
+ function handleVersionUpgrade(db) {
215
+ const storedVersion = getMeta(db, "version");
216
+ if (!storedVersion || storedVersion === String(DB_VERSION))
217
+ return [];
218
+ let usageBackup = [];
219
+ try {
220
+ usageBackup = db.prepare("SELECT * FROM usage_events").all();
221
+ }
222
+ catch {
223
+ /* table may not exist in older versions */
224
+ }
225
+ db.exec("DROP TABLE IF EXISTS utility_scores");
226
+ db.exec("DROP TABLE IF EXISTS usage_events");
227
+ db.exec("DROP TABLE IF EXISTS embeddings");
228
+ db.exec("DROP TABLE IF EXISTS entries_vec");
229
+ db.exec("DROP TABLE IF EXISTS entries_fts");
230
+ db.exec("DROP INDEX IF EXISTS idx_entries_dir");
231
+ db.exec("DROP INDEX IF EXISTS idx_entries_type");
232
+ db.exec("DROP TABLE IF EXISTS entries");
233
+ db.exec("DELETE FROM index_meta");
234
+ console.warn("[akm] Index rebuilt due to version upgrade. Run 'akm index' to repopulate.");
235
+ return usageBackup;
236
+ }
237
+ /**
238
+ * Re-insert backed-up `usage_events` rows into the freshly-created table.
239
+ *
240
+ * Wrapped in an outer try/catch because schema changes across versions may
241
+ * make the backup incompatible with the new table definition; in that case
242
+ * the backup is discarded silently rather than blocking startup.
243
+ */
244
+ function restoreUsageEventsBackup(db, backup) {
245
+ if (backup.length === 0)
246
+ return;
247
+ try {
248
+ db.transaction(() => {
249
+ const cols = Object.keys(backup[0]);
250
+ const placeholders = cols.map(() => "?").join(", ");
251
+ const insert = db.prepare(`INSERT INTO usage_events (${cols.join(", ")}) VALUES (${placeholders})`);
252
+ for (const row of backup) {
253
+ try {
254
+ insert.run(...cols.map((c) => row[c]));
241
255
  }
242
- })();
243
- }
244
- catch {
245
- /* schema changed too much — discard backup gracefully */
246
- }
247
- delete dbAny.__usageBackup;
256
+ catch {
257
+ /* skip rows that fail */
258
+ }
259
+ }
260
+ })();
261
+ }
262
+ catch {
263
+ /* schema changed too much — discard backup gracefully */
248
264
  }
249
265
  }
250
266
  // ── Meta helpers ────────────────────────────────────────────────────────────
@@ -280,8 +296,30 @@ export function upsertEntry(db, entryKey, dirPath, filePath, stashDir, entry, se
280
296
  const row = db.prepare("SELECT id FROM entries WHERE entry_key = ?").get(entryKey);
281
297
  if (!row)
282
298
  throw new Error("upsertEntry: entry_key not found after upsert");
299
+ // Mark this entry as FTS-dirty so an incremental rebuild only revisits the
300
+ // entries that actually changed. Without this, every `akm index` run had to
301
+ // re-scan and re-insert every FTS row, even if only one entry was touched.
302
+ markFtsDirty(db, row.id);
283
303
  return row.id;
284
304
  }
305
+ /**
306
+ * Mark an entry as needing FTS re-indexing on the next `rebuildFts` call.
307
+ *
308
+ * The list lives in a small `entries_fts_dirty` table — a per-entry-id queue
309
+ * of work items. `rebuildFts({ incremental: true })` drains this list rather
310
+ * than scanning the entire `entries` table.
311
+ */
312
+ function markFtsDirty(db, entryId) {
313
+ ensureFtsDirtyTable(db);
314
+ db.prepare("INSERT OR IGNORE INTO entries_fts_dirty (entry_id) VALUES (?)").run(entryId);
315
+ }
316
+ function ensureFtsDirtyTable(db) {
317
+ db.exec(`
318
+ CREATE TABLE IF NOT EXISTS entries_fts_dirty (
319
+ entry_id INTEGER PRIMARY KEY
320
+ );
321
+ `);
322
+ }
285
323
  export function deleteEntriesByDir(db, dirPath) {
286
324
  db.transaction(() => {
287
325
  const ids = db.prepare("SELECT id FROM entries WHERE dir_path = ?").all(dirPath);
@@ -302,6 +340,25 @@ function deleteRelatedRows(db, ids) {
302
340
  return;
303
341
  const numericIds = ids.map((r) => r.id);
304
342
  const vecAvail = isVecAvailable(db);
343
+ // Drop matching FTS rows + dirty markers immediately so an incremental
344
+ // rebuild after a deletion doesn't try to re-index entries that no longer
345
+ // exist (and so a full scan after deletion sees a consistent FTS).
346
+ for (let i = 0; i < numericIds.length; i += SQLITE_CHUNK_SIZE) {
347
+ const chunk = numericIds.slice(i, i + SQLITE_CHUNK_SIZE);
348
+ const placeholders = chunk.map(() => "?").join(",");
349
+ try {
350
+ db.prepare(`DELETE FROM entries_fts WHERE entry_id IN (${placeholders})`).run(...chunk);
351
+ }
352
+ catch {
353
+ /* fts table may not exist on a brand-new db */
354
+ }
355
+ try {
356
+ db.prepare(`DELETE FROM entries_fts_dirty WHERE entry_id IN (${placeholders})`).run(...chunk);
357
+ }
358
+ catch {
359
+ /* dirty table is created lazily by upsertEntry */
360
+ }
361
+ }
305
362
  // Process in chunks to stay within SQLITE_MAX_VARIABLE_NUMBER
306
363
  for (let i = 0; i < numericIds.length; i += SQLITE_CHUNK_SIZE) {
307
364
  const chunk = numericIds.slice(i, i + SQLITE_CHUNK_SIZE);
@@ -343,19 +400,52 @@ function deleteRelatedRows(db, ids) {
343
400
  }
344
401
  }
345
402
  }
346
- export function rebuildFts(db) {
347
- // Wrap DELETE + INSERT in a single transaction so the FTS table is
348
- // never left empty between the two statements if a crash occurs.
349
- // Store the integer id directly (FTS5 stores all content as text
350
- // internally; the join in searchFts compares numerically without CAST).
351
- //
352
- // Insert into separate FTS5 columns by extracting per-field text from
353
- // the entry_json using buildSearchFields(). The entries.search_text column
354
- // is kept as a concatenated fallback for embedding generation.
403
+ /**
404
+ * Rebuild the FTS5 search index.
405
+ *
406
+ * `incremental` (default `false`): when true, only rebuild rows that
407
+ * `upsertEntry` marked dirty since the last `rebuildFts` call. The full path
408
+ * (default) wipes `entries_fts` and re-inserts every row from `entries` —
409
+ * appropriate for `akm index --full` and version-upgrade rebuilds.
410
+ *
411
+ * Both paths are wrapped in a single transaction so the FTS table is never
412
+ * left in a half-rebuilt state.
413
+ *
414
+ * Skipped corrupt-JSON rows are aggregated into one warning instead of
415
+ * spamming stderr per-entry.
416
+ */
417
+ export function rebuildFts(db, options) {
418
+ const incremental = options?.incremental === true;
355
419
  db.transaction(() => {
356
- db.exec("DELETE FROM entries_fts");
357
- const rows = db.prepare("SELECT id, entry_json FROM entries").all();
420
+ let rows;
421
+ if (incremental) {
422
+ ensureFtsDirtyTable(db);
423
+ // Read the dirty queue and join against entries to get the JSON.
424
+ // Then drop the matching rows from entries_fts so the INSERT below
425
+ // doesn't double-up. The dirty list is drained at the end.
426
+ rows = db
427
+ .prepare(`SELECT e.id AS id, e.entry_json AS entry_json
428
+ FROM entries_fts_dirty d
429
+ JOIN entries e ON e.id = d.entry_id`)
430
+ .all();
431
+ if (rows.length === 0)
432
+ return;
433
+ const ids = rows.map((r) => r.id);
434
+ // Delete only the dirty FTS rows — chunk to stay under
435
+ // SQLITE_MAX_VARIABLE_NUMBER on large dirty queues.
436
+ for (let i = 0; i < ids.length; i += SQLITE_CHUNK_SIZE) {
437
+ const chunk = ids.slice(i, i + SQLITE_CHUNK_SIZE);
438
+ const placeholders = chunk.map(() => "?").join(",");
439
+ db.prepare(`DELETE FROM entries_fts WHERE entry_id IN (${placeholders})`).run(...chunk);
440
+ }
441
+ }
442
+ else {
443
+ // Full path: wipe and re-read every row.
444
+ db.exec("DELETE FROM entries_fts");
445
+ rows = db.prepare("SELECT id, entry_json FROM entries").all();
446
+ }
358
447
  const insertStmt = db.prepare("INSERT INTO entries_fts (entry_id, name, description, tags, hints, content) VALUES (?, ?, ?, ?, ?, ?)");
448
+ let skipped = 0;
359
449
  for (const row of rows) {
360
450
  let entry;
361
451
  let fields;
@@ -364,11 +454,27 @@ export function rebuildFts(db) {
364
454
  fields = buildSearchFields(entry);
365
455
  }
366
456
  catch {
367
- warn(`[db] rebuildFts: skipping entry id=${row.id} — invalid entry_json`);
457
+ skipped++;
368
458
  continue;
369
459
  }
370
460
  insertStmt.run(row.id, fields.name, fields.description, fields.tags, fields.hints, fields.content);
371
461
  }
462
+ if (skipped > 0) {
463
+ warn(`[db] rebuildFts: skipped ${skipped} entr${skipped === 1 ? "y" : "ies"} with invalid entry_json`);
464
+ }
465
+ // Always drain the dirty queue — if it exists. A full rebuild also
466
+ // clears it because the full path covers everything the dirty list
467
+ // tracks.
468
+ if (incremental) {
469
+ db.exec("DELETE FROM entries_fts_dirty");
470
+ }
471
+ else {
472
+ // Full path: clear the dirty queue as well, since the full rebuild
473
+ // covers everything it tracks. Create the table first if it is missing
474
+ // so the DELETE doesn't error on a fresh schema with no upserts yet.
475
+ ensureFtsDirtyTable(db);
476
+ db.exec("DELETE FROM entries_fts_dirty");
477
+ }
372
478
  })();
373
479
  }
374
480
  // ── Vector operations ───────────────────────────────────────────────────────