moflo 4.9.0-rc.2 → 4.9.0-rc.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -602,13 +602,18 @@ function getIntegrationStatus() {
602
602
  return { mcpServers, hasDatabase, hasApi };
603
603
  }
604
604
 
605
- // Upgrade notice (#636) — written by the session-start launcher; null when missing, expired, or malformed.
605
+ // Upgrade notice (#636, #738) — written by the session-start launcher; null
606
+ // when missing, expired, or malformed. The launcher writes status='in-progress'
607
+ // while upgrade work is running, then deletes the file when done — so a
608
+ // 'complete' status only ever shows up here for legacy notice files left by
609
+ // pre-#738 launchers.
606
610
  function getUpgradeNotice() {
607
611
  const data = readJSON(path.join(CWD, '.moflo', 'upgrade-notice.json'));
608
612
  if (!data || typeof data !== 'object') return null;
609
613
  const expiresAt = data.expiresAt ? new Date(data.expiresAt).getTime() : 0;
610
614
  if (!expiresAt || Date.now() > expiresAt) return null;
611
615
  return {
616
+ status: data.status === 'in-progress' ? 'in-progress' : 'complete',
612
617
  kind: data.kind === 'repair' ? 'repair' : 'upgrade',
613
618
  from: typeof data.from === 'string' ? data.from : '',
614
619
  to: typeof data.to === 'string' ? data.to : '',
@@ -618,16 +623,19 @@ function getUpgradeNotice() {
618
623
 
619
624
  function formatUpgradeNoticeSegment(notice) {
620
625
  if (!notice) return '';
621
- const changesPart = notice.changes > 0
622
- ? ` ${c.dim}(${notice.changes} ${notice.changes === 1 ? 'change' : 'changes'})${c.reset}`
623
- : '';
626
+ let suffix = '';
627
+ if (notice.status === 'in-progress') {
628
+ suffix = ` ${c.dim}(updating…)${c.reset}`;
629
+ } else if (notice.changes > 0) {
630
+ suffix = ` ${c.dim}(${notice.changes} ${notice.changes === 1 ? 'change' : 'changes'})${c.reset}`;
631
+ }
624
632
  if (notice.kind === 'repair') {
625
- return `${c.brightYellow}📦 install repaired${c.reset}${changesPart}`;
633
+ return `${c.brightYellow}📦 install repaired${c.reset}${suffix}`;
626
634
  }
627
635
  const versions = notice.from && notice.to
628
636
  ? `${notice.from} → ${notice.to}`
629
637
  : (notice.to || 'upgraded');
630
- return `${c.brightYellow}📦 ${versions}${c.reset}${changesPart}`;
638
+ return `${c.brightYellow}📦 ${versions}${c.reset}${suffix}`;
631
639
  }
632
640
 
633
641
  // Session stats (pure file reads)
@@ -671,11 +679,14 @@ function generateStatusline() {
671
679
 
672
680
  const parts = [];
673
681
 
682
+ // Upgrade notice \u2014 leading position so it reads as a transient banner
683
+ // rather than a permanent column (#738). Only renders during the upgrade
684
+ // window; the launcher deletes the notice file after work completes.
685
+ pushUpgradeNoticeSegment(parts);
686
+
674
687
  // Branding (always shown when enabled)
675
688
  parts.push(`${c.bold}${c.brightPurple}\u258A ${SL_CONFIG.branding}${c.reset}`);
676
689
 
677
- pushUpgradeNoticeSegment(parts);
678
-
679
690
  // User + swarm indicator
680
691
  const dot = swarm.coordinationActive ? `${c.brightGreen}\u25CF${c.reset}` : `${c.brightCyan}\u25CF${c.reset}`;
681
692
  parts.push(`${dot} ${c.brightCyan}${git.name}${c.reset}`);
@@ -758,8 +769,9 @@ function generateDashboard() {
758
769
  if (SL_CONFIG.show_session && session.duration) {
759
770
  header += ` ${c.dim}\u2502${c.reset} ${c.cyan}\u23F1 ${session.duration}${c.reset}`;
760
771
  }
761
- lines.push(header);
772
+ // Upgrade notice \u2014 leading line so it reads as a transient banner (#738).
762
773
  pushUpgradeNoticeSegment(lines);
774
+ lines.push(header);
763
775
 
764
776
  // Separator
765
777
  lines.push(`${c.dim}${'─'.repeat(53)}${c.reset}`);
@@ -834,8 +846,9 @@ function generateCompactDashboard() {
834
846
  if (SL_CONFIG.show_session && session.duration) {
835
847
  header += ` ${c.dim}\u2502${c.reset} ${c.cyan}\u23F1 ${session.duration}${c.reset}`;
836
848
  }
837
- lines.push(header);
849
+ // Upgrade notice \u2014 leading line so it reads as a transient banner (#738).
838
850
  pushUpgradeNoticeSegment(lines);
851
+ lines.push(header);
839
852
 
840
853
  // Combined swarm + agentdb + mcp line
841
854
  const segments = [];
@@ -9,9 +9,9 @@
9
9
 
10
10
  import { spawn } from 'child_process';
11
11
  import { existsSync, readFileSync, writeFileSync, copyFileSync, unlinkSync, readdirSync, mkdirSync, statSync } from 'fs';
12
- import { resolve, dirname } from 'path';
12
+ import { resolve, dirname, join } from 'path';
13
13
  import { fileURLToPath } from 'url';
14
- import { migrateClaudeFlowToMoflo, migrateMemoryDbToMoflo } from './lib/moflo-paths.mjs';
14
+ import { migrateClaudeFlowToMoflo, migrateMemoryDbToMoflo, mofloDir } from './lib/moflo-paths.mjs';
15
15
 
16
16
  const __dirname = dirname(fileURLToPath(import.meta.url));
17
17
 
@@ -56,6 +56,41 @@ const plural = (n, word) => `${n} ${word}${n === 1 ? '' : 's'}`;
56
56
  // can persist `.moflo/upgrade-notice.json` for the statusline (#636).
57
57
  let upgradeNoticeContext = null;
58
58
 
59
+ // Deferred so we commit it AFTER every upgrade-work block (see 3g). The stamp
60
+ // is the "launcher fully completed" signal — writing it mid-flight lets an
61
+ // aborted launcher strand consumers on a half-applied upgrade (#730).
62
+ let pendingVersionStampWrite = null;
63
+
64
+ // 5-min TTL is a safety net for zombie launchers (statusline ignores past-TTL
65
+ // files). The launcher deletes the notice when upgrade work finishes — no
66
+ // "complete" state lingers, see #738.
67
+ const UPGRADE_NOTICE_INPROGRESS_TTL_MS = 5 * 60 * 1000;
68
+ const UPGRADE_NOTICE_PATH = () => join(mofloDir(projectRoot), 'upgrade-notice.json');
69
+
70
+ function writeInProgressUpgradeNotice() {
71
+ if (!upgradeNoticeContext) return;
72
+ try {
73
+ mkdirSync(mofloDir(projectRoot), { recursive: true });
74
+ const now = Date.now();
75
+ const notice = {
76
+ status: 'in-progress',
77
+ kind: upgradeNoticeContext.kind,
78
+ from: upgradeNoticeContext.from,
79
+ to: upgradeNoticeContext.to,
80
+ at: new Date(now).toISOString(),
81
+ expiresAt: new Date(now + UPGRADE_NOTICE_INPROGRESS_TTL_MS).toISOString(),
82
+ changes: 0,
83
+ };
84
+ writeFileSync(UPGRADE_NOTICE_PATH(), JSON.stringify(notice, null, 2));
85
+ } catch { /* non-fatal — statusline just won't show the segment */ }
86
+ }
87
+
88
+ function clearUpgradeNotice() {
89
+ try {
90
+ unlinkSync(UPGRADE_NOTICE_PATH());
91
+ } catch { /* non-fatal — already gone or never existed */ }
92
+ }
93
+
59
94
  // ── 0. LEGACY state migration (#699) ─────────────────────────────────────────
60
95
  // Consumers upgrading from older moflo builds (inherited from upstream Ruflo)
61
96
  // get a one-time auto-migration of LEGACY `.claude-flow/` → `.moflo/` so claim
@@ -213,6 +248,11 @@ try {
213
248
  };
214
249
  emitMutation('repaired stale install', 'manifest drift detected');
215
250
  }
251
+ // Surface a transient "(updating…)" badge in the statusline before the
252
+ // long-running upgrade work (manifest sync, daemon recycle, embeddings
253
+ // migration). See #738 — the launcher clears this file after work
254
+ // completes, so the badge naturally disappears once the user is unblocked.
255
+ writeInProgressUpgradeNotice();
216
256
  const binDir = resolve(projectRoot, 'node_modules/moflo/bin');
217
257
 
218
258
  // ── Manifest-based auto-update ──────────────────────────────────────
@@ -358,12 +398,13 @@ try {
358
398
  }
359
399
  } catch { /* non-fatal — daemon recycle is best-effort */ }
360
400
 
361
- // Write updated manifest + version stamp
401
+ // Manifest reflects synced files immediately; version stamp is deferred
402
+ // to 3g so an aborted launcher re-runs upgrade detection (#730).
362
403
  try {
363
404
  const cfDir = resolve(projectRoot, '.moflo');
364
405
  if (!existsSync(cfDir)) mkdirSync(cfDir, { recursive: true });
365
406
  writeFileSync(manifestPath, JSON.stringify(currentManifest, null, 2));
366
- writeFileSync(versionStampPath, installedVersion);
407
+ pendingVersionStampWrite = { path: versionStampPath, version: installedVersion };
367
408
  } catch {}
368
409
  }
369
410
  }
@@ -665,37 +706,83 @@ try {
665
706
  } catch { /* writing the failure itself must not throw */ }
666
707
  }
667
708
 
668
- // ── 3f. Persist upgrade notice for statusline (#636) ────────────────────────
669
- // When this session bumped the version stamp or repaired manifest drift, write
670
- // a transient `.moflo/upgrade-notice.json` so the statusline can show a
671
- // leading user-visible segment (`📦 vX vY (N changes)`). The file expires
672
- // via TTL statusline silently ignores it after `expiresAt`. The next
673
- // upgrade overwrites the file, so no manual cleanup is needed.
674
- //
675
- // Stdout emits go to Claude's `additionalContext` (collapsed by default in
676
- // the system reminder); this notice surfaces the same information directly
677
- // in the user's UI. Together they close the "Claude appears hung and CPU
678
- // spikes" gap from #629 — the user always knows when an upgrade procedure
679
- // just ran.
680
- const UPGRADE_NOTICE_TTL_MS = 60 * 60 * 1000; // 1 hour
681
- if (upgradeNoticeContext && mutationCount > 0) {
709
+ // ── 3e-728. Hard-delete leftover soft-delete tombstones (#728) ─────────────
710
+ // Soft-delete was retired in story #728 `status='deleted'` rows are now
711
+ // unrecoverable bloat from prior moflo versions. Purge any stragglers and
712
+ // VACUUM. Idempotent: returns `purged: 0` once the DB is clean. Runs BEFORE
713
+ // background MCP/daemon spawn (per #727's clobber-hazard analysis) so the
714
+ // foreground sql.js write isn't overwritten by a concurrent flush.
715
+ try {
716
+ const purgePaths = [
717
+ resolve(projectRoot, 'node_modules/moflo/dist/src/cli/services/soft-delete-purge.js'),
718
+ resolve(projectRoot, 'dist/src/cli/services/soft-delete-purge.js'),
719
+ ];
720
+ const purgePath = purgePaths.find((p) => existsSync(p));
721
+ if (purgePath) {
722
+ const { purgeSoftDeletedEntries } = await import(`file://${purgePath.replace(/\\/g, '/')}`);
723
+ const result = await purgeSoftDeletedEntries();
724
+ if (result?.purged > 0) {
725
+ emitMutation(
726
+ 'reclaimed soft-deleted memory entries',
727
+ `${plural(result.purged, 'tombstone')} purged + VACUUM`,
728
+ );
729
+ }
730
+ }
731
+ } catch (err) {
732
+ // Non-fatal — leftover tombstones just sit until the next session retries.
682
733
  try {
683
- const cfDir = resolve(projectRoot, '.moflo');
684
- if (!existsSync(cfDir)) mkdirSync(cfDir, { recursive: true });
685
- const now = Date.now();
686
- const notice = {
687
- kind: upgradeNoticeContext.kind,
688
- from: upgradeNoticeContext.from,
689
- to: upgradeNoticeContext.to,
690
- at: new Date(now).toISOString(),
691
- expiresAt: new Date(now + UPGRADE_NOTICE_TTL_MS).toISOString(),
692
- changes: mutationCount,
693
- };
694
- writeFileSync(
695
- resolve(cfDir, 'upgrade-notice.json'),
696
- JSON.stringify(notice, null, 2),
697
- );
698
- } catch { /* non-fatal — statusline just won't show the segment */ }
734
+ const msg = err && err.message ? err.message : String(err);
735
+ process.stderr.write(`soft-delete purge skipped: ${msg}\n`);
736
+ } catch { /* writing the failure itself must not throw */ }
737
+ }
738
+
739
+ // ── 3e-729. Purge ephemeral-namespace rows (#729) ───────────────────────────
740
+ // Four namespaces (hive-mind, tasklist, epic-state, test-bridge-fix) store
741
+ // internal moflo run-tracking — never user knowledge — and were polluting the
742
+ // embeddings index. Going forward, writes to those namespaces skip embedding
743
+ // generation (see EPHEMERAL_NAMESPACES in memory/bridge-embedder.ts); existing
744
+ // rows from prior versions get hard-deleted here. Idempotent — returns
745
+ // `purged: 0` once the DB is clean. Runs BEFORE background MCP/daemon spawn
746
+ // so the foreground sql.js write isn't overwritten by a concurrent flush.
747
+ try {
748
+ const purgePaths = [
749
+ resolve(projectRoot, 'node_modules/moflo/dist/src/cli/services/ephemeral-namespace-purge.js'),
750
+ resolve(projectRoot, 'dist/src/cli/services/ephemeral-namespace-purge.js'),
751
+ ];
752
+ const purgePath = purgePaths.find((p) => existsSync(p));
753
+ if (purgePath) {
754
+ const { purgeEphemeralNamespaces } = await import(`file://${purgePath.replace(/\\/g, '/')}`);
755
+ const result = await purgeEphemeralNamespaces();
756
+ if (result?.purged > 0) {
757
+ emitMutation(
758
+ 'pruned ephemeral namespace rows',
759
+ `${plural(result.purged, 'row')} from internal run-tracking`,
760
+ );
761
+ }
762
+ }
763
+ } catch (err) {
764
+ // Non-fatal — leftover rows just sit until the next session retries.
765
+ try {
766
+ const msg = err && err.message ? err.message : String(err);
767
+ process.stderr.write(`ephemeral-namespace purge skipped: ${msg}\n`);
768
+ } catch { /* writing the failure itself must not throw */ }
769
+ }
770
+
771
+ // ── 3f. Clear the in-progress upgrade notice (#636, #738) ───────────────────
772
+ // Upgrade work is finished; drop the notice so the statusline badge disappears
773
+ // immediately. Change summary is already in stdout emits (Claude's
774
+ // `additionalContext`); a lingering "you upgraded a while ago" badge is noise.
775
+ if (upgradeNoticeContext) {
776
+ clearUpgradeNotice();
777
+ }
778
+
779
+ // ── 3g. Commit deferred version stamp (#730) ────────────────────────────────
780
+ // Written LAST so an abort above leaves the stamp unchanged and the next
781
+ // launcher re-detects the upgrade.
782
+ if (pendingVersionStampWrite) {
783
+ try {
784
+ writeFileSync(pendingVersionStampWrite.path, pendingVersionStampWrite.version);
785
+ } catch { /* non-fatal — next launcher re-detects + retries the upgrade */ }
699
786
  }
700
787
 
701
788
  // Bypasses emitMutation — framing, not a mutation, so it must not inflate the count.
@@ -1,8 +1,10 @@
1
1
  /**
2
2
  * Delete Memory Command - Application Layer (CQRS)
3
3
  *
4
- * Command for deleting memory entries.
5
- * Supports soft delete and hard delete.
4
+ * Hard-deletes memory entries. Soft-delete was retired in story #728 because
5
+ * tombstones were write-only (no code path ever restored a `status='deleted'`
6
+ * row) and bloated the DB indefinitely. The legitimate "keep but hide" case
7
+ * is `archived` — see `MemoryEntry.archive()` / `restore()`.
6
8
  *
7
9
  * @module v3/memory/application/commands
8
10
  */
@@ -25,42 +27,10 @@ export class DeleteMemoryCommandHandler {
25
27
  entryId = entry?.id;
26
28
  }
27
29
  if (!entryId) {
28
- return {
29
- success: false,
30
- deleted: false,
31
- wasHardDelete: false,
32
- };
30
+ return { success: false, deleted: false };
33
31
  }
34
- if (input.hardDelete) {
35
- // Hard delete - remove from database
36
- const deleted = await this.repository.delete(entryId);
37
- return {
38
- success: true,
39
- deleted,
40
- entryId,
41
- wasHardDelete: true,
42
- };
43
- }
44
- else {
45
- // Soft delete - mark as deleted
46
- const entry = await this.repository.findById(entryId);
47
- if (entry) {
48
- entry.delete();
49
- await this.repository.save(entry);
50
- return {
51
- success: true,
52
- deleted: true,
53
- entryId,
54
- wasHardDelete: false,
55
- };
56
- }
57
- }
58
- return {
59
- success: false,
60
- deleted: false,
61
- entryId,
62
- wasHardDelete: false,
63
- };
32
+ const deleted = await this.repository.delete(entryId);
33
+ return { success: true, deleted, entryId };
64
34
  }
65
35
  }
66
36
  /**
@@ -83,47 +53,15 @@ export class BulkDeleteMemoryCommandHandler {
83
53
  .map((e) => e.id);
84
54
  }
85
55
  if (idsToDelete.length === 0) {
86
- return {
87
- success: true,
88
- deletedCount: 0,
89
- failedCount: 0,
90
- errors: [],
91
- };
92
- }
93
- if (input.hardDelete) {
94
- const result = await this.repository.deleteMany(idsToDelete);
95
- return {
96
- success: result.failed === 0,
97
- deletedCount: result.success,
98
- failedCount: result.failed,
99
- errors: result.errors,
100
- };
101
- }
102
- else {
103
- // Soft delete
104
- const entries = await this.repository.findByIds(idsToDelete);
105
- let deletedCount = 0;
106
- const errors = [];
107
- for (const entry of entries) {
108
- try {
109
- entry.delete();
110
- await this.repository.save(entry);
111
- deletedCount++;
112
- }
113
- catch (error) {
114
- errors.push({
115
- id: entry.id,
116
- error: error instanceof Error ? error.message : 'Unknown error',
117
- });
118
- }
119
- }
120
- return {
121
- success: errors.length === 0,
122
- deletedCount,
123
- failedCount: errors.length,
124
- errors,
125
- };
56
+ return { success: true, deletedCount: 0, failedCount: 0, errors: [] };
126
57
  }
58
+ const result = await this.repository.deleteMany(idsToDelete);
59
+ return {
60
+ success: result.failed === 0,
61
+ deletedCount: result.success,
62
+ failedCount: result.failed,
63
+ errors: result.errors,
64
+ };
127
65
  }
128
66
  }
129
67
  //# sourceMappingURL=delete-memory.command.js.map
@@ -101,25 +101,24 @@ export class MemoryApplicationService {
101
101
  /**
102
102
  * Delete a memory entry by namespace and key
103
103
  */
104
- async delete(namespace, key, hardDelete = false) {
105
- const result = await this.deleteHandler.execute({ namespace, key, hardDelete });
104
+ async delete(namespace, key) {
105
+ const result = await this.deleteHandler.execute({ namespace, key });
106
106
  return result.deleted;
107
107
  }
108
108
  /**
109
109
  * Delete a memory entry by ID
110
110
  */
111
- async deleteById(id, hardDelete = false) {
112
- const result = await this.deleteHandler.execute({ id, hardDelete });
111
+ async deleteById(id) {
112
+ const result = await this.deleteHandler.execute({ id });
113
113
  return result.deleted;
114
114
  }
115
115
  /**
116
116
  * Delete all entries in a namespace
117
117
  */
118
- async deleteNamespace(namespace, hardDelete = false) {
118
+ async deleteNamespace(namespace) {
119
119
  const entries = await this.repository.findByNamespace(namespace);
120
120
  const result = await this.bulkDeleteHandler.execute({
121
121
  ids: entries.map((e) => e.id),
122
- hardDelete,
123
122
  });
124
123
  return result.deletedCount;
125
124
  }
@@ -222,7 +222,7 @@ const MEMORY_ENTRIES_DDL = `CREATE TABLE IF NOT EXISTS memory_entries (
222
222
  expires_at INTEGER,
223
223
  last_accessed_at INTEGER,
224
224
  access_count INTEGER DEFAULT 0,
225
- status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived', 'deleted')),
225
+ status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived')),
226
226
  UNIQUE(namespace, key)
227
227
  )`;
228
228
  export function getDb(registry) {
@@ -42,6 +42,27 @@ export const EMBEDDING_MODEL_OPT_OUT = 'none';
42
42
  * #651 doctor check can detect pre-fix residue without re-typing the literal.
43
43
  */
44
44
  export const EMBEDDING_MODEL_LEGACY_DEFAULT = 'local';
45
+ /**
46
+ * Namespaces that store internal moflo run-tracking, never user knowledge.
47
+ * Writes here skip embedding generation entirely — both `embedding` and
48
+ * `embedding_model` land as NULL, distinct from the opt-out path which still
49
+ * tags rows with `'none'`. Existing rows in these namespaces are hard-deleted
50
+ * on upgrade by `services/ephemeral-namespace-purge.ts`.
51
+ *
52
+ * Members:
53
+ * - `hive-mind` — MCP broadcast traffic (msg:*, agent_join, consensus_propose)
54
+ * - `tasklist` — Spell run records (sp-*) written by spells/core/runner.ts + daemon-dashboard.ts
55
+ * - `epic-state` — Epic progress (epic-N, story-M) written by commands/epic.ts
56
+ * - `test-bridge-fix` — Single 2026-04-23 row left over from a one-off test
57
+ *
58
+ * See story #729 for the source-trace and rationale.
59
+ */
60
+ export const EPHEMERAL_NAMESPACES = new Set([
61
+ 'hive-mind',
62
+ 'tasklist',
63
+ 'epic-state',
64
+ 'test-bridge-fix',
65
+ ]);
45
66
  let cachedEmbedder = null;
46
67
  let testOverride = null;
47
68
  class LazyFastembedBridgeEmbedder {
@@ -83,7 +104,24 @@ class LazyFastembedBridgeEmbedder {
83
104
  return vector;
84
105
  }
85
106
  }
86
- export async function resolveBridgeEmbedding(value, precomputed, generateEmbeddingFlag) {
107
+ /**
108
+ * Build the `embedding` field of a store-entry response from a resolved
109
+ * embedding. Returns `undefined` for skip paths (opt-out and ephemeral) so
110
+ * the caller can pass it straight through.
111
+ */
112
+ export function embeddingResponseFrom(resolved) {
113
+ // json !== null narrows to the embedded variant where model is `string`.
114
+ return resolved.json !== null
115
+ ? { dimensions: resolved.dimensions, model: resolved.model }
116
+ : undefined;
117
+ }
118
+ export async function resolveBridgeEmbedding(value, precomputed, generateEmbeddingFlag, namespace) {
119
+ // Ephemeral namespaces (run-tracking, never user knowledge) skip embeddings
120
+ // unconditionally — even precomputed vectors are dropped. Result row has
121
+ // `embedding IS NULL` and `embedding_model IS NULL`. See #729.
122
+ if (namespace && EPHEMERAL_NAMESPACES.has(namespace)) {
123
+ return { ok: true, json: null, dimensions: 0, model: null };
124
+ }
87
125
  const wantsEmbedding = generateEmbeddingFlag !== false && value.length > 0;
88
126
  if (!wantsEmbedding) {
89
127
  return { ok: true, json: null, dimensions: 0, model: EMBEDDING_MODEL_OPT_OUT };
@@ -8,7 +8,7 @@
8
8
  * @module v3/cli/bridge-entries
9
9
  */
10
10
  import { cosineSim, execRows, generateId, persistBridgeDb, refreshVectorStatsCache, withDb } from './bridge-core.js';
11
- import { resolveBridgeEmbedding } from './bridge-embedder.js';
11
+ import { embeddingResponseFrom, resolveBridgeEmbedding } from './bridge-embedder.js';
12
12
  function makeEntryCacheKey(namespace, key) {
13
13
  const safeNs = String(namespace).replace(/:/g, '_');
14
14
  const safeKey = String(key).replace(/:/g, '_');
@@ -98,13 +98,12 @@ export async function bridgeStoreEntry(options) {
98
98
  if (!guardResult.allowed) {
99
99
  return { success: false, id, error: `MutationGuard rejected: ${guardResult.reason}` };
100
100
  }
101
- const resolved = await resolveBridgeEmbedding(value, options.precomputedEmbedding, options.generateEmbeddingFlag);
101
+ const resolved = await resolveBridgeEmbedding(value, options.precomputedEmbedding, options.generateEmbeddingFlag, namespace);
102
102
  if (!resolved.ok) {
103
103
  return { success: false, id, error: `embedding generation failed: ${resolved.reason}` };
104
104
  }
105
- const embeddingJson = resolved.json;
106
- const dimensions = resolved.dimensions;
107
- const model = resolved.model;
105
+ const { json: embeddingJson, dimensions, model } = resolved;
106
+ const embeddingResponse = embeddingResponseFrom(resolved);
108
107
  const insertSql = options.upsert
109
108
  ? `INSERT OR REPLACE INTO memory_entries (
110
109
  id, key, namespace, content, type,
@@ -135,7 +134,7 @@ export async function bridgeStoreEntry(options) {
135
134
  return {
136
135
  success: true,
137
136
  id,
138
- embedding: embeddingJson ? { dimensions, model } : undefined,
137
+ embedding: embeddingResponse,
139
138
  guarded: true,
140
139
  cached: true,
141
140
  attested: true,
@@ -175,12 +174,13 @@ export async function bridgeStoreEntries(items, dbPath) {
175
174
  const { key, value, namespace = 'default', tags = [], ttl } = opts;
176
175
  const id = generateId('entry');
177
176
  const now = Date.now();
178
- const resolved = await resolveBridgeEmbedding(value, opts.precomputedEmbedding, opts.generateEmbeddingFlag);
177
+ const resolved = await resolveBridgeEmbedding(value, opts.precomputedEmbedding, opts.generateEmbeddingFlag, namespace);
179
178
  if (!resolved.ok) {
180
179
  results.push({ success: false, id, error: `embedding generation failed: ${resolved.reason}` });
181
180
  continue;
182
181
  }
183
182
  const { json: embeddingJson, dimensions, model } = resolved;
183
+ const embeddingResponse = embeddingResponseFrom(resolved);
184
184
  const insertSql = opts.upsert
185
185
  ? `INSERT OR REPLACE INTO memory_entries (
186
186
  id, key, namespace, content, type,
@@ -217,7 +217,7 @@ export async function bridgeStoreEntries(items, dbPath) {
217
217
  results.push({
218
218
  success: true,
219
219
  id,
220
- embedding: embeddingJson ? { dimensions, model } : undefined,
220
+ embedding: embeddingResponse,
221
221
  });
222
222
  }
223
223
  // Cache writes and attestation logs are independent post-hoc bookkeeping —
@@ -436,10 +436,9 @@ export async function bridgeDeleteEntry(options) {
436
436
  let changes = 0;
437
437
  try {
438
438
  ctx.db.prepare(`
439
- UPDATE memory_entries
440
- SET status = 'deleted', updated_at = ?
439
+ DELETE FROM memory_entries
441
440
  WHERE key = ? AND namespace = ? AND status = 'active'
442
- `).run([Date.now(), key, namespace]);
441
+ `).run([key, namespace]);
443
442
  // sql.js Statement.run returns true/false, not { changes }. Use
444
443
  // db.getRowsModified() to read the row count from the last statement.
445
444
  changes = ctx.db.getRowsModified?.() ?? 0;
@@ -150,13 +150,6 @@ export class MemoryEntry {
150
150
  this._updatedAt = new Date();
151
151
  }
152
152
  }
153
- /**
154
- * Mark as deleted (soft delete)
155
- */
156
- delete() {
157
- this._status = 'deleted';
158
- this._updatedAt = new Date();
159
- }
160
153
  /**
161
154
  * Check if memory has expired based on TTL
162
155
  */
@@ -294,7 +294,6 @@ export class HybridMemoryRepository {
294
294
  let totalSize = 0;
295
295
  let activeCount = 0;
296
296
  let archivedCount = 0;
297
- let deletedCount = 0;
298
297
  for (const entry of entries) {
299
298
  // Count by namespace
300
299
  entriesByNamespace[entry.namespace] = (entriesByNamespace[entry.namespace] ?? 0) + 1;
@@ -311,9 +310,6 @@ export class HybridMemoryRepository {
311
310
  case 'archived':
312
311
  archivedCount++;
313
312
  break;
314
- case 'deleted':
315
- deletedCount++;
316
- break;
317
313
  }
318
314
  }
319
315
  // Find hottest and coldest
@@ -324,7 +320,6 @@ export class HybridMemoryRepository {
324
320
  totalEntries: entries.length,
325
321
  activeEntries: activeCount,
326
322
  archivedEntries: archivedCount,
327
- deletedEntries: deletedCount,
328
323
  totalSize,
329
324
  entriesByNamespace,
330
325
  entriesByType,
@@ -14,7 +14,7 @@ import { mofloImport } from '../services/moflo-require.js';
14
14
  import { atomicWriteFileSync } from '../services/atomic-file-write.js';
15
15
  import { formatEmbeddingError } from './embedding-errors.js';
16
16
  import { HnswLite } from './hnsw-lite.js';
17
- import { EMBEDDING_MODEL_OPT_OUT, getBridgeEmbedder } from './bridge-embedder.js';
17
+ import { EMBEDDING_MODEL_OPT_OUT, EPHEMERAL_NAMESPACES, getBridgeEmbedder } from './bridge-embedder.js';
18
18
  import { toFloat32 } from './controllers/_shared.js';
19
19
  import { writeVectorStatsJson } from './bridge-core.js';
20
20
  import { MOFLO_DIR, hnswIndexPath, legacyMemoryDbPath, memoryDbPath, } from '../services/moflo-paths.js';
@@ -104,7 +104,7 @@ CREATE TABLE IF NOT EXISTS memory_entries (
104
104
  access_count INTEGER DEFAULT 0,
105
105
 
106
106
  -- Status
107
- status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived', 'deleted')),
107
+ status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived')),
108
108
 
109
109
  UNIQUE(namespace, key)
110
110
  );
@@ -1592,10 +1592,15 @@ export async function storeEntry(options) {
1592
1592
  // success:false rather than inserting a null-embedded row. Opt-out rows
1593
1593
  // (generateEmbeddingFlag=false) are tagged EMBEDDING_MODEL_OPT_OUT — see
1594
1594
  // the constant's docstring in bridge-embedder.ts for the rationale.
1595
+ // Ephemeral namespaces (#729) skip embedding entirely AND tag model NULL.
1595
1596
  let embeddingJson = null;
1596
1597
  let embeddingDimensions = null;
1597
1598
  let embeddingModel = EMBEDDING_MODEL_OPT_OUT;
1598
- if (generateEmbeddingFlag && value.length > 0) {
1599
+ const isEphemeralNs = EPHEMERAL_NAMESPACES.has(namespace);
1600
+ if (isEphemeralNs) {
1601
+ embeddingModel = null;
1602
+ }
1603
+ else if (generateEmbeddingFlag && value.length > 0) {
1599
1604
  if (options.precomputedEmbedding) {
1600
1605
  // Tag with the bridge embedder's canonical model so precomputed rows
1601
1606
  // are indistinguishable from live single-embed rows downstream.
@@ -2019,14 +2024,10 @@ export async function deleteEntry(options) {
2019
2024
  error: `Key '${key}' not found in namespace '${namespace}'`
2020
2025
  };
2021
2026
  }
2022
- // Delete the entry (soft delete by setting status to 'deleted')
2023
- db.run(`
2024
- UPDATE memory_entries
2025
- SET status = 'deleted', updated_at = strftime('%s', 'now') * 1000
2026
- WHERE key = '${key.replace(/'/g, "''")}'
2027
- AND namespace = '${namespace.replace(/'/g, "''")}'
2028
- AND status = 'active'
2029
- `);
2027
+ // Hard-delete the entry. Soft-delete was retired in story #728: tombstones
2028
+ // were write-only (no code ever restored from status='deleted') and bloated
2029
+ // the DB indefinitely.
2030
+ db.run(`DELETE FROM memory_entries WHERE key = ? AND namespace = ? AND status = 'active'`, [key, namespace]);
2030
2031
  // Get remaining count
2031
2032
  const countResult = db.exec(`SELECT COUNT(*) FROM memory_entries WHERE status = 'active'`);
2032
2033
  const remainingEntries = countResult[0]?.values?.[0]?.[0] || 0;
@@ -0,0 +1,75 @@
1
+ /**
2
+ * Idempotent ephemeral-namespace purge for moflo's memory DB (`.moflo/moflo.db`).
3
+ *
4
+ * Story #729 retired four namespaces from the persistent memory layer because
5
+ * they store internal moflo run-tracking — not user knowledge — and embedding
6
+ * them polluted the search index:
7
+ *
8
+ * - `hive-mind` (MCP broadcast traffic)
9
+ * - `tasklist` (spell run records)
10
+ * - `epic-state` (epic progress tracking)
11
+ * - `test-bridge-fix` (single-row leftover from a one-off test)
12
+ *
13
+ * This service hard-deletes any rows in those namespaces left over from prior
14
+ * moflo versions, then VACUUMs to reclaim disk. Future writes to these
15
+ * namespaces still land in the DB — but skip embedding generation entirely
16
+ * (see {@link EPHEMERAL_NAMESPACES} in `memory/bridge-embedder.ts`).
17
+ *
18
+ * Lives in `services/` so it has no dependency on the CLI command machinery.
19
+ * That lets `bin/session-start-launcher.mjs` dynamic-import it and run the
20
+ * purge in foreground BEFORE long-lived sql.js consumers (MCP server, daemon)
21
+ * open the DB — sql.js dumps the whole snapshot on every flush and would
22
+ * otherwise clobber our cleanup (see #727's clobber-hazard analysis).
23
+ *
24
+ * @module cli/services/ephemeral-namespace-purge
25
+ */
26
+ /* eslint-disable @typescript-eslint/no-explicit-any */
27
+ import { EPHEMERAL_NAMESPACES } from '../memory/bridge-embedder.js';
28
+ import { mofloImport } from './moflo-require.js';
29
+ import { atomicWriteFileSync } from './atomic-file-write.js';
30
+ import { memoryDbPath } from './moflo-paths.js';
31
+ /**
32
+ * Hard-delete every row whose namespace is in {@link EPHEMERAL_NAMESPACES}
33
+ * and VACUUM. Returns `{ purged: 0 }` on the happy path: no DB, sql.js
34
+ * unavailable, schema lacks `memory_entries`, or no ephemeral rows present.
35
+ * Errors propagate to the caller (the launcher absorbs them so a failed
36
+ * purge never blocks session start).
37
+ */
38
+ export async function purgeEphemeralNamespaces(options = {}) {
39
+ const fs = await import('fs');
40
+ const path = await import('path');
41
+ const dbPath = path.resolve(options.dbPath ?? memoryDbPath(process.cwd()));
42
+ if (!fs.existsSync(dbPath))
43
+ return { purged: 0 };
44
+ const initSqlJs = (await mofloImport('sql.js'))?.default;
45
+ if (!initSqlJs)
46
+ return { purged: 0 };
47
+ const SQL = await initSqlJs();
48
+ const buffer = fs.readFileSync(dbPath);
49
+ const db = new SQL.Database(buffer);
50
+ try {
51
+ // Probe: schema must carry `memory_entries`. Older / non-moflo DBs are
52
+ // a no-op so we don't VACUUM unrelated SQLite files.
53
+ const probe = db.exec(`SELECT name FROM sqlite_master WHERE type='table' AND name='memory_entries' LIMIT 1`);
54
+ if (!probe[0]?.values?.[0])
55
+ return { purged: 0 };
56
+ const namespaces = Array.from(EPHEMERAL_NAMESPACES);
57
+ const placeholders = namespaces.map(() => '?').join(', ');
58
+ // Single-scan delete + rowsModified: skips a redundant COUNT pass on dirty
59
+ // DBs and avoids the prepare/bind/step/free overhead on clean ones. VACUUM
60
+ // (and the disk write) only run when something was actually deleted.
61
+ db.run(`DELETE FROM memory_entries WHERE namespace IN (${placeholders})`, namespaces);
62
+ const purged = db.getRowsModified?.() ?? 0;
63
+ if (purged === 0)
64
+ return { purged: 0 };
65
+ // VACUUM has to run outside any open transaction; sql.js auto-commits
66
+ // each `db.run`, so this is safe to chain.
67
+ db.run('VACUUM');
68
+ atomicWriteFileSync(dbPath, db.export());
69
+ return { purged };
70
+ }
71
+ finally {
72
+ db.close();
73
+ }
74
+ }
75
+ //# sourceMappingURL=ephemeral-namespace-purge.js.map
@@ -0,0 +1,66 @@
1
+ /**
2
+ * Idempotent soft-delete purge for moflo's memory DB (`.moflo/moflo.db`).
3
+ *
4
+ * Story #728 retired soft-delete from the memory layer: tombstones were
5
+ * write-only (no code path ever restored a `status='deleted'` row) and bloated
6
+ * the DB indefinitely. This service hard-deletes any leftover `status='deleted'`
7
+ * rows from prior moflo versions, then VACUUMs to reclaim disk. `archived`
8
+ * rows are NOT touched — they are the legitimate "keep but hide" state and
9
+ * have a working `restore()` path.
10
+ *
11
+ * Lives in `services/` so it has no dependency on the CLI command machinery.
12
+ * That lets `bin/session-start-launcher.mjs` dynamic-import it and run the
13
+ * purge in foreground BEFORE long-lived sql.js consumers (MCP server, daemon)
14
+ * open the DB — sql.js dumps the whole snapshot on every flush and would
15
+ * otherwise clobber our cleanup.
16
+ *
17
+ * @module cli/services/soft-delete-purge
18
+ */
19
+ /* eslint-disable @typescript-eslint/no-explicit-any */
20
+ import { mofloImport } from './moflo-require.js';
21
+ import { atomicWriteFileSync } from './atomic-file-write.js';
22
+ import { memoryDbPath } from './moflo-paths.js';
23
/**
 * Hard-delete all `status='deleted'` rows from the memory DB and VACUUM.
 *
 * Returns `{ purged: 0 }` for the happy path: no DB, sql.js unavailable,
 * schema lacks `memory_entries`, or no tombstones present. Errors propagate
 * to the caller (the launcher absorbs them so a failed purge never blocks
 * session start).
 *
 * @param {{ dbPath?: string }} [options] - Optional override for the DB path.
 * @returns {Promise<{ purged: number }>} Count of tombstone rows removed.
 */
export async function purgeSoftDeletedEntries(options = {}) {
  const fs = await import('fs');
  const path = await import('path');
  const dbPath = path.resolve(options.dbPath ?? memoryDbPath(process.cwd()));
  if (!fs.existsSync(dbPath)) return { purged: 0 };
  const initSqlJs = (await mofloImport('sql.js'))?.default;
  if (!initSqlJs) return { purged: 0 };
  const SQL = await initSqlJs();
  const db = new SQL.Database(fs.readFileSync(dbPath));
  try {
    // Probe: schema must carry `memory_entries`. Older / non-moflo DBs are
    // a no-op so we don't VACUUM unrelated SQLite files.
    const probe = db.exec(`SELECT name FROM sqlite_master WHERE type='table' AND name='memory_entries' LIMIT 1`);
    if (!probe[0]?.values?.[0]) return { purged: 0 };
    // Single-scan delete + rowsModified (same pattern as the ephemeral-
    // namespace purge): the previous COUNT-then-DELETE made two full passes
    // over the table. A DELETE that matches nothing is harmless, and VACUUM
    // plus the disk write still only run when rows were actually removed.
    db.run(`DELETE FROM memory_entries WHERE status = 'deleted'`);
    const purged = db.getRowsModified?.() ?? 0;
    if (purged === 0) return { purged: 0 };
    // VACUUM has to run outside any open transaction; sql.js auto-commits
    // each `db.run`, so this is safe to chain.
    db.run('VACUUM');
    atomicWriteFileSync(dbPath, db.export());
    return { purged };
  } finally {
    db.close();
  }
}
66
+ //# sourceMappingURL=soft-delete-purge.js.map
@@ -23,12 +23,20 @@ export function getGCSConfig() {
23
23
  prefix: process.env.GCS_PREFIX || 'claude-flow-patterns',
24
24
  };
25
25
  }
26
+ // Bound the subprocess so a slow / hung gcloud spawn doesn't stretch test
27
+ // timeouts (or session-start probes) indefinitely. gcloud responds in ~100ms
28
+ // when present; 5s is generous for a contended CI runner.
29
+ const GCLOUD_PROBE_TIMEOUT_MS = 5_000;
26
30
  /**
27
31
  * Check if gcloud CLI is available
28
32
  */
29
33
  export function isGCloudAvailable() {
30
34
  try {
31
- execSync('gcloud --version', { stdio: 'pipe', windowsHide: true });
35
+ execSync('gcloud --version', {
36
+ stdio: 'pipe',
37
+ windowsHide: true,
38
+ timeout: GCLOUD_PROBE_TIMEOUT_MS,
39
+ });
32
40
  return true;
33
41
  }
34
42
  catch {
@@ -40,7 +48,11 @@ export function isGCloudAvailable() {
40
48
  */
41
49
  export async function isGCloudAuthenticated() {
42
50
  try {
43
- execSync('gcloud auth print-access-token', { stdio: 'pipe', windowsHide: true });
51
+ execSync('gcloud auth print-access-token', {
52
+ stdio: 'pipe',
53
+ windowsHide: true,
54
+ timeout: GCLOUD_PROBE_TIMEOUT_MS,
55
+ });
44
56
  return true;
45
57
  }
46
58
  catch {
@@ -2,5 +2,5 @@
2
2
  * Auto-generated by build. Do not edit manually.
3
3
  * Source of truth: root package.json → scripts/sync-version.mjs
4
4
  */
5
- export const VERSION = '4.9.0-rc.2';
5
+ export const VERSION = '4.9.0-rc.3';
6
6
  //# sourceMappingURL=version.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "moflo",
3
- "version": "4.9.0-rc.2",
3
+ "version": "4.9.0-rc.3",
4
4
  "description": "MoFlo — AI agent orchestration for Claude Code. Forked from ruflo/claude-flow with patches applied to source, plus feature-level orchestration.",
5
5
  "main": "dist/src/cli/index.js",
6
6
  "type": "module",
@@ -78,7 +78,7 @@
78
78
  "@typescript-eslint/eslint-plugin": "^7.18.0",
79
79
  "@typescript-eslint/parser": "^7.18.0",
80
80
  "eslint": "^8.0.0",
81
- "moflo": "^4.9.0-rc.1",
81
+ "moflo": "^4.9.0-rc.2",
82
82
  "tsx": "^4.21.0",
83
83
  "typescript": "^5.9.3",
84
84
  "vitest": "^4.0.0"