moflo 4.9.0-rc.2 → 4.9.0-rc.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -602,32 +602,36 @@ function getIntegrationStatus() {
602
602
  return { mcpServers, hasDatabase, hasApi };
603
603
  }
604
604
 
605
- // Upgrade notice (#636) — written by the session-start launcher; null when missing, expired, or malformed.
605
+ // Upgrade notice (#636, #738, #743) — written by the session-start launcher
606
+ // ONLY while upgrade work is in flight; the launcher deletes the file when
607
+ // work completes. We render it strictly for status='in-progress' so a stale
608
+ // notice (legacy "complete" file from pre-#738 launchers, zombie write from
609
+ // an aborted launcher, future writer mistakes) cannot turn the statusline
610
+ // segment into a permanent column. The launcher's section 0-pre also drops
611
+ // any leftover file at session start as a second line of defence.
606
612
  function getUpgradeNotice() {
607
613
  const data = readJSON(path.join(CWD, '.moflo', 'upgrade-notice.json'));
608
614
  if (!data || typeof data !== 'object') return null;
615
+ if (data.status !== 'in-progress') return null;
609
616
  const expiresAt = data.expiresAt ? new Date(data.expiresAt).getTime() : 0;
610
617
  if (!expiresAt || Date.now() > expiresAt) return null;
611
618
  return {
612
619
  kind: data.kind === 'repair' ? 'repair' : 'upgrade',
613
620
  from: typeof data.from === 'string' ? data.from : '',
614
621
  to: typeof data.to === 'string' ? data.to : '',
615
- changes: typeof data.changes === 'number' && data.changes > 0 ? data.changes : 0,
616
622
  };
617
623
  }
618
624
 
619
625
  function formatUpgradeNoticeSegment(notice) {
620
626
  if (!notice) return '';
621
- const changesPart = notice.changes > 0
622
- ? ` ${c.dim}(${notice.changes} ${notice.changes === 1 ? 'change' : 'changes'})${c.reset}`
623
- : '';
627
+ const suffix = ` ${c.dim}(updating…)${c.reset}`;
624
628
  if (notice.kind === 'repair') {
625
- return `${c.brightYellow}📦 install repaired${c.reset}${changesPart}`;
629
+ return `${c.brightYellow}📦 install repaired${c.reset}${suffix}`;
626
630
  }
627
631
  const versions = notice.from && notice.to
628
632
  ? `${notice.from} → ${notice.to}`
629
633
  : (notice.to || 'upgraded');
630
- return `${c.brightYellow}📦 ${versions}${c.reset}${changesPart}`;
634
+ return `${c.brightYellow}📦 ${versions}${c.reset}${suffix}`;
631
635
  }
632
636
 
633
637
  // Session stats (pure file reads)
@@ -671,11 +675,14 @@ function generateStatusline() {
671
675
 
672
676
  const parts = [];
673
677
 
678
+ // Upgrade notice — leading position so it reads as a transient banner
679
+ // rather than a permanent column (#738). Only renders during the upgrade
680
+ // window; the launcher deletes the notice file after work completes.
681
+ pushUpgradeNoticeSegment(parts);
682
+
674
683
  // Branding (always shown when enabled)
675
684
  parts.push(`${c.bold}${c.brightPurple}\u258A ${SL_CONFIG.branding}${c.reset}`);
676
685
 
677
- pushUpgradeNoticeSegment(parts);
678
-
679
686
  // User + swarm indicator
680
687
  const dot = swarm.coordinationActive ? `${c.brightGreen}\u25CF${c.reset}` : `${c.brightCyan}\u25CF${c.reset}`;
681
688
  parts.push(`${dot} ${c.brightCyan}${git.name}${c.reset}`);
@@ -758,8 +765,9 @@ function generateDashboard() {
758
765
  if (SL_CONFIG.show_session && session.duration) {
759
766
  header += ` ${c.dim}\u2502${c.reset} ${c.cyan}\u23F1 ${session.duration}${c.reset}`;
760
767
  }
761
- lines.push(header);
768
+ // Upgrade notice — leading line so it reads as a transient banner (#738).
762
769
  pushUpgradeNoticeSegment(lines);
770
+ lines.push(header);
763
771
 
764
772
  // Separator
765
773
  lines.push(`${c.dim}${'─'.repeat(53)}${c.reset}`);
@@ -834,8 +842,9 @@ function generateCompactDashboard() {
834
842
  if (SL_CONFIG.show_session && session.duration) {
835
843
  header += ` ${c.dim}\u2502${c.reset} ${c.cyan}\u23F1 ${session.duration}${c.reset}`;
836
844
  }
837
- lines.push(header);
845
+ // Upgrade notice — leading line so it reads as a transient banner (#738).
838
846
  pushUpgradeNoticeSegment(lines);
847
+ lines.push(header);
839
848
 
840
849
  // Combined swarm + agentdb + mcp line
841
850
  const segments = [];
@@ -0,0 +1,100 @@
1
+ /**
2
+ * Memory-DB integrity check + auto-REINDEX (story #743).
3
+ *
4
+ * The `.moflo/moflo.db` SQLite file routinely accumulates index corruption of
5
+ * the form `row N missing from index sqlite_autoindex_memory_entries_1` —
6
+ * the row data is intact, only the unique-key index has drifted. The most
7
+ * common trigger is sql.js's whole-file dump-on-flush behaviour racing with
8
+ * concurrent writes (see `feedback_sqljs_writeback_clobber.md` and #714).
9
+ *
10
+ * Symptoms when uncorrected:
11
+ * - `index-guidance.mjs` and `index-patterns.mjs` fail mid-write with
12
+ * `database disk image is malformed`, leaving partial state.
13
+ * - The ephemeral-namespace purge (#729) fails silently, so hive-mind /
14
+ * tasklist / epic-state / test-bridge-fix rows accumulate.
15
+ * - Vector counts in the statusline stay inflated (observed: 4415 with
16
+ * 1025 unpurged ephemeral rows).
17
+ *
18
+ * Fix shape: REINDEX rebuilds indexes from the canonical row data — much less
19
+ * destructive than a full rebuild and works for the typical drift mode. If
20
+ * REINDEX itself fails to restore integrity we leave the file alone and
21
+ * report; manual `flo memory rebuild-index` is the fallback.
22
+ *
23
+ * MUST run BEFORE any long-lived sql.js consumer (MCP server, daemon) opens
24
+ * the DB and BEFORE the embeddings migration / soft-delete purge / ephemeral
25
+ * purge — those all swallow corruption errors and silently no-op.
26
+ */
27
+ import { existsSync, readFileSync, writeFileSync } from 'node:fs';
28
+ import { memoryDbPath } from './moflo-paths.mjs';
29
+
30
+ let _initSqlJs = null;
31
+
32
+ async function loadSqlJs() {
33
+ if (_initSqlJs) return _initSqlJs;
34
+ // sql.js is a hard dependency of moflo (see top-level package.json);
35
+ // resolving it from the consumer's node_modules works because the launcher
36
+ // runs from the consumer cwd.
37
+ const mod = await import('sql.js');
38
+ _initSqlJs = mod.default || mod;
39
+ return _initSqlJs;
40
+ }
41
+
42
+ function isOk(execResult) {
43
+ const rows = execResult?.[0]?.values ?? [];
44
+ return rows.length === 1 && rows[0]?.[0] === 'ok';
45
+ }
46
+
47
+ function corruptionCount(execResult) {
48
+ return execResult?.[0]?.values?.length ?? 0;
49
+ }
50
+
51
+ /**
52
+ * Probe the memory DB for index corruption and run REINDEX in place if
53
+ * found. Returns `{ repaired, errors, persistent }`:
54
+ * - `repaired: true` and `errors > 0` when REINDEX restored integrity.
55
+ * - `repaired: false, errors: 0` when the DB is healthy or absent.
56
+ * - `repaired: false, errors > 0, persistent: true` when corruption survives
57
+ * REINDEX (caller should surface to the user — manual rebuild needed).
58
+ *
59
+ * Never throws; any internal failure becomes `{ repaired: false, errors: 0 }`
60
+ * so a probe failure cannot block session start.
61
+ */
62
+ export async function repairMemoryDbIfCorrupt(projectRoot) {
63
+ const dbPath = memoryDbPath(projectRoot);
64
+ if (!existsSync(dbPath)) return { repaired: false, errors: 0 };
65
+
66
+ let initSql;
67
+ try {
68
+ initSql = await loadSqlJs();
69
+ } catch {
70
+ return { repaired: false, errors: 0 };
71
+ }
72
+
73
+ let db = null;
74
+ try {
75
+ const SQL = await initSql();
76
+ const data = readFileSync(dbPath);
77
+ db = new SQL.Database(data);
78
+
79
+ const before = db.exec('PRAGMA integrity_check');
80
+ if (isOk(before)) {
81
+ return { repaired: false, errors: 0 };
82
+ }
83
+
84
+ const errors = corruptionCount(before);
85
+ db.run('REINDEX');
86
+
87
+ const after = db.exec('PRAGMA integrity_check');
88
+ if (!isOk(after)) {
89
+ return { repaired: false, errors, persistent: true };
90
+ }
91
+
92
+ const out = Buffer.from(db.export());
93
+ writeFileSync(dbPath, out);
94
+ return { repaired: true, errors };
95
+ } catch {
96
+ return { repaired: false, errors: 0 };
97
+ } finally {
98
+ if (db) try { db.close(); } catch { /* non-fatal */ }
99
+ }
100
+ }
@@ -9,9 +9,10 @@
9
9
 
10
10
  import { spawn } from 'child_process';
11
11
  import { existsSync, readFileSync, writeFileSync, copyFileSync, unlinkSync, readdirSync, mkdirSync, statSync } from 'fs';
12
- import { resolve, dirname } from 'path';
12
+ import { resolve, dirname, join } from 'path';
13
13
  import { fileURLToPath } from 'url';
14
- import { migrateClaudeFlowToMoflo, migrateMemoryDbToMoflo } from './lib/moflo-paths.mjs';
14
+ import { migrateClaudeFlowToMoflo, migrateMemoryDbToMoflo, mofloDir } from './lib/moflo-paths.mjs';
15
+ import { repairMemoryDbIfCorrupt } from './lib/db-repair.mjs';
15
16
 
16
17
  const __dirname = dirname(fileURLToPath(import.meta.url));
17
18
 
@@ -56,6 +57,55 @@ const plural = (n, word) => `${n} ${word}${n === 1 ? '' : 's'}`;
56
57
  // can persist `.moflo/upgrade-notice.json` for the statusline (#636).
57
58
  let upgradeNoticeContext = null;
58
59
 
60
+ // Deferred so we commit it AFTER every upgrade-work block (see 3g). The stamp
61
+ // is the "launcher fully completed" signal — writing it mid-flight lets an
62
+ // aborted launcher strand consumers on a half-applied upgrade (#730).
63
+ let pendingVersionStampWrite = null;
64
+
65
+ // 5-min TTL is a safety net for zombie launchers (statusline ignores past-TTL
66
+ // files). The launcher deletes the notice when upgrade work finishes — no
67
+ // "complete" state lingers, see #738.
68
+ const UPGRADE_NOTICE_INPROGRESS_TTL_MS = 5 * 60 * 1000;
69
+ const UPGRADE_NOTICE_PATH = () => join(mofloDir(projectRoot), 'upgrade-notice.json');
70
+
71
+ function writeInProgressUpgradeNotice() {
72
+ if (!upgradeNoticeContext) return;
73
+ try {
74
+ mkdirSync(mofloDir(projectRoot), { recursive: true });
75
+ const now = Date.now();
76
+ const notice = {
77
+ status: 'in-progress',
78
+ kind: upgradeNoticeContext.kind,
79
+ from: upgradeNoticeContext.from,
80
+ to: upgradeNoticeContext.to,
81
+ at: new Date(now).toISOString(),
82
+ expiresAt: new Date(now + UPGRADE_NOTICE_INPROGRESS_TTL_MS).toISOString(),
83
+ changes: 0,
84
+ };
85
+ writeFileSync(UPGRADE_NOTICE_PATH(), JSON.stringify(notice, null, 2));
86
+ } catch { /* non-fatal — statusline just won't show the segment */ }
87
+ }
88
+
89
+ function clearUpgradeNotice() {
90
+ try {
91
+ unlinkSync(UPGRADE_NOTICE_PATH());
92
+ } catch { /* non-fatal — already gone or never existed */ }
93
+ }
94
+
95
+ // ── 0-pre. Drop any stale upgrade notice (#738, #743) ───────────────────────
96
+ // `upgrade-notice.json` is a transient handshake between launcher and
97
+ // statusline — it should never survive past the launcher run that wrote it.
98
+ // Pre-#738 launchers wrote a 1-hour-TTL "complete" notice after upgrade work
99
+ // finished; with the #738 contract that file can only be a leftover, but the
100
+ // statusline still rendered it for the rest of the hour. Unconditionally
101
+ // removing it here makes the contract self-healing — any future zombie
102
+ // notice (legacy file, aborted launcher, future writer mistake) gets dropped
103
+ // before the statusline can see it. The in-progress notice for THIS session,
104
+ // if any, is written later in section 3 and cleared in section 3f.
105
+ try {
106
+ unlinkSync(join(mofloDir(projectRoot), 'upgrade-notice.json'));
107
+ } catch { /* non-fatal — file usually doesn't exist */ }
108
+
59
109
  // ── 0. LEGACY state migration (#699) ─────────────────────────────────────────
60
110
  // Consumers upgrading from older moflo builds (inherited from upstream Ruflo)
61
111
  // get a one-time auto-migration of LEGACY `.claude-flow/` → `.moflo/` so claim
@@ -91,6 +141,38 @@ try {
91
141
  // Non-fatal — failed migration leaves both DBs in place; next session retries.
92
142
  }
93
143
 
144
+ // ── 0c. Memory DB index repair (#743) ───────────────────────────────────────
145
+ // The .moflo/moflo.db SQLite file accumulates index corruption ("row N missing
146
+ // from sqlite_autoindex_memory_entries_1") when sql.js's whole-file flush
147
+ // races with concurrent writes. Symptom is silent: indexers fail mid-write,
148
+ // the ephemeral-namespace purge (#729) silently no-ops, vector counts inflate.
149
+ //
150
+ // Probe + REINDEX in place. Must run BEFORE any sql.js consumer (the
151
+ // embeddings migration in 3e, the soft-delete + ephemeral purges in 3e-728/
152
+ // 3e-729, and the long-lived MCP server / daemon spawned in section 4) — all
153
+ // of those swallow corruption errors and silently drop work on the floor.
154
+ //
155
+ // Awaited because every downstream sql.js touch this session depends on a
156
+ // healthy index. Cost on the happy path is one PRAGMA check (~10ms).
157
+ try {
158
+ const repair = await repairMemoryDbIfCorrupt(projectRoot);
159
+ if (repair?.repaired) {
160
+ emitMutation(
161
+ 'repaired memory db index',
162
+ `${plural(repair.errors, 'index error')} fixed via REINDEX`,
163
+ );
164
+ } else if (repair?.persistent) {
165
+ // Surface to stderr — Claude additionalContext + the user both see this.
166
+ // Manual `flo memory rebuild-index` is the next step.
167
+ process.stderr.write(
168
+ `moflo: memory db has ${plural(repair.errors, 'index error')} REINDEX could not fix — run 'flo memory rebuild-index'\n`,
169
+ );
170
+ }
171
+ } catch {
172
+ // Non-fatal — repair is best-effort; downstream code paths report their
173
+ // own errors if the DB is still broken.
174
+ }
175
+
94
176
  // ── 1. Helper: fire-and-forget a background process ─────────────────────────
95
177
  function fireAndForget(cmd, args, label) {
96
178
  try {
@@ -213,6 +295,11 @@ try {
213
295
  };
214
296
  emitMutation('repaired stale install', 'manifest drift detected');
215
297
  }
298
+ // Surface a transient "(updating…)" badge in the statusline before the
299
+ // long-running upgrade work (manifest sync, daemon recycle, embeddings
300
+ // migration). See #738 — the launcher clears this file after work
301
+ // completes, so the badge naturally disappears once the user is unblocked.
302
+ writeInProgressUpgradeNotice();
216
303
  const binDir = resolve(projectRoot, 'node_modules/moflo/bin');
217
304
 
218
305
  // ── Manifest-based auto-update ──────────────────────────────────────
@@ -358,12 +445,13 @@ try {
358
445
  }
359
446
  } catch { /* non-fatal — daemon recycle is best-effort */ }
360
447
 
361
- // Write updated manifest + version stamp
448
+ // Manifest reflects synced files immediately; version stamp is deferred
449
+ // to 3g so an aborted launcher re-runs upgrade detection (#730).
362
450
  try {
363
451
  const cfDir = resolve(projectRoot, '.moflo');
364
452
  if (!existsSync(cfDir)) mkdirSync(cfDir, { recursive: true });
365
453
  writeFileSync(manifestPath, JSON.stringify(currentManifest, null, 2));
366
- writeFileSync(versionStampPath, installedVersion);
454
+ pendingVersionStampWrite = { path: versionStampPath, version: installedVersion };
367
455
  } catch {}
368
456
  }
369
457
  }
@@ -665,37 +753,83 @@ try {
665
753
  } catch { /* writing the failure itself must not throw */ }
666
754
  }
667
755
 
668
- // ── 3f. Persist upgrade notice for statusline (#636) ────────────────────────
669
- // When this session bumped the version stamp or repaired manifest drift, write
670
- // a transient `.moflo/upgrade-notice.json` so the statusline can show a
671
- // leading user-visible segment (`📦 vX → vY (N changes)`). The file expires
672
- // via TTL — the statusline silently ignores it after `expiresAt`. The next
673
- // upgrade overwrites the file, so no manual cleanup is needed.
674
- //
675
- // Stdout emits go to Claude's `additionalContext` (collapsed by default in
676
- // the system reminder); this notice surfaces the same information directly
677
- // in the user's UI. Together they close the "Claude appears hung and CPU
678
- // spikes" gap from #629 — the user always knows when an upgrade procedure
679
- // just ran.
680
- const UPGRADE_NOTICE_TTL_MS = 60 * 60 * 1000; // 1 hour
681
- if (upgradeNoticeContext && mutationCount > 0) {
756
+ // ── 3e-728. Hard-delete leftover soft-delete tombstones (#728) ─────────────
757
+ // Soft-delete was retired in story #728 — `status='deleted'` rows are now
758
+ // unrecoverable bloat from prior moflo versions. Purge any stragglers and
759
+ // VACUUM. Idempotent: returns `purged: 0` once the DB is clean. Runs BEFORE
760
+ // background MCP/daemon spawn (per #727's clobber-hazard analysis) so the
761
+ // foreground sql.js write isn't overwritten by a concurrent flush.
762
+ try {
763
+ const purgePaths = [
764
+ resolve(projectRoot, 'node_modules/moflo/dist/src/cli/services/soft-delete-purge.js'),
765
+ resolve(projectRoot, 'dist/src/cli/services/soft-delete-purge.js'),
766
+ ];
767
+ const purgePath = purgePaths.find((p) => existsSync(p));
768
+ if (purgePath) {
769
+ const { purgeSoftDeletedEntries } = await import(`file://${purgePath.replace(/\\/g, '/')}`);
770
+ const result = await purgeSoftDeletedEntries();
771
+ if (result?.purged > 0) {
772
+ emitMutation(
773
+ 'reclaimed soft-deleted memory entries',
774
+ `${plural(result.purged, 'tombstone')} purged + VACUUM`,
775
+ );
776
+ }
777
+ }
778
+ } catch (err) {
779
+ // Non-fatal — leftover tombstones just sit until the next session retries.
682
780
  try {
683
- const cfDir = resolve(projectRoot, '.moflo');
684
- if (!existsSync(cfDir)) mkdirSync(cfDir, { recursive: true });
685
- const now = Date.now();
686
- const notice = {
687
- kind: upgradeNoticeContext.kind,
688
- from: upgradeNoticeContext.from,
689
- to: upgradeNoticeContext.to,
690
- at: new Date(now).toISOString(),
691
- expiresAt: new Date(now + UPGRADE_NOTICE_TTL_MS).toISOString(),
692
- changes: mutationCount,
693
- };
694
- writeFileSync(
695
- resolve(cfDir, 'upgrade-notice.json'),
696
- JSON.stringify(notice, null, 2),
697
- );
698
- } catch { /* non-fatal — statusline just won't show the segment */ }
781
+ const msg = err && err.message ? err.message : String(err);
782
+ process.stderr.write(`soft-delete purge skipped: ${msg}\n`);
783
+ } catch { /* writing the failure itself must not throw */ }
784
+ }
785
+
786
+ // ── 3e-729. Purge ephemeral-namespace rows (#729) ───────────────────────────
787
+ // Four namespaces (hive-mind, tasklist, epic-state, test-bridge-fix) store
788
+ // internal moflo run-tracking — never user knowledge — and were polluting the
789
+ // embeddings index. Going forward, writes to those namespaces skip embedding
790
+ // generation (see EPHEMERAL_NAMESPACES in memory/bridge-embedder.ts); existing
791
+ // rows from prior versions get hard-deleted here. Idempotent — returns
792
+ // `purged: 0` once the DB is clean. Runs BEFORE background MCP/daemon spawn
793
+ // so the foreground sql.js write isn't overwritten by a concurrent flush.
794
+ try {
795
+ const purgePaths = [
796
+ resolve(projectRoot, 'node_modules/moflo/dist/src/cli/services/ephemeral-namespace-purge.js'),
797
+ resolve(projectRoot, 'dist/src/cli/services/ephemeral-namespace-purge.js'),
798
+ ];
799
+ const purgePath = purgePaths.find((p) => existsSync(p));
800
+ if (purgePath) {
801
+ const { purgeEphemeralNamespaces } = await import(`file://${purgePath.replace(/\\/g, '/')}`);
802
+ const result = await purgeEphemeralNamespaces();
803
+ if (result?.purged > 0) {
804
+ emitMutation(
805
+ 'pruned ephemeral namespace rows',
806
+ `${plural(result.purged, 'row')} from internal run-tracking`,
807
+ );
808
+ }
809
+ }
810
+ } catch (err) {
811
+ // Non-fatal — leftover rows just sit until the next session retries.
812
+ try {
813
+ const msg = err && err.message ? err.message : String(err);
814
+ process.stderr.write(`ephemeral-namespace purge skipped: ${msg}\n`);
815
+ } catch { /* writing the failure itself must not throw */ }
816
+ }
817
+
818
+ // ── 3f. Clear the in-progress upgrade notice (#636, #738) ───────────────────
819
+ // Upgrade work is finished; drop the notice so the statusline badge disappears
820
+ // immediately. Change summary is already in stdout emits (Claude's
821
+ // `additionalContext`); a lingering "you upgraded a while ago" badge is noise.
822
+ if (upgradeNoticeContext) {
823
+ clearUpgradeNotice();
824
+ }
825
+
826
+ // ── 3g. Commit deferred version stamp (#730) ────────────────────────────────
827
+ // Written LAST so an abort above leaves the stamp unchanged and the next
828
+ // launcher re-detects the upgrade.
829
+ if (pendingVersionStampWrite) {
830
+ try {
831
+ writeFileSync(pendingVersionStampWrite.path, pendingVersionStampWrite.version);
832
+ } catch { /* non-fatal — next launcher re-detects + retries the upgrade */ }
699
833
  }
700
834
 
701
835
  // Bypasses emitMutation — framing, not a mutation, so it must not inflate the count.
@@ -1,8 +1,10 @@
1
1
  /**
2
2
  * Delete Memory Command - Application Layer (CQRS)
3
3
  *
4
- * Command for deleting memory entries.
5
- * Supports soft delete and hard delete.
4
+ * Hard-deletes memory entries. Soft-delete was retired in story #728 because
5
+ * tombstones were write-only (no code path ever restored a `status='deleted'`
6
+ * row) and bloated the DB indefinitely. The legitimate "keep but hide" case
7
+ * is `archived` — see `MemoryEntry.archive()` / `restore()`.
6
8
  *
7
9
  * @module v3/memory/application/commands
8
10
  */
@@ -25,42 +27,10 @@ export class DeleteMemoryCommandHandler {
25
27
  entryId = entry?.id;
26
28
  }
27
29
  if (!entryId) {
28
- return {
29
- success: false,
30
- deleted: false,
31
- wasHardDelete: false,
32
- };
30
+ return { success: false, deleted: false };
33
31
  }
34
- if (input.hardDelete) {
35
- // Hard delete - remove from database
36
- const deleted = await this.repository.delete(entryId);
37
- return {
38
- success: true,
39
- deleted,
40
- entryId,
41
- wasHardDelete: true,
42
- };
43
- }
44
- else {
45
- // Soft delete - mark as deleted
46
- const entry = await this.repository.findById(entryId);
47
- if (entry) {
48
- entry.delete();
49
- await this.repository.save(entry);
50
- return {
51
- success: true,
52
- deleted: true,
53
- entryId,
54
- wasHardDelete: false,
55
- };
56
- }
57
- }
58
- return {
59
- success: false,
60
- deleted: false,
61
- entryId,
62
- wasHardDelete: false,
63
- };
32
+ const deleted = await this.repository.delete(entryId);
33
+ return { success: true, deleted, entryId };
64
34
  }
65
35
  }
66
36
  /**
@@ -83,47 +53,15 @@ export class BulkDeleteMemoryCommandHandler {
83
53
  .map((e) => e.id);
84
54
  }
85
55
  if (idsToDelete.length === 0) {
86
- return {
87
- success: true,
88
- deletedCount: 0,
89
- failedCount: 0,
90
- errors: [],
91
- };
92
- }
93
- if (input.hardDelete) {
94
- const result = await this.repository.deleteMany(idsToDelete);
95
- return {
96
- success: result.failed === 0,
97
- deletedCount: result.success,
98
- failedCount: result.failed,
99
- errors: result.errors,
100
- };
101
- }
102
- else {
103
- // Soft delete
104
- const entries = await this.repository.findByIds(idsToDelete);
105
- let deletedCount = 0;
106
- const errors = [];
107
- for (const entry of entries) {
108
- try {
109
- entry.delete();
110
- await this.repository.save(entry);
111
- deletedCount++;
112
- }
113
- catch (error) {
114
- errors.push({
115
- id: entry.id,
116
- error: error instanceof Error ? error.message : 'Unknown error',
117
- });
118
- }
119
- }
120
- return {
121
- success: errors.length === 0,
122
- deletedCount,
123
- failedCount: errors.length,
124
- errors,
125
- };
56
+ return { success: true, deletedCount: 0, failedCount: 0, errors: [] };
126
57
  }
58
+ const result = await this.repository.deleteMany(idsToDelete);
59
+ return {
60
+ success: result.failed === 0,
61
+ deletedCount: result.success,
62
+ failedCount: result.failed,
63
+ errors: result.errors,
64
+ };
127
65
  }
128
66
  }
129
67
  //# sourceMappingURL=delete-memory.command.js.map
@@ -101,25 +101,24 @@ export class MemoryApplicationService {
101
101
  /**
102
102
  * Delete a memory entry by namespace and key
103
103
  */
104
- async delete(namespace, key, hardDelete = false) {
105
- const result = await this.deleteHandler.execute({ namespace, key, hardDelete });
104
+ async delete(namespace, key) {
105
+ const result = await this.deleteHandler.execute({ namespace, key });
106
106
  return result.deleted;
107
107
  }
108
108
  /**
109
109
  * Delete a memory entry by ID
110
110
  */
111
- async deleteById(id, hardDelete = false) {
112
- const result = await this.deleteHandler.execute({ id, hardDelete });
111
+ async deleteById(id) {
112
+ const result = await this.deleteHandler.execute({ id });
113
113
  return result.deleted;
114
114
  }
115
115
  /**
116
116
  * Delete all entries in a namespace
117
117
  */
118
- async deleteNamespace(namespace, hardDelete = false) {
118
+ async deleteNamespace(namespace) {
119
119
  const entries = await this.repository.findByNamespace(namespace);
120
120
  const result = await this.bulkDeleteHandler.execute({
121
121
  ids: entries.map((e) => e.id),
122
- hardDelete,
123
122
  });
124
123
  return result.deletedCount;
125
124
  }
@@ -222,7 +222,7 @@ const MEMORY_ENTRIES_DDL = `CREATE TABLE IF NOT EXISTS memory_entries (
222
222
  expires_at INTEGER,
223
223
  last_accessed_at INTEGER,
224
224
  access_count INTEGER DEFAULT 0,
225
- status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived', 'deleted')),
225
+ status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived')),
226
226
  UNIQUE(namespace, key)
227
227
  )`;
228
228
  export function getDb(registry) {
@@ -42,6 +42,27 @@ export const EMBEDDING_MODEL_OPT_OUT = 'none';
42
42
  * #651 doctor check can detect pre-fix residue without re-typing the literal.
43
43
  */
44
44
  export const EMBEDDING_MODEL_LEGACY_DEFAULT = 'local';
45
+ /**
46
+ * Namespaces that store internal moflo run-tracking, never user knowledge.
47
+ * Writes here skip embedding generation entirely — both `embedding` and
48
+ * `embedding_model` land as NULL, distinct from the opt-out path which still
49
+ * tags rows with `'none'`. Existing rows in these namespaces are hard-deleted
50
+ * on upgrade by `services/ephemeral-namespace-purge.ts`.
51
+ *
52
+ * Members:
53
+ * - `hive-mind` — MCP broadcast traffic (msg:*, agent_join, consensus_propose)
54
+ * - `tasklist` — Spell run records (sp-*) written by spells/core/runner.ts + daemon-dashboard.ts
55
+ * - `epic-state` — Epic progress (epic-N, story-M) written by commands/epic.ts
56
+ * - `test-bridge-fix` — Single 2026-04-23 row left over from a one-off test
57
+ *
58
+ * See story #729 for the source-trace and rationale.
59
+ */
60
+ export const EPHEMERAL_NAMESPACES = new Set([
61
+ 'hive-mind',
62
+ 'tasklist',
63
+ 'epic-state',
64
+ 'test-bridge-fix',
65
+ ]);
45
66
  let cachedEmbedder = null;
46
67
  let testOverride = null;
47
68
  class LazyFastembedBridgeEmbedder {
@@ -83,7 +104,24 @@ class LazyFastembedBridgeEmbedder {
83
104
  return vector;
84
105
  }
85
106
  }
86
- export async function resolveBridgeEmbedding(value, precomputed, generateEmbeddingFlag) {
107
+ /**
108
+ * Build the `embedding` field of a store-entry response from a resolved
109
+ * embedding. Returns `undefined` for skip paths (opt-out and ephemeral) so
110
+ * the caller can pass it straight through.
111
+ */
112
+ export function embeddingResponseFrom(resolved) {
113
+ // json !== null narrows to the embedded variant where model is `string`.
114
+ return resolved.json !== null
115
+ ? { dimensions: resolved.dimensions, model: resolved.model }
116
+ : undefined;
117
+ }
118
+ export async function resolveBridgeEmbedding(value, precomputed, generateEmbeddingFlag, namespace) {
119
+ // Ephemeral namespaces (run-tracking, never user knowledge) skip embeddings
120
+ // unconditionally — even precomputed vectors are dropped. Result row has
121
+ // `embedding IS NULL` and `embedding_model IS NULL`. See #729.
122
+ if (namespace && EPHEMERAL_NAMESPACES.has(namespace)) {
123
+ return { ok: true, json: null, dimensions: 0, model: null };
124
+ }
87
125
  const wantsEmbedding = generateEmbeddingFlag !== false && value.length > 0;
88
126
  if (!wantsEmbedding) {
89
127
  return { ok: true, json: null, dimensions: 0, model: EMBEDDING_MODEL_OPT_OUT };
@@ -8,7 +8,7 @@
8
8
  * @module v3/cli/bridge-entries
9
9
  */
10
10
  import { cosineSim, execRows, generateId, persistBridgeDb, refreshVectorStatsCache, withDb } from './bridge-core.js';
11
- import { resolveBridgeEmbedding } from './bridge-embedder.js';
11
+ import { embeddingResponseFrom, resolveBridgeEmbedding } from './bridge-embedder.js';
12
12
  function makeEntryCacheKey(namespace, key) {
13
13
  const safeNs = String(namespace).replace(/:/g, '_');
14
14
  const safeKey = String(key).replace(/:/g, '_');
@@ -98,13 +98,12 @@ export async function bridgeStoreEntry(options) {
98
98
  if (!guardResult.allowed) {
99
99
  return { success: false, id, error: `MutationGuard rejected: ${guardResult.reason}` };
100
100
  }
101
- const resolved = await resolveBridgeEmbedding(value, options.precomputedEmbedding, options.generateEmbeddingFlag);
101
+ const resolved = await resolveBridgeEmbedding(value, options.precomputedEmbedding, options.generateEmbeddingFlag, namespace);
102
102
  if (!resolved.ok) {
103
103
  return { success: false, id, error: `embedding generation failed: ${resolved.reason}` };
104
104
  }
105
- const embeddingJson = resolved.json;
106
- const dimensions = resolved.dimensions;
107
- const model = resolved.model;
105
+ const { json: embeddingJson, dimensions, model } = resolved;
106
+ const embeddingResponse = embeddingResponseFrom(resolved);
108
107
  const insertSql = options.upsert
109
108
  ? `INSERT OR REPLACE INTO memory_entries (
110
109
  id, key, namespace, content, type,
@@ -135,7 +134,7 @@ export async function bridgeStoreEntry(options) {
135
134
  return {
136
135
  success: true,
137
136
  id,
138
- embedding: embeddingJson ? { dimensions, model } : undefined,
137
+ embedding: embeddingResponse,
139
138
  guarded: true,
140
139
  cached: true,
141
140
  attested: true,
@@ -175,12 +174,13 @@ export async function bridgeStoreEntries(items, dbPath) {
175
174
  const { key, value, namespace = 'default', tags = [], ttl } = opts;
176
175
  const id = generateId('entry');
177
176
  const now = Date.now();
178
- const resolved = await resolveBridgeEmbedding(value, opts.precomputedEmbedding, opts.generateEmbeddingFlag);
177
+ const resolved = await resolveBridgeEmbedding(value, opts.precomputedEmbedding, opts.generateEmbeddingFlag, namespace);
179
178
  if (!resolved.ok) {
180
179
  results.push({ success: false, id, error: `embedding generation failed: ${resolved.reason}` });
181
180
  continue;
182
181
  }
183
182
  const { json: embeddingJson, dimensions, model } = resolved;
183
+ const embeddingResponse = embeddingResponseFrom(resolved);
184
184
  const insertSql = opts.upsert
185
185
  ? `INSERT OR REPLACE INTO memory_entries (
186
186
  id, key, namespace, content, type,
@@ -217,7 +217,7 @@ export async function bridgeStoreEntries(items, dbPath) {
217
217
  results.push({
218
218
  success: true,
219
219
  id,
220
- embedding: embeddingJson ? { dimensions, model } : undefined,
220
+ embedding: embeddingResponse,
221
221
  });
222
222
  }
223
223
  // Cache writes and attestation logs are independent post-hoc bookkeeping —
@@ -436,10 +436,9 @@ export async function bridgeDeleteEntry(options) {
436
436
  let changes = 0;
437
437
  try {
438
438
  ctx.db.prepare(`
439
- UPDATE memory_entries
440
- SET status = 'deleted', updated_at = ?
439
+ DELETE FROM memory_entries
441
440
  WHERE key = ? AND namespace = ? AND status = 'active'
442
- `).run([Date.now(), key, namespace]);
441
+ `).run([key, namespace]);
443
442
  // sql.js Statement.run returns true/false, not { changes }. Use
444
443
  // db.getRowsModified() to read the row count from the last statement.
445
444
  changes = ctx.db.getRowsModified?.() ?? 0;
@@ -150,13 +150,6 @@ export class MemoryEntry {
150
150
  this._updatedAt = new Date();
151
151
  }
152
152
  }
153
- /**
154
- * Mark as deleted (soft delete)
155
- */
156
- delete() {
157
- this._status = 'deleted';
158
- this._updatedAt = new Date();
159
- }
160
153
  /**
161
154
  * Check if memory has expired based on TTL
162
155
  */
@@ -294,7 +294,6 @@ export class HybridMemoryRepository {
294
294
  let totalSize = 0;
295
295
  let activeCount = 0;
296
296
  let archivedCount = 0;
297
- let deletedCount = 0;
298
297
  for (const entry of entries) {
299
298
  // Count by namespace
300
299
  entriesByNamespace[entry.namespace] = (entriesByNamespace[entry.namespace] ?? 0) + 1;
@@ -311,9 +310,6 @@ export class HybridMemoryRepository {
311
310
  case 'archived':
312
311
  archivedCount++;
313
312
  break;
314
- case 'deleted':
315
- deletedCount++;
316
- break;
317
313
  }
318
314
  }
319
315
  // Find hottest and coldest
@@ -324,7 +320,6 @@ export class HybridMemoryRepository {
324
320
  totalEntries: entries.length,
325
321
  activeEntries: activeCount,
326
322
  archivedEntries: archivedCount,
327
- deletedEntries: deletedCount,
328
323
  totalSize,
329
324
  entriesByNamespace,
330
325
  entriesByType,
@@ -14,7 +14,7 @@ import { mofloImport } from '../services/moflo-require.js';
14
14
  import { atomicWriteFileSync } from '../services/atomic-file-write.js';
15
15
  import { formatEmbeddingError } from './embedding-errors.js';
16
16
  import { HnswLite } from './hnsw-lite.js';
17
- import { EMBEDDING_MODEL_OPT_OUT, getBridgeEmbedder } from './bridge-embedder.js';
17
+ import { EMBEDDING_MODEL_OPT_OUT, EPHEMERAL_NAMESPACES, getBridgeEmbedder } from './bridge-embedder.js';
18
18
  import { toFloat32 } from './controllers/_shared.js';
19
19
  import { writeVectorStatsJson } from './bridge-core.js';
20
20
  import { MOFLO_DIR, hnswIndexPath, legacyMemoryDbPath, memoryDbPath, } from '../services/moflo-paths.js';
@@ -104,7 +104,7 @@ CREATE TABLE IF NOT EXISTS memory_entries (
104
104
  access_count INTEGER DEFAULT 0,
105
105
 
106
106
  -- Status
107
- status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived', 'deleted')),
107
+ status TEXT DEFAULT 'active' CHECK(status IN ('active', 'archived')),
108
108
 
109
109
  UNIQUE(namespace, key)
110
110
  );
@@ -1592,10 +1592,15 @@ export async function storeEntry(options) {
1592
1592
  // success:false rather than inserting a null-embedded row. Opt-out rows
1593
1593
  // (generateEmbeddingFlag=false) are tagged EMBEDDING_MODEL_OPT_OUT — see
1594
1594
  // the constant's docstring in bridge-embedder.ts for the rationale.
1595
+ // Ephemeral namespaces (#729) skip embedding entirely AND tag model NULL.
1595
1596
  let embeddingJson = null;
1596
1597
  let embeddingDimensions = null;
1597
1598
  let embeddingModel = EMBEDDING_MODEL_OPT_OUT;
1598
- if (generateEmbeddingFlag && value.length > 0) {
1599
+ const isEphemeralNs = EPHEMERAL_NAMESPACES.has(namespace);
1600
+ if (isEphemeralNs) {
1601
+ embeddingModel = null;
1602
+ }
1603
+ else if (generateEmbeddingFlag && value.length > 0) {
1599
1604
  if (options.precomputedEmbedding) {
1600
1605
  // Tag with the bridge embedder's canonical model so precomputed rows
1601
1606
  // are indistinguishable from live single-embed rows downstream.
@@ -2019,14 +2024,10 @@ export async function deleteEntry(options) {
2019
2024
  error: `Key '${key}' not found in namespace '${namespace}'`
2020
2025
  };
2021
2026
  }
2022
- // Delete the entry (soft delete by setting status to 'deleted')
2023
- db.run(`
2024
- UPDATE memory_entries
2025
- SET status = 'deleted', updated_at = strftime('%s', 'now') * 1000
2026
- WHERE key = '${key.replace(/'/g, "''")}'
2027
- AND namespace = '${namespace.replace(/'/g, "''")}'
2028
- AND status = 'active'
2029
- `);
2027
+ // Hard-delete the entry. Soft-delete was retired in story #728: tombstones
2028
+ // were write-only (no code ever restored from status='deleted') and bloated
2029
+ // the DB indefinitely.
2030
+ db.run(`DELETE FROM memory_entries WHERE key = ? AND namespace = ? AND status = 'active'`, [key, namespace]);
2030
2031
  // Get remaining count
2031
2032
  const countResult = db.exec(`SELECT COUNT(*) FROM memory_entries WHERE status = 'active'`);
2032
2033
  const remainingEntries = countResult[0]?.values?.[0]?.[0] || 0;
@@ -0,0 +1,75 @@
1
+ /**
2
+ * Idempotent ephemeral-namespace purge for moflo's memory DB (`.moflo/moflo.db`).
3
+ *
4
+ * Story #729 retired four namespaces from the persistent memory layer because
5
+ * they store internal moflo run-tracking — not user knowledge — and embedding
6
+ * them polluted the search index:
7
+ *
8
+ * - `hive-mind` (MCP broadcast traffic)
9
+ * - `tasklist` (spell run records)
10
+ * - `epic-state` (epic progress tracking)
11
+ * - `test-bridge-fix` (single-row leftover from a one-off test)
12
+ *
13
+ * This service hard-deletes any rows in those namespaces left over from prior
14
+ * moflo versions, then VACUUMs to reclaim disk. Future writes to these
15
+ * namespaces still land in the DB — but skip embedding generation entirely
16
+ * (see {@link EPHEMERAL_NAMESPACES} in `memory/bridge-embedder.ts`).
17
+ *
18
+ * Lives in `services/` so it has no dependency on the CLI command machinery.
19
+ * That lets `bin/session-start-launcher.mjs` dynamic-import it and run the
20
+ * purge in foreground BEFORE long-lived sql.js consumers (MCP server, daemon)
21
+ * open the DB — sql.js dumps the whole snapshot on every flush and would
22
+ * otherwise clobber our cleanup (see #727's clobber-hazard analysis).
23
+ *
24
+ * @module cli/services/ephemeral-namespace-purge
25
+ */
26
+ /* eslint-disable @typescript-eslint/no-explicit-any */
27
+ import { EPHEMERAL_NAMESPACES } from '../memory/bridge-embedder.js';
28
+ import { mofloImport } from './moflo-require.js';
29
+ import { atomicWriteFileSync } from './atomic-file-write.js';
30
+ import { memoryDbPath } from './moflo-paths.js';
31
+ /**
32
+ * Hard-delete every row whose namespace is in {@link EPHEMERAL_NAMESPACES}
33
+ * and VACUUM. Returns `{ purged: 0 }` on the happy path: no DB, sql.js
34
+ * unavailable, schema lacks `memory_entries`, or no ephemeral rows present.
35
+ * Errors propagate to the caller (the launcher absorbs them so a failed
36
+ * purge never blocks session start).
37
+ */
38
+ export async function purgeEphemeralNamespaces(options = {}) {
39
+ const fs = await import('fs');
40
+ const path = await import('path');
41
+ const dbPath = path.resolve(options.dbPath ?? memoryDbPath(process.cwd()));
42
+ if (!fs.existsSync(dbPath))
43
+ return { purged: 0 };
44
+ const initSqlJs = (await mofloImport('sql.js'))?.default;
45
+ if (!initSqlJs)
46
+ return { purged: 0 };
47
+ const SQL = await initSqlJs();
48
+ const buffer = fs.readFileSync(dbPath);
49
+ const db = new SQL.Database(buffer);
50
+ try {
51
+ // Probe: schema must carry `memory_entries`. Older / non-moflo DBs are
52
+ // a no-op so we don't VACUUM unrelated SQLite files.
53
+ const probe = db.exec(`SELECT name FROM sqlite_master WHERE type='table' AND name='memory_entries' LIMIT 1`);
54
+ if (!probe[0]?.values?.[0])
55
+ return { purged: 0 };
56
+ const namespaces = Array.from(EPHEMERAL_NAMESPACES);
57
+ const placeholders = namespaces.map(() => '?').join(', ');
58
+ // Single-scan delete + rowsModified: skips a redundant COUNT pass on dirty
59
+ // DBs and avoids the prepare/bind/step/free overhead on clean ones. VACUUM
60
+ // (and the disk write) only run when something was actually deleted.
61
+ db.run(`DELETE FROM memory_entries WHERE namespace IN (${placeholders})`, namespaces);
62
+ const purged = db.getRowsModified?.() ?? 0;
63
+ if (purged === 0)
64
+ return { purged: 0 };
65
+ // VACUUM has to run outside any open transaction; sql.js auto-commits
66
+ // each `db.run`, so this is safe to chain.
67
+ db.run('VACUUM');
68
+ atomicWriteFileSync(dbPath, db.export());
69
+ return { purged };
70
+ }
71
+ finally {
72
+ db.close();
73
+ }
74
+ }
75
+ //# sourceMappingURL=ephemeral-namespace-purge.js.map
@@ -0,0 +1,66 @@
1
+ /**
2
+ * Idempotent soft-delete purge for moflo's memory DB (`.moflo/moflo.db`).
3
+ *
4
+ * Story #728 retired soft-delete from the memory layer: tombstones were
5
+ * write-only (no code path ever restored a `status='deleted'` row) and bloated
6
+ * the DB indefinitely. This service hard-deletes any leftover `status='deleted'`
7
+ * rows from prior moflo versions, then VACUUMs to reclaim disk. `archived`
8
+ * rows are NOT touched — they are the legitimate "keep but hide" state and
9
+ * have a working `restore()` path.
10
+ *
11
+ * Lives in `services/` so it has no dependency on the CLI command machinery.
12
+ * That lets `bin/session-start-launcher.mjs` dynamic-import it and run the
13
+ * purge in foreground BEFORE long-lived sql.js consumers (MCP server, daemon)
14
+ * open the DB — sql.js dumps the whole snapshot on every flush and would
15
+ * otherwise clobber our cleanup.
16
+ *
17
+ * @module cli/services/soft-delete-purge
18
+ */
19
+ /* eslint-disable @typescript-eslint/no-explicit-any */
20
+ import { mofloImport } from './moflo-require.js';
21
+ import { atomicWriteFileSync } from './atomic-file-write.js';
22
+ import { memoryDbPath } from './moflo-paths.js';
23
+ /**
24
+ * Hard-delete all `status='deleted'` rows from the memory DB and VACUUM.
25
+ *
26
+ * Returns `{ purged: 0 }` for the happy path: no DB, sql.js unavailable,
27
+ * schema lacks `memory_entries`, or no tombstones present. Errors propagate
28
+ * to the caller (the launcher absorbs them so a failed purge never blocks
29
+ * session start).
30
+ */
31
+ export async function purgeSoftDeletedEntries(options = {}) {
32
+ const fs = await import('fs');
33
+ const path = await import('path');
34
+ const dbPath = path.resolve(options.dbPath ?? memoryDbPath(process.cwd()));
35
+ if (!fs.existsSync(dbPath))
36
+ return { purged: 0 };
37
+ const initSqlJs = (await mofloImport('sql.js'))?.default;
38
+ if (!initSqlJs)
39
+ return { purged: 0 };
40
+ const SQL = await initSqlJs();
41
+ const buffer = fs.readFileSync(dbPath);
42
+ const db = new SQL.Database(buffer);
43
+ try {
44
+ // Probe: schema must carry `memory_entries`. Older / non-moflo DBs are
45
+ // a no-op so we don't VACUUM unrelated SQLite files.
46
+ const probe = db.exec(`SELECT name FROM sqlite_master WHERE type='table' AND name='memory_entries' LIMIT 1`);
47
+ if (!probe[0]?.values?.[0])
48
+ return { purged: 0 };
49
+ // Count first — VACUUM is expensive (it rewrites the whole file), so we
50
+ // skip it entirely when there's nothing to reclaim.
51
+ const countRows = db.exec(`SELECT COUNT(*) FROM memory_entries WHERE status = 'deleted'`);
52
+ const purged = Number(countRows[0]?.values?.[0]?.[0] ?? 0);
53
+ if (purged === 0)
54
+ return { purged: 0 };
55
+ db.run(`DELETE FROM memory_entries WHERE status = 'deleted'`);
56
+ // VACUUM has to run outside any open transaction; sql.js auto-commits
57
+ // each `db.run`, so this is safe to chain.
58
+ db.run('VACUUM');
59
+ atomicWriteFileSync(dbPath, db.export());
60
+ return { purged };
61
+ }
62
+ finally {
63
+ db.close();
64
+ }
65
+ }
66
+ //# sourceMappingURL=soft-delete-purge.js.map
@@ -23,12 +23,20 @@ export function getGCSConfig() {
23
23
  prefix: process.env.GCS_PREFIX || 'claude-flow-patterns',
24
24
  };
25
25
  }
26
+ // Bound the subprocess so a slow / hung gcloud spawn doesn't stretch test
27
+ // timeouts (or session-start probes) indefinitely. gcloud responds in ~100ms
28
+ // when present; 5s is generous for a contended CI runner.
29
+ const GCLOUD_PROBE_TIMEOUT_MS = 5_000;
26
30
  /**
27
31
  * Check if gcloud CLI is available
28
32
  */
29
33
  export function isGCloudAvailable() {
30
34
  try {
31
- execSync('gcloud --version', { stdio: 'pipe', windowsHide: true });
35
+ execSync('gcloud --version', {
36
+ stdio: 'pipe',
37
+ windowsHide: true,
38
+ timeout: GCLOUD_PROBE_TIMEOUT_MS,
39
+ });
32
40
  return true;
33
41
  }
34
42
  catch {
@@ -40,7 +48,11 @@ export function isGCloudAvailable() {
40
48
  */
41
49
  export async function isGCloudAuthenticated() {
42
50
  try {
43
- execSync('gcloud auth print-access-token', { stdio: 'pipe', windowsHide: true });
51
+ execSync('gcloud auth print-access-token', {
52
+ stdio: 'pipe',
53
+ windowsHide: true,
54
+ timeout: GCLOUD_PROBE_TIMEOUT_MS,
55
+ });
44
56
  return true;
45
57
  }
46
58
  catch {
@@ -2,5 +2,5 @@
2
2
  * Auto-generated by build. Do not edit manually.
3
3
  * Source of truth: root package.json → scripts/sync-version.mjs
4
4
  */
5
- export const VERSION = '4.9.0-rc.2';
5
+ export const VERSION = '4.9.0-rc.4';
6
6
  //# sourceMappingURL=version.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "moflo",
3
- "version": "4.9.0-rc.2",
3
+ "version": "4.9.0-rc.4",
4
4
  "description": "MoFlo — AI agent orchestration for Claude Code. Forked from ruflo/claude-flow with patches applied to source, plus feature-level orchestration.",
5
5
  "main": "dist/src/cli/index.js",
6
6
  "type": "module",
@@ -78,7 +78,7 @@
78
78
  "@typescript-eslint/eslint-plugin": "^7.18.0",
79
79
  "@typescript-eslint/parser": "^7.18.0",
80
80
  "eslint": "^8.0.0",
81
- "moflo": "^4.9.0-rc.1",
81
+ "moflo": "^4.9.0-rc.3",
82
82
  "tsx": "^4.21.0",
83
83
  "typescript": "^5.9.3",
84
84
  "vitest": "^4.0.0"