moflo 4.9.26 → 4.9.27

This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -447,6 +447,24 @@ try {
      'upgraded',
      cachedVersion ? `${cachedVersion} → ${installedVersion}` : `installed ${installedVersion}`,
    );
+   // #981 / #987 — one-time architecture notice. Single-writer routing means
+   // the daemon must be running for safe multi-process writes. The sentinel
+   // file ensures the notice fires once per consumer (across upgrades), not
+   // on every version bump. `flo doctor` surfaces the runtime warning when
+   // the daemon is disabled with MCP configured.
+   try {
+     const noticeSentinel = join(mofloDir(projectRoot), 'single-writer-notice-shown');
+     if (cachedVersion && !existsSync(noticeSentinel)) {
+       emitMutation(
+         'single-writer write architecture active',
+         'memory writes route through the daemon (#981) — keep daemon.auto_start: true to prevent multi-process clobber',
+       );
+       try {
+         mkdirSync(mofloDir(projectRoot), { recursive: true });
+         writeFileSync(noticeSentinel, new Date().toISOString());
+       } catch { /* sentinel is best-effort — re-emit next session if the write fails */ }
+     }
+   } catch { /* never block the upgrade flow on the notice */ }
  } else {
    upgradeNoticeContext = {
      kind: 'repair',
@@ -90,6 +90,16 @@ const startCommand = {
  }
  // Foreground mode: run in current process (blocks terminal)
  try {
+   // #981 — mark this process as the daemon BEFORE any storeEntry /
+   // deleteEntry call runs in this process. The routing preamble in
+   // memory-initializer reads `process.env.MOFLO_IS_DAEMON` per-call (not
+   // at module-load time) and skips routing when set, breaking the loop
+   // that would otherwise recurse: storeEntry → HTTP → daemon RPC →
+   // storeEntry → HTTP. Setting it here covers both direct `flo daemon
+   // start --foreground` and the background spawn (whose daemonEnv
+   // propagates this via process inheritance — see startBackgroundDaemon
+   // below).
+   process.env.MOFLO_IS_DAEMON = '1';
    // Acquire atomic daemon lock (prevents duplicate daemons).
    // Always acquire here — even when spawned as a child (CLAUDE_FLOW_DAEMON=1)
    // because on Windows the parent's child.pid is the shell PID (cmd.exe),
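For orientation, a minimal sketch of the per-call guard the comment above describes. The real preamble lives in memory-initializer and is not shown in this diff, so writeLocal and routeWriteToDaemon are illustrative stand-ins:

// Hypothetical shape of the #981 routing preamble (not the shipped code).
async function writeLocal(entry) { /* direct sql.js write */ }
async function routeWriteToDaemon(entry) { /* HTTP call to the daemon RPC */ }

export async function storeEntry(entry) {
  // Read per-call, not at module load: the daemon sets MOFLO_IS_DAEMON
  // after imports resolve, so a module-scope read would miss it.
  if (process.env.MOFLO_IS_DAEMON === '1') return writeLocal(entry);
  return routeWriteToDaemon(entry);
}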
@@ -327,6 +337,8 @@ async function startBackgroundDaemon(projectRoot, quiet, maxCpuLoad, minFreeMemo
  const daemonEnv = {
    ...process.env,
    CLAUDE_FLOW_DAEMON: '1',
+   // #981 — daemon process must skip its own write-routing client.
+   MOFLO_IS_DAEMON: '1',
    // Prevent macOS SIGHUP kill when terminal closes
    ...(process.platform === 'darwin' ? { NOHUP: '1' } : {}),
  };
@@ -131,17 +131,43 @@ export async function checkMemoryDatabase() {
    }
    return { name: 'Memory Database', status: 'warn', message: 'Not initialized', fix: 'claude-flow memory configure --backend hybrid' };
  }
- export async function checkMcpServers() {
-   const mcpConfigPaths = [
+ /**
+  * Standard MCP-config search paths: home (Claude Desktop on macOS/Linux),
+  * XDG config dir, project-local `.mcp.json`, and APPDATA on Windows.
+  *
+  * Shared by `checkMcpServers` (which inspects the FIRST config it finds and
+  * reports on flo presence) and `checkDaemonWriteRouting` (which COUNTS
+  * servers across all paths to detect the multi-process-clobber hazard).
+  */
+ function mcpConfigSearchPaths(cwd) {
+   return [
      join(os.homedir(), '.claude/claude_desktop_config.json'),
      join(os.homedir(), '.config/claude/mcp.json'),
-     '.mcp.json',
-     // Windows: Claude Desktop stores config under %APPDATA%\Claude\
+     join(cwd, '.mcp.json'),
      ...(process.platform === 'win32' && process.env.APPDATA
        ? [join(process.env.APPDATA, 'Claude', 'claude_desktop_config.json')]
        : []),
    ];
-   for (const configPath of mcpConfigPaths) {
+ }
+ /** Sum MCP servers across every reachable config. Malformed configs counted as 0. */
+ function countMcpServers(cwd) {
+   let total = 0;
+   for (const configPath of mcpConfigSearchPaths(cwd)) {
+     if (!existsSync(configPath))
+       continue;
+     try {
+       const content = JSON.parse(readFileSync(configPath, 'utf8'));
+       const servers = content.mcpServers || content.servers || {};
+       total += Object.keys(servers).length;
+     }
+     catch {
+       // Skip unreadable / malformed config — checkMcpServers reports it.
+     }
+   }
+   return total;
+ }
+ export async function checkMcpServers() {
+   for (const configPath of mcpConfigSearchPaths(process.cwd())) {
      if (existsSync(configPath)) {
        try {
          const content = JSON.parse(readFileSync(configPath, 'utf8'));
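A quick sanity check of the counting logic above, illustrative only: the real helper reads configs from disk, and the moflo server entry here is made up. Either mcpServers or servers is accepted as the top-level map:

const sample = { mcpServers: { moflo: { command: 'npx', args: ['moflo', 'mcp', 'start'] } } };
const servers = sample.mcpServers || sample.servers || {};
console.log(Object.keys(servers).length); // → 1: this config contributes 1 to the total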
@@ -202,6 +228,58 @@ export async function checkMofloYamlCompliance(cwd = process.cwd()) {
      fix: 'Restart Claude Code (yaml-upgrader auto-appends) or `npx moflo init --force`',
    };
  }
+ /**
+  * #981 / #987 — surfaces the single-writer-architecture safety net.
+  *
+  * When `daemon.auto_start: false` is set in moflo.yaml AND the consumer has
+  * an MCP server configured, every MCP-process write hits sql.js directly
+  * (no daemon-RPC routing). Pre-#981 multi-process clobber + reader-staleness
+  * hazards reappear in that configuration. Warn — never fail — because
+  * disabling the daemon is a legitimate consumer choice and the config
+  * itself isn't broken.
+  *
+  * Pass: daemon enabled (default) → routing protection active.
+  * Pass: daemon disabled but no MCP server detected → no multi-writer hazard.
+  * Warn: daemon disabled AND MCP server detected → hazard window open.
+  */
+ export async function checkDaemonWriteRouting(cwd = process.cwd()) {
+   const name = 'Daemon Write Routing';
+   let daemonEnabled = true; // default-on — matches moflo.yaml default
+   try {
+     const { loadMofloConfig } = await import('../config/moflo-config.js');
+     const config = loadMofloConfig(cwd);
+     daemonEnabled = config?.daemon?.auto_start !== false;
+   }
+   catch {
+     // Unreadable config — assume daemon-enabled and let other checks flag
+     // the config error.
+     daemonEnabled = true;
+   }
+   if (daemonEnabled) {
+     return {
+       name,
+       status: 'pass',
+       message: 'Daemon enabled — multi-process writes route through single writer (#981 protection active)',
+     };
+   }
+   // Daemon disabled — count MCP servers across every reachable config.
+   const mcpServerCount = countMcpServers(cwd);
+   if (mcpServerCount === 0) {
+     return {
+       name,
+       status: 'pass',
+       message: 'Daemon disabled and no MCP server configured — no multi-writer hazard',
+     };
+   }
+   return {
+     name,
+     status: 'warn',
+     message: `Daemon disabled (moflo.yaml) and ${mcpServerCount} MCP server(s) configured — ` +
+       `multi-process sql.js writes can clobber each other (#981). ` +
+       `Set daemon.auto_start: true to restore single-writer protection.`,
+     fix: 'Edit moflo.yaml: daemon.auto_start: true',
+   };
+ }
  export async function checkTestDirs() {
    const yamlPath = join(process.cwd(), 'moflo.yaml');
    if (!existsSync(yamlPath)) {
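Every doctor check resolves to the { name, status, message, fix? } shape seen here, so a minimal standalone consumer of the new check might look like the sketch below (the real rendering goes through formatCheck in doctor-render.js, which this diff doesn't show):

const result = await checkDaemonWriteRouting(process.cwd());
if (result.status !== 'pass') {
  console.warn(`${result.name}: ${result.message}`);
  if (result.fix) console.warn(`  fix: ${result.fix}`);
}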
@@ -9,13 +9,19 @@ import { checkEmbeddingHygiene } from './doctor-embedding-hygiene.js';
  import { checkSwarmFunctional, checkHiveMindFunctional, } from './doctor-checks-swarm.js';
  import { checkMemoryAccessFunctional } from './doctor-checks-memory-access.js';
  import { checkBuildTools, checkClaudeCode, checkDiskSpace, checkGit, checkGitRepo, checkNodeVersion, checkNpmVersion, } from './doctor-checks-runtime.js';
- import { checkConfigFile, checkDaemonStatus, checkMcpServers, checkMemoryDatabase, checkMofloYamlCompliance, checkStatusLine, checkTestDirs, } from './doctor-checks-config.js';
+ import { checkConfigFile, checkDaemonStatus, checkDaemonWriteRouting, checkMcpServers, checkMemoryDatabase, checkMofloYamlCompliance, checkStatusLine, checkTestDirs, } from './doctor-checks-config.js';
  import { checkSpellEngine, checkSandboxTier } from './doctor-checks-platform.js';
  import { checkEmbeddings, checkSemanticQuality, } from './doctor-checks-memory.js';
  import { checkIntelligence } from './doctor-checks-intelligence.js';
  import { checkVersionFreshness } from './doctor-version.js';
  import { checkZombieProcesses } from './doctor-zombies.js';
- /** Order matters — top entries surface first under the spinner. */
+ /** Order matters — top entries surface first under the spinner.
+  * `checkZombieProcesses` is intentionally NOT in this list — it must run AFTER
+  * the parallel batch settles (see `zombieScanCheck` below and #992). Otherwise
+  * doctor's own subprocess probes (e.g. `checkBuildTools` running `npx tsc
+  * --version`) can be flagged as their own zombies on Windows, where the npx
+  * shim exits before its tsc child finishes.
+  */
  export const allChecks = [
    checkVersionFreshness,
    checkNodeVersion,
@@ -27,6 +33,7 @@ export const allChecks = [
    checkMofloYamlCompliance,
    checkStatusLine,
    checkDaemonStatus,
+   checkDaemonWriteRouting,
    checkMemoryDatabase,
    checkEmbeddings,
    checkEmbeddingHygiene,
@@ -37,7 +44,6 @@ export const allChecks = [
    checkSemanticQuality,
    checkIntelligence,
    checkSpellEngine,
-   checkZombieProcesses,
    checkSubagentHealth,
    checkSpellExecution,
    checkMcpToolInvocation,
@@ -56,6 +62,8 @@ export const allChecks = [
    checkMemoryAccessFunctional,
    checkSandboxTier,
  ];
+ /** Sequenced check that runs AFTER `allChecks` settles. Issue #992. */
+ export const zombieScanCheck = checkZombieProcesses;
  /** Lookup table for `flo doctor -c <name>`. */
  export const componentMap = {
    'version': checkVersionFreshness,
@@ -69,6 +77,8 @@ export const componentMap = {
    'statusline': checkStatusLine,
    'status-line': checkStatusLine,
    'daemon': checkDaemonStatus,
+   'daemon-write-routing': checkDaemonWriteRouting,
+   'write-routing': checkDaemonWriteRouting,
    'memory': checkMemoryDatabase,
    'embeddings': checkEmbeddings,
    'embedding-hygiene': checkEmbeddingHygiene,
@@ -11,7 +11,7 @@
   * Created with motailz.com
   */
  import { output } from '../output.js';
- import { allChecks, componentMap } from './doctor-registry.js';
+ import { allChecks, componentMap, zombieScanCheck } from './doctor-registry.js';
  import { emitJsonOutput, finalize, formatCheck, maybeAutoInstallClaudeCode, renderSummary, runAutoFix, runKillZombiesBanner, } from './doctor-render.js';
  import { checkEmbeddings } from './doctor-checks-memory.js';
  import { checkMofloYamlCompliance } from './doctor-checks-config.js';
@@ -157,6 +157,20 @@ export const doctorCommand = {
    let checkResults;
    try {
      checkResults = await Promise.allSettled(checksToRun.map(check => check()));
+     // Issue #992: the zombie scan must follow the parallel batch, not race it.
+     // Several parallel checks spawn short-lived subprocesses (notably
+     // `checkBuildTools` running `npx tsc --version`); on Windows the npx
+     // shim exits before its tsc child, leaving a transient orphan that
+     // the zombie scan would otherwise flag as a real leak. Skipped in
+     // single-component (`-c`) runs, since those are targeted diagnostics.
+     if (!component) {
+       try {
+         checkResults.push({ status: 'fulfilled', value: await zombieScanCheck() });
+       }
+       catch (reason) {
+         checkResults.push({ status: 'rejected', reason });
+       }
+     }
    }
    finally {
      spinner?.stop();
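The hand-built entries mirror Promise.allSettled's settled-result shape, which is why the sequenced zombie scan can flow through the same downstream rendering as the parallel batch:

const [settled] = await Promise.allSettled([Promise.resolve(42)]);
// settled → { status: 'fulfilled', value: 42 }, the same shape pushed above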
@@ -10,6 +10,29 @@
  import * as readline from 'node:readline';
  import { loadSpellEngine, } from '../services/engine-loader.js';
  import { createDashboardMemoryAccessor } from '../services/daemon-dashboard.js';
+ /**
+  * Wrap a MemoryAccessor with a write-failure counter so the [epic] summary
+  * can warn when spell progress didn't reach disk (#982). Without this, a
+  * persist failure surfaces only as a `[spell] storeProgress(...) failed`
+  * line buried mid-run, easily missed in shell scrollback.
+  */
+ function trackPersistFailures(inner) {
+   const tracker = {
+     failedWrites: 0,
+     async read(ns, key) { return inner.read(ns, key); },
+     async write(ns, key, value) {
+       try {
+         await inner.write(ns, key, value);
+       }
+       catch (err) {
+         tracker.failedWrites++;
+         throw err;
+       }
+     },
+     async search(ns, query) { return inner.search(ns, query); },
+   };
+   return tracker;
+ }
  /** Cached memory accessor — created once per process. */
  let memoryAccessor = null;
  /** Prompt the user to accept or decline spell permissions. */
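Illustrative use of the wrapper: write() rethrows, so callers keep their existing failure handling, and the counter is purely a side channel for the end-of-run summary (the inner accessor stub is made up):

const tracked = trackPersistFailures({
  async read() { return null; },
  async write() { throw new Error('disk full'); },
  async search() { return []; },
});
await tracked.write('ns', 'key', 'value').catch(() => {});
console.log(tracked.failedWrites); // → 1, and the error still propagated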
@@ -37,7 +60,8 @@ export async function runEpicSpell(yamlContent, options = {}) {
    // are persisted and visible in the dashboard.
    if (!memoryAccessor) {
      try {
-       memoryAccessor = await createDashboardMemoryAccessor();
+       const inner = await createDashboardMemoryAccessor();
+       memoryAccessor = trackPersistFailures(inner);
        console.log('[epic] Memory accessor ready — spell progress will be persisted');
      }
      catch (err) {
@@ -45,8 +69,26 @@ export async function runEpicSpell(yamlContent, options = {}) {
        console.warn('[epic] ⚠ Spell executions will NOT appear in the dashboard');
      }
    }
+   // memoryAccessor is module-cached, so `failedWrites` is cumulative across
+   // every spell run in this process. Capturing the count BEFORE this run
+   // and computing the delta below isolates "this run's failures" from any
+   // prior run's. Spell runs are sequential per process, so no race.
+   const failuresBefore = memoryAccessor?.failedWrites ?? 0;
    const runOpts = { ...options, projectRoot: process.cwd(), ...(memoryAccessor ? { memory: memoryAccessor } : {}) };
-   const result = await engine.runSpellFromContent(yamlContent, undefined, runOpts);
+   // Print the persist-failure summary on every return path. Without this,
+   // a #982-style failure surfaces only as scattered `[spell] storeProgress
+   // failed` lines mid-run that get lost in scrollback. The summary line is
+   // the user's signal that the dashboard / Luminarium will show empty
+   // history despite a successful-looking spell run.
+   const reportPersistFailures = () => {
+     if (!memoryAccessor)
+       return;
+     const failed = memoryAccessor.failedWrites - failuresBefore;
+     if (failed > 0) {
+       console.warn(`[epic] ⚠ Spell progress was not fully persisted (${failed} write${failed === 1 ? '' : 's'} failed) — run history may be missing from the dashboard.`);
+     }
+   };
+   let result = await engine.runSpellFromContent(yamlContent, undefined, runOpts);
    // Auto-accept permissions on first run: the spell runner already printed
    // the full risk analysis to the console. The user initiated the epic
    // command, so we accept on their behalf and retry immediately.
@@ -55,6 +97,7 @@ export async function runEpicSpell(yamlContent, options = {}) {
    if (hasAcceptanceError) {
      const accepted = await promptAcceptPermissions();
      if (!accepted) {
+       reportPersistFailures();
        return result;
      }
      // Use the already-loaded engine module (dynamic import) for spells internals.
@@ -71,8 +114,9 @@ export async function runEpicSpell(yamlContent, options = {}) {
      const report = analyzeSpellPermissions(parsed.definition, stepRegistry);
      await recordAcceptance(projectRoot, parsed.definition.name, report.permissionHash);
      console.log(`[epic] Permissions accepted for "${parsed.definition.name}" — retrying...\n`);
-     return engine.runSpellFromContent(yamlContent, undefined, runOpts);
+     result = await engine.runSpellFromContent(yamlContent, undefined, runOpts);
    }
+   reportPersistFailures();
    return result;
  }
  //# sourceMappingURL=runner-adapter.js.map
@@ -16,9 +16,51 @@ import { VERSION } from './version.js';
  export { VERSION };
  const LONG_RUNNING_COMMANDS = ['mcp', 'daemon'];
  /**
-  * Flush stdout/stderr, shut down the memory bridge if it was initialized,
+  * Wait for a writable's userspace buffer to drain, then issue an empty write
+  * whose callback fires after libuv has committed the now-front-of-queue write.
+  *
+  * Issue #996: on Windows async pipes, the empty-write trick alone fires before
+  * prior multi-line writes (e.g. `printBox`) have left libuv's buffer, racing
+  * `process.exit` and either dropping content rows or tripping the libuv
+  * `UV_HANDLE_CLOSING` assertion in `src/win/async.c`.
+  *
+  * Two-stage wait: first await `'drain'` if the userspace buffer is full, then
+  * the empty-write callback for the libuv-level commit. A 250 ms unref'd safety
+  * timeout covers broken pipes where `'drain'` never fires.
+  */
+ export function drainStream(stream) {
+   return new Promise((resolve) => {
+     if (!stream.writable || stream.destroyed)
+       return resolve();
+     const finalize = () => {
+       try {
+         stream.write('', () => resolve());
+       }
+       catch {
+         resolve();
+       }
+     };
+     if (stream.writableNeedDrain) {
+       const onDrain = () => {
+         clearTimeout(timer);
+         finalize();
+       };
+       stream.once('drain', onDrain);
+       const timer = setTimeout(() => {
+         stream.removeListener('drain', onDrain);
+         resolve();
+       }, 250);
+       timer.unref();
+     }
+     else {
+       finalize();
+     }
+   });
+ }
+ /**
+  * Drain stdout/stderr, shut down the memory bridge if it was initialized,
   * then `process.exit(code)`. Prevents the libuv `uv_async_send` assertion
-  * on Windows when stdout is an async pipe (issue #504).
+  * on Windows when stdout is an async pipe (issues #504, #996).
   */
  async function drainAndExit(code) {
    try {
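A sketch exercising drainStream against a deliberately slow Writable. The 1-byte highWaterMark forces writableNeedDrain, so this takes the 'drain' branch with its 250 ms safety timeout (the stream setup is illustrative, not from the package):

import { Writable } from 'node:stream';
const slow = new Writable({
  highWaterMark: 1,
  write(chunk, enc, cb) { setTimeout(cb, 10); }, // simulate a laggy pipe
});
slow.write('buffered output that must not be dropped\n');
await drainStream(slow); // resolves after 'drain' plus the empty-write commit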
@@ -28,12 +70,8 @@ async function drainAndExit(code) {
    catch {
      // Bridge may not have been loaded — that's fine
    }
-   const flush = (stream) => new Promise((resolve) => {
-     if (!stream.writable)
-       return resolve();
-     stream.write('', () => resolve());
-   });
-   await Promise.all([flush(process.stdout), flush(process.stderr)]);
+   process.exitCode = code;
+   await Promise.all([drainStream(process.stdout), drainStream(process.stderr)]);
    process.exit(code);
  }
  /**
@@ -9,8 +9,14 @@
   * adapter is the seam where the bridge is plugged in. Arbitrary values are
   * JSON-serialised into the bridge's `content` field; namespaces are prefixed
   * with `aidefence:` to isolate from general memory entries.
+  *
+  * #981 / #986 — writes funnel through `storeEntry` / `deleteEntry` so the
+  * single-writer daemon-routing preamble (memory-initializer.ts) covers them.
+  * Calling the bridge directly here would bypass the daemon and resurrect
+  * the multi-process clobber.
   */
- import { bridgeStoreEntry, bridgeSearchEntries, bridgeGetEntry, bridgeDeleteEntry, isBridgeAvailable, } from '../memory/memory-bridge.js';
+ import { bridgeSearchEntries, bridgeGetEntry, isBridgeAvailable, } from '../memory/memory-bridge.js';
+ import { storeEntry, deleteEntry } from '../memory/memory-initializer.js';
  const NS_PREFIX = 'aidefence:';
  function prefixNs(namespace) {
    return `${NS_PREFIX}${namespace}`;
@@ -30,7 +36,7 @@ function safeParse(raw) {
   */
  export class MofloDbAIDefenceStore {
    async store(params) {
-     await bridgeStoreEntry({
+     await storeEntry({
        namespace: prefixNs(params.namespace),
        key: params.key,
        value: JSON.stringify(params.value),
@@ -64,7 +70,7 @@ export class MofloDbAIDefenceStore {
      return safeParse(result.entry.content);
    }
    async delete(namespace, key) {
-     await bridgeDeleteEntry({
+     await deleteEntry({
        namespace: prefixNs(namespace),
        key,
      });
@@ -68,8 +68,15 @@ export const REQUIRED_BRIDGE_CONTROLLERS = Object.freeze([
  export function getBridgeLastError() {
    return lastBridgeError;
  }
- function logBridgeError(context, err) {
-   if (process.env.MOFLO_BRIDGE_QUIET)
+ /**
+  * Log a bridge error. By default `MOFLO_BRIDGE_QUIET` suppresses the line
+  * to keep test output clean of read-path noise. Pass `{ alwaysLog: true }`
+  * for write-path errors that mean data did NOT reach disk — those MUST
+  * always log, since the quiet env var is for read-path noise control,
+  * not for masking data loss (the #982 / #854 / #962 anti-pattern).
+  */
+ export function logBridgeError(context, err, opts) {
+   if (process.env.MOFLO_BRIDGE_QUIET && !opts?.alwaysLog)
      return;
    const msg = errorDetail(err);
    console.error(`[moflo] ${context}: ${msg}`);
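For contrast, the two call shapes the policy above distinguishes (err is a stand-in value):

const err = new Error('boom');
logBridgeError('bridge get failed', err); // read path: silenced under MOFLO_BRIDGE_QUIET
logBridgeError('bridge persist failed', err, { alwaysLog: true }); // write path: always logs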
@@ -186,6 +193,17 @@ export function execRows(db, sql, params) {
   * Persist the in-memory sql.js DB back to disk. sql.js is purely in-memory —
   * without an explicit export+writeFileSync after each mutation, writes vanish
   * when the process exits, which breaks store→retrieve across CLI commands.
+  *
+  * Throws on failure (#982). Callers that issued a mutation MUST treat a
+  * persist throw as the mutation having failed: the in-memory DB still has
+  * the new row, but it never reached disk and dies with the process.
+  *
+  * Pre-#982 this swallowed the error silently and logged once to stderr —
+  * the `bridgeStoreEntry` path then returned `{ success: true }` despite the
+  * data being lost, the same success-lie pattern that cost #854 and #962.
+  *
+  * Use {@link tryPersistBridgeDb} for the rare best-effort caller (cache
+  * invalidation, idempotent maintenance) that genuinely doesn't care.
   */
  export function persistBridgeDb(db, dbPath) {
    // Mirror the read-side resolution so writes land where reads come from.
@@ -200,7 +218,24 @@ export function persistBridgeDb(db, dbPath) {
      atomicWriteFileSync(target, db.export());
    }
    catch (err) {
-     logBridgeError('bridge persist failed', err);
+     logBridgeError('bridge persist failed', err, { alwaysLog: true });
+     throw err;
+   }
+ }
+ /**
+  * Best-effort variant of {@link persistBridgeDb}. Returns `{ ok: false }`
+  * on failure instead of throwing. Reserve for callers where a missed
+  * persist is genuinely acceptable (e.g. cache invalidation that the next
+  * mutation will redo). Always-log policy still applies — write failures
+  * cannot be silenced.
+  */
+ export function tryPersistBridgeDb(db, dbPath) {
+   try {
+     persistBridgeDb(db, dbPath);
+     return { ok: true };
+   }
+   catch (err) {
+     return { ok: false, error: err instanceof Error ? err : new Error(String(err)) };
    }
  }
  // Kept in sync with MEMORY_SCHEMA_V3.memory_entries in memory-initializer.ts.
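A sketch of the resulting caller contract: storeRow and invalidateCache are hypothetical names, but the two paths match the doc comments above. Mutation callers let the persist throw propagate; genuinely best-effort callers use the non-throwing variant:

function storeRow(db, dbPath, runMutation) {
  runMutation(db); // in-memory sql.js mutation
  persistBridgeDb(db, dbPath); // throws on failure → the mutation failed (#982)
  return { success: true };
}
function invalidateCache(db, dbPath) {
  const { ok, error } = tryPersistBridgeDb(db, dbPath); // never throws
  if (!ok) console.warn('cache persist skipped:', error.message);
}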