moflo 4.9.26 → 4.9.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,335 @@
1
+ /**
2
+ * Daemon HTTP RPC for memory writes (#981 — single-writer architecture).
3
+ *
4
+ * Adds POST /api/memory/{store,delete,batch} to the existing daemon HTTP
5
+ * server. The daemon process becomes the single authoritative writer when
6
+ * these endpoints are called; other processes (CLI, MCP server) route
7
+ * writes here via the daemon-write-client (Story #984).
8
+ *
9
+ * Story #983 ships these endpoints purely additively — nothing in the
10
+ * codebase calls them yet. Stories #985 / #986 wire consumer callers.
11
+ *
12
+ * Loopback-only: the parent server binds 127.0.0.1, so no auth/CSRF.
13
+ *
14
+ * @module daemon-memory-rpc
15
+ */
16
+ import { errorDetail } from '../shared/utils/error-detail.js';
17
// ============================================================================
// Constants
// ============================================================================
/**
 * Maximum POST body size for memory RPC. Oversized bodies are discarded by
 * readJsonBody (it resolves null), which the route handlers map to a 400
 * response. 1 MiB is well above any reasonable single entry while bounding
 * pathological loopback writes (defense-in-depth even on 127.0.0.1).
 */
export const MEMORY_RPC_MAX_BODY_BYTES = 1024 * 1024;
/** Conservative namespace pattern: alphanumerics, dot, dash, underscore. ≤64 chars. */
const NAMESPACE_PATTERN = /^[a-zA-Z0-9._-]{1,64}$/;
/** Max key length. Aligns with the existing memory_entries TEXT column conventions. */
const KEY_MAX_LENGTH = 256;
/** Max ops per batch. Bounds memory + write time per request. */
export const BATCH_MAX_OPS = 100;
32
+ // ============================================================================
33
+ // JSON body reader (size-capped, never throws)
34
+ // ============================================================================
35
/**
 * Read a JSON request body with a hard byte cap. Resolves null when:
 * - the body exceeds {@link MEMORY_RPC_MAX_BODY_BYTES}
 * - the body is empty or not valid JSON
 * - the request errors before completion
 *
 * Never throws and never rejects. Callers map null → 400.
 */
async function readJsonBody(req) {
    return new Promise((resolve) => {
        const parts = [];
        let received = 0;
        let settled = false;
        const settle = (value) => {
            if (settled)
                return;
            settled = true;
            resolve(value);
        };
        req.on('data', (chunk) => {
            received += chunk.length;
            if (received > MEMORY_RPC_MAX_BODY_BYTES) {
                // Over the cap: drop everything buffered so far and resolve
                // null. The request is allowed to finish; later data is ignored.
                parts.length = 0;
                settle(null);
                return;
            }
            if (!settled)
                parts.push(chunk);
        });
        req.on('end', () => {
            if (settled)
                return;
            try {
                const text = Buffer.concat(parts).toString('utf8');
                settle(text.length === 0 ? null : JSON.parse(text));
            }
            catch {
                settle(null);
            }
        });
        req.on('error', () => settle(null));
    });
}
79
+ // ============================================================================
80
+ // Validation
81
+ // ============================================================================
82
/** True when `ns` is a string matching NAMESPACE_PATTERN. */
function isNamespace(ns) {
    if (typeof ns !== 'string')
        return false;
    return NAMESPACE_PATTERN.test(ns);
}
85
/** True when `key` is a non-empty string of at most KEY_MAX_LENGTH chars. */
function isKey(key) {
    if (typeof key !== 'string')
        return false;
    return key.length > 0 && key.length <= KEY_MAX_LENGTH;
}
88
/** True when `value` is an array whose elements are all strings. */
function isStringArray(value) {
    if (!Array.isArray(value))
        return false;
    for (const item of value) {
        if (typeof item !== 'string')
            return false;
    }
    return true;
}
91
/**
 * Validate a store request body. Returns { ok: true, op } with the
 * normalized op on success, or { ok: false, error } describing the first
 * failed check. Error strings surface in the HTTP 400 payload.
 */
function validateStorePayload(body) {
    const fail = (error) => ({ ok: false, error });
    if (typeof body !== 'object' || body === null)
        return fail('body must be a JSON object');
    const b = body;
    if (!isNamespace(b.namespace))
        return fail(`invalid namespace (must match ${NAMESPACE_PATTERN}, ≤64 chars)`);
    if (!isKey(b.key))
        return fail(`invalid key (non-empty string ≤${KEY_MAX_LENGTH} chars)`);
    if (b.value === undefined)
        return fail('value is required');
    if (b.tags !== undefined && !isStringArray(b.tags))
        return fail('tags must be an array of strings');
    const ttlInvalid = b.ttl !== undefined
        && (typeof b.ttl !== 'number' || !Number.isFinite(b.ttl) || b.ttl <= 0);
    if (ttlInvalid)
        return fail('ttl must be a positive finite number (seconds)');
    const op = {
        namespace: b.namespace,
        key: b.key,
        value: b.value,
        tags: b.tags,
        ttl: b.ttl,
    };
    return { ok: true, op };
}
117
/**
 * Validate a delete request body: requires a valid namespace and key.
 * Returns { ok: true, op } or { ok: false, error }.
 */
function validateDeletePayload(body) {
    if (typeof body !== 'object' || body === null)
        return { ok: false, error: 'body must be a JSON object' };
    const { namespace, key } = body;
    if (!isNamespace(namespace))
        return { ok: false, error: 'invalid namespace' };
    if (!isKey(key))
        return { ok: false, error: 'invalid key' };
    return { ok: true, op: { namespace, key } };
}
127
/**
 * Validate a batch request body ({ ops: [...] }). All-or-nothing: the
 * first invalid op fails the whole request, reporting its index. On
 * success returns { ok: true, ops } with each op tagged 'store' | 'delete'.
 */
function validateBatchPayload(body) {
    if (typeof body !== 'object' || body === null)
        return { ok: false, error: 'body must be a JSON object' };
    const { ops } = body;
    if (!Array.isArray(ops) || ops.length === 0)
        return { ok: false, error: 'ops must be a non-empty array' };
    if (ops.length > BATCH_MAX_OPS)
        return { ok: false, error: `batch too large (max ${BATCH_MAX_OPS} ops)` };
    const validated = [];
    for (const [i, raw] of ops.entries()) {
        const opType = raw?.op;
        if (opType !== 'store' && opType !== 'delete')
            return { ok: false, error: "op must be 'store' or 'delete'", index: i };
        const r = opType === 'store'
            ? validateStorePayload(raw)
            : validateDeletePayload(raw);
        if (!r.ok)
            return { ok: false, error: r.error, index: i };
        validated.push({ op: opType, ...r.op });
    }
    return { ok: true, ops: validated };
}
159
// ============================================================================
// JSON response helper (intentionally duplicates daemon-dashboard's helper —
// keeping this module standalone simplifies testing and avoids a circular
// dep when daemon-dashboard imports the route handlers below).
// ============================================================================
/** Serialize `body` and send it as a complete JSON response on `res`. */
function sendJson(res, status, body) {
    const payload = JSON.stringify(body);
    const headers = {
        'Content-Type': 'application/json',
        'Content-Length': Buffer.byteLength(payload),
        'Cache-Control': 'no-cache',
    };
    res.writeHead(status, headers);
    res.end(payload);
}
173
/** Coerce an arbitrary JSON value to its stored string form (strings pass through). */
function valueToString(value) {
    if (typeof value === 'string')
        return value;
    return JSON.stringify(value);
}
176
+ // ============================================================================
177
+ // Route handlers
178
+ // ============================================================================
179
/** Lazy import to avoid a circular dep with memory-initializer's heavy graph. */
async function getMemoryFns() {
    const { storeEntry, deleteEntry } = await import('../memory/memory-initializer.js');
    return { storeEntry, deleteEntry };
}
184
/**
 * POST /api/memory/store — write a single entry through the daemon's
 * authoritative sql.js path. Upsert semantics (matches `memory_store`
 * MCP tool default — see #962 for why this is required).
 *
 * Responses: 503 memory accessor missing; 400 bad body/payload;
 * 500 store failure or internal error; 200 { ok, stored, id } on success.
 */
export async function handleMemoryStore(req, res, memory) {
    if (!memory) {
        sendJson(res, 503, { error: 'Memory accessor not attached' });
        return;
    }
    const body = await readJsonBody(req);
    if (body === null) {
        sendJson(res, 400, { error: 'Invalid or oversized JSON body' });
        return;
    }
    const parsed = validateStorePayload(body);
    if (!parsed.ok) {
        sendJson(res, 400, { error: 'Invalid store request', message: parsed.error });
        return;
    }
    const { namespace, key, value, tags, ttl } = parsed.op;
    try {
        const { storeEntry } = await getMemoryFns();
        const result = await storeEntry({
            key,
            value: valueToString(value),
            namespace,
            tags,
            ttl,
            upsert: true,
        });
        if (result.success) {
            sendJson(res, 200, { ok: true, stored: true, id: result.id });
        }
        else {
            sendJson(res, 500, { error: 'Store failed', message: result.error ?? 'unknown' });
        }
    }
    catch (err) {
        sendJson(res, 500, { error: 'Internal error', message: errorDetail(err) });
    }
}
224
/**
 * POST /api/memory/delete — remove an entry via the daemon's authoritative
 * write path.
 *
 * Responses: 503 memory accessor missing; 400 bad body/payload;
 * 500 delete failure or internal error; 200 { ok, deleted } on success.
 */
export async function handleMemoryDelete(req, res, memory) {
    if (!memory) {
        sendJson(res, 503, { error: 'Memory accessor not attached' });
        return;
    }
    const body = await readJsonBody(req);
    if (body === null) {
        sendJson(res, 400, { error: 'Invalid or oversized JSON body' });
        return;
    }
    const parsed = validateDeletePayload(body);
    if (!parsed.ok) {
        sendJson(res, 400, { error: 'Invalid delete request', message: parsed.error });
        return;
    }
    try {
        const { deleteEntry } = await getMemoryFns();
        const result = await deleteEntry({ key: parsed.op.key, namespace: parsed.op.namespace });
        if (result.success) {
            sendJson(res, 200, { ok: true, deleted: result.deleted });
        }
        else {
            sendJson(res, 500, { error: 'Delete failed', message: result.error ?? 'unknown' });
        }
    }
    catch (err) {
        sendJson(res, 500, { error: 'Internal error', message: errorDetail(err) });
    }
}
256
/**
 * POST /api/memory/batch — apply a sequence of store/delete ops.
 *
 * Validation is all-or-nothing: any invalid op fails the request with 400
 * and no application. Application is sequential: a failure mid-stream
 * returns 207 Multi-Status with structured per-op results so the caller
 * can retry only the failures.
 *
 * True transactional atomicity isn't possible without a long-lived
 * write-handle inside the daemon (a future evolution); for now this
 * mirrors the existing `storeEntries` fallback semantics.
 *
 * Responses: 503 memory accessor missing; 400 bad body/payload;
 * 500 write-fn resolution failure; 200/207 { ok, results } after application.
 */
export async function handleMemoryBatch(req, res, memory) {
    if (!memory) {
        sendJson(res, 503, { error: 'Memory accessor not attached' });
        return;
    }
    const body = await readJsonBody(req);
    if (body === null) {
        sendJson(res, 400, { error: 'Invalid or oversized JSON body' });
        return;
    }
    const v = validateBatchPayload(body);
    if (!v.ok) {
        sendJson(res, 400, { error: 'Invalid batch request', message: v.error, index: v.index });
        return;
    }
    // Fix: resolve the write fns inside try/catch. Previously this await sat
    // outside any try block, so a rejected dynamic import escaped the handler
    // and the client never received a response. The sibling store/delete
    // handlers already map this failure to a 500; match them.
    let storeEntry;
    let deleteEntry;
    try {
        ({ storeEntry, deleteEntry } = await getMemoryFns());
    }
    catch (err) {
        sendJson(res, 500, { error: 'Internal error', message: errorDetail(err) });
        return;
    }
    const results = [];
    let anyFailed = false;
    // Sequential application: each op's result (or error) is recorded in
    // order so the caller can correlate results[i] with ops[i].
    for (const op of v.ops) {
        try {
            if (op.op === 'store') {
                const r = await storeEntry({
                    key: op.key,
                    value: valueToString(op.value),
                    namespace: op.namespace,
                    tags: op.tags,
                    ttl: op.ttl,
                    upsert: true,
                });
                if (r.success) {
                    results.push({ ok: true, id: r.id });
                }
                else {
                    results.push({ ok: false, error: r.error ?? 'unknown' });
                    anyFailed = true;
                }
            }
            else {
                const r = await deleteEntry({ key: op.key, namespace: op.namespace });
                if (r.success) {
                    results.push({ ok: true, deleted: r.deleted });
                }
                else {
                    results.push({ ok: false, error: r.error ?? 'unknown' });
                    anyFailed = true;
                }
            }
        }
        catch (err) {
            results.push({ ok: false, error: errorDetail(err) });
            anyFailed = true;
        }
    }
    // 207 Multi-Status signals partial failure; per-op results tell the
    // caller exactly which ops to retry.
    sendJson(res, anyFailed ? 207 : 200, { ok: !anyFailed, results });
}
323
/**
 * Map a request URL to a memory RPC route name ('store' | 'delete' |
 * 'batch'), or null when the path is not a memory RPC endpoint.
 * Any query string is ignored.
 */
export function matchMemoryRpcRoute(url) {
    if (!url)
        return null;
    const [path] = url.split('?');
    switch (path) {
        case '/api/memory/store':
            return 'store';
        case '/api/memory/delete':
            return 'delete';
        case '/api/memory/batch':
            return 'batch';
        default:
            return null;
    }
}
335
+ //# sourceMappingURL=daemon-memory-rpc.js.map
@@ -23,6 +23,16 @@ export class WriteThroughAdapter {
23
23
  attached = false;
24
24
  boundHandler;
25
25
  stats = { written: 0, errors: 0, reaped: 0 };
26
+ /**
27
+ * #981 — track in-flight fire-and-forget writes so `clearNamespace` (and
28
+ * any future shutdown waiter) can await them before listing/deleting.
29
+ * Without this, a unified message that hits the bus shortly before
30
+ * shutdown's clearNamespace() races the listEntries query — its write
31
+ * lands AFTER list and survives the shutdown's deletes. Pre-#981 this
32
+ * race was hidden by sql.js multi-process clobber; under single-writer
33
+ * routing the write deterministically persists.
34
+ */
35
+ pendingWrites = new Set();
26
36
  constructor(bus, config, storeEntry, options) {
27
37
  this.bus = bus;
28
38
  this.config = config;
@@ -77,6 +87,10 @@ export class WriteThroughAdapter {
77
87
  async clearNamespace(namespace) {
78
88
  if (!this.listEntries || !this.deleteEntry)
79
89
  return;
90
+ // #981 — drain in-flight writes before listing. Without this the list
91
+ // can return BEFORE a queued write completes; the deletes that follow
92
+ // skip that row, and it survives the shutdown.
93
+ await this.drainPendingWrites();
80
94
  try {
81
95
  const result = await this.listEntries({ namespace, limit: 1000 });
82
96
  for (const entry of result.entries) {
@@ -87,13 +101,25 @@ export class WriteThroughAdapter {
87
101
  // Best-effort cleanup
88
102
  }
89
103
  }
104
/**
 * Wait for every fire-and-forget write currently in flight to settle.
 * No-op when no writes are queued. NOTE(review): Promise.allSettled itself
 * imposes no time bound — a write that never settles would block here
 * indefinitely. The comment on `pendingWrites` says each write carries its
 * own daemon-write-client timeout; that timeout, not allSettled, is what
 * bounds this wait — confirm against the write client.
 */
async drainPendingWrites() {
    if (this.pendingWrites.size === 0)
        return;
    await Promise.allSettled([...this.pendingWrites]);
}
90
115
  onUnifiedMessage(event) {
91
116
  if (!event.namespace || !this.enabledNamespaces.has(event.namespace)) {
92
117
  return;
93
118
  }
94
119
  const ttlSeconds = event.ttlMs ? Math.ceil(event.ttlMs / 1000) : undefined;
95
- // Fire-and-forget write to Memory DB
96
- this.storeEntry({
120
+ // Fire-and-forget write to Memory DB. The promise is tracked in
121
+ // `pendingWrites` so `clearNamespace`/`drainPendingWrites` can await it.
122
+ const writePromise = this.storeEntry({
97
123
  key: `msg:${event.messageId}`,
98
124
  value: JSON.stringify({
99
125
  id: event.messageId,
@@ -115,6 +141,8 @@ export class WriteThroughAdapter {
115
141
  }).catch(() => {
116
142
  this.stats.errors++;
117
143
  });
144
+ this.pendingWrites.add(writePromise);
145
+ writePromise.finally(() => { this.pendingWrites.delete(writePromise); });
118
146
  }
119
147
  /**
120
148
  * DB reaper: cleans expired entries from Memory DB.
@@ -2,5 +2,5 @@
2
2
  * Auto-generated by build. Do not edit manually.
3
3
  * Source of truth: root package.json → scripts/sync-version.mjs
4
4
  */
5
- export const VERSION = '4.9.26';
5
+ export const VERSION = '4.9.27';
6
6
  //# sourceMappingURL=version.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "moflo",
3
- "version": "4.9.26",
3
+ "version": "4.9.27",
4
4
  "description": "MoFlo — AI agent orchestration for Claude Code. A standalone, opinionated toolkit with semantic memory, learned routing, gates, spells, and the /flo issue-execution skill.",
5
5
  "main": "dist/src/cli/index.js",
6
6
  "type": "module",
@@ -84,7 +84,7 @@
84
84
  "@typescript-eslint/eslint-plugin": "^7.18.0",
85
85
  "@typescript-eslint/parser": "^7.18.0",
86
86
  "eslint": "^8.0.0",
87
- "moflo": "^4.9.25",
87
+ "moflo": "^4.9.26",
88
88
  "tsx": "^4.21.0",
89
89
  "typescript": "^5.9.3",
90
90
  "vitest": "^4.0.0"