@soleri/core 9.4.0 → 9.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. package/dist/hooks/candidate-scorer.d.ts +28 -0
  2. package/dist/hooks/candidate-scorer.d.ts.map +1 -0
  3. package/dist/hooks/candidate-scorer.js +20 -0
  4. package/dist/hooks/candidate-scorer.js.map +1 -0
  5. package/dist/hooks/index.d.ts +2 -0
  6. package/dist/hooks/index.d.ts.map +1 -0
  7. package/dist/hooks/index.js +2 -0
  8. package/dist/hooks/index.js.map +1 -0
  9. package/dist/planning/plan-lifecycle.d.ts.map +1 -1
  10. package/dist/planning/plan-lifecycle.js +6 -1
  11. package/dist/planning/plan-lifecycle.js.map +1 -1
  12. package/package.json +1 -1
  13. package/src/brain/brain.ts +120 -46
  14. package/src/brain/intelligence.ts +42 -34
  15. package/src/chat/agent-loop.ts +1 -1
  16. package/src/chat/notifications.ts +4 -0
  17. package/src/control/intent-router.ts +10 -8
  18. package/src/curator/curator.ts +145 -29
  19. package/src/hooks/candidate-scorer.test.ts +76 -0
  20. package/src/hooks/candidate-scorer.ts +39 -0
  21. package/src/hooks/index.ts +6 -0
  22. package/src/index.ts +2 -0
  23. package/src/llm/llm-client.ts +1 -0
  24. package/src/persistence/sqlite-provider.ts +1 -0
  25. package/src/planning/github-projection.ts +48 -44
  26. package/src/planning/plan-lifecycle.ts +14 -1
  27. package/src/queue/pipeline-runner.ts +4 -0
  28. package/src/runtime/curator-extra-ops.test.ts +7 -0
  29. package/src/runtime/curator-extra-ops.ts +10 -1
  30. package/src/runtime/facades/curator-facade.test.ts +7 -0
  31. package/src/runtime/facades/memory-facade.ts +187 -0
  32. package/src/runtime/orchestrate-ops.ts +3 -3
  33. package/src/runtime/runtime.test.ts +50 -2
  34. package/src/runtime/runtime.ts +117 -89
  35. package/src/runtime/shutdown-registry.test.ts +151 -0
  36. package/src/runtime/shutdown-registry.ts +85 -0
  37. package/src/runtime/types.ts +4 -1
  38. package/src/transport/http-server.ts +50 -3
  39. package/src/transport/ws-server.ts +8 -0
  40. package/src/vault/linking.test.ts +12 -0
  41. package/src/vault/linking.ts +90 -44
  42. package/src/vault/vault-maintenance.ts +11 -18
  43. package/src/vault/vault-memories.ts +21 -13
  44. package/src/vault/vault-schema.ts +21 -0
  45. package/src/vault/vault.ts +8 -3
@@ -0,0 +1,151 @@
1
+ /**
2
+ * Unit tests for ShutdownRegistry — centralized cleanup for agent runtime.
3
+ */
4
+
5
+ import { describe, it, expect, vi } from 'vitest';
6
+ import { ShutdownRegistry } from './shutdown-registry.js';
7
+
8
+ describe('ShutdownRegistry', () => {
9
+ it('starts with zero entries and not closed', () => {
10
+ const registry = new ShutdownRegistry();
11
+ expect(registry.size).toBe(0);
12
+ expect(registry.isClosed).toBe(false);
13
+ });
14
+
15
+ it('tracks registered entries', () => {
16
+ const registry = new ShutdownRegistry();
17
+ registry.register('a', vi.fn());
18
+ registry.register('b', vi.fn());
19
+ expect(registry.size).toBe(2);
20
+ });
21
+
22
+ it('calls callbacks in LIFO order on closeAll', async () => {
23
+ const order: string[] = [];
24
+ const registry = new ShutdownRegistry();
25
+ registry.register('first', () => {
26
+ order.push('first');
27
+ });
28
+ registry.register('second', () => {
29
+ order.push('second');
30
+ });
31
+ registry.register('third', () => {
32
+ order.push('third');
33
+ });
34
+
35
+ await registry.closeAll();
36
+ expect(order).toEqual(['third', 'second', 'first']);
37
+ });
38
+
39
+ it('calls callbacks in LIFO order on closeAllSync', () => {
40
+ const order: string[] = [];
41
+ const registry = new ShutdownRegistry();
42
+ registry.register('first', () => {
43
+ order.push('first');
44
+ });
45
+ registry.register('second', () => {
46
+ order.push('second');
47
+ });
48
+ registry.register('third', () => {
49
+ order.push('third');
50
+ });
51
+
52
+ registry.closeAllSync();
53
+ expect(order).toEqual(['third', 'second', 'first']);
54
+ });
55
+
56
+ it('is idempotent — second closeAll is a no-op', async () => {
57
+ const callback = vi.fn();
58
+ const registry = new ShutdownRegistry();
59
+ registry.register('test', callback);
60
+
61
+ await registry.closeAll();
62
+ await registry.closeAll();
63
+
64
+ expect(callback).toHaveBeenCalledTimes(1);
65
+ expect(registry.isClosed).toBe(true);
66
+ });
67
+
68
+ it('is idempotent — second closeAllSync is a no-op', () => {
69
+ const callback = vi.fn();
70
+ const registry = new ShutdownRegistry();
71
+ registry.register('test', callback);
72
+
73
+ registry.closeAllSync();
74
+ registry.closeAllSync();
75
+
76
+ expect(callback).toHaveBeenCalledTimes(1);
77
+ });
78
+
79
+ it('handles async callbacks in closeAll', async () => {
80
+ const order: string[] = [];
81
+ const registry = new ShutdownRegistry();
82
+ registry.register('sync', () => {
83
+ order.push('sync');
84
+ });
85
+ registry.register('async', async () => {
86
+ await new Promise((r) => setTimeout(r, 5));
87
+ order.push('async');
88
+ });
89
+
90
+ await registry.closeAll();
91
+ expect(order).toEqual(['async', 'sync']);
92
+ });
93
+
94
+ it('continues on error — remaining callbacks still execute', async () => {
95
+ const stderrSpy = vi.spyOn(process.stderr, 'write').mockReturnValue(true);
96
+ const order: string[] = [];
97
+ const registry = new ShutdownRegistry();
98
+ registry.register('first', () => {
99
+ order.push('first');
100
+ });
101
+ registry.register('failing', () => {
102
+ throw new Error('boom');
103
+ });
104
+ registry.register('third', () => {
105
+ order.push('third');
106
+ });
107
+
108
+ await registry.closeAll();
109
+
110
+ // third runs first (LIFO), failing throws but first still runs
111
+ expect(order).toEqual(['third', 'first']);
112
+ expect(stderrSpy).toHaveBeenCalledWith(expect.stringContaining('boom'));
113
+ stderrSpy.mockRestore();
114
+ });
115
+
116
+ it('closeAllSync swallows errors silently', () => {
117
+ const order: string[] = [];
118
+ const registry = new ShutdownRegistry();
119
+ registry.register('first', () => {
120
+ order.push('first');
121
+ });
122
+ registry.register('failing', () => {
123
+ throw new Error('boom');
124
+ });
125
+ registry.register('third', () => {
126
+ order.push('third');
127
+ });
128
+
129
+ // Should not throw
130
+ registry.closeAllSync();
131
+ expect(order).toEqual(['third', 'first']);
132
+ });
133
+
134
+ it('ignores registrations after close', async () => {
135
+ const registry = new ShutdownRegistry();
136
+ await registry.closeAll();
137
+
138
+ const callback = vi.fn();
139
+ registry.register('late', callback);
140
+ expect(registry.size).toBe(0);
141
+ });
142
+
143
+ it('clears entries after closeAll', async () => {
144
+ const registry = new ShutdownRegistry();
145
+ registry.register('test', vi.fn());
146
+ expect(registry.size).toBe(1);
147
+
148
+ await registry.closeAll();
149
+ expect(registry.size).toBe(0);
150
+ });
151
+ });
@@ -0,0 +1,85 @@
1
+ /**
2
+ * Shutdown Registry — centralized cleanup for agent runtime resources.
3
+ *
4
+ * Modules register their cleanup callbacks (clear timers, close watchers,
5
+ * kill child processes). On shutdown, callbacks run in LIFO order so
6
+ * dependents close before their dependencies.
7
+ *
8
+ * Idempotent: calling `closeAll()` multiple times is safe.
9
+ */
10
+
11
+ export type ShutdownCallback = () => void | Promise<void>;
12
+
13
+ interface ShutdownEntry {
14
+ name: string;
15
+ callback: ShutdownCallback;
16
+ }
17
+
18
+ export class ShutdownRegistry {
19
+ private entries: ShutdownEntry[] = [];
20
+ private closed = false;
21
+
22
+ /**
23
+ * Register a named cleanup callback.
24
+ * Callbacks are invoked in LIFO order (last registered = first closed).
25
+ */
26
+ register(name: string, callback: ShutdownCallback): void {
27
+ if (this.closed) return;
28
+ this.entries.push({ name, callback });
29
+ }
30
+
31
+ /**
32
+ * Run all registered cleanup callbacks in LIFO order.
33
+ * Idempotent — subsequent calls are no-ops.
34
+ * Errors in individual callbacks are caught and logged to stderr
35
+ * so that remaining callbacks still execute.
36
+ */
37
+ async closeAll(): Promise<void> {
38
+ if (this.closed) return;
39
+ this.closed = true;
40
+
41
+ // LIFO order
42
+ for (let i = this.entries.length - 1; i >= 0; i--) {
43
+ const entry = this.entries[i];
44
+ try {
45
+ await entry.callback();
46
+ } catch (err) {
47
+ // Log but don't throw — remaining cleanups must still run
48
+ const msg = err instanceof Error ? err.message : String(err);
49
+ process.stderr.write(`[shutdown] ${entry.name}: ${msg}\n`);
50
+ }
51
+ }
52
+
53
+ this.entries = [];
54
+ }
55
+
56
+ /**
57
+ * Synchronous close — best-effort for non-async callbacks.
58
+ * Use when you can't await (e.g. process.on('exit')).
59
+ */
60
+ closeAllSync(): void {
61
+ if (this.closed) return;
62
+ this.closed = true;
63
+
64
+ for (let i = this.entries.length - 1; i >= 0; i--) {
65
+ const entry = this.entries[i];
66
+ try {
67
+ entry.callback();
68
+ } catch {
69
+ // Best-effort — swallow errors in sync path
70
+ }
71
+ }
72
+
73
+ this.entries = [];
74
+ }
75
+
76
+ /** Number of registered callbacks. */
77
+ get size(): number {
78
+ return this.entries.length;
79
+ }
80
+
81
+ /** Whether closeAll() has been called. */
82
+ get isClosed(): boolean {
83
+ return this.closed;
84
+ }
85
+ }
@@ -36,6 +36,7 @@ import type { PipelineRunner } from '../queue/pipeline-runner.js';
36
36
  import type { OperatorProfileStore } from '../operator/operator-profile.js';
37
37
  import type { OperatorContextStore } from '../operator/operator-context-store.js';
38
38
  import type { ContextHealthMonitor } from './context-health.js';
39
+ import type { ShutdownRegistry } from './shutdown-registry.js';
39
40
 
40
41
  /**
41
42
  * Configuration for creating an agent runtime.
@@ -130,8 +131,10 @@ export interface AgentRuntime {
130
131
  personaInstructions: import('../persona/types.js').PersonaSystemInstructions;
131
132
  /** Context health monitor — tracks tool call volume and context window fill. */
132
133
  contextHealth: ContextHealthMonitor;
134
+ /** Shutdown registry — centralized cleanup for timers, watchers, child processes. */
135
+ shutdownRegistry: ShutdownRegistry;
133
136
  /** Timestamp (ms since epoch) when this runtime was created. */
134
137
  createdAt: number;
135
- /** Close the vault database connection. Call on shutdown. */
138
+ /** Close all runtime resources (vault, timers, watchers). Call on shutdown. */
136
139
  close(): void;
137
140
  }
@@ -151,7 +151,18 @@ export class HttpMcpServer {
151
151
  const sessionId = req.headers['mcp-session-id'] as string | undefined;
152
152
 
153
153
  if (method === 'POST') {
154
- const body = await this.readBody(req);
154
+ let body: unknown;
155
+ try {
156
+ body = await this.readBody(req);
157
+ } catch (err) {
158
+ const statusCode = (err as { statusCode?: number }).statusCode;
159
+ if (statusCode === 413) {
160
+ this.sendJSON(res, 413, { error: 'Request body too large' });
161
+ return;
162
+ }
163
+ this.sendJSON(res, 400, { error: 'Failed to read request body' });
164
+ return;
165
+ }
155
166
 
156
167
  if (sessionId) {
157
168
  const session = this.sessions.get(sessionId);
@@ -241,10 +252,41 @@ export class HttpMcpServer {
241
252
  }
242
253
 
243
254
  private readBody(req: IncomingMessage): Promise<unknown> {
255
+ const MAX_BODY_SIZE = 10 * 1024 * 1024; // 10 MB
256
+ const BODY_TIMEOUT = 30_000; // 30 seconds
257
+
244
258
  return new Promise((resolve, reject) => {
259
+ let size = 0;
245
260
  const chunks: Buffer[] = [];
246
- req.on('data', (chunk: Buffer) => chunks.push(chunk));
261
+ let settled = false;
262
+
263
+ const timer = setTimeout(() => {
264
+ if (!settled) {
265
+ settled = true;
266
+ req.destroy();
267
+ reject(new Error('Request body timeout'));
268
+ }
269
+ }, BODY_TIMEOUT);
270
+
271
+ const cleanup = () => clearTimeout(timer);
272
+
273
+ req.on('data', (chunk: Buffer) => {
274
+ size += chunk.length;
275
+ if (size > MAX_BODY_SIZE) {
276
+ if (!settled) {
277
+ settled = true;
278
+ cleanup();
279
+ req.destroy();
280
+ reject(Object.assign(new Error('Request body too large'), { statusCode: 413 }));
281
+ }
282
+ return;
283
+ }
284
+ chunks.push(chunk);
285
+ });
247
286
  req.on('end', () => {
287
+ if (settled) return;
288
+ settled = true;
289
+ cleanup();
248
290
  try {
249
291
  const text = Buffer.concat(chunks).toString('utf-8');
250
292
  resolve(text.length > 0 ? JSON.parse(text) : {});
@@ -252,7 +294,12 @@ export class HttpMcpServer {
252
294
  reject(e);
253
295
  }
254
296
  });
255
- req.on('error', reject);
297
+ req.on('error', (e) => {
298
+ if (settled) return;
299
+ settled = true;
300
+ cleanup();
301
+ reject(e);
302
+ });
256
303
  });
257
304
  }
258
305
 
@@ -230,11 +230,19 @@ export class WsMcpServer {
230
230
 
231
231
  // Set up frame reader
232
232
  const maxSize = this.config.maxMessageSize ?? DEFAULT_MAX_MESSAGE_SIZE;
233
+ const maxBufferSize = maxSize; // raw inbound buffer cap — NOTE(review): a frame header adds up to 14 bytes on top of the payload, so a message of exactly maxSize can trip this guard; consider maxSize + 14
233
234
  let buffer = Buffer.alloc(0);
234
235
 
235
236
  socket.on('data', (chunk: Buffer) => {
236
237
  buffer = Buffer.concat([buffer, chunk]);
237
238
 
239
+ // Guard against unbounded buffer growth (e.g. slow-drip DoS with no complete frames)
240
+ if (buffer.length > maxBufferSize) {
241
+ this.sendClose(socket, 1009, 'Buffer exceeded max size');
242
+ socket.destroy();
243
+ return;
244
+ }
245
+
238
246
  // Process all complete frames in the buffer
239
247
  while (buffer.length >= 2) {
240
248
  const frame = this.parseFrame(buffer);
@@ -68,6 +68,12 @@ class LinkingMockDB implements PersistenceProvider {
68
68
  get<T>(sql: string, params?: unknown[]): T | undefined {
69
69
  const p = params ?? [];
70
70
  if (sql.includes('COUNT(*)')) {
71
+ if (sql.includes('NOT IN')) {
72
+ // Count orphan entries (no links)
73
+ const linkedIds = new Set(this.links.flatMap((l) => [l.source_id, l.target_id]));
74
+ const count = this.entries.filter((e) => !linkedIds.has(e.id)).length;
75
+ return { count } as T;
76
+ }
71
77
  const id = p[0] as string;
72
78
  const count = this.links.filter((l) => l.source_id === id || l.target_id === id).length;
73
79
  return { count } as T;
@@ -102,6 +108,12 @@ class LinkingMockDB implements PersistenceProvider {
102
108
  const ids = new Set(p.slice(0, half) as string[]);
103
109
  return this.links.filter((l) => ids.has(l.source_id) || ids.has(l.target_id)) as T[];
104
110
  }
111
+ if (sql.includes('FROM entries WHERE id IN')) {
112
+ const ids = new Set(p as string[]);
113
+ return this.entries
114
+ .filter((e) => ids.has(e.id))
115
+ .map((e) => ({ id: e.id, title: e.title, type: e.type, domain: e.domain })) as T[];
116
+ }
105
117
  if (sql.includes('NOT IN')) {
106
118
  const limit = p[0] as number;
107
119
  const linkedIds = new Set(this.links.flatMap((l) => [l.source_id, l.target_id]));
@@ -145,6 +145,7 @@ export class LinkManager {
145
145
  /**
146
146
  * Walk the link graph from a starting entry up to `depth` hops.
147
147
  * BFS — walks both outgoing and incoming links (undirected).
148
+ * Batch-loads all links per frontier level to avoid N+1 queries.
148
149
  */
149
150
  traverse(entryId: string, depth: number = 2): LinkedEntry[] {
150
151
  const visited = new Set<string>([entryId]);
@@ -152,9 +153,47 @@ export class LinkManager {
152
153
  let frontier = [entryId];
153
154
 
154
155
  for (let d = 0; d < depth && frontier.length > 0; d++) {
156
+ // Batch-load all links for entire frontier in one query
157
+ const allLinks = this.getAllLinksForEntries(frontier);
158
+
159
+ // Collect unvisited neighbor IDs
160
+ const neighborMap = new Map<
161
+ string,
162
+ { link: VaultLink; direction: 'outgoing' | 'incoming' }
163
+ >();
164
+ for (const link of allLinks) {
165
+ // Outgoing: source is in frontier, target is the neighbor
166
+ if (frontier.includes(link.sourceId) && !visited.has(link.targetId)) {
167
+ if (!neighborMap.has(link.targetId)) {
168
+ neighborMap.set(link.targetId, { link, direction: 'outgoing' });
169
+ }
170
+ }
171
+ // Incoming: target is in frontier, source is the neighbor
172
+ if (frontier.includes(link.targetId) && !visited.has(link.sourceId)) {
173
+ if (!neighborMap.has(link.sourceId)) {
174
+ neighborMap.set(link.sourceId, { link, direction: 'incoming' });
175
+ }
176
+ }
177
+ }
178
+
179
+ if (neighborMap.size === 0) break;
180
+
181
+ // Batch-load entry metadata for all neighbors in one query
182
+ const neighborIds = [...neighborMap.keys()];
183
+ const metaMap = this.getEntryMetaBatch(neighborIds);
184
+
155
185
  const nextFrontier: string[] = [];
156
- for (const currentId of frontier) {
157
- this.collectNeighbors(currentId, visited, nextFrontier, result);
186
+ for (const [neighborId, { link, direction }] of neighborMap) {
187
+ visited.add(neighborId);
188
+ nextFrontier.push(neighborId);
189
+ const entry = metaMap.get(neighborId);
190
+ if (!entry) continue;
191
+ result.push({
192
+ ...entry,
193
+ linkType: link.linkType,
194
+ linkDirection: direction,
195
+ linkNote: link.note,
196
+ });
158
197
  }
159
198
  frontier = nextFrontier;
160
199
  }
@@ -162,43 +201,6 @@ export class LinkManager {
162
201
  return result;
163
202
  }
164
203
 
165
- /** Collect unvisited outgoing and incoming neighbors for BFS. */
166
- private collectNeighbors(
167
- currentId: string,
168
- visited: Set<string>,
169
- nextFrontier: string[],
170
- result: LinkedEntry[],
171
- ): void {
172
- for (const link of this.getLinks(currentId)) {
173
- this.visitNeighbor(link.targetId, link, 'outgoing', visited, nextFrontier, result);
174
- }
175
- for (const link of this.getBacklinks(currentId)) {
176
- this.visitNeighbor(link.sourceId, link, 'incoming', visited, nextFrontier, result);
177
- }
178
- }
179
-
180
- /** Visit a single neighbor node if not already visited. */
181
- private visitNeighbor(
182
- neighborId: string,
183
- link: VaultLink,
184
- direction: 'outgoing' | 'incoming',
185
- visited: Set<string>,
186
- nextFrontier: string[],
187
- result: LinkedEntry[],
188
- ): void {
189
- if (visited.has(neighborId)) return;
190
- visited.add(neighborId);
191
- nextFrontier.push(neighborId);
192
- const entry = this.getEntryMeta(neighborId);
193
- if (!entry) return;
194
- result.push({
195
- ...entry,
196
- linkType: link.linkType,
197
- linkDirection: direction,
198
- linkNote: link.note,
199
- });
200
- }
201
-
202
204
  // ── Bulk Queries ────────────────────────────────────────────────────
203
205
 
204
206
  /** Get all links where either source or target is in the given ID set. */
@@ -317,20 +319,43 @@ export class LinkManager {
317
319
  const batchSize = opts?.batchSize ?? 50;
318
320
  const start = Date.now();
319
321
 
320
- const orphans = this.getOrphans(10000);
321
322
  let processed = 0;
322
323
  let linksCreated = 0;
323
324
  const preview: Array<{ sourceId: string; targetId: string; linkType: string; score: number }> =
324
325
  [];
325
326
 
326
- for (let i = 0; i < orphans.length; i += batchSize) {
327
- const batch = orphans.slice(i, i + batchSize);
328
- for (const entry of batch) {
327
+ // Estimate total for progress reporting (single COUNT query)
328
+ let totalEstimate = 0;
329
+ try {
330
+ const countRow = this.provider.get<{ count: number }>(
331
+ `SELECT COUNT(*) as count FROM entries
332
+ WHERE id NOT IN (SELECT source_id FROM vault_links)
333
+ AND id NOT IN (SELECT target_id FROM vault_links)`,
334
+ );
335
+ totalEstimate = countRow?.count ?? 0;
336
+ } catch {
337
+ // fall through with 0
338
+ }
339
+
340
+ // Process orphans in batches of batchSize instead of loading all at once.
341
+ // After each batch, successfully linked entries are no longer orphans,
342
+ // so the next getOrphans() call returns the next set.
343
+ // For dry-run mode, we must track processed IDs to avoid re-fetching the same orphans.
+ // NOTE(review): if an entire batch stays orphaned (no links created), the processed-ID
+ // filter empties the batch and the loop breaks before orphans beyond the first batchSize
+ // are ever fetched — dry-run thus previews only the first batch. Consider excluding
+ // processed IDs in the SQL (or paging with an offset) instead of filtering client-side.
344
+ const processedIds = new Set<string>();
345
+ // eslint-disable-next-line no-constant-condition
346
+ while (true) {
347
+ const batch = this.getOrphans(batchSize);
348
+ // Filter out already-processed entries (relevant for dry-run where orphan status doesn't change)
349
+ const unprocessed = batch.filter((e) => !processedIds.has(e.id));
350
+ if (unprocessed.length === 0) break;
351
+
352
+ for (const entry of unprocessed) {
353
+ processedIds.add(entry.id);
329
354
  const created = this.processOrphan(entry.id, threshold, maxLinks, dryRun, preview);
330
355
  linksCreated += created;
331
356
  processed++;
332
357
  }
333
- opts?.onProgress?.({ processed, total: orphans.length, linksCreated });
358
+ opts?.onProgress?.({ processed, total: totalEstimate, linksCreated });
334
359
  }
335
360
 
336
361
  return {
@@ -381,6 +406,27 @@ export class LinkManager {
381
406
  return null;
382
407
  }
383
408
  }
409
+
410
+ /** Batch-load entry metadata for multiple IDs in a single query. */
411
+ private getEntryMetaBatch(
412
+ entryIds: string[],
413
+ ): Map<string, Omit<LinkedEntry, 'linkType' | 'linkDirection' | 'linkNote'>> {
414
+ const result = new Map<string, Omit<LinkedEntry, 'linkType' | 'linkDirection' | 'linkNote'>>();
415
+ if (entryIds.length === 0) return result;
416
+ try {
417
+ const placeholders = entryIds.map(() => '?').join(',');
418
+ const rows = this.provider.all<{ id: string; title: string; type: string; domain: string }>(
419
+ `SELECT id, title, type, domain FROM entries WHERE id IN (${placeholders})`,
420
+ entryIds,
421
+ );
422
+ for (const row of rows) {
423
+ result.set(row.id, row);
424
+ }
425
+ } catch {
426
+ // graceful degradation
427
+ }
428
+ return result;
429
+ }
384
430
  }
385
431
 
386
432
  // ── Free-standing helpers ─────────────────────────────────────────────
@@ -111,25 +111,18 @@ export function archive(
111
111
  const reason = options.reason ?? `Archived: older than ${options.olderThanDays} days`;
112
112
 
113
113
  return provider.transaction(() => {
114
- const candidates = provider.all<{ id: string }>('SELECT id FROM entries WHERE updated_at < ?', [
115
- cutoff,
116
- ]);
117
-
118
- if (candidates.length === 0) return { archived: 0 };
119
-
120
- let archived = 0;
121
- for (const { id } of candidates) {
122
- provider.run(
123
- `INSERT OR IGNORE INTO entries_archive (id, type, domain, title, severity, description, context, example, counter_example, why, tags, applies_to, created_at, updated_at, valid_from, valid_until, archive_reason)
124
- SELECT id, type, domain, title, severity, description, context, example, counter_example, why, tags, applies_to, created_at, updated_at, valid_from, valid_until, ?
125
- FROM entries WHERE id = ?`,
126
- [reason, id],
127
- );
128
- const result = provider.run('DELETE FROM entries WHERE id = ?', [id]);
129
- archived += result.changes;
130
- }
114
+ // Bulk INSERT INTO ... SELECT copies all matching entries to archive in one query
115
+ provider.run(
116
+ `INSERT OR IGNORE INTO entries_archive (id, type, domain, title, severity, description, context, example, counter_example, why, tags, applies_to, created_at, updated_at, valid_from, valid_until, archive_reason)
117
+ SELECT id, type, domain, title, severity, description, context, example, counter_example, why, tags, applies_to, created_at, updated_at, valid_from, valid_until, ?
118
+ FROM entries WHERE updated_at < ?`,
119
+ [reason, cutoff],
120
+ );
121
+
122
+ // Bulk DELETE — removes all archived entries in one query
123
+ const result = provider.run('DELETE FROM entries WHERE updated_at < ?', [cutoff]);
131
124
 
132
- return { archived };
125
+ return { archived: result.changes };
133
126
  });
134
127
  }
135
128
 
@@ -299,21 +299,29 @@ export function memoryTopics(
299
299
  export function memoriesByProject(
300
300
  provider: PersistenceProvider,
301
301
  ): Array<{ project: string; count: number; memories: Memory[] }> {
302
- const rows = provider.all<{ project: string; count: number }>(
303
- 'SELECT project_path as project, COUNT(*) as count FROM memories WHERE archived_at IS NULL GROUP BY project_path ORDER BY count DESC',
302
+ // Single query fetching all non-archived memories, grouped client-side by project
303
+ const allRows = provider.all<Record<string, unknown>>(
304
+ 'SELECT * FROM memories WHERE archived_at IS NULL ORDER BY project_path, created_at DESC',
304
305
  );
305
306
 
306
- return rows.map((row) => {
307
- const mems = provider.all<Record<string, unknown>>(
308
- 'SELECT * FROM memories WHERE project_path = ? AND archived_at IS NULL ORDER BY created_at DESC',
309
- [row.project],
310
- );
311
- return {
312
- project: row.project,
313
- count: row.count,
314
- memories: mems.map(rowToMemory),
315
- };
316
- });
307
+ const projectMap = new Map<string, Memory[]>();
308
+ for (const row of allRows) {
309
+ const memory = rowToMemory(row);
310
+ const project = memory.projectPath;
311
+ if (!projectMap.has(project)) {
312
+ projectMap.set(project, []);
313
+ }
314
+ projectMap.get(project)!.push(memory);
315
+ }
316
+
317
+ // Sort by count descending (matching original behavior)
318
+ return [...projectMap.entries()]
319
+ .map(([project, memories]) => ({
320
+ project,
321
+ count: memories.length,
322
+ memories,
323
+ }))
324
+ .sort((a, b) => b.count - a.count);
317
325
  }
318
326
 
319
327
  // ── Helper ──────────────────────────────────────────────────────────────
@@ -27,6 +27,7 @@ export function initializeSchema(provider: PersistenceProvider): void {
27
27
  migrateOriginColumn(provider);
28
28
  migrateContentHash(provider);
29
29
  migrateTierColumn(provider);
30
+ migratePerformanceIndexes(provider);
30
31
  }
31
32
 
32
33
  function createCoreTables(provider: PersistenceProvider): void {
@@ -236,3 +237,23 @@ function migrateTierColumn(provider: PersistenceProvider): void {
236
237
  'CREATE INDEX IF NOT EXISTS idx_entries_tier ON entries(tier) WHERE tier IS NOT NULL',
237
238
  );
238
239
  }
240
+
241
+ export function migratePerformanceIndexes(provider: PersistenceProvider): void {
242
+ provider.execSql(`
243
+ CREATE INDEX IF NOT EXISTS idx_memories_archived_at ON memories(archived_at);
244
+ CREATE INDEX IF NOT EXISTS idx_entries_updated_at ON entries(updated_at);
245
+ CREATE INDEX IF NOT EXISTS idx_brain_feedback_entry_id ON brain_feedback(entry_id);
246
+ CREATE INDEX IF NOT EXISTS idx_entries_valid_until ON entries(valid_until) WHERE valid_until IS NOT NULL;
247
+ CREATE INDEX IF NOT EXISTS idx_entries_valid_from ON entries(valid_from) WHERE valid_from IS NOT NULL;
248
+ `);
249
+
250
+ // brain_sessions may not exist yet if intelligence module hasn't initialized
251
+ try {
252
+ provider.execSql(`
253
+ CREATE INDEX IF NOT EXISTS idx_brain_sessions_plan_id ON brain_sessions(plan_id) WHERE plan_id IS NOT NULL;
254
+ CREATE INDEX IF NOT EXISTS idx_brain_sessions_started_at ON brain_sessions(started_at);
255
+ `);
256
+ } catch {
257
+ /* brain_sessions table doesn't exist yet — indexes will be created on next init */
258
+ }
259
+ }