clementine-agent 1.2.2 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/dist/agent/assistant.js +12 -0
  2. package/dist/cli/dashboard.js +3034 -734
  3. package/dist/cli/static/LICENSE-NOTICES.md +12 -0
  4. package/dist/cli/static/drawflow.min.css +1 -0
  5. package/dist/cli/static/drawflow.min.js +1 -0
  6. package/dist/config.d.ts +11 -0
  7. package/dist/config.js +16 -0
  8. package/dist/dashboard/builder/dry-run.d.ts +31 -0
  9. package/dist/dashboard/builder/dry-run.js +138 -0
  10. package/dist/dashboard/builder/events.d.ts +23 -0
  11. package/dist/dashboard/builder/events.js +28 -0
  12. package/dist/dashboard/builder/mcp-invoke.d.ts +25 -0
  13. package/dist/dashboard/builder/mcp-invoke.js +143 -0
  14. package/dist/dashboard/builder/runner.d.ts +68 -0
  15. package/dist/dashboard/builder/runner.js +418 -0
  16. package/dist/dashboard/builder/serializer.d.ts +79 -0
  17. package/dist/dashboard/builder/serializer.js +547 -0
  18. package/dist/dashboard/builder/snapshots.d.ts +32 -0
  19. package/dist/dashboard/builder/snapshots.js +138 -0
  20. package/dist/dashboard/builder/validation.d.ts +26 -0
  21. package/dist/dashboard/builder/validation.js +183 -0
  22. package/dist/gateway/router.js +31 -2
  23. package/dist/index.js +38 -0
  24. package/dist/memory/chunker.js +13 -2
  25. package/dist/memory/hot-cache.d.ts +38 -0
  26. package/dist/memory/hot-cache.js +73 -0
  27. package/dist/memory/integrity.d.ts +28 -0
  28. package/dist/memory/integrity.js +119 -0
  29. package/dist/memory/maintenance.d.ts +23 -2
  30. package/dist/memory/maintenance.js +140 -3
  31. package/dist/memory/store.d.ts +259 -2
  32. package/dist/memory/store.js +751 -21
  33. package/dist/memory/write-queue.d.ts +96 -0
  34. package/dist/memory/write-queue.js +165 -0
  35. package/dist/tools/builder-tools.d.ts +13 -0
  36. package/dist/tools/builder-tools.js +437 -0
  37. package/dist/tools/mcp-server.js +2 -0
  38. package/dist/tools/memory-tools.js +38 -1
  39. package/dist/types.d.ts +56 -2
  40. package/package.json +2 -2
  41. package/vault/00-System/skills/builder-canvas.md +126 -0
@@ -0,0 +1,138 @@
1
+ /**
2
+ * Builder snapshot history — file-based undo for workflow saves.
3
+ *
4
+ * Every successful save writes a copy of the source file to:
5
+ * ~/.clementine/snapshots/builder/<origin>/<key>/<timestamp>.md
6
+ *
7
+ * Bounded: keep at most MAX_PER_WORKFLOW snapshots per workflow id.
8
+ * Older snapshots are pruned on each save.
9
+ *
10
+ * No git dependency, no user-facing CLI — agent invokes via MCP tools
11
+ * (workflow_history / workflow_restore).
12
+ */
13
+ import { copyFileSync, existsSync, mkdirSync, readdirSync, readFileSync, statSync, unlinkSync, writeFileSync, } from 'node:fs';
14
+ import path from 'node:path';
15
+ import os from 'node:os';
16
+ import { parseBuilderId } from './serializer.js';
17
+ function snapRoot() {
18
+ return path.join(process.env.CLEMENTINE_HOME || path.join(os.homedir(), '.clementine'), 'snapshots', 'builder');
19
+ }
20
+ const MAX_PER_WORKFLOW = 20;
21
+ let _snapshotCounter = 0;
22
+ function nextSnapshotFilename() {
23
+ const ts = new Date().toISOString().replace(/[:.]/g, '-');
24
+ const seq = (++_snapshotCounter).toString(36).padStart(3, '0');
25
+ return `${ts}-${seq}.md`;
26
+ }
27
+ function snapshotDirFor(id) {
28
+ const parsed = parseBuilderId(id);
29
+ if (!parsed)
30
+ return null;
31
+ return path.join(snapRoot(), parsed.origin, sanitizeKey(parsed.key));
32
+ }
33
+ function sanitizeKey(key) {
34
+ return key.replace(/[^a-z0-9._-]/gi, '_').slice(0, 120);
35
+ }
36
+ /**
37
+ * Write a snapshot of the current state of a workflow's source file.
38
+ * Best-effort — failures are logged but never block the underlying save.
39
+ */
40
+ export function snapshotWorkflow(id, sourceFile) {
41
+ if (!sourceFile || !existsSync(sourceFile))
42
+ return null;
43
+ const dir = snapshotDirFor(id);
44
+ if (!dir)
45
+ return null;
46
+ try {
47
+ mkdirSync(dir, { recursive: true });
48
+ const filename = nextSnapshotFilename();
49
+ const dst = path.join(dir, filename);
50
+ copyFileSync(sourceFile, dst);
51
+ pruneOldSnapshots(dir);
52
+ return entryFromFile(id, dir, filename);
53
+ }
54
+ catch {
55
+ return null;
56
+ }
57
+ }
58
+ /** List snapshots for a builder id, newest first. */
59
+ export function listSnapshots(id) {
60
+ const dir = snapshotDirFor(id);
61
+ if (!dir || !existsSync(dir))
62
+ return [];
63
+ try {
64
+ const files = readdirSync(dir).filter(f => f.endsWith('.md'));
65
+ return files
66
+ .map(f => entryFromFile(id, dir, f))
67
+ .filter((e) => e != null)
68
+ .sort((a, b) => b.ts.localeCompare(a.ts));
69
+ }
70
+ catch {
71
+ return [];
72
+ }
73
+ }
74
+ /** Restore a snapshot by filename. Writes the snapshot's contents back into sourceFile. */
75
+ export function restoreSnapshot(id, snapshotFilename, sourceFile) {
76
+ const dir = snapshotDirFor(id);
77
+ if (!dir)
78
+ return { ok: false, error: 'unknown id' };
79
+ const safe = path.basename(snapshotFilename);
80
+ const src = path.join(dir, safe);
81
+ if (!existsSync(src))
82
+ return { ok: false, error: 'snapshot not found' };
83
+ if (!sourceFile)
84
+ return { ok: false, error: 'missing sourceFile' };
85
+ try {
86
+ // Snapshot the *current* contents first so the restore itself is reversible.
87
+ snapshotWorkflow(id, sourceFile);
88
+ const content = readFileSync(src, 'utf-8');
89
+ writeFileSync(sourceFile, content, 'utf-8');
90
+ return { ok: true };
91
+ }
92
+ catch (err) {
93
+ return { ok: false, error: err.message };
94
+ }
95
+ }
96
+ function pruneOldSnapshots(dir) {
97
+ let files;
98
+ try {
99
+ files = readdirSync(dir).filter(f => f.endsWith('.md'));
100
+ }
101
+ catch {
102
+ return;
103
+ }
104
+ if (files.length <= MAX_PER_WORKFLOW)
105
+ return;
106
+ const sorted = files.sort(); // ISO timestamps sort naturally
107
+ const overflow = sorted.length - MAX_PER_WORKFLOW;
108
+ for (let i = 0; i < overflow; i++) {
109
+ try {
110
+ unlinkSync(path.join(dir, sorted[i]));
111
+ }
112
+ catch { /* */ }
113
+ }
114
+ }
115
+ function entryFromFile(id, dir, filename) {
116
+ try {
117
+ const full = path.join(dir, filename);
118
+ const stat = statSync(full);
119
+ const ts = filename.replace(/\.md$/, '').replace(/-/g, ':').replace(/^(\d{4}):(\d{2}):(\d{2})T/, '$1-$2-$3T').replace(/T(\d{2}):(\d{2}):(\d{2}):(\d{3})Z?$/, 'T$1:$2:$3.$4Z');
120
+ let preview = '';
121
+ try {
122
+ const head = readFileSync(full, 'utf-8').slice(0, 240);
123
+ preview = head.replace(/\n/g, ' ').slice(0, 120);
124
+ }
125
+ catch { /* */ }
126
+ return {
127
+ id,
128
+ filename,
129
+ ts,
130
+ size: stat.size,
131
+ preview,
132
+ };
133
+ }
134
+ catch {
135
+ return null;
136
+ }
137
+ }
138
+ //# sourceMappingURL=snapshots.js.map
@@ -0,0 +1,26 @@
1
+ /**
2
+ * Builder workflow validation.
3
+ *
4
+ * Static, side-effect-free checks. Used both by `workflow_validate` (agent
5
+ * runs it explicitly) and by save handlers (refuse to persist invalid
6
+ * graphs). Errors block save; warnings are surfaced for the agent and UI
7
+ * but don't prevent persistence (e.g., disabled cron with no schedule —
8
+ * legal but probably wrong).
9
+ */
10
+ import type { WorkflowDefinition } from '../../types.js';
11
+ export type ValidationSeverity = 'error' | 'warning';
12
+ export interface ValidationIssue {
13
+ severity: ValidationSeverity;
14
+ code: string;
15
+ stepId?: string;
16
+ field?: string;
17
+ message: string;
18
+ }
19
+ export interface ValidationResult {
20
+ ok: boolean;
21
+ issues: ValidationIssue[];
22
+ }
23
+ export declare function validateWorkflow(wf: WorkflowDefinition): ValidationResult;
24
+ /** Lightweight cron expression validation. Accepts standard 5-field cron and common ranges/lists/steps. */
25
+ export declare function isCronExpression(expr: string): boolean;
26
+ //# sourceMappingURL=validation.d.ts.map
@@ -0,0 +1,183 @@
1
+ /**
2
+ * Builder workflow validation.
3
+ *
4
+ * Static, side-effect-free checks. Used both by `workflow_validate` (agent
5
+ * runs it explicitly) and by save handlers (refuse to persist invalid
6
+ * graphs). Errors block save; warnings are surfaced for the agent and UI
7
+ * but don't prevent persistence (e.g., disabled cron with no schedule —
8
+ * legal but probably wrong).
9
+ */
10
+ export function validateWorkflow(wf) {
11
+ const issues = [];
12
+ // ── Workflow-level ──
13
+ if (!wf.name || !wf.name.trim()) {
14
+ issues.push({ severity: 'error', code: 'name-empty', message: 'Workflow has no name' });
15
+ }
16
+ if (wf.steps.length === 0) {
17
+ issues.push({ severity: 'error', code: 'no-steps', message: 'Workflow has no steps' });
18
+ }
19
+ if (wf.trigger.schedule && !isCronExpression(wf.trigger.schedule)) {
20
+ issues.push({
21
+ severity: 'error', code: 'bad-schedule',
22
+ field: 'trigger.schedule',
23
+ message: `Invalid cron expression: "${wf.trigger.schedule}"`,
24
+ });
25
+ }
26
+ if (wf.enabled && !wf.trigger.schedule && !wf.trigger.manual) {
27
+ issues.push({
28
+ severity: 'warning', code: 'enabled-no-trigger',
29
+ message: 'Workflow is enabled but has neither a cron schedule nor a manual trigger',
30
+ });
31
+ }
32
+ // ── Step-level ──
33
+ const seenIds = new Set();
34
+ for (const step of wf.steps) {
35
+ if (seenIds.has(step.id)) {
36
+ issues.push({
37
+ severity: 'error', code: 'duplicate-step-id',
38
+ stepId: step.id,
39
+ message: `Duplicate step id: "${step.id}"`,
40
+ });
41
+ }
42
+ seenIds.add(step.id);
43
+ issues.push(...validateStepConfig(step));
44
+ for (const dep of step.dependsOn) {
45
+ if (dep === step.id) {
46
+ issues.push({
47
+ severity: 'error', code: 'self-dep',
48
+ stepId: step.id,
49
+ message: `Step "${step.id}" depends on itself`,
50
+ });
51
+ }
52
+ }
53
+ }
54
+ // Missing dep references
55
+ for (const step of wf.steps) {
56
+ for (const dep of step.dependsOn) {
57
+ if (dep !== step.id && !seenIds.has(dep)) {
58
+ issues.push({
59
+ severity: 'error', code: 'missing-dep',
60
+ stepId: step.id,
61
+ message: `Step "${step.id}" depends on unknown step "${dep}"`,
62
+ });
63
+ }
64
+ }
65
+ }
66
+ // Cycle detection (only worthwhile if there are no missing-dep errors)
67
+ if (!issues.some(i => i.code === 'missing-dep' || i.code === 'self-dep')) {
68
+ const cycle = findCycle(wf.steps);
69
+ if (cycle) {
70
+ issues.push({
71
+ severity: 'error', code: 'cycle',
72
+ message: `Cycle detected through steps: ${cycle.join(' → ')}`,
73
+ });
74
+ }
75
+ }
76
+ return { ok: !issues.some(i => i.severity === 'error'), issues };
77
+ }
78
+ function validateStepConfig(step) {
79
+ const issues = [];
80
+ const kind = step.kind ?? 'prompt';
81
+ switch (kind) {
82
+ case 'prompt':
83
+ if (!step.prompt || !step.prompt.trim()) {
84
+ issues.push({ severity: 'error', code: 'prompt-empty', stepId: step.id, message: `Prompt step "${step.id}" has empty prompt` });
85
+ }
86
+ break;
87
+ case 'mcp':
88
+ if (!step.mcp) {
89
+ issues.push({ severity: 'error', code: 'mcp-missing', stepId: step.id, message: `MCP step "${step.id}" has no config` });
90
+ }
91
+ else {
92
+ if (!step.mcp.server)
93
+ issues.push({ severity: 'error', code: 'mcp-no-server', stepId: step.id, message: `MCP step "${step.id}" missing server` });
94
+ if (!step.mcp.tool)
95
+ issues.push({ severity: 'error', code: 'mcp-no-tool', stepId: step.id, message: `MCP step "${step.id}" missing tool` });
96
+ }
97
+ break;
98
+ case 'channel':
99
+ if (!step.channel) {
100
+ issues.push({ severity: 'error', code: 'channel-missing', stepId: step.id, message: `Channel step "${step.id}" has no config` });
101
+ }
102
+ else {
103
+ if (!step.channel.channel)
104
+ issues.push({ severity: 'error', code: 'channel-no-channel', stepId: step.id, message: `Channel step "${step.id}" missing channel` });
105
+ if (!step.channel.target)
106
+ issues.push({ severity: 'error', code: 'channel-no-target', stepId: step.id, message: `Channel step "${step.id}" missing target` });
107
+ if (!step.channel.content)
108
+ issues.push({ severity: 'warning', code: 'channel-no-content', stepId: step.id, message: `Channel step "${step.id}" has empty content` });
109
+ }
110
+ break;
111
+ case 'transform':
112
+ if (!step.transform || !step.transform.expression) {
113
+ issues.push({ severity: 'error', code: 'transform-no-expr', stepId: step.id, message: `Transform step "${step.id}" missing expression` });
114
+ }
115
+ break;
116
+ case 'conditional':
117
+ if (!step.conditional || !step.conditional.condition) {
118
+ issues.push({ severity: 'error', code: 'conditional-no-cond', stepId: step.id, message: `Conditional step "${step.id}" missing condition` });
119
+ }
120
+ break;
121
+ case 'loop':
122
+ if (!step.loop || !step.loop.items) {
123
+ issues.push({ severity: 'error', code: 'loop-no-items', stepId: step.id, message: `Loop step "${step.id}" missing items expression` });
124
+ }
125
+ break;
126
+ }
127
+ if (typeof step.tier !== 'number' || step.tier < 1 || step.tier > 5) {
128
+ issues.push({ severity: 'warning', code: 'tier-range', stepId: step.id, field: 'tier', message: `Step "${step.id}" tier out of usual range (1-5): ${step.tier}` });
129
+ }
130
+ if (typeof step.maxTurns !== 'number' || step.maxTurns < 1) {
131
+ issues.push({ severity: 'warning', code: 'max-turns-range', stepId: step.id, field: 'maxTurns', message: `Step "${step.id}" maxTurns invalid: ${step.maxTurns}` });
132
+ }
133
+ return issues;
134
+ }
135
+ /** Return the cycle as a list of step ids (first→last→first), or null if acyclic. */
136
+ function findCycle(steps) {
137
+ const adj = new Map();
138
+ for (const s of steps)
139
+ adj.set(s.id, s.dependsOn.slice());
140
+ const WHITE = 0, GRAY = 1, BLACK = 2;
141
+ const color = new Map();
142
+ for (const id of adj.keys())
143
+ color.set(id, WHITE);
144
+ const stack = [];
145
+ function dfs(node) {
146
+ color.set(node, GRAY);
147
+ stack.push(node);
148
+ for (const dep of adj.get(node) ?? []) {
149
+ const c = color.get(dep);
150
+ if (c === GRAY) {
151
+ const cycleStart = stack.indexOf(dep);
152
+ return [...stack.slice(cycleStart), dep];
153
+ }
154
+ if (c === WHITE) {
155
+ const found = dfs(dep);
156
+ if (found)
157
+ return found;
158
+ }
159
+ }
160
+ stack.pop();
161
+ color.set(node, BLACK);
162
+ return null;
163
+ }
164
+ for (const id of adj.keys()) {
165
+ if (color.get(id) === WHITE) {
166
+ const found = dfs(id);
167
+ if (found)
168
+ return found;
169
+ }
170
+ }
171
+ return null;
172
+ }
173
+ const CRON_FIELD = /^(\*|(\d+|\*\/\d+)([,-/](\d+))*)$/;
174
+ /** Lightweight cron expression validation. Accepts standard 5-field cron and common ranges/lists/steps. */
175
+ export function isCronExpression(expr) {
176
+ if (!expr || typeof expr !== 'string')
177
+ return false;
178
+ const fields = expr.trim().split(/\s+/);
179
+ if (fields.length !== 5)
180
+ return false;
181
+ return fields.every(f => CRON_FIELD.test(f) || /^[A-Z*]+$/i.test(f));
182
+ }
183
+ //# sourceMappingURL=validation.js.map
@@ -1445,9 +1445,38 @@ export class Gateway {
1445
1445
  try {
1446
1446
  logger.info({ workflow: workflow.name, inputs }, 'Running workflow');
1447
1447
  try {
1448
- const { WorkflowRunner } = await import('../agent/workflow-runner.js');
1448
+ const [{ WorkflowRunner }, { emitBuilderEvent }, { workflowId }] = await Promise.all([
1449
+ import('../agent/workflow-runner.js'),
1450
+ import('../dashboard/builder/events.js'),
1451
+ import('../dashboard/builder/serializer.js'),
1452
+ ]);
1449
1453
  const runner = new WorkflowRunner(this.assistant);
1450
- const result = await runner.run(workflow, inputs);
1454
+ // Derive builder id so the dashboard canvas can light up live if it's open.
1455
+ const baseName = workflow.sourceFile
1456
+ ? workflow.sourceFile.split('/').pop()?.replace(/\.md$/, '') ?? workflow.name
1457
+ : workflow.name;
1458
+ const builderId = workflowId(baseName);
1459
+ const runId = `sched-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`;
1460
+ emitBuilderEvent({ type: 'run:started', workflowId: builderId, runId, payload: { mode: 'real', stepCount: workflow.steps.length } });
1461
+ const result = await runner.run(workflow, inputs, (updates) => {
1462
+ for (const u of updates) {
1463
+ if (u.status === 'waiting')
1464
+ continue;
1465
+ const status = u.status === 'done' ? 'done' : u.status === 'failed' ? 'failed' : u.status === 'skipped' ? 'skipped' : 'running';
1466
+ emitBuilderEvent({
1467
+ type: 'run:step-status',
1468
+ workflowId: builderId,
1469
+ runId,
1470
+ payload: { stepId: u.stepId, status, durationMs: u.durationMs, mocked: false },
1471
+ });
1472
+ }
1473
+ });
1474
+ emitBuilderEvent({
1475
+ type: 'run:completed',
1476
+ workflowId: builderId,
1477
+ runId,
1478
+ payload: { status: result.status === 'ok' ? 'ok' : 'error', durationMs: result.entry.durationMs },
1479
+ });
1451
1480
  // Re-baseline integrity checksums after workflow (may write to vault)
1452
1481
  scanner.refreshIntegrity();
1453
1482
  return result.output || '*(workflow completed — no output)*';
package/dist/index.js CHANGED
@@ -608,6 +608,15 @@ async function asyncMain() {
608
608
  {
609
609
  const memStore = assistant.getMemoryStore();
610
610
  if (memStore) {
611
+ // Async write queue: route transcript saves, recall traces, outcomes,
612
+ // and access-log inserts off the request thread. ~250ms flush window;
613
+ // drained on shutdown below. Idempotent — safe if called twice.
614
+ try {
615
+ memStore.enableWriteQueue();
616
+ }
617
+ catch (err) {
618
+ logger.warn({ err }, 'Failed to enable memory write queue — falling back to sync writes');
619
+ }
611
620
  const { runStartupMaintenance, startPeriodicMaintenance } = await import('./memory/maintenance.js');
612
621
  // Fire-and-forget startup maintenance
613
622
  runStartupMaintenance(memStore).catch(() => { });
@@ -685,6 +694,16 @@ async function asyncMain() {
685
694
  const heartbeat = new HeartbeatScheduler(gateway, dispatcher);
686
695
  const cronScheduler = new CronScheduler(gateway, dispatcher);
687
696
  heartbeat.setCronScheduler(cronScheduler);
697
+ // Builder runner — wire MCP invoke handler so canvas test runs can hit
698
+ // real read-only MCP tools (gmail.list_unread, github.list_prs, etc.).
699
+ // Stdio clients are pooled per server with idle teardown.
700
+ try {
701
+ const { installBuilderMcpHandler } = await import('./dashboard/builder/mcp-invoke.js');
702
+ installBuilderMcpHandler();
703
+ }
704
+ catch (err) {
705
+ logger.warn({ err }, 'Builder MCP invoke handler install failed (non-fatal)');
706
+ }
688
707
  // Per-agent heartbeats — one cheap-path observer per registered specialist.
689
708
  // LLM tick fires on signal change with the agent's profile and routes
690
709
  // output to their Discord channel.
@@ -999,6 +1018,25 @@ async function asyncMain() {
999
1018
  catch (err) {
1000
1019
  logger.warn({ err }, 'Session flush on shutdown failed');
1001
1020
  }
1021
+ // Drain the memory write queue so transcripts/recall traces/outcomes/access
1022
+ // logs that were enqueued in the last <250ms make it to SQLite.
1023
+ try {
1024
+ const memStore = assistant.getMemoryStore();
1025
+ if (memStore && typeof memStore.flushWrites === 'function') {
1026
+ await memStore.flushWrites();
1027
+ }
1028
+ }
1029
+ catch (err) {
1030
+ logger.warn({ err }, 'Memory write queue drain failed');
1031
+ }
1032
+ // Tear down builder MCP client pool (best-effort).
1033
+ try {
1034
+ const { shutdownBuilderMcpHandler } = await import('./dashboard/builder/mcp-invoke.js');
1035
+ await shutdownBuilderMcpHandler();
1036
+ }
1037
+ catch (err) {
1038
+ logger.warn({ err }, 'Builder MCP handler shutdown failed (non-fatal)');
1039
+ }
1002
1040
  // Now safe to tear down remaining infrastructure
1003
1041
  heartbeat.stop();
1004
1042
  cronScheduler.stop();
@@ -22,6 +22,13 @@ const DIR_CATEGORY_MAP = {
22
22
  '05-Tasks': 'advice',
23
23
  '07-Inbox': 'events',
24
24
  };
25
+ /**
26
+ * Procedural memory: learned workflows live in 00-System/procedures/.
27
+ * Frontmatter `triggers: [verb-phrases]` is parsed separately by the store
28
+ * and used at retrieval time to boost the chunk when a query mentions one
29
+ * of the trigger verbs. Pattern adopted from Mem0's v1.0.0 procedural tier.
30
+ */
31
+ const PROCEDURE_DIR = '00-System/procedures';
25
32
  /** Content keyword patterns for category detection (used as fallback). */
26
33
  const CATEGORY_KEYWORDS = [
27
34
  [/\b(prefer|always use|never use|i like|i don'?t like|i hate)\b/i, 'preferences'],
@@ -37,11 +44,15 @@ function detectCategoryAndTopic(relPath, frontmatter, content) {
37
44
  // 1. Explicit frontmatter category
38
45
  if (frontmatter.category) {
39
46
  const fm = String(frontmatter.category).toLowerCase();
40
- if (['facts', 'events', 'discoveries', 'preferences', 'advice'].includes(fm)) {
47
+ if (['facts', 'events', 'discoveries', 'preferences', 'advice', 'procedure'].includes(fm)) {
41
48
  category = fm;
42
49
  }
43
50
  }
44
- // 2. Directory-based
51
+ // 2. Procedure directory (overrides directory map below).
52
+ if (!category && relPath.startsWith(PROCEDURE_DIR)) {
53
+ category = 'procedure';
54
+ }
55
+ // 3. Directory-based
45
56
  if (!category) {
46
57
  const topDir = relPath.split('/')[0];
47
58
  category = DIR_CATEGORY_MAP[topDir] ?? null;
@@ -0,0 +1,38 @@
1
+ /**
2
+ * Tiny in-process LRU for hot chunk-row reads.
3
+ *
4
+ * Use case: searchContext + recall-trace expansion + dashboard chunk view all
5
+ * funnel through getChunksByIds, which often touches the same hot rows
6
+ * many times within a session. SQLite reads are already fast (microseconds),
7
+ * but the LRU eliminates the per-query overhead and lets us amortize the
8
+ * row-shape unpacking that getChunksByIds does.
9
+ *
10
+ * Bounded: capacity ~1000 by default (~1MB at 1KB/chunk). Map preserves
11
+ * insertion order, so we delete-then-set on access to keep most-recent at
12
+ * the tail and evict from the head.
13
+ *
14
+ * Concurrency: single-process daemon, single thread — no locking needed.
15
+ */
16
+ export declare class HotCache<K, V> {
17
+ private map;
18
+ private capacity;
19
+ private hits;
20
+ private misses;
21
+ private evictions;
22
+ constructor(capacity?: number);
23
+ get(key: K): V | undefined;
24
+ set(key: K, value: V): void;
25
+ delete(key: K): boolean;
26
+ /** Drop all entries — call when bulk-rebuilding the underlying store. */
27
+ clear(): void;
28
+ size(): number;
29
+ stats(): {
30
+ hits: number;
31
+ misses: number;
32
+ evictions: number;
33
+ size: number;
34
+ capacity: number;
35
+ hitRate: number;
36
+ };
37
+ }
38
+ //# sourceMappingURL=hot-cache.d.ts.map
@@ -0,0 +1,73 @@
1
+ /**
2
+ * Tiny in-process LRU for hot chunk-row reads.
3
+ *
4
+ * Use case: searchContext + recall-trace expansion + dashboard chunk view all
5
+ * funnel through getChunksByIds, which often touches the same hot rows
6
+ * many times within a session. SQLite reads are already fast (microseconds),
7
+ * but the LRU eliminates the per-query overhead and lets us amortize the
8
+ * row-shape unpacking that getChunksByIds does.
9
+ *
10
+ * Bounded: capacity ~1000 by default (~1MB at 1KB/chunk). Map preserves
11
+ * insertion order, so we delete-then-set on access to keep most-recent at
12
+ * the tail and evict from the head.
13
+ *
14
+ * Concurrency: single-process daemon, single thread — no locking needed.
15
+ */
16
+ export class HotCache {
17
+ map = new Map();
18
+ capacity;
19
+ hits = 0;
20
+ misses = 0;
21
+ evictions = 0;
22
+ constructor(capacity = 1000) {
23
+ this.capacity = capacity;
24
+ }
25
+ get(key) {
26
+ const v = this.map.get(key);
27
+ if (v === undefined) {
28
+ this.misses++;
29
+ return undefined;
30
+ }
31
+ // Bump to most-recent.
32
+ this.map.delete(key);
33
+ this.map.set(key, v);
34
+ this.hits++;
35
+ return v;
36
+ }
37
+ set(key, value) {
38
+ if (this.map.has(key)) {
39
+ this.map.delete(key);
40
+ }
41
+ else if (this.map.size >= this.capacity) {
42
+ // Evict oldest (first inserted).
43
+ const oldestKey = this.map.keys().next().value;
44
+ if (oldestKey !== undefined) {
45
+ this.map.delete(oldestKey);
46
+ this.evictions++;
47
+ }
48
+ }
49
+ this.map.set(key, value);
50
+ }
51
+ delete(key) {
52
+ return this.map.delete(key);
53
+ }
54
+ /** Drop all entries — call when bulk-rebuilding the underlying store. */
55
+ clear() {
56
+ this.map.clear();
57
+ }
58
+ size() {
59
+ return this.map.size;
60
+ }
61
+ stats() {
62
+ const total = this.hits + this.misses;
63
+ return {
64
+ hits: this.hits,
65
+ misses: this.misses,
66
+ evictions: this.evictions,
67
+ size: this.map.size,
68
+ capacity: this.capacity,
69
+ hitRate: total > 0 ? this.hits / total : 0,
70
+ };
71
+ }
72
+ }
73
+ //# sourceMappingURL=hot-cache.js.map
@@ -0,0 +1,28 @@
1
+ /**
2
+ * Memory store integrity probes — self-healing checks that run on the
3
+ * janitor's periodic cycle. Each probe is independent and conservative:
4
+ * - reports what it found,
5
+ * - repairs only when the fix is non-destructive,
6
+ * - never throws (logs and continues).
7
+ *
8
+ * Three checks today (the cheap, high-value ones):
9
+ * 1. FTS5 contentless-table integrity → auto-rebuild on failure
10
+ * 2. derived_from references to deleted chunks → nullify the dangling refs
11
+ * 3. chunks with content but no embedding → return count for backfill
12
+ *
13
+ * Graph reachability is intentionally NOT probed here — it lives in
14
+ * graph-store.ts's own health probe, which auto-restarts FalkorDB.
15
+ */
16
+ export interface IntegrityReport {
17
+ ftsOk: boolean;
18
+ ftsRebuilt: boolean;
19
+ orphanRefsNulled: number;
20
+ missingEmbeddings: number;
21
+ }
22
+ /**
23
+ * Run all probes and apply safe repairs. Returns a report; never throws.
24
+ * The store argument is typed loose so this module can be called from
25
+ * maintenance.ts without an import cycle.
26
+ */
27
+ export declare function runIntegrityProbes(store: any): IntegrityReport;
28
+ //# sourceMappingURL=integrity.d.ts.map