clementine-agent 1.2.2 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/dist/agent/assistant.js +12 -0
  2. package/dist/cli/dashboard.js +3034 -734
  3. package/dist/cli/static/LICENSE-NOTICES.md +12 -0
  4. package/dist/cli/static/drawflow.min.css +1 -0
  5. package/dist/cli/static/drawflow.min.js +1 -0
  6. package/dist/config.d.ts +11 -0
  7. package/dist/config.js +16 -0
  8. package/dist/dashboard/builder/dry-run.d.ts +31 -0
  9. package/dist/dashboard/builder/dry-run.js +138 -0
  10. package/dist/dashboard/builder/events.d.ts +23 -0
  11. package/dist/dashboard/builder/events.js +28 -0
  12. package/dist/dashboard/builder/mcp-invoke.d.ts +25 -0
  13. package/dist/dashboard/builder/mcp-invoke.js +143 -0
  14. package/dist/dashboard/builder/runner.d.ts +68 -0
  15. package/dist/dashboard/builder/runner.js +418 -0
  16. package/dist/dashboard/builder/serializer.d.ts +79 -0
  17. package/dist/dashboard/builder/serializer.js +547 -0
  18. package/dist/dashboard/builder/snapshots.d.ts +32 -0
  19. package/dist/dashboard/builder/snapshots.js +138 -0
  20. package/dist/dashboard/builder/validation.d.ts +26 -0
  21. package/dist/dashboard/builder/validation.js +183 -0
  22. package/dist/gateway/router.js +31 -2
  23. package/dist/index.js +38 -0
  24. package/dist/memory/chunker.js +13 -2
  25. package/dist/memory/hot-cache.d.ts +38 -0
  26. package/dist/memory/hot-cache.js +73 -0
  27. package/dist/memory/integrity.d.ts +28 -0
  28. package/dist/memory/integrity.js +119 -0
  29. package/dist/memory/maintenance.d.ts +23 -2
  30. package/dist/memory/maintenance.js +140 -3
  31. package/dist/memory/store.d.ts +259 -2
  32. package/dist/memory/store.js +751 -21
  33. package/dist/memory/write-queue.d.ts +96 -0
  34. package/dist/memory/write-queue.js +165 -0
  35. package/dist/tools/builder-tools.d.ts +13 -0
  36. package/dist/tools/builder-tools.js +437 -0
  37. package/dist/tools/mcp-server.js +2 -0
  38. package/dist/tools/memory-tools.js +38 -1
  39. package/dist/types.d.ts +56 -2
  40. package/package.json +2 -2
  41. package/vault/00-System/skills/builder-canvas.md +126 -0
@@ -0,0 +1,96 @@
1
+ /**
2
+ * Async write queue for non-critical memory writes.
3
+ *
4
+ * Pattern: 2026-frontier agent memory layers (Mem0, Zep) defer audit and
5
+ * observability writes off the request thread to keep p95 retrieval latency
6
+ * low. Mem0 reports ~91% p95 latency reduction with this pattern; voice
7
+ * agents in particular need it since there's no scrollback to recover.
8
+ *
9
+ * Scope: only non-user-visible writes (transcripts, recall traces, outcomes,
10
+ * access log). User-driven mutations (memory_write, user_model, pinChunk,
11
+ * updateFile) stay synchronous so the user sees immediate persistence.
12
+ *
13
+ * Trade-offs:
14
+ * - `recordOutcome` updates `last_outcome_score` which feeds retrieval
15
+ * ranking. Async-deferring it means up to one flush interval (~250ms) of
16
+ * EMA staleness — acceptable for ranking signal that already smooths.
17
+ * - On hard process kill (SIGKILL, OOM) the in-flight queue is lost. Audit
18
+ * writes are best-effort by design; existing call sites already swallow
19
+ * errors. Drain on SIGTERM/SIGUSR1 covers planned shutdowns.
20
+ */
21
/**
 * Discriminated union of the deferrable write operations. Every variant is a
 * non-user-visible audit/observability write (see the module header); the
 * `kind` tag selects which synchronous store method the queue applies.
 */
export type QueueOp = {
    /** Append one conversation turn to the session transcript. */
    kind: 'transcript-turn';
    sessionKey: string;
    role: string;
    content: string;
    model: string;
} | {
    /** Trace one retrieval: which chunks were recalled and how they scored. */
    kind: 'recall';
    sessionKey: string;
    messageId: string | null;
    query: string;
    chunkIds: number[];
    scores: number[];
    agentSlug: string | null;
} | {
    /** Feed back whether recalled chunks were actually referenced (ranking signal). */
    kind: 'outcome';
    outcomes: Array<{
        chunkId: number;
        referenced: boolean;
    }>;
    sessionKey: string | null;
} | {
    /** Append entries to the chunk access log. */
    kind: 'access';
    chunkIds: number[];
    accessType: string;
};
47
/** Tuning knobs for the write queue. All optional. */
export interface WriteQueueOpts {
    /** Period of the background flush timer, in ms (implementation default: 250). */
    flushIntervalMs?: number;
    /** Buffer length that triggers an immediate flush on enqueue (implementation default: 50). */
    flushSize?: number;
    /** Hard cap on the buffer to bound memory under write storms. */
    maxBuffer?: number;
}
53
/**
 * Minimal write-behind queue for the memory store. Not concurrent-safe at
 * the JS level — assumes the single-process daemon model that Clementine
 * already uses for all memory writes.
 */
export declare class WriteQueue {
    private store;
    /** Pending ops, applied FIFO on flush. */
    private buffer;
    /** Periodic flush timer handle; null while stopped. */
    private timer;
    private readonly flushIntervalMs;
    private readonly flushSize;
    private readonly maxBuffer;
    /** True while a flush is applying a batch; concurrent flushes collapse. */
    private flushing;
    /** Count of ops evicted because the buffer hit maxBuffer. */
    private dropped;
    constructor(store: any, opts?: WriteQueueOpts);
    /** Begin periodic flushing. Idempotent. */
    start(): void;
    /** Stop the periodic timer (does not drain). */
    stop(): void;
    /** Buffer an op; kicks off a flush once the buffer reaches flushSize. */
    enqueue(op: QueueOp): void;
    /** Number of ops currently buffered. */
    size(): number;
    /** Current buffer length plus how many ops were dropped at the maxBuffer cap. */
    stats(): {
        size: number;
        dropped: number;
    };
    /**
     * Apply all queued ops to the store. Ops that fail are logged and skipped;
     * they don't block the rest of the batch. Concurrent calls collapse — the
     * second caller exits immediately if a flush is in progress.
     */
    flush(): Promise<{
        flushed: number;
        errors: number;
    }>;
    /**
     * Stop the timer and flush everything currently buffered. Loops until
     * the buffer is empty so any ops enqueued during a flush also drain.
     */
    drain(): Promise<void>;
    /** Route one op to the store's synchronous write variant. */
    private apply;
}
94
/**
 * Convenience: install SIGTERM/SIGUSR1 drain hooks on the process.
 * The hooks only drain the queue — they never call process.exit, so the
 * host's own signal handlers remain responsible for terminating.
 */
export declare function installShutdownDrain(queue: WriteQueue): void;
96
+ //# sourceMappingURL=write-queue.d.ts.map
@@ -0,0 +1,165 @@
1
+ /**
2
+ * Async write queue for non-critical memory writes.
3
+ *
4
+ * Pattern: 2026-frontier agent memory layers (Mem0, Zep) defer audit and
5
+ * observability writes off the request thread to keep p95 retrieval latency
6
+ * low. Mem0 reports ~91% p95 latency reduction with this pattern; voice
7
+ * agents in particular need it since there's no scrollback to recover.
8
+ *
9
+ * Scope: only non-user-visible writes (transcripts, recall traces, outcomes,
10
+ * access log). User-driven mutations (memory_write, user_model, pinChunk,
11
+ * updateFile) stay synchronous so the user sees immediate persistence.
12
+ *
13
+ * Trade-offs:
14
+ * - `recordOutcome` updates `last_outcome_score` which feeds retrieval
15
+ * ranking. Async-deferring it means up to one flush interval (~250ms) of
16
+ * EMA staleness — acceptable for ranking signal that already smooths.
17
+ * - On hard process kill (SIGKILL, OOM) the in-flight queue is lost. Audit
18
+ * writes are best-effort by design; existing call sites already swallow
19
+ * errors. Drain on SIGTERM/SIGUSR1 covers planned shutdowns.
20
+ */
21
+ import pino from 'pino';
22
+ const logger = pino({ name: 'clementine.write-queue' });
23
/**
 * Minimal write-behind queue for the memory store. Not concurrent-safe at
 * the JS level — assumes the single-process daemon model that Clementine
 * already uses for all memory writes.
 */
export class WriteQueue {
    store;
    buffer = [];
    timer = null;
    flushIntervalMs;
    flushSize;
    maxBuffer;
    flushing = false;
    dropped = 0;
    constructor(store, opts = {}) {
        this.store = store;
        this.flushIntervalMs = opts.flushIntervalMs ?? 250;
        this.flushSize = opts.flushSize ?? 50;
        this.maxBuffer = opts.maxBuffer ?? 5000;
    }
    /** Begin periodic flushing. Idempotent. */
    start() {
        if (this.timer !== null)
            return;
        const tick = () => {
            void this.flush();
        };
        this.timer = setInterval(tick, this.flushIntervalMs);
        // Never keep the event loop alive solely for this queue.
        if (typeof this.timer.unref === 'function') {
            this.timer.unref();
        }
    }
    /** Stop the periodic timer (does not drain). */
    stop() {
        if (this.timer === null)
            return;
        clearInterval(this.timer);
        this.timer = null;
    }
    /** Buffer an op; kicks off a flush once the buffer reaches flushSize. */
    enqueue(op) {
        // Bound memory under write storms: once at the hard cap, evict the
        // oldest entry. Evictions are counted and surfaced via stats().
        if (this.buffer.length >= this.maxBuffer) {
            this.buffer.shift();
            this.dropped += 1;
        }
        this.buffer.push(op);
        if (this.buffer.length >= this.flushSize) {
            void this.flush();
        }
    }
    /** Number of ops currently buffered. */
    size() {
        return this.buffer.length;
    }
    /** Buffer length plus how many ops were dropped at the maxBuffer cap. */
    stats() {
        return { size: this.buffer.length, dropped: this.dropped };
    }
    /**
     * Apply all queued ops to the store. Ops that fail are logged and skipped;
     * they don't block the rest of the batch. Concurrent calls collapse — the
     * second caller exits immediately if a flush is in progress.
     */
    async flush() {
        if (this.flushing || this.buffer.length === 0) {
            return { flushed: 0, errors: 0 };
        }
        this.flushing = true;
        // Take the whole buffer at once; ops enqueued during the loop land in
        // a fresh array and wait for the next flush.
        const batch = this.buffer.splice(0);
        let applied = 0;
        let failed = 0;
        try {
            for (const op of batch) {
                try {
                    this.apply(op);
                    applied += 1;
                }
                catch (err) {
                    failed += 1;
                    logger.warn({ err, kind: op.kind }, 'Write op failed');
                }
            }
        }
        finally {
            this.flushing = false;
        }
        return { flushed: applied, errors: failed };
    }
    /**
     * Stop the timer and flush everything currently buffered. Loops until
     * the buffer is empty so any ops enqueued during a flush also drain.
     */
    async drain() {
        this.stop();
        for (;;) {
            if (this.buffer.length === 0 && !this.flushing)
                break;
            if (this.flushing) {
                // An in-flight flush owns the batch; back off briefly.
                await new Promise((resolve) => setTimeout(resolve, 5));
            }
            else {
                await this.flush();
            }
        }
    }
    /** Route one op to the store's synchronous write variant. */
    apply(op) {
        // Call the sync variants directly. The public methods route through this
        // queue when enabled — calling them here would re-enqueue and infinite-loop.
        if (op.kind === 'transcript-turn') {
            this.store._saveTurnSync?.(op.sessionKey, op.role, op.content, op.model);
        }
        else if (op.kind === 'recall') {
            this.store._logRecallTraceSync?.({
                sessionKey: op.sessionKey,
                messageId: op.messageId,
                query: op.query,
                chunkIds: op.chunkIds,
                scores: op.scores,
                agentSlug: op.agentSlug,
            });
        }
        else if (op.kind === 'outcome') {
            this.store._recordOutcomeSync?.(op.outcomes, op.sessionKey);
        }
        else if (op.kind === 'access') {
            this.store._recordAccessSync?.(op.chunkIds, op.accessType);
        }
    }
}
150
/** Convenience: install SIGTERM/SIGUSR1 drain hooks on the process. */
export function installShutdownDrain(queue) {
    const onSignal = (signal) => {
        logger.info({ signal, pending: queue.size() }, 'Draining write queue on shutdown');
        const drained = queue.drain();
        drained
            .catch((err) => logger.warn({ err }, 'Drain failed'))
            .finally(() => {
            // Deliberately no process.exit() here — the caller's own signal
            // handlers may still have cleanup to run. We only guarantee the
            // queue is empty before the process tears down.
        });
    };
    process.on('SIGTERM', () => onSignal('SIGTERM'));
    process.on('SIGUSR1', () => onSignal('SIGUSR1'));
}
165
+ //# sourceMappingURL=write-queue.js.map
@@ -0,0 +1,13 @@
1
+ /**
2
+ * Clementine — Builder MCP tools.
3
+ *
4
+ * Agent-facing surface for managing workflows + crons on the visual
5
+ * canvas. Read, edit, validate, and dry-run. Actual execution lives
6
+ * separately in the runner (Phase 2+).
7
+ *
8
+ * Outputs are terse plain text by default for prompt efficiency; pass
9
+ * `verbose: true` to get the underlying JSON for debugging.
10
+ */
11
+ import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
12
/** Register the Builder canvas tools (workflow/cron read, edit, validate, dry-run) on the given MCP server. */
export declare function registerBuilderTools(server: McpServer): void;
13
+ //# sourceMappingURL=builder-tools.d.ts.map