clementine-agent 1.2.1 → 1.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/assistant.js +12 -0
- package/dist/cli/dashboard.js +724 -106
- package/dist/config.d.ts +11 -0
- package/dist/config.js +16 -0
- package/dist/index.js +20 -0
- package/dist/memory/chunker.js +13 -2
- package/dist/memory/hot-cache.d.ts +38 -0
- package/dist/memory/hot-cache.js +73 -0
- package/dist/memory/integrity.d.ts +28 -0
- package/dist/memory/integrity.js +119 -0
- package/dist/memory/maintenance.d.ts +23 -2
- package/dist/memory/maintenance.js +140 -3
- package/dist/memory/seed-user-model.d.ts +3 -1
- package/dist/memory/seed-user-model.js +6 -5
- package/dist/memory/store.d.ts +259 -2
- package/dist/memory/store.js +751 -21
- package/dist/memory/write-queue.d.ts +96 -0
- package/dist/memory/write-queue.js +165 -0
- package/dist/tools/memory-tools.js +38 -1
- package/dist/types.d.ts +10 -2
- package/package.json +1 -1
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Async write queue for non-critical memory writes.
|
|
3
|
+
*
|
|
4
|
+
* Pattern: 2026-frontier agent memory layers (Mem0, Zep) defer audit and
|
|
5
|
+
* observability writes off the request thread to keep p95 retrieval latency
|
|
6
|
+
* low. Mem0 reports ~91% p95 latency reduction with this pattern; voice
|
|
7
|
+
* agents in particular need it since there's no scrollback to recover.
|
|
8
|
+
*
|
|
9
|
+
* Scope: only non-user-visible writes (transcripts, recall traces, outcomes,
|
|
10
|
+
* access log). User-driven mutations (memory_write, user_model, pinChunk,
|
|
11
|
+
* updateFile) stay synchronous so the user sees immediate persistence.
|
|
12
|
+
*
|
|
13
|
+
* Trade-offs:
|
|
14
|
+
* - `recordOutcome` updates `last_outcome_score` which feeds retrieval
|
|
15
|
+
* ranking. Async-deferring it means up to one flush interval (~250ms) of
|
|
16
|
+
* EMA staleness — acceptable for ranking signal that already smooths.
|
|
17
|
+
* - On hard process kill (SIGKILL, OOM) the in-flight queue is lost. Audit
|
|
18
|
+
* writes are best-effort by design; existing call sites already swallow
|
|
19
|
+
* errors. Drain on SIGTERM/SIGUSR1 covers planned shutdowns.
|
|
20
|
+
*/
|
|
21
|
+
/**
 * Discriminated union of deferrable write ops, tagged by `kind`. All
 * variants are non-user-visible audit/observability writes (see module
 * header): transcript turns, recall traces, outcome updates, access log.
 */
export type QueueOp = {
    /** Append one conversation turn to the session transcript. */
    kind: 'transcript-turn';
    sessionKey: string;
    role: string;
    content: string;
    model: string;
} | {
    /** Record which chunks a retrieval returned for a query. */
    kind: 'recall';
    sessionKey: string;
    messageId: string | null;
    query: string;
    // NOTE(review): chunkIds/scores look index-aligned parallel arrays — confirm in store.
    chunkIds: number[];
    scores: number[];
    agentSlug: string | null;
} | {
    /** Feed back whether recalled chunks were actually referenced. */
    kind: 'outcome';
    outcomes: Array<{
        chunkId: number;
        referenced: boolean;
    }>;
    sessionKey: string | null;
} | {
    /** Append chunk ids to the access log. */
    kind: 'access';
    chunkIds: number[];
    accessType: string;
};
/** Tuning knobs for WriteQueue; all optional with conservative defaults. */
export interface WriteQueueOpts {
    /** Periodic flush cadence in milliseconds. */
    flushIntervalMs?: number;
    /** Buffer length that triggers an immediate flush. */
    flushSize?: number;
    /** Hard cap on the buffer to bound memory under write storms. */
    maxBuffer?: number;
}
/**
 * Minimal write-behind queue for the memory store. Not concurrent-safe at
 * the JS level — assumes the single-process daemon model that Clementine
 * already uses for all memory writes.
 */
export declare class WriteQueue {
    private store;
    private buffer;
    private timer;
    private readonly flushIntervalMs;
    private readonly flushSize;
    private readonly maxBuffer;
    private flushing;
    private dropped;
    constructor(store: any, opts?: WriteQueueOpts);
    /** Begin periodic flushing. Idempotent. */
    start(): void;
    /** Stop the periodic timer (does not drain). */
    stop(): void;
    /** Buffer one op; may kick off an immediate flush at the flushSize threshold. */
    enqueue(op: QueueOp): void;
    /** Number of ops currently buffered. */
    size(): number;
    /** Buffer depth plus lifetime count of ops dropped at the maxBuffer cap. */
    stats(): {
        size: number;
        dropped: number;
    };
    /**
     * Apply all queued ops to the store. Ops that fail are logged and skipped;
     * they don't block the rest of the batch. Concurrent calls collapse — the
     * second caller exits immediately if a flush is in progress.
     */
    flush(): Promise<{
        flushed: number;
        errors: number;
    }>;
    /**
     * Stop the timer and flush everything currently buffered. Loops until
     * the buffer is empty so any ops enqueued during a flush also drain.
     */
    drain(): Promise<void>;
    private apply;
}
/** Convenience: install SIGTERM/SIGUSR1 drain hooks on the process. */
export declare function installShutdownDrain(queue: WriteQueue): void;
//# sourceMappingURL=write-queue.d.ts.map
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Async write queue for non-critical memory writes.
|
|
3
|
+
*
|
|
4
|
+
* Pattern: 2026-frontier agent memory layers (Mem0, Zep) defer audit and
|
|
5
|
+
* observability writes off the request thread to keep p95 retrieval latency
|
|
6
|
+
* low. Mem0 reports ~91% p95 latency reduction with this pattern; voice
|
|
7
|
+
* agents in particular need it since there's no scrollback to recover.
|
|
8
|
+
*
|
|
9
|
+
* Scope: only non-user-visible writes (transcripts, recall traces, outcomes,
|
|
10
|
+
* access log). User-driven mutations (memory_write, user_model, pinChunk,
|
|
11
|
+
* updateFile) stay synchronous so the user sees immediate persistence.
|
|
12
|
+
*
|
|
13
|
+
* Trade-offs:
|
|
14
|
+
* - `recordOutcome` updates `last_outcome_score` which feeds retrieval
|
|
15
|
+
* ranking. Async-deferring it means up to one flush interval (~250ms) of
|
|
16
|
+
* EMA staleness — acceptable for ranking signal that already smooths.
|
|
17
|
+
* - On hard process kill (SIGKILL, OOM) the in-flight queue is lost. Audit
|
|
18
|
+
* writes are best-effort by design; existing call sites already swallow
|
|
19
|
+
* errors. Drain on SIGTERM/SIGUSR1 covers planned shutdowns.
|
|
20
|
+
*/
|
|
21
|
+
import pino from 'pino';
|
|
22
|
+
const logger = pino({ name: 'clementine.write-queue' });
|
|
23
|
+
/**
|
|
24
|
+
* Minimal write-behind queue for the memory store. Not concurrent-safe at
|
|
25
|
+
* the JS level — assumes the single-process daemon model that Clementine
|
|
26
|
+
* already uses for all memory writes.
|
|
27
|
+
*/
|
|
28
|
+
export class WriteQueue {
|
|
29
|
+
store;
|
|
30
|
+
buffer = [];
|
|
31
|
+
timer = null;
|
|
32
|
+
flushIntervalMs;
|
|
33
|
+
flushSize;
|
|
34
|
+
maxBuffer;
|
|
35
|
+
flushing = false;
|
|
36
|
+
dropped = 0;
|
|
37
|
+
constructor(store, opts = {}) {
|
|
38
|
+
this.store = store;
|
|
39
|
+
this.flushIntervalMs = opts.flushIntervalMs ?? 250;
|
|
40
|
+
this.flushSize = opts.flushSize ?? 50;
|
|
41
|
+
this.maxBuffer = opts.maxBuffer ?? 5000;
|
|
42
|
+
}
|
|
43
|
+
/** Begin periodic flushing. Idempotent. */
|
|
44
|
+
start() {
|
|
45
|
+
if (this.timer)
|
|
46
|
+
return;
|
|
47
|
+
this.timer = setInterval(() => {
|
|
48
|
+
void this.flush();
|
|
49
|
+
}, this.flushIntervalMs);
|
|
50
|
+
// Don't keep the event loop alive just for the queue.
|
|
51
|
+
if (typeof this.timer.unref === 'function')
|
|
52
|
+
this.timer.unref();
|
|
53
|
+
}
|
|
54
|
+
/** Stop the periodic timer (does not drain). */
|
|
55
|
+
stop() {
|
|
56
|
+
if (this.timer) {
|
|
57
|
+
clearInterval(this.timer);
|
|
58
|
+
this.timer = null;
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
enqueue(op) {
|
|
62
|
+
if (this.buffer.length >= this.maxBuffer) {
|
|
63
|
+
// Hard cap — drop oldest to bound memory. Surfaces in stats().
|
|
64
|
+
this.buffer.shift();
|
|
65
|
+
this.dropped++;
|
|
66
|
+
}
|
|
67
|
+
this.buffer.push(op);
|
|
68
|
+
if (this.buffer.length >= this.flushSize) {
|
|
69
|
+
void this.flush();
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
size() {
|
|
73
|
+
return this.buffer.length;
|
|
74
|
+
}
|
|
75
|
+
stats() {
|
|
76
|
+
return { size: this.buffer.length, dropped: this.dropped };
|
|
77
|
+
}
|
|
78
|
+
/**
|
|
79
|
+
* Apply all queued ops to the store. Ops that fail are logged and skipped;
|
|
80
|
+
* they don't block the rest of the batch. Concurrent calls collapse — the
|
|
81
|
+
* second caller exits immediately if a flush is in progress.
|
|
82
|
+
*/
|
|
83
|
+
async flush() {
|
|
84
|
+
if (this.flushing)
|
|
85
|
+
return { flushed: 0, errors: 0 };
|
|
86
|
+
if (this.buffer.length === 0)
|
|
87
|
+
return { flushed: 0, errors: 0 };
|
|
88
|
+
this.flushing = true;
|
|
89
|
+
const batch = this.buffer.splice(0);
|
|
90
|
+
let flushed = 0;
|
|
91
|
+
let errors = 0;
|
|
92
|
+
try {
|
|
93
|
+
for (const op of batch) {
|
|
94
|
+
try {
|
|
95
|
+
this.apply(op);
|
|
96
|
+
flushed++;
|
|
97
|
+
}
|
|
98
|
+
catch (err) {
|
|
99
|
+
errors++;
|
|
100
|
+
logger.warn({ err, kind: op.kind }, 'Write op failed');
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
finally {
|
|
105
|
+
this.flushing = false;
|
|
106
|
+
}
|
|
107
|
+
return { flushed, errors };
|
|
108
|
+
}
|
|
109
|
+
/**
|
|
110
|
+
* Stop the timer and flush everything currently buffered. Loops until
|
|
111
|
+
* the buffer is empty so any ops enqueued during a flush also drain.
|
|
112
|
+
*/
|
|
113
|
+
async drain() {
|
|
114
|
+
this.stop();
|
|
115
|
+
// Yield to let any in-flight flush finish, then keep flushing until empty.
|
|
116
|
+
while (this.buffer.length > 0 || this.flushing) {
|
|
117
|
+
if (this.flushing) {
|
|
118
|
+
await new Promise((r) => setTimeout(r, 5));
|
|
119
|
+
continue;
|
|
120
|
+
}
|
|
121
|
+
await this.flush();
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
apply(op) {
|
|
125
|
+
// Call the sync variants directly. The public methods route through this
|
|
126
|
+
// queue when enabled — calling them here would re-enqueue and infinite-loop.
|
|
127
|
+
switch (op.kind) {
|
|
128
|
+
case 'transcript-turn':
|
|
129
|
+
this.store._saveTurnSync?.(op.sessionKey, op.role, op.content, op.model);
|
|
130
|
+
break;
|
|
131
|
+
case 'recall':
|
|
132
|
+
this.store._logRecallTraceSync?.({
|
|
133
|
+
sessionKey: op.sessionKey,
|
|
134
|
+
messageId: op.messageId,
|
|
135
|
+
query: op.query,
|
|
136
|
+
chunkIds: op.chunkIds,
|
|
137
|
+
scores: op.scores,
|
|
138
|
+
agentSlug: op.agentSlug,
|
|
139
|
+
});
|
|
140
|
+
break;
|
|
141
|
+
case 'outcome':
|
|
142
|
+
this.store._recordOutcomeSync?.(op.outcomes, op.sessionKey);
|
|
143
|
+
break;
|
|
144
|
+
case 'access':
|
|
145
|
+
this.store._recordAccessSync?.(op.chunkIds, op.accessType);
|
|
146
|
+
break;
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
// Queues that already have shutdown hooks. Repeated calls for the same queue
// must not stack duplicate listeners: duplicates would launch concurrent
// drains on shutdown and eventually trip MaxListenersExceededWarning.
const drainHookQueues = new WeakSet();
/**
 * Convenience: install SIGTERM/SIGUSR1 drain hooks on the process.
 * Idempotent per queue instance.
 */
export function installShutdownDrain(queue) {
    if (drainHookQueues.has(queue))
        return;
    drainHookQueues.add(queue);
    const drainOnSignal = (signal) => {
        logger.info({ signal, pending: queue.size() }, 'Draining write queue on shutdown');
        queue
            .drain()
            .catch((err) => logger.warn({ err }, 'Drain failed'))
            .finally(() => {
            // Don't exit here — caller's signal handler may have other cleanup.
            // We just guarantee the queue is empty before the process tears down.
        });
    };
    process.on('SIGTERM', () => drainOnSignal('SIGTERM'));
    process.on('SIGUSR1', () => drainOnSignal('SIGUSR1'));
}
|
|
165
|
+
//# sourceMappingURL=write-queue.js.map
|
|
@@ -362,11 +362,48 @@ export function registerMemoryTools(server) {
|
|
|
362
362
|
}
|
|
363
363
|
return textResult(`Unknown action: ${action}`);
|
|
364
364
|
});
|
|
365
|
+
// ── 2b. memory_record_procedure ────────────────────────────────────────
|
|
366
|
+
server.tool('memory_record_procedure', getToolDescription('memory_record_procedure') ?? 'Record a learned workflow as a durable procedure. Use when you notice a repeating multi-step task ("how Nate ships a release", "how to handle inbound replies"). Stored under 00-System/procedures/ with category=procedure and trigger verbs that surface it later. Different from memory_write/MEMORY.md: those store facts, this stores reusable HOW-TO. From Mem0\'s 2026 procedural-memory pattern.', {
|
|
367
|
+
title: z.string().describe('Short procedure title (becomes filename slug)'),
|
|
368
|
+
steps: z.string().describe('Numbered steps or markdown body describing how to perform the task'),
|
|
369
|
+
triggers: z.array(z.string()).min(1).describe('Verb phrases (e.g. ["ship release", "publish to npm"]) that should surface this procedure when the user query contains them. Lowercase preferred.'),
|
|
370
|
+
notes: z.string().optional().describe('Optional context: when to use, when NOT to use, gotchas'),
|
|
371
|
+
}, async ({ title, steps, triggers, notes }) => {
|
|
372
|
+
const slug = title.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-+|-+$/g, '').slice(0, 80);
|
|
373
|
+
if (!slug)
|
|
374
|
+
return textResult('Error: title must contain alphanumerics');
|
|
375
|
+
const proceduresDir = path.join(SYSTEM_DIR, 'procedures');
|
|
376
|
+
mkdirSync(proceduresDir, { recursive: true });
|
|
377
|
+
const filePath = path.join(proceduresDir, `${slug}.md`);
|
|
378
|
+
const triggersYaml = triggers.map((t) => ` - ${JSON.stringify(t.toLowerCase())}`).join('\n');
|
|
379
|
+
const body = [
|
|
380
|
+
'---',
|
|
381
|
+
`title: ${JSON.stringify(title)}`,
|
|
382
|
+
'category: procedure',
|
|
383
|
+
'triggers:',
|
|
384
|
+
triggersYaml,
|
|
385
|
+
`created_at: ${new Date().toISOString()}`,
|
|
386
|
+
ACTIVE_AGENT_SLUG ? `agent_slug: ${JSON.stringify(ACTIVE_AGENT_SLUG)}` : '',
|
|
387
|
+
'---',
|
|
388
|
+
'',
|
|
389
|
+
`# ${title}`,
|
|
390
|
+
'',
|
|
391
|
+
'## Steps',
|
|
392
|
+
'',
|
|
393
|
+
steps.trim(),
|
|
394
|
+
'',
|
|
395
|
+
...(notes ? ['## Notes', '', notes.trim(), ''] : []),
|
|
396
|
+
].filter((line) => line !== '').join('\n') + '\n';
|
|
397
|
+
writeFileSync(filePath, body, 'utf-8');
|
|
398
|
+
const rel = path.relative(VAULT_DIR, filePath);
|
|
399
|
+
await incrementalSync(rel, ACTIVE_AGENT_SLUG ?? undefined);
|
|
400
|
+
return textResult(`Recorded procedure: ${rel} (triggers: ${triggers.join(', ')})`);
|
|
401
|
+
});
|
|
365
402
|
// ── 3. memory_search ───────────────────────────────────────────────────
|
|
366
403
|
server.tool('memory_search', getToolDescription('memory_search') ?? 'FTS5 search across all vault notes. Returns matching chunks with relevance scores. Optional category/topic filters narrow results.', {
|
|
367
404
|
query: z.string().describe('Search text'),
|
|
368
405
|
limit: z.number().optional().describe('Max results (default 20)'),
|
|
369
|
-
category: z.enum(['facts', 'events', 'discoveries', 'preferences', 'advice']).optional().describe('Filter by category'),
|
|
406
|
+
category: z.enum(['facts', 'events', 'discoveries', 'preferences', 'advice', 'procedure']).optional().describe('Filter by category'),
|
|
370
407
|
topic: z.string().optional().describe('Filter by topic'),
|
|
371
408
|
}, async ({ query, limit, category, topic }) => {
|
|
372
409
|
const maxResults = limit ?? 20;
|
package/dist/types.d.ts
CHANGED
|
@@ -7,7 +7,7 @@ export interface SearchResult {
|
|
|
7
7
|
content: string;
|
|
8
8
|
score: float;
|
|
9
9
|
chunkType: string;
|
|
10
|
-
matchType: 'fts' | 'recency' | 'timeline' | 'vector';
|
|
10
|
+
matchType: 'fts' | 'recency' | 'timeline' | 'vector' | 'graph';
|
|
11
11
|
lastUpdated: string;
|
|
12
12
|
chunkId: number;
|
|
13
13
|
salience: number;
|
|
@@ -17,7 +17,7 @@ export interface SearchResult {
|
|
|
17
17
|
topic?: string | null;
|
|
18
18
|
pinned?: boolean;
|
|
19
19
|
}
|
|
20
|
-
export type ChunkCategory = 'facts' | 'events' | 'discoveries' | 'preferences' | 'advice';
|
|
20
|
+
export type ChunkCategory = 'facts' | 'events' | 'discoveries' | 'preferences' | 'advice' | 'procedure';
|
|
21
21
|
export interface Chunk {
|
|
22
22
|
sourceFile: string;
|
|
23
23
|
section: string;
|
|
@@ -716,6 +716,14 @@ export interface RemoteAccessConfig {
|
|
|
716
716
|
autoPost: boolean;
|
|
717
717
|
lastStarted?: string;
|
|
718
718
|
}
|
|
719
|
+
export interface SessionRecord {
|
|
720
|
+
id: string;
|
|
721
|
+
expiresAt: number;
|
|
722
|
+
persistent: boolean;
|
|
723
|
+
createdAt: number;
|
|
724
|
+
lastUsedAt: number;
|
|
725
|
+
userAgent?: string;
|
|
726
|
+
}
|
|
719
727
|
export interface ConfigRevision {
|
|
720
728
|
id?: number;
|
|
721
729
|
agentSlug: string;
|