kongbrain 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +385 -0
- package/openclaw.plugin.json +66 -0
- package/package.json +65 -0
- package/src/acan.ts +309 -0
- package/src/causal.ts +237 -0
- package/src/cognitive-check.ts +330 -0
- package/src/config.ts +64 -0
- package/src/context-engine.ts +487 -0
- package/src/daemon-manager.ts +148 -0
- package/src/daemon-types.ts +65 -0
- package/src/embeddings.ts +77 -0
- package/src/errors.ts +43 -0
- package/src/graph-context.ts +989 -0
- package/src/hooks/after-tool-call.ts +99 -0
- package/src/hooks/before-prompt-build.ts +44 -0
- package/src/hooks/before-tool-call.ts +86 -0
- package/src/hooks/llm-output.ts +173 -0
- package/src/identity.ts +218 -0
- package/src/index.ts +435 -0
- package/src/intent.ts +190 -0
- package/src/memory-daemon.ts +495 -0
- package/src/orchestrator.ts +348 -0
- package/src/prefetch.ts +200 -0
- package/src/reflection.ts +280 -0
- package/src/retrieval-quality.ts +266 -0
- package/src/schema.surql +387 -0
- package/src/skills.ts +343 -0
- package/src/soul.ts +936 -0
- package/src/state.ts +119 -0
- package/src/surreal.ts +1371 -0
- package/src/tools/core-memory.ts +120 -0
- package/src/tools/introspect.ts +329 -0
- package/src/tools/recall.ts +102 -0
- package/src/wakeup.ts +318 -0
- package/src/workspace-migrate.ts +752 -0
|
@@ -0,0 +1,495 @@
|
|
|
/**
 * Memory Daemon — Persistent worker thread for incremental extraction.
 *
 * Runs alongside the main conversation thread for the entire session.
 * Receives turn batches from the main thread, calls an LLM for incremental
 * extraction of 9 knowledge types, and writes results to SurrealDB.
 *
 * Extracts: causal chains, monologue traces, resolved memories,
 * concepts, corrections, preferences, artifacts, decisions, skills.
 *
 * This file runs inside a Worker thread — it is NOT imported by the main thread.
 *
 * Ported from kongbrain — creates own SurrealStore/EmbeddingService instances.
 */
import { parentPort, workerData } from "node:worker_threads";
import type { DaemonMessage, DaemonResponse, DaemonWorkerData, PriorExtractions, TurnData } from "./daemon-types.js";
import { SurrealStore } from "./surreal.js";
import { EmbeddingService } from "./embeddings.js";
import { swallow } from "./errors.js";

// Hard guard: without a parentPort there is no one to report results to.
if (!parentPort) {
  throw new Error("memory-daemon.ts must be run as a worker thread");
}

// Configuration handed over by the spawning thread at Worker construction time.
const config = workerData as DaemonWorkerData;

// Worker-local instances
// Assigned in init(); used by the extraction and shutdown paths.
let store: SurrealStore;
let embeddings: EmbeddingService;

// --- Cumulative extraction counts ---
// Incremented as extractions are accepted; reported back to the main thread
// in "extraction_complete" and "status" messages.
const counts = {
  turns: 0, causal: 0, monologue: 0, resolved: 0, concept: 0,
  correction: 0, preference: 0, artifact: 0, decision: 0, skill: 0, errors: 0,
};

// True while a batch is being extracted — serializes work (see drainQueue).
let processing = false;
// Set by the "shutdown" message; stops the drain loop.
let shuttingDown = false;
// Pending batches. handleMessage keeps at most one entry (newest wins).
const batchQueue: DaemonMessage[] = [];

// Names/paths already extracted this session; used to deduplicate across
// batches and echoed into the system prompt's "ALREADY EXTRACTED" section.
const priorState: PriorExtractions = {
  conceptNames: [], artifactPaths: [], skillNames: [],
};

// --- Initialization ---
|
|
46
|
+
|
|
47
|
+
async function init(): Promise<boolean> {
|
|
48
|
+
try {
|
|
49
|
+
store = new SurrealStore(config.surrealConfig);
|
|
50
|
+
await store.initialize();
|
|
51
|
+
|
|
52
|
+
embeddings = new EmbeddingService(config.embeddingConfig);
|
|
53
|
+
await embeddings.initialize();
|
|
54
|
+
|
|
55
|
+
return true;
|
|
56
|
+
} catch (e) {
|
|
57
|
+
swallow.warn("memory-daemon:init", e);
|
|
58
|
+
return false;
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// --- Build the extraction prompt ---

/**
 * Build the system prompt for the daemon's LLM extraction call.
 *
 * The prompt requests a single JSON object with nine array fields. The
 * "monologue" and "resolved" sections are only spelled out when the batch
 * actually carries thinking text / retrieved memories; otherwise the model
 * is instructed to return [] for those fields.
 *
 * @param hasThinking - whether a [THINKING] section will be in the user message
 * @param hasRetrievedMemories - whether [RETRIEVED MEMORIES] will be present
 * @param prior - names/paths already extracted this session, injected as a
 *                dedup notice so the model does not repeat them
 * @returns the complete system prompt string
 */
function buildSystemPrompt(
  hasThinking: boolean,
  hasRetrievedMemories: boolean,
  prior: PriorExtractions,
): string {
  // Dedup notice — only included once at least one thing has been extracted.
  const dedup = prior.conceptNames.length > 0 || prior.artifactPaths.length > 0 || prior.skillNames.length > 0
    ? `\n\nALREADY EXTRACTED (do NOT repeat these):
- Concepts: ${prior.conceptNames.length > 0 ? prior.conceptNames.join(", ") : "none yet"}
- Artifacts: ${prior.artifactPaths.length > 0 ? prior.artifactPaths.join(", ") : "none yet"}
- Skills: ${prior.skillNames.length > 0 ? prior.skillNames.join(", ") : "none yet"}`
    : "";

  return `You are a memory extraction daemon. Analyze the conversation transcript and extract structured knowledge.
Return ONLY valid JSON with these fields (all arrays, use [] if none found for a field):
${dedup}

{
  "causal": [
    // Cause->effect chains from debugging, refactoring, fixing, or building.
    // Only when there's a clear trigger and outcome. Max 5.
    {"triggerText": "what caused it (max 200 chars)", "outcomeText": "what happened as a result", "chainType": "debug|refactor|feature|fix", "success": true/false, "confidence": 0.0-1.0, "description": "1-sentence summary"}
  ],
${hasThinking ? `  "monologue": [
    // Internal reasoning moments worth preserving: doubts, tradeoffs, insights, realizations.
    // Skip routine reasoning. Only novel/surprising thoughts. Max 5.
    {"category": "doubt|tradeoff|alternative|insight|realization", "content": "1-2 sentence description"}
  ],` : '  "monologue": [],'}
${hasRetrievedMemories ? `  "resolved": [
    // IDs from [RETRIEVED MEMORIES] that have been FULLY addressed/fixed/completed in this conversation.
    // Must be exact IDs like "memory:abc123". Empty [] if none resolved.
    "memory:example_id"
  ],` : '  "resolved": [],'}
  "concepts": [
    // Technical facts, knowledge, decisions, or findings worth remembering.
    // NOT conversation flow — only things that would be useful to recall later.
    // Categories: technical, architectural, behavioral, environmental, procedural
    // Max 8 per batch.
    {"name": "short identifier (3-6 words)", "content": "the actual knowledge (1-3 sentences)", "category": "technical|architectural|behavioral|environmental|procedural", "importance": 1-10}
  ],
  "corrections": [
    // Moments where the user corrects the assistant's understanding, approach, or output.
    // These are high-value signals about what NOT to do.
    {"original": "what the assistant said/did wrong", "correction": "what the user said the right answer/approach is", "context": "brief context of when this happened"}
  ],
  "preferences": [
    // User behavioral signals: communication style, workflow preferences, tool preferences.
    // Only extract NOVEL preferences not already obvious. Max 5.
    {"preference": "what the user prefers (1 sentence)", "evidence": "what they said/did that shows this"}
  ],
  "artifacts": [
    // Files that were created, modified, read, or discussed.
    // Extract from tool calls (bash, read, write, edit, grep commands).
    {"path": "/path/to/file", "action": "created|modified|read|discussed", "summary": "what was done to it (1 sentence)"}
  ],
  "decisions": [
    // Explicit choices made during the conversation with reasoning.
    // Architecture decisions, tool choices, approach selections. Max 3.
    {"decision": "what was decided", "rationale": "why", "alternatives_considered": "what else was considered (or 'none discussed')"}
  ],
  "skills": [
    // Reusable multi-step procedures that WORKED. Only extract when a procedure
    // was successfully completed and would be useful to repeat. Max 2.
    {"name": "short name", "steps": ["step 1", "step 2", "..."], "trigger_context": "when to use this skill"}
  ]
}

RULES:
- Return ONLY the JSON object. No markdown, no explanation.
- Every field must be present (use [] for empty).
- Quality over quantity — skip weak/uncertain extractions.
- Concepts should be self-contained — readable without the conversation.
- Corrections are the MOST important signal. Never miss one.
- For artifacts, extract file paths from bash/tool commands in the transcript.`;
}
|
|
138
|
+
|
|
139
|
+
function buildTranscript(turns: TurnData[]): string {
|
|
140
|
+
return turns
|
|
141
|
+
.map(t => {
|
|
142
|
+
const prefix = t.tool_name ? `[tool:${t.tool_name}]` : `[${t.role}]`;
|
|
143
|
+
let line = `${prefix} ${(t.text ?? "").slice(0, 1500)}`;
|
|
144
|
+
if (t.tool_result) line += `\n -> ${t.tool_result.slice(0, 500)}`;
|
|
145
|
+
if (t.file_paths && t.file_paths.length > 0) line += `\n files: ${t.file_paths.join(", ")}`;
|
|
146
|
+
return line;
|
|
147
|
+
})
|
|
148
|
+
.join("\n");
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
// --- Main extraction logic ---

/**
 * Process one "turn_batch" message end-to-end:
 *  1. merge the caller-provided prior extraction state into priorState,
 *  2. assemble transcript/thinking/memory sections and call the configured LLM,
 *  3. parse the returned JSON (with trailing-comma repair and a per-field
 *     regex fallback),
 *  4. validate and persist each of the nine knowledge types concurrently,
 *  5. report cumulative counts back to the main thread.
 *
 * Never throws to the caller: failures increment counts.errors and post an
 * "error" message instead. `processing` is held true for the whole call so
 * batches are serialized by the drain loop.
 */
async function processExtraction(msg: DaemonMessage & { type: "turn_batch" }): Promise<void> {
  processing = true;
  try {
    const { turns, thinking, retrievedMemories, sessionId, priorExtractions } = msg;

    // Too little signal to be worth an LLM call.
    if (turns.length < 2) return;

    // Merge incoming prior state
    if (priorExtractions) {
      for (const name of priorExtractions.conceptNames) {
        if (!priorState.conceptNames.includes(name)) priorState.conceptNames.push(name);
      }
      for (const path of priorExtractions.artifactPaths) {
        if (!priorState.artifactPaths.includes(path)) priorState.artifactPaths.push(path);
      }
      for (const name of priorExtractions.skillNames) {
        if (!priorState.skillNames.includes(name)) priorState.skillNames.push(name);
      }
    }

    const transcript = buildTranscript(turns);
    const sections: string[] = [`[TRANSCRIPT]\n${transcript.slice(0, 60000)}`];

    // Only the last 8 thinking fragments, capped at 4000 chars total.
    if (thinking.length > 0) {
      sections.push(`[THINKING]\n${thinking.slice(-8).join("\n---\n").slice(0, 4000)}`);
    }

    if (retrievedMemories.length > 0) {
      const memList = retrievedMemories.map(m => `${m.id}: ${String(m.text).slice(0, 200)}`).join("\n");
      sections.push(`[RETRIEVED MEMORIES]\nMark any that have been fully addressed/fixed/completed.\n${memList}`);
    }

    const systemPrompt = buildSystemPrompt(thinking.length > 0, retrievedMemories.length > 0, priorState);

    // Lazy import keeps worker startup cheap when no batch ever arrives.
    const { completeSimple, getModel } = await import("@mariozechner/pi-ai");
    const provider = config.llmProvider ?? "anthropic";
    const modelId = config.llmModel ?? "claude-opus-4-6";
    // getModel is heavily typed for known providers; cast needed for runtime-configured values
    const model = (getModel as any)(provider, modelId);

    const response = await completeSimple(model, {
      systemPrompt,
      messages: [{
        role: "user",
        timestamp: Date.now(),
        content: sections.join("\n\n"),
      }],
    });

    // Concatenate only the text parts of the model response.
    const responseText = response.content
      .filter((c: any) => c.type === "text")
      .map((c: any) => c.text)
      .join("");

    // Greedy match grabs the outermost {...} even when the model wrapped it in prose.
    const jsonMatch = responseText.match(/\{[\s\S]*\}/);
    if (!jsonMatch) return;

    let result: Record<string, any>;
    try {
      result = JSON.parse(jsonMatch[0]);
    } catch {
      try {
        // Second attempt: strip trailing commas, a common LLM JSON defect.
        result = JSON.parse(jsonMatch[0].replace(/,\s*([}\]])/g, "$1"));
      } catch {
        // Per-field fallback
        result = {};
        const fields = ["causal", "monologue", "resolved", "concepts", "corrections", "preferences", "artifacts", "decisions", "skills"];
        for (const field of fields) {
          const fieldMatch = jsonMatch[0].match(new RegExp(`"${field}"\\s*:\\s*(\\[[\\s\\S]*?\\])(?=\\s*[,}]\\s*"[a-z]|\\s*\\}$)`, "m"));
          if (fieldMatch) {
            try { result[field] = JSON.parse(fieldMatch[1]); } catch { /* skip */ }
          }
        }
        if (Object.keys(result).length === 0) return;
      }
    }

    // --- Write all results to DB ---
    // All writes are started eagerly and awaited together via allSettled below.
    const writeOps: Promise<void>[] = [];

    // 1. Causal chains
    if (Array.isArray(result.causal) && result.causal.length > 0) {
      const { linkCausalEdges } = await import("./causal.js");
      const validated = result.causal
        .filter((c: any) => c.triggerText && c.outcomeText && c.chainType && typeof c.success === "boolean")
        .slice(0, 5)
        .map((c: any) => ({
          triggerText: String(c.triggerText).slice(0, 200),
          outcomeText: String(c.outcomeText).slice(0, 200),
          chainType: (["debug", "refactor", "feature", "fix"].includes(c.chainType) ? c.chainType : "fix") as "debug" | "refactor" | "feature" | "fix",
          success: Boolean(c.success),
          // Clamp to [0, 1]; a missing/NaN/zero confidence becomes 0.5.
          confidence: Math.max(0, Math.min(1, Number(c.confidence) || 0.5)),
          description: String(c.description ?? "").slice(0, 150),
        }));
      if (validated.length > 0) {
        writeOps.push(linkCausalEdges(validated, sessionId, store, embeddings));
        counts.causal += validated.length;
      }
    }

    // 2. Monologue traces
    if (Array.isArray(result.monologue) && result.monologue.length > 0) {
      for (const entry of result.monologue.slice(0, 5)) {
        if (!entry.category || !entry.content) continue;
        counts.monologue++;
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(entry.content); } catch (e) { swallow("daemon:embedMonologue", e); }
          }
          await store.createMonologue(sessionId, entry.category, entry.content, emb);
        })());
      }
    }

    // 3. Resolved memories
    if (Array.isArray(result.resolved) && result.resolved.length > 0) {
      // Strict ID shape check: the ID is interpolated directly into the query
      // below, so anything not matching this pattern must be rejected.
      const RECORD_ID_RE = /^memory:[a-zA-Z0-9_]+$/;
      writeOps.push((async () => {
        for (const memId of result.resolved!.slice(0, 20)) {
          if (typeof memId !== "string" || !RECORD_ID_RE.test(memId)) continue;
          counts.resolved++;
          await store.queryExec(
            `UPDATE ${memId} SET status = 'resolved', resolved_at = time::now(), resolved_by = $sid`,
            { sid: sessionId },
          ).catch(e => swallow.warn("daemon:resolveMemory", e));
        }
      })());
    }

    // 4. Concepts
    if (Array.isArray(result.concepts) && result.concepts.length > 0) {
      // NOTE(review): the prompt says "Max 8 per batch" but this slices to 11 — confirm intent.
      for (const c of result.concepts.slice(0, 11)) {
        if (!c.name || !c.content) continue;
        if (priorState.conceptNames.includes(c.name)) continue;
        counts.concept++;
        priorState.conceptNames.push(c.name);
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(c.content); } catch (e) { swallow("daemon:embedConcept", e); }
          }
          // NOTE(review): c.name is only used for dedup above; only the content
          // is persisted here — confirm upsertConcept does not need the name.
          await store.upsertConcept(c.content, emb, `daemon:${sessionId}`);
        })());
      }
    }

    // 5. Corrections — high-importance memories
    if (Array.isArray(result.corrections) && result.corrections.length > 0) {
      for (const c of result.corrections.slice(0, 5)) {
        if (!c.original || !c.correction) continue;
        counts.correction++;
        const text = `[CORRECTION] Original: "${String(c.original).slice(0, 200)}" -> Corrected: "${String(c.correction).slice(0, 200)}" (Context: ${String(c.context ?? "").slice(0, 100)})`;
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(text); } catch (e) { swallow("daemon:embedCorrection", e); }
          }
          // Importance 9 — corrections are the highest-value extraction type.
          await store.createMemory(text, emb, 9, "correction", sessionId);
        })());
      }
    }

    // 6. User preferences
    if (Array.isArray(result.preferences) && result.preferences.length > 0) {
      for (const p of result.preferences.slice(0, 5)) {
        if (!p.preference) continue;
        counts.preference++;
        const text = `[USER PREFERENCE] ${String(p.preference).slice(0, 250)} (Evidence: ${String(p.evidence ?? "").slice(0, 150)})`;
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(text); } catch (e) { swallow("daemon:embedPreference", e); }
          }
          await store.createMemory(text, emb, 7, "preference", sessionId);
        })());
      }
    }

    // 7. Artifacts
    if (Array.isArray(result.artifacts) && result.artifacts.length > 0) {
      for (const a of result.artifacts.slice(0, 10)) {
        if (!a.path) continue;
        if (priorState.artifactPaths.includes(a.path)) continue;
        counts.artifact++;
        priorState.artifactPaths.push(a.path);
        const desc = `${String(a.action ?? "modified")}: ${String(a.summary ?? "").slice(0, 200)}`;
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(`${a.path} ${desc}`); } catch (e) { swallow("daemon:embedArtifact", e); }
          }
          await store.createArtifact(a.path, a.action ?? "modified", desc, emb);
        })());
      }
    }

    // 8. Decisions
    if (Array.isArray(result.decisions) && result.decisions.length > 0) {
      // NOTE(review): the prompt says "Max 3" decisions but this slices to 6 — confirm intent.
      for (const d of result.decisions.slice(0, 6)) {
        if (!d.decision) continue;
        counts.decision++;
        const text = `[DECISION] ${String(d.decision).slice(0, 200)} — Rationale: ${String(d.rationale ?? "").slice(0, 200)} (Alternatives: ${String(d.alternatives_considered ?? "none").slice(0, 100)})`;
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(text); } catch (e) { swallow("daemon:embedDecision", e); }
          }
          await store.createMemory(text, emb, 7, "decision", sessionId);
        })());
      }
    }

    // 9. Skills
    if (Array.isArray(result.skills) && result.skills.length > 0) {
      for (const s of result.skills.slice(0, 3)) {
        if (!s.name || !Array.isArray(s.steps) || s.steps.length === 0) continue;
        if (priorState.skillNames.includes(s.name)) continue;
        counts.skill++;
        priorState.skillNames.push(s.name);
        const content = `${s.name}\nTrigger: ${String(s.trigger_context ?? "").slice(0, 150)}\nSteps:\n${s.steps.map((st: string, i: number) => `${i + 1}. ${String(st).slice(0, 200)}`).join("\n")}`;
        writeOps.push((async () => {
          let emb: number[] | null = null;
          if (embeddings.isAvailable()) {
            try { emb = await embeddings.embed(content); } catch (e) { swallow("daemon:embedSkill", e); }
          }
          await store.queryExec(
            `CREATE skill CONTENT $record`,
            {
              record: {
                name: String(s.name).slice(0, 100),
                description: content,
                content,
                steps: s.steps.map((st: string) => String(st).slice(0, 200)),
                trigger_context: String(s.trigger_context ?? "").slice(0, 200),
                tags: ["auto-extracted"],
                session_id: sessionId,
                // Omit the embedding field entirely when none was produced.
                ...(emb ? { embedding: emb } : {}),
              },
            },
          ).catch(e => swallow.warn("daemon:createSkill", e));
        })());
      }
    }

    await Promise.allSettled(writeOps);

    // NOTE(review): every other counter accumulates with `+=`, but this is a
    // plain assignment, so extractedTurnCount reflects only the latest batch
    // despite the "Cumulative extraction counts" comment — confirm intent.
    counts.turns = turns.length;

    parentPort!.postMessage({
      type: "extraction_complete",
      extractedTurnCount: counts.turns,
      causalCount: counts.causal,
      monologueCount: counts.monologue,
      resolvedCount: counts.resolved,
      conceptCount: counts.concept,
      correctionCount: counts.correction,
      preferenceCount: counts.preference,
      artifactCount: counts.artifact,
      decisionCount: counts.decision,
      skillCount: counts.skill,
      // Shallow copy is sufficient: the main thread only reads these arrays.
      extractedNames: { ...priorState },
    } satisfies DaemonResponse);
  } catch (e) {
    counts.errors++;
    swallow.warn("memory-daemon:extraction", e);
    parentPort!.postMessage({
      type: "error",
      message: String(e),
    } satisfies DaemonResponse);
  } finally {
    processing = false;
  }
}
|
|
427
|
+
|
|
428
|
+
// --- Batch Queue Processing ---
|
|
429
|
+
|
|
430
|
+
async function drainQueue(): Promise<void> {
|
|
431
|
+
while (batchQueue.length > 0 && !shuttingDown) {
|
|
432
|
+
const batch = batchQueue.shift()!;
|
|
433
|
+
if (batch.type === "turn_batch") {
|
|
434
|
+
await processExtraction(batch);
|
|
435
|
+
}
|
|
436
|
+
}
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
// --- Message Handler ---

/**
 * Dispatch a single message from the main thread.
 *
 * - "turn_batch": coalesce — replace any queued batch with this one and start
 *   the drain loop if idle.
 * - "shutdown": wait (up to 45s) for an in-flight extraction, dispose the
 *   store/embedding services, then acknowledge with "shutdown_complete".
 * - "status_request": reply with cumulative counters and queue depth.
 */
async function handleMessage(msg: DaemonMessage): Promise<void> {
  switch (msg.type) {
    case "turn_batch": {
      // Newest wins: any batch still waiting is dropped in favor of this one.
      batchQueue.length = 0;
      batchQueue.push(msg);
      if (!processing) {
        drainQueue().catch(e => swallow.warn("daemon:drainQueue", e));
      }
      break;
    }
    case "shutdown": {
      shuttingDown = true;
      if (processing) {
        // Poll every 100ms for the in-flight extraction to finish; give up
        // after 45s so shutdown cannot hang forever.
        // NOTE(review): when the 45s timeout wins the race, the polling
        // interval is not cleared here — it only clears itself once
        // `processing` flips false.
        await Promise.race([
          new Promise<void>(resolve => {
            const check = setInterval(() => {
              if (!processing) { clearInterval(check); resolve(); }
            }, 100);
          }),
          new Promise<void>(resolve => setTimeout(resolve, 45_000)),
        ]);
      }
      try {
        // allSettled: one dispose failing must not prevent the other.
        await Promise.allSettled([
          store.dispose(),
          embeddings.dispose(),
        ]);
      } catch (e) { swallow("daemon:cleanup", e); }

      parentPort!.postMessage({ type: "shutdown_complete" } satisfies DaemonResponse);
      break;
    }
    case "status_request": {
      parentPort!.postMessage({
        type: "status",
        extractedTurns: counts.turns,
        pendingBatches: batchQueue.length,
        errors: counts.errors,
      } satisfies DaemonResponse);
      break;
    }
  }
}
|
|
484
|
+
|
|
485
|
+
// --- Main ---
|
|
486
|
+
|
|
487
|
+
init().then(ok => {
|
|
488
|
+
if (!ok) {
|
|
489
|
+
parentPort!.postMessage({ type: "error", message: "Daemon initialization failed" } satisfies DaemonResponse);
|
|
490
|
+
return;
|
|
491
|
+
}
|
|
492
|
+
parentPort!.on("message", (msg: DaemonMessage) => {
|
|
493
|
+
handleMessage(msg).catch(e => swallow.warn("daemon:handleMessage", e));
|
|
494
|
+
});
|
|
495
|
+
});
|