kongbrain 0.5.0 → 0.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.clawhubignore +0 -1
- package/.kongcode-handoff.json +8 -0
- package/README.github.md +3 -1
- package/SKILL.md +6 -1
- package/dist/causal-CZ62YZ2J.js +11 -0
- package/dist/chunk-6NWMZY3J.js +194 -0
- package/dist/chunk-B3QUPMCI.js +419 -0
- package/dist/chunk-XSIONAGJ.js +1364 -0
- package/dist/index.js +6674 -0
- package/dist/memory-daemon-MJRXOBXU.js +11 -0
- package/openclaw.plugin.json +9 -1
- package/package.json +27 -3
- package/src/config.ts +9 -1
- package/src/context-engine.ts +4 -2
- package/src/index.ts +52 -20
- package/src/model-resolution.ts +98 -0
- package/src/schema-loader.ts +21 -3
- package/src/schema.surql +8 -8
- package/src/surreal.ts +10 -2
package/.kongcode-handoff.json
CHANGED
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
{
|
|
2
|
+
"sessionId": "7a2dcd89-1468-49c3-a1be-24dac5a05662",
|
|
3
|
+
"timestamp": "2026-05-02T21:39:49.691Z",
|
|
4
|
+
"userTurnCount": 1,
|
|
5
|
+
"lastUserText": "Drain the KongCode pending_work queue. Loop: call mcp__plugin_kongcode_kongcode__fetch_pending_work to claim the next item, analyze the data per the work-type instructions, then call mcp__plugin_kongcode_kongcode__commit_work_results with your output. Repeat until fetch_pending_work returns empty. Be efficient: minimize per-item analysis. This is auto-drain, not user-facing — produce no narration, just process items.",
|
|
6
|
+
"lastAssistantText": "",
|
|
7
|
+
"unextractedTokens": 0
|
|
8
|
+
}
|
package/README.github.md
CHANGED
|
@@ -4,6 +4,8 @@
|
|
|
4
4
|
|
|
5
5
|

|
|
6
6
|
|
|
7
|
+
[](https://voidorigin.com)
|
|
8
|
+
|
|
7
9
|
[](https://www.npmjs.com/package/kongbrain)
|
|
8
10
|
[](https://clawhub.ai/packages/kongbrain)
|
|
9
11
|
[](https://github.com/42U/kongbrain)
|
|
@@ -498,6 +500,6 @@ The lobster doesn't accept contributions. The ape does.
|
|
|
498
500
|
|
|
499
501
|
<div align="center">
|
|
500
502
|
|
|
501
|
-
MIT License | Built by [42U](https://github.com/42U)
|
|
503
|
+
MIT License | Built by [42U](https://github.com/42U) | [VoidOrigin](https://voidorigin.com)
|
|
502
504
|
|
|
503
505
|
</div>
|
package/SKILL.md
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: kongbrain
|
|
3
3
|
description: Graph-backed persistent memory engine for OpenClaw. Replaces the default context window with SurrealDB + vector embeddings that learn across sessions.
|
|
4
|
-
version: 0.5.0
|
|
4
|
+
version: 0.5.2
|
|
5
5
|
homepage: https://github.com/42U/kongbrain
|
|
6
6
|
metadata:
|
|
7
7
|
openclaw:
|
|
@@ -14,6 +14,11 @@ metadata:
|
|
|
14
14
|
- SURREAL_PASS
|
|
15
15
|
- SURREAL_NS
|
|
16
16
|
- SURREAL_DB
|
|
17
|
+
optionalEnv:
|
|
18
|
+
- KONGBRAIN_EMBED_PROVIDER
|
|
19
|
+
- EMBED_MODEL_PATH
|
|
20
|
+
- OPENAI_BASE_URL
|
|
21
|
+
- OPENAI_API_KEY
|
|
17
22
|
primaryEnv: SURREAL_URL
|
|
18
23
|
install:
|
|
19
24
|
- kind: node
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
import {
|
|
2
|
+
assertRecordId,
|
|
3
|
+
init_errors,
|
|
4
|
+
swallow
|
|
5
|
+
} from "./chunk-XSIONAGJ.js";
|
|
6
|
+
|
|
7
|
+
// src/causal.ts
|
|
8
|
+
init_errors();
|
|
9
|
+
/**
 * Persist extracted cause->effect chains as linked graph memories.
 *
 * For every chain: store a trigger memory (importance 5) and an outcome
 * memory (importance 6), connect them with caused_by plus supports (on
 * success) or contradicts (on failure), optionally store a description
 * memory with describes edges, and finally record a causal_chain row.
 * Everything is best-effort: embed/relate/store failures are swallowed so
 * one bad chain never aborts the batch.
 *
 * @param chains     validated chain objects ({triggerText, outcomeText, chainType, success, confidence, description})
 * @param sessionId  originating session id, stored on each created record
 * @param store      graph store (createMemory / relate / queryExec / isAvailable)
 * @param embeddings embedding provider (isAvailable / embed)
 */
async function linkCausalEdges(chains, sessionId, store, embeddings) {
  if (chains.length === 0 || !store.isAvailable()) return;
  // A failed embed degrades to a null vector instead of aborting the chain.
  const tryEmbed = async (text) => {
    if (!embeddings.isAvailable()) return null;
    try {
      return await embeddings.embed(text);
    } catch (e) {
      swallow("causal:ok", e);
      return null;
    }
  };
  for (const chain of chains) {
    try {
      const triggerId = await store.createMemory(
        chain.triggerText,
        await tryEmbed(chain.triggerText),
        5,
        `causal_trigger_${chain.chainType}`,
        sessionId
      );
      const outcomeId = await store.createMemory(
        chain.outcomeText,
        await tryEmbed(chain.outcomeText),
        6,
        `causal_outcome_${chain.chainType}`,
        sessionId
      );
      // Without both endpoints there is nothing to link.
      if (!triggerId || !outcomeId) continue;
      await store.relate(outcomeId, "caused_by", triggerId).catch((e) => swallow.warn("causal:relateCausedBy", e));
      if (chain.success) {
        await store.relate(outcomeId, "supports", triggerId).catch((e) => swallow.warn("causal:relateSupports", e));
      } else {
        await store.relate(outcomeId, "contradicts", triggerId).catch((e) => swallow.warn("causal:relateContradicts", e));
      }
      let descriptionId = null;
      // Only descriptions with real content (> 10 chars) earn their own node.
      if (chain.description && chain.description.length > 10) {
        const descText = `[${chain.chainType}${chain.success ? "" : " FAILED"}] ${chain.description}`;
        descriptionId = await store.createMemory(
          descText,
          await tryEmbed(descText),
          5,
          `causal_description_${chain.chainType}`,
          sessionId
        );
        if (descriptionId) {
          await store.relate(descriptionId, "describes", triggerId).catch((e) => swallow.warn("causal:relateDescTrigger", e));
          await store.relate(descriptionId, "describes", outcomeId).catch((e) => swallow.warn("causal:relateDescOutcome", e));
        }
      }
      await store.queryExec(`CREATE causal_chain CONTENT $data`, {
        data: {
          session_id: String(sessionId),
          trigger_memory: triggerId,
          outcome_memory: outcomeId,
          description_memory: descriptionId,
          chain_type: chain.chainType,
          success: chain.success,
          confidence: chain.confidence,
          description: chain.description
        }
      }).catch((e) => swallow.warn("causal:storeChain", e));
    } catch (e) {
      swallow("causal:silent", e);
    }
  }
}
|
|
90
|
+
/**
 * Walk causal graph edges (caused_by / supports / contradicts / describes)
 * outward from seed memory record IDs, collecting connected nodes.
 *
 * @param seedIds       candidate record IDs; anything not matching table:id is dropped (max 10 kept)
 * @param queryVec      query embedding used to cosine-score each reached node
 * @param hops          maximum traversal depth (default 2)
 * @param minConfidence when > 0, only nodes participating in a causal_chain row
 *                      with at least this confidence survive the final filter
 * @param store         graph store (isAvailable / getActiveProvider / queryBatch / queryFirst)
 * @returns array of { id, text, score, importance, accessCount, timestamp, table, source }
 */
async function queryCausalContext(seedIds, queryVec, hops = 2, minConfidence = 0.4, store) {
  if (seedIds.length === 0 || !store?.isAvailable()) return [];
  const RECORD_ID_RE = /^[a-zA-Z_][a-zA-Z0-9_]*:[a-zA-Z0-9_]+$/;
  const roots = seedIds.filter((id) => RECORD_ID_RE.test(id)).slice(0, 10);
  if (roots.length === 0) return [];
  const EDGES = ["caused_by", "supports", "contradicts", "describes"];
  const visited = new Set(roots);
  const collected = [];
  const bindings = { vec: queryVec, provider: store.getActiveProvider() };
  // Only nodes embedded by the currently-active provider get a cosine score; others score 0.
  const scoreSql = `, IF embedding != NONE AND array::len(embedding) > 0
      AND embedding_provider = $provider
      THEN vector::similarity::cosine(embedding, $vec)
      ELSE 0 END AS score`;
  const projection = `SELECT id, text, importance, access_count AS accessCount,
      created_at AS timestamp, category, meta::tb(id) AS table${scoreSql}`;
  let fringe = roots;
  for (let depth = 0; depth < hops && fringe.length > 0; depth++) {
    const statements = [];
    for (const id of fringe) {
      assertRecordId(id);
      for (const edge of EDGES) {
        // Defense in depth: edge names are interpolated into the query text.
        if (!/^[a-z_]+$/.test(edge)) continue;
        statements.push(`${projection} FROM ${id}->${edge}->? LIMIT 3`);
        statements.push(`${projection} FROM ${id}<-${edge}<-? LIMIT 3`);
      }
    }
    let batches;
    try {
      batches = await store.queryBatch(statements, bindings);
    } catch (e) {
      swallow.warn("causal:batch", e);
      break;
    }
    const next = [];
    for (const rows of batches) {
      for (const row of rows) {
        const nodeId = String(row.id);
        if (visited.has(nodeId)) continue;
        visited.add(nodeId);
        const text = row.text ?? "";
        if (!text) continue;
        collected.push({
          id: nodeId,
          text,
          score: row.score ?? 0,
          importance: row.importance,
          accessCount: row.accessCount,
          timestamp: row.timestamp,
          table: String(row.table ?? "memory"),
          source: row.category
        });
        if (RECORD_ID_RE.test(nodeId)) {
          next.push(nodeId);
        }
      }
    }
    // Cap the fan-out per hop to keep query volume bounded.
    fringe = next.slice(0, 5);
  }
  if (collected.length === 0 || minConfidence <= 0) return collected;
  const ids = collected.map((r) => r.id);
  try {
    const chains = await store.queryFirst(
      `SELECT trigger_memory, outcome_memory, confidence FROM causal_chain
      WHERE confidence >= $minConf AND (trigger_memory IN $ids OR outcome_memory IN $ids)`,
      { minConf: minConfidence, ids }
    );
    const keep = new Set();
    for (const c of chains) {
      keep.add(String(c.trigger_memory));
      keep.add(String(c.outcome_memory));
    }
    return collected.filter((r) => keep.has(r.id));
  } catch (e) {
    // On filter failure, return the unfiltered traversal rather than nothing.
    swallow.warn("causal:confidence-filter", e);
    return collected;
  }
}
|
|
170
|
+
/**
 * Aggregate causal-chain metrics for one session.
 *
 * @param sessionId session whose causal_chain rows are counted
 * @param store     graph store (isAvailable / queryFirst)
 * @returns { count, successRate } — zeros when the store is unavailable,
 *          no rows exist, or the query fails (error is swallowed).
 */
async function getSessionCausalChains(sessionId, store) {
  try {
    if (!store.isAvailable()) return { count: 0, successRate: 0 };
    const rows = await store.queryFirst(
      `SELECT count() AS total, math::sum(IF success THEN 1 ELSE 0 END) AS successes
      FROM causal_chain WHERE session_id = $sid GROUP ALL`,
      { sid: sessionId }
    );
    const agg = rows[0];
    if (!agg || !agg.total) return { count: 0, successRate: 0 };
    const total = Number(agg.total);
    return { count: total, successRate: Number(agg.successes) / total };
  } catch (e) {
    swallow("causal:metrics", e);
    return { count: 0, successRate: 0 };
  }
}
|
|
189
|
+
|
|
190
|
+
export {
|
|
191
|
+
linkCausalEdges,
|
|
192
|
+
queryCausalContext,
|
|
193
|
+
getSessionCausalChains
|
|
194
|
+
};
|
|
@@ -0,0 +1,419 @@
|
|
|
1
|
+
import {
|
|
2
|
+
assertRecordId,
|
|
3
|
+
init_errors,
|
|
4
|
+
swallow
|
|
5
|
+
} from "./chunk-XSIONAGJ.js";
|
|
6
|
+
|
|
7
|
+
// src/memory-daemon.ts
|
|
8
|
+
init_errors();
|
|
9
|
+
|
|
10
|
+
// src/concept-extract.ts
|
|
11
|
+
init_errors();
|
|
12
|
+
/**
 * Best-effort semantic linking: embed `text` (or reuse `precomputedVec`),
 * look up the closest concepts for the active embedding provider, and create
 * `edgeName` edges from `sourceId` to every concept scoring >= `threshold`.
 * All failures are logged via swallow and never propagate.
 *
 * @param sourceId       record to link from
 * @param edgeName       relation table/edge name
 * @param text           text to embed when no precomputed vector is supplied
 * @param store          graph store (queryFirst / relate)
 * @param embeddings     embedding provider (isAvailable / embed / providerId)
 * @param logTag         prefix for swallow log tags
 * @param limit          max candidate concepts fetched (default 5)
 * @param threshold      minimum cosine similarity to link (default 0.65)
 * @param precomputedVec optional vector to reuse instead of re-embedding
 */
async function linkToRelevantConcepts(sourceId, edgeName, text, store, embeddings, logTag, limit = 5, threshold = 0.65, precomputedVec) {
  if (!embeddings.isAvailable() || !text) return;
  try {
    const vector = precomputedVec?.length ? precomputedVec : await embeddings.embed(text);
    if (!vector?.length) return;
    const candidates = await store.queryFirst(
      `SELECT id, vector::similarity::cosine(embedding, $vec) AS score
      FROM concept
      WHERE embedding != NONE AND array::len(embedding) > 0
      AND embedding_provider = $provider
      ORDER BY score DESC
      LIMIT $lim`,
      { vec: vector, lim: limit, provider: embeddings.providerId }
    );
    for (const candidate of candidates) {
      // Results are sorted DESC — once one misses the threshold, all the rest do too.
      if (candidate.score < threshold) break;
      await store.relate(sourceId, edgeName, String(candidate.id)).catch((e) => swallow(`${logTag}:relate`, e));
    }
  } catch (e) {
    swallow(`${logTag}:embed`, e);
  }
}
|
|
34
|
+
/**
 * Wire a concept into the concept graph with two best-effort passes:
 *  1. Lexical containment over up to 50 peer concepts: if one name contains
 *     the other, add broader/narrower edges in both directions (the longer,
 *     more specific name is "narrower").
 *  2. Embedding similarity (when an embedder is available): bidirectional
 *     related_to edges to the top 3 neighbors scoring >= 0.75.
 * Every failure is swallowed — hierarchy enrichment never propagates errors.
 *
 * @param conceptId   record id of the concept being linked
 * @param conceptName human-readable concept name used for matching/embedding
 * @param store       graph store (queryFirst / relate)
 * @param embeddings  embedding provider (isAvailable / embed / providerId)
 * @param logTag      prefix for swallow log tags
 */
async function linkConceptHierarchy(conceptId, conceptName, store, embeddings, logTag) {
  try {
    const peers = await store.queryFirst(
      `SELECT id, content FROM concept WHERE id != $cid LIMIT 50`,
      { cid: conceptId }
    );
    if (peers.length === 0) return;
    const nameLc = conceptName.toLowerCase();
    for (const peer of peers) {
      const peerLc = (peer.content ?? "").toLowerCase();
      if (!peerLc || peerLc === nameLc) continue;
      const peerId = String(peer.id);
      if (nameLc.includes(peerLc) && nameLc !== peerLc) {
        // This name contains the peer's name -> this concept is the narrower one.
        await store.relate(conceptId, "narrower", peerId).catch((e) => swallow(`${logTag}:narrower`, e));
        await store.relate(peerId, "broader", conceptId).catch((e) => swallow(`${logTag}:broader`, e));
      } else if (peerLc.includes(nameLc) && peerLc !== nameLc) {
        await store.relate(conceptId, "broader", peerId).catch((e) => swallow(`${logTag}:broader`, e));
        await store.relate(peerId, "narrower", conceptId).catch((e) => swallow(`${logTag}:narrower`, e));
      }
    }
    if (embeddings.isAvailable()) {
      try {
        const nameVec = await embeddings.embed(conceptName);
        if (nameVec?.length) {
          const neighbors = await store.queryFirst(
            `SELECT id, vector::similarity::cosine(embedding, $vec) AS score
            FROM concept
            WHERE id != $cid
            AND embedding != NONE AND array::len(embedding) > 0
            AND embedding_provider = $provider
            ORDER BY score DESC
            LIMIT 3`,
            { vec: nameVec, cid: conceptId, provider: embeddings.providerId }
          );
          for (const nb of neighbors) {
            // Sorted DESC — stop at the first sub-threshold neighbor.
            if (nb.score < 0.75) break;
            const nbId = String(nb.id);
            await store.relate(conceptId, "related_to", nbId).catch((e) => swallow(`${logTag}:related_to`, e));
            await store.relate(nbId, "related_to", conceptId).catch((e) => swallow(`${logTag}:related_to`, e));
          }
        }
      } catch (e) {
        swallow(`${logTag}:related_to_search`, e);
      }
    }
  } catch (e) {
    swallow(`${logTag}:hierarchy`, e);
  }
}
|
|
84
|
+
|
|
85
|
+
// src/memory-daemon.ts
|
|
86
|
+
/**
 * Build the extraction-daemon system prompt.
 *
 * @param hasThinking          currently unused by the body (kept for interface
 *                             compatibility — NOTE(review): confirm intent upstream)
 * @param hasRetrievedMemories when true, the "resolved" section invites memory
 *                             IDs; otherwise it is pinned to an empty array
 * @param prior                previously extracted { conceptNames, artifactPaths, skillNames }
 *                             used to build a do-not-repeat section
 * @returns the full prompt string
 */
function buildSystemPrompt(hasThinking, hasRetrievedMemories, prior) {
  const listOrNone = (xs) => (xs.length > 0 ? xs.join(", ") : "none yet");
  const hasPrior = prior.conceptNames.length > 0 || prior.artifactPaths.length > 0 || prior.skillNames.length > 0;
  // Dedup section only appears once something has already been extracted.
  const dedup = hasPrior ? `

ALREADY EXTRACTED (do NOT repeat these):
- Concepts: ${listOrNone(prior.conceptNames)}
- Artifacts: ${listOrNone(prior.artifactPaths)}
- Skills: ${listOrNone(prior.skillNames)}` : "";
  return `You are a memory extraction daemon. Analyze the conversation transcript and extract structured knowledge.
Return ONLY valid JSON with these fields (all arrays, use [] if none found for a field):
${dedup}

{
"causal": [
// Cause->effect chains from debugging, refactoring, fixing, or building.
// Only when there's a clear trigger and outcome. Max 5.
{"triggerText": "what caused it (max 200 chars)", "outcomeText": "what happened as a result", "chainType": "debug|refactor|feature|fix", "success": true/false, "confidence": 0.0-1.0, "description": "1-sentence summary"}
],
"monologue": [
// Internal reasoning moments worth preserving: doubts, tradeoffs, insights, realizations.
// Infer from the conversation flow \u2014 approach changes, surprising discoveries, tradeoff decisions.
// Skip routine reasoning. Only novel/surprising thoughts. Max 5.
{"category": "doubt|tradeoff|alternative|insight|realization", "content": "1-2 sentence description"}
],
${hasRetrievedMemories ? ` "resolved": [
// IDs from [RETRIEVED MEMORIES] that have been FULLY addressed/fixed/completed in this conversation.
// Must be exact IDs like "memory:abc123". Empty [] if none resolved.
"memory:example_id"
],` : ' "resolved": [],'}
"concepts": [
// Technical facts, knowledge, decisions, or findings worth remembering.
// NOT conversation flow \u2014 only things that would be useful to recall later.
// Categories: technical, architectural, behavioral, environmental, procedural
// Max 8 per batch.
{"name": "short identifier (3-6 words)", "content": "the actual knowledge (1-3 sentences)", "category": "technical|architectural|behavioral|environmental|procedural", "importance": 1-10}
],
"corrections": [
// Moments where the user corrects the assistant's understanding, approach, or output.
// These are high-value signals about what NOT to do.
{"original": "what the assistant said/did wrong", "correction": "what the user said the right answer/approach is", "context": "brief context of when this happened"}
],
"preferences": [
// User behavioral signals: communication style, workflow preferences, tool preferences.
// Only extract NOVEL preferences not already obvious. Max 5.
{"preference": "what the user prefers (1 sentence)", "evidence": "what they said/did that shows this"}
],
"artifacts": [
// Files that were created, modified, read, or discussed.
// Extract from tool calls (bash, read, write, edit, grep commands).
{"path": "/path/to/file", "action": "created|modified|read|discussed", "summary": "what was done to it (1 sentence)"}
],
"decisions": [
// Explicit choices made during the conversation with reasoning.
// Architecture decisions, tool choices, approach selections. Max 3.
{"decision": "what was decided", "rationale": "why", "alternatives_considered": "what else was considered (or 'none discussed')"}
],
"skills": [
// Reusable multi-step procedures that WORKED. Only extract when a procedure
// was successfully completed and would be useful to repeat. Max 2.
{"name": "short name", "steps": ["step 1", "step 2", "..."], "trigger_context": "when to use this skill"}
]
}

RULES:
- Return ONLY the JSON object. No markdown, no explanation.
- Every field must be present (use [] for empty).
- Quality over quantity \u2014 skip weak/uncertain extractions.
- Concepts should be self-contained \u2014 readable without the conversation.
- Corrections are the MOST important signal. Never miss one.
- For artifacts, extract file paths from bash/tool commands in the transcript.`;
}
|
|
156
|
+
/**
 * Flatten conversation turns into a plain-text transcript for the extractor.
 * Each turn is tagged [tool:<name>] or [<role>], truncated to 1500 chars,
 * with optional truncated tool output (500 chars) and touched file paths.
 *
 * @param turns array of { role, text, tool_name?, tool_result?, file_paths? }
 * @returns newline-joined transcript ("" for an empty array)
 */
function buildTranscript(turns) {
  const rendered = [];
  for (const turn of turns) {
    const tag = turn.tool_name ? `[tool:${turn.tool_name}]` : `[${turn.role}]`;
    let entry = `${tag} ${(turn.text ?? "").slice(0, 1500)}`;
    if (turn.tool_result) entry += `\n-> ${turn.tool_result.slice(0, 500)}`;
    if (turn.file_paths && turn.file_paths.length > 0) entry += `\nfiles: ${turn.file_paths.join(", ")}`;
    rendered.push(entry);
  }
  return rendered.join("\n");
}
|
|
167
|
+
/**
 * Persist a daemon extraction result into the graph store.
 *
 * Concepts and turn->concept links are written sequentially first; every other
 * category (causal chains, monologue, resolved memories, corrections,
 * preferences, artifacts, decisions, skills) is queued as a write op and
 * settled in parallel at the end. Counts are tallied per category as items
 * pass validation. priorState is mutated in place to record newly seen
 * concept names, artifact paths, and skill names (cross-batch dedup).
 *
 * @param result     parsed extractor JSON (any field may be missing)
 * @param sessionId  originating session id
 * @param store      graph store
 * @param embeddings embedding provider (isAvailable / embed / providerId)
 * @param priorState { conceptNames, artifactPaths, skillNames } — mutated
 * @param taskId     optional task record to link derived records to
 * @param projectId  optional project record to link derived records to
 * @param turns      optional conversation turns for mentions-linking
 * @returns per-category counts object
 */
async function writeExtractionResults(result, sessionId, store, embeddings, priorState, taskId, projectId, turns) {
  const counts = {
    causal: 0,
    monologue: 0,
    resolved: 0,
    concept: 0,
    correction: 0,
    preference: 0,
    artifact: 0,
    decision: 0,
    skill: 0
  };
  // Best-effort embedding: null when the provider is unavailable or embed fails.
  const safeEmbed = async (text, tag) => {
    if (!embeddings.isAvailable()) return null;
    try {
      return await embeddings.embed(text);
    } catch (e) {
      swallow(tag, e);
      return null;
    }
  };
  const extractedConceptIds = [];
  if (Array.isArray(result.concepts) && result.concepts.length > 0) {
    for (const c of result.concepts.slice(0, 11)) {
      if (!c.name || !c.content) continue;
      if (priorState.conceptNames.includes(c.name)) continue;
      counts.concept++;
      priorState.conceptNames.push(c.name);
      try {
        const emb = await safeEmbed(c.content, "daemon:embedConcept");
        const conceptId = await store.upsertConcept(c.content, emb, `daemon:${sessionId}`);
        if (conceptId) {
          extractedConceptIds.push(conceptId);
          await linkConceptHierarchy(conceptId, c.name, store, embeddings, "daemon:concept");
          if (taskId) {
            await store.relate(conceptId, "derived_from", taskId).catch((e) => swallow("daemon:concept:derived_from", e));
          }
          if (projectId) {
            await store.relate(conceptId, "relevant_to", projectId).catch((e) => swallow("daemon:concept:relevant_to", e));
          }
        }
      } catch (e) {
        swallow.warn("daemon:upsertConcept", e);
      }
    }
  }
  if (turns && turns.length > 0) {
    // Link up to 15 turns to semantically close concepts.
    const linkable = turns.filter((t) => t.turnId && t.text).slice(0, 15);
    for (const t of linkable) {
      await linkToRelevantConcepts(
        t.turnId,
        "mentions",
        t.text,
        store,
        embeddings,
        "daemon:mentions",
        5,
        0.65
      );
    }
  }
  const writeOps = [];
  if (Array.isArray(result.causal) && result.causal.length > 0) {
    // Lazy-load causal linking so the chunk stays optional.
    const { linkCausalEdges } = await import("./causal-CZ62YZ2J.js");
    const validated = result.causal
      .filter((c) => c.triggerText && c.outcomeText && c.chainType && typeof c.success === "boolean")
      .slice(0, 5)
      .map((c) => ({
        triggerText: String(c.triggerText).slice(0, 200),
        outcomeText: String(c.outcomeText).slice(0, 200),
        chainType: ["debug", "refactor", "feature", "fix"].includes(c.chainType) ? c.chainType : "fix",
        success: Boolean(c.success),
        confidence: Math.max(0, Math.min(1, Number(c.confidence) || 0.5)),
        description: String(c.description ?? "").slice(0, 150)
      }));
    if (validated.length > 0) {
      writeOps.push(linkCausalEdges(validated, sessionId, store, embeddings));
      counts.causal += validated.length;
    }
  }
  if (Array.isArray(result.monologue) && result.monologue.length > 0) {
    for (const entry of result.monologue.slice(0, 5)) {
      if (!entry.category || !entry.content) continue;
      counts.monologue++;
      writeOps.push((async () => {
        const emb = await safeEmbed(entry.content, "daemon:embedMonologue");
        await store.createMonologue(sessionId, entry.category, entry.content, emb);
      })());
    }
  }
  if (Array.isArray(result.resolved) && result.resolved.length > 0) {
    const MEMORY_ID_RE = /^memory:[a-zA-Z0-9_]+$/;
    writeOps.push((async () => {
      for (const memId of result.resolved.slice(0, 20)) {
        if (typeof memId !== "string" || !MEMORY_ID_RE.test(memId)) continue;
        assertRecordId(memId);
        counts.resolved++;
        await store.queryExec(
          `UPDATE ${memId} SET status = 'resolved', resolved_at = time::now(), resolved_by = $sid`,
          { sid: sessionId }
        ).catch((e) => swallow.warn("daemon:resolveMemory", e));
      }
    })());
  }
  if (Array.isArray(result.corrections) && result.corrections.length > 0) {
    for (const c of result.corrections.slice(0, 5)) {
      if (!c.original || !c.correction) continue;
      counts.correction++;
      const text = `[CORRECTION] Original: "${String(c.original).slice(0, 200)}" -> Corrected: "${String(c.correction).slice(0, 200)}" (Context: ${String(c.context ?? "").slice(0, 100)})`;
      writeOps.push((async () => {
        const emb = await safeEmbed(text, "daemon:embedCorrection");
        const memId = await store.createMemory(text, emb, 9, "correction", sessionId);
        if (memId) {
          await linkToRelevantConcepts(memId, "about_concept", text, store, embeddings, "daemon:correction:about_concept", 5, 0.65, emb);
        }
      })());
    }
  }
  if (Array.isArray(result.preferences) && result.preferences.length > 0) {
    for (const p of result.preferences.slice(0, 5)) {
      if (!p.preference) continue;
      counts.preference++;
      const text = `[USER PREFERENCE] ${String(p.preference).slice(0, 250)} (Evidence: ${String(p.evidence ?? "").slice(0, 150)})`;
      writeOps.push((async () => {
        const emb = await safeEmbed(text, "daemon:embedPreference");
        const memId = await store.createMemory(text, emb, 7, "preference", sessionId);
        if (memId) {
          await linkToRelevantConcepts(memId, "about_concept", text, store, embeddings, "daemon:preference:about_concept", 5, 0.65, emb);
        }
      })());
    }
  }
  if (Array.isArray(result.artifacts) && result.artifacts.length > 0) {
    for (const a of result.artifacts.slice(0, 10)) {
      if (!a.path) continue;
      if (priorState.artifactPaths.includes(a.path)) continue;
      counts.artifact++;
      priorState.artifactPaths.push(a.path);
      const desc = `${String(a.action ?? "modified")}: ${String(a.summary ?? "").slice(0, 200)}`;
      writeOps.push((async () => {
        const emb = await safeEmbed(`${a.path} ${desc}`, "daemon:embedArtifact");
        const artId = await store.createArtifact(a.path, a.action ?? "modified", desc, emb);
        if (artId) {
          await linkToRelevantConcepts(artId, "artifact_mentions", `${a.path} ${desc}`, store, embeddings, "daemon:artifact:artifact_mentions", 5, 0.65, emb);
          if (projectId) {
            await store.relate(artId, "used_in", projectId).catch((e) => swallow("daemon:artifact:used_in", e));
          }
        }
      })());
    }
  }
  if (Array.isArray(result.decisions) && result.decisions.length > 0) {
    for (const d of result.decisions.slice(0, 6)) {
      if (!d.decision) continue;
      counts.decision++;
      const text = `[DECISION] ${String(d.decision).slice(0, 200)} \u2014 Rationale: ${String(d.rationale ?? "").slice(0, 200)} (Alternatives: ${String(d.alternatives_considered ?? "none").slice(0, 100)})`;
      writeOps.push((async () => {
        const emb = await safeEmbed(text, "daemon:embedDecision");
        const memId = await store.createMemory(text, emb, 7, "decision", sessionId);
        if (memId) {
          await linkToRelevantConcepts(memId, "about_concept", text, store, embeddings, "daemon:decision:about_concept", 5, 0.65, emb);
        }
      })());
    }
  }
  if (Array.isArray(result.skills) && result.skills.length > 0) {
    for (const s of result.skills.slice(0, 3)) {
      if (!s.name || !Array.isArray(s.steps) || s.steps.length === 0) continue;
      if (priorState.skillNames.includes(s.name)) continue;
      counts.skill++;
      priorState.skillNames.push(s.name);
      const content = `${s.name}\nTrigger: ${String(s.trigger_context ?? "").slice(0, 150)}\nSteps:\n${s.steps.map((st, i) => `${i + 1}. ${String(st).slice(0, 200)}`).join("\n")}`;
      writeOps.push((async () => {
        const emb = await safeEmbed(content, "daemon:embedSkill");
        try {
          const rows = await store.queryFirst(
            `CREATE skill CONTENT $record RETURN id`,
            {
              record: {
                name: String(s.name).slice(0, 100),
                description: content,
                content,
                steps: s.steps.map((st) => String(st).slice(0, 200)),
                trigger_context: String(s.trigger_context ?? "").slice(0, 200),
                tags: ["auto-extracted"],
                session_id: sessionId,
                ...emb ? { embedding: emb, embedding_provider: embeddings.providerId } : {}
              }
            }
          );
          const skillId = rows[0]?.id ? String(rows[0].id) : null;
          if (skillId) {
            if (taskId) {
              await store.relate(skillId, "skill_from_task", taskId).catch((e) => swallow.warn("daemon:skill:skill_from_task", e));
            }
            await linkToRelevantConcepts(skillId, "skill_uses_concept", content, store, embeddings, "daemon:skill:concepts", 5, 0.65, emb);
          }
        } catch (e) {
          swallow.warn("daemon:createSkill", e);
        }
      })());
    }
  }
  await Promise.allSettled(writeOps);
  return counts;
}
|
|
413
|
+
|
|
414
|
+
export {
|
|
415
|
+
linkToRelevantConcepts,
|
|
416
|
+
buildSystemPrompt,
|
|
417
|
+
buildTranscript,
|
|
418
|
+
writeExtractionResults
|
|
419
|
+
};
|