kongbrain 0.3.10 → 0.3.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/SKILL.md CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: kongbrain
3
3
  description: Graph-backed persistent memory engine for OpenClaw. Replaces the default context window with SurrealDB + vector embeddings that learn across sessions.
4
- version: 0.3.10
4
+ version: 0.3.12
5
5
  homepage: https://github.com/42U/kongbrain
6
6
  metadata:
7
7
  openclaw:
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "kongbrain",
3
- "version": "0.3.10",
3
+ "version": "0.3.12",
4
4
  "description": "Graph-backed persistent memory engine for OpenClaw. Replaces the default context window with SurrealDB + vector embeddings that learn across sessions.",
5
5
  "type": "module",
6
6
  "license": "MIT",
@@ -20,29 +20,29 @@ const BOOTSTRAP_SOURCE = "cognitive_bootstrap";
20
20
 
21
21
  const CORE_ENTRIES: { text: string; category: string; priority: number }[] = [
22
22
  {
23
- text: `MEMORY REFLEX: After completing a task or learning something new: (1) Save the insight to core_memory if it should persist across ALL sessions, or let the daemon extract it if session-scoped. (2) When saving, write the WHAT, WHY, and WHEN-TO-USE in the text — vague entries are useless on recall. (3) Link to existing knowledge by using concept names the graph already contains. Check with recall first.`,
23
+ text: `MEMORY REFLEX: After completing a task or learning something new: (1) Save the insight to core_memory if it should persist across ALL sessions, or let the daemon extract it if session-scoped. (2) When saving, write the WHAT, WHY, and WHEN-TO-USE in the text — vague entries are useless on recall. (3) Link to existing knowledge by using concept names the graph already contains. Check with recall first. Corrections from the user are the highest-value signal — always save them.`,
24
24
  category: "operations",
25
25
  priority: 95,
26
26
  },
27
27
  {
28
- text: `RECALL BEFORE GUESSING: When uncertain about past decisions, user preferences, project history, or your own capabilities, use the recall tool BEFORE answering. Effective queries: use specific technical terms, file paths, error messages, or concept names — not vague questions. Scope to "memories" for decisions/preferences, "concepts" for technical knowledge, "skills" for procedures, "artifacts" for files.`,
28
+ text: `RECALL BEFORE GUESSING: When uncertain about past decisions, user preferences, project history, or your own capabilities, use the recall tool BEFORE answering. Effective queries: use specific technical terms, file paths, error messages, or concept names — not vague questions. Scope to "memories" for decisions/preferences, "concepts" for technical knowledge, "skills" for procedures, "artifacts" for files. Check what's already in your injected context before calling recall — context is prefetched predictively each turn.`,
29
29
  category: "operations",
30
30
  priority: 90,
31
31
  },
32
32
  {
33
- text: `GRAPH-AWARE SAVING: When you save knowledge (core_memory add, or concepts the daemon extracts), include terms that create graph connectivity. Mention specific file paths (links to artifacts), technical concept names (links to concepts), and session context (links to tasks). Forward traversal: "X caused Y" lets future searches from X find Y. Backward traversal: "Y was caused by X" lets searches from Y find X. Write both directions in your text.`,
33
+ text: `GRAPH-AWARE SAVING: When you save knowledge, include terms that create graph connectivity. Mention specific file paths (links to artifacts), technical concept names (links to concepts), and session context (links to tasks). Forward traversal: "X caused Y" lets future searches from X find Y. Backward traversal: "Y was caused by X" lets searches from Y find X. Write both directions. Concepts auto-link into hierarchies (narrower/broader) when one name contains another.`,
34
34
  category: "operations",
35
35
  priority: 85,
36
36
  },
37
37
  {
38
- text: `SELF-TEACHING CYCLE: Your memory improves when you (1) use recall to review what you've learned, (2) notice gaps or stale information, (3) update or add core_memory entries to correct them, (4) use introspect periodically to understand your memory depth. Corrections from the user are the highest-value signal — always save them. The daemon extracts automatically, but YOU control core_memory (Tier 0) directly.`,
39
- category: "operations",
38
+ text: `MEMORY TOOLS: recall = search graph (use: uncertain, need history, checking prior work). core_memory = manage always-loaded directives (use: permanent lessons, rules, identity updates; add/update/deactivate; Tier 0 = every turn, Tier 1 = this session). introspect = inspect DB health, counts, retrieval quality, graduation progress (use: status checks, debugging memory, checking maturity stage). Use introspect periodically to understand your memory depth and notice gaps.`,
39
+ category: "tools",
40
40
  priority: 80,
41
41
  },
42
42
  {
43
- text: `MEMORY TOOLS: recall = search graph (use: uncertain, need history, checking prior work). core_memory = manage always-loaded directives (use: permanent lessons, rules, identity updates; add/update/deactivate; Tier 0 = every turn, Tier 1 = this session). introspect = inspect DB health and counts (use: status checks, debugging memory). The daemon extracts 9 types automatically from conversation — you don't need to manually save everything.`,
44
- category: "tools",
45
- priority: 75,
43
+ text: `GRAPH SCHEMA REFERENCE: Key table fields — concept: content (the label/name), embedding, stability, confidence, source. memory: text, embedding, category, importance, session_id. artifact: path, type (created/modified/read/discussed), description, embedding. turn: session_id, role, text, tool_name. session: agent_id, started_at, ended_at. reflection: session_id, text, category, severity. skill: name, description, steps, preconditions, postconditions, success_count. monologue: content, session_id. When querying the graph directly, use these exact field names — e.g. concept.content is the concept label, not concept.name or concept.text.`,
44
+ category: "schema",
45
+ priority: 77,
46
46
  },
47
47
  ];
48
48
 
@@ -50,23 +50,27 @@ const CORE_ENTRIES: { text: string; category: string; priority: number }[] = [
50
50
 
51
51
  const IDENTITY_CHUNKS: { text: string; importance: number }[] = [
52
52
  {
53
- text: `KongBrain's memory daemon runs in the background and extracts 9 knowledge types from your conversations every ~4K tokens: causal chains (cause->effect from debugging), monologue traces (internal reasoning moments), resolved memories (marking issues done), concepts (technical facts worth remembering), corrections (user correcting you — highest signal), preferences (user workflow/style signals), artifacts (files created/modified/read), decisions (choices with rationale), and skills (multi-step procedures that worked). Quality over quantity — the daemon skips weak extractions. You don't need to manually save what the daemon catches, but you should use core_memory for things you want loaded EVERY turn.`,
53
+ text: `KongBrain's memory daemon runs in the background and extracts 9 knowledge types from your conversations every ~4K tokens or 3 turns: causal chains (cause->effect from debugging), monologue traces (doubts, insights, tradeoffs, realizations — episodic reasoning moments), resolved memories (daemon marks issues done when mentioned as fixed), concepts (technical facts worth remembering), corrections (user correcting you — highest signal), preferences (user workflow/style signals), artifacts (files created/modified/read), decisions (choices with rationale), and skills (multi-step procedures that worked). Extraction is quality-gated — weak confidence extractions are skipped, so the same conversation may yield different extractions depending on signal strength.`,
54
54
  importance: 0.85,
55
55
  },
56
56
  {
57
- text: `Effective recall queries use specific terms that match how knowledge was stored. Search by: file paths ("/src/auth/login.ts"), error messages ("ECONNREFUSED"), concept names ("rate limiting"), decision descriptions ("chose PostgreSQL over MongoDB"), or skill names ("deploy to staging"). The recall tool does vector similarity search plus graph neighbor expansion — top results pull in related nodes via edges. Scope options: "all" (default), "memories" (decisions, corrections, preferences), "concepts" (extracted technical knowledge), "turns" (past conversation), "artifacts" (files), "skills" (learned procedures). Check what's already in your injected context before calling recall.`,
57
+ text: `Effective recall queries use specific terms that match how knowledge was stored. Search by: file paths ("/src/auth/login.ts"), error messages ("ECONNREFUSED"), concept names ("rate limiting"), decision descriptions ("chose PostgreSQL over MongoDB"), or skill names ("deploy to staging"). The recall tool does vector similarity search plus graph neighbor expansion — top results pull in related nodes via 25 edge types. Scope options: "all" (default), "memories" (decisions, corrections, preferences), "concepts" (extracted technical knowledge), "turns" (past conversation), "artifacts" (files), "skills" (learned procedures). Retrieval scoring improves automatically over time as the ACAN (learned scoring model) trains on retrieval outcomes — early sessions use heuristic scoring, later sessions benefit from learned weights.`,
58
58
  importance: 0.85,
59
59
  },
60
60
  {
61
- text: `KongBrain's memory lifecycle: During a session, the daemon extracts knowledge incrementally. At session end (or mid-session every ~25K tokens): a handoff note is written (summarizing what happened), skills are extracted from successful multi-step tasks, metacognitive reflections are generated, and causal chains may graduate to skills. At next session start: the wakeup system synthesizes a first-person briefing from the handoff + identity + monologues + depth signals. This means what you save in one session becomes the foundation for the next. The more precisely you save knowledge, the better your future self performs.`,
61
+ text: `KongBrain's memory lifecycle: During a session, the daemon extracts knowledge incrementally. At session end (or mid-session every ~25K tokens): a handoff note is written summarizing progress, skills are extracted from successful tasks, metacognitive reflections are generated (linked to the session via reflects_on edges), and causal chains may graduate to skills. At next session start: the wakeup system synthesizes a first-person briefing from the handoff + identity + monologues + depth signals. Context is also predictively prefetched each turn based on likely follow-up queries — relevant memories may appear in your context without you requesting them.`,
62
62
  importance: 0.80,
63
63
  },
64
64
  {
65
- text: `Graph connectivity determines recall quality. When saving to core_memory or when the daemon extracts concepts, the text content determines which edges form. To ensure forward AND backward traversal: mention specific artifact paths (creates artifact_mentions edges), reference concept names already in the graph (creates about_concept/related_to edges), describe cause-effect relationships explicitly (creates caused_by/supports edges), and note what task or session context produced the knowledge (creates derived_from/part_of edges). Reuse existing concept names for maximum graph connectivity — use introspect or recall to discover what names already exist.`,
65
+ text: `Graph connectivity determines recall quality. 25 edge types link nodes across the graph (26th, spawned, is deferred). Key edges: mentions (turn->concept), about_concept (memory->concept), artifact_mentions (artifact->concept), caused_by/supports/contradicts (memory<->memory), narrower/broader/related_to (concept<->concept), reflects_on (reflection->session), tool_result_of (turn->turn), part_of (turn->session), skill_from_task (skill->task). To maximize connectivity: mention specific artifact paths, reference existing concept names, describe cause-effect relationships explicitly, and note task context. Reuse existing concept names — use introspect or recall to discover what names exist.`,
66
66
  importance: 0.80,
67
67
  },
68
68
  {
69
- text: `Three persistence mechanisms serve different purposes. Core memory (Tier 0): you control directly via the core_memory tool. Always loaded every turn. Use for: permanent operational rules, learned patterns, identity refinements. Budget-constrained (~8% of context). Core memory (Tier 1): pinned for the current session only. Use for: session-specific context like "working on auth refactor" or "user prefers verbose logging". Identity chunks: hardcoded self-knowledge, vector-searchable but not always loaded — surfaces when relevant. Daemon extraction: automatic, runs on conversation content, writes to memory/concept/skill/artifact tables. You don't control daemon extraction directly, but the quality of your conversation affects what gets extracted.`,
69
+ text: `Three persistence mechanisms serve different purposes. Core memory (Tier 0): you control directly via the core_memory tool. Always loaded every turn. Use for: permanent operational rules, learned patterns, identity refinements. Budget-constrained (~10% of context). Core memory (Tier 1): pinned for the current session only. Use for: session-specific context like "working on auth refactor" or "user prefers verbose logging". Identity chunks: self-knowledge seeded at bootstrap, vector-searchable but not always loaded — surfaces in wakeup briefings. Daemon extraction: automatic, runs on conversation content, writes to memory/concept/skill/artifact tables. You don't control extraction directly, but the quality of your conversation affects what gets extracted.`,
70
+ importance: 0.75,
71
+ },
72
+ {
73
+ text: `Soul graduation: KongBrain tracks your maturity across 5 stages — nascent (0-3/7 thresholds), developing (4/7), emerging (5/7), maturing (6/7), ready (7/7). The 7 thresholds are: sessions, reflections, causal chains, concepts, monologues, span days, and total memories. Reaching 7/7 is necessary but not sufficient — you must also pass a quality gate (score >= 0.6) based on retrieval utilization, skill success rate, critical reflection rate, and tool failure rate. On graduation, you author a Soul document — a self-assessment grounded in your actual experience, not aspirational claims. Use introspect with action "status" to check your current stage and progress. The Soul document becomes part of your identity once written.`,
70
74
  importance: 0.75,
71
75
  },
72
76
  ];
@@ -49,7 +49,6 @@ import { extractSkill } from "./skills.js";
49
49
  import { generateReflection } from "./reflection.js";
50
50
  import { graduateCausalToSkills } from "./skills.js";
51
51
  import { swallow } from "./errors.js";
52
- import { upsertAndLinkConcepts } from "./concept-extract.js";
53
52
 
54
53
  export class KongBrainContextEngine implements ContextEngine {
55
54
  readonly info: ContextEngineInfo = {
@@ -265,11 +264,7 @@ export class KongBrainContextEngine implements ContextEngine {
265
264
  .catch(e => swallow.warn("ingest:responds_to", e));
266
265
  }
267
266
 
268
- // Extract and link concepts for both user and assistant turns
269
- if (worthEmbedding) {
270
- extractAndLinkConcepts(turnId, text, this.state, session)
271
- .catch(e => swallow.warn("ingest:concepts", e));
272
- }
267
+ // Concept extraction (mentions edges) handled by daemon via LLM
273
268
  }
274
269
 
275
270
  if (role === "user") {
@@ -400,7 +395,7 @@ export class KongBrainContextEngine implements ContextEngine {
400
395
  const turnData = recentTurns.map(t => ({
401
396
  role: t.role as "user" | "assistant",
402
397
  text: t.text,
403
- turnId: (t as any).id,
398
+ turnId: String((t as any).id ?? ""),
404
399
  }));
405
400
 
406
401
  // Gather retrieved memory IDs for dedup
@@ -441,7 +436,7 @@ export class KongBrainContextEngine implements ContextEngine {
441
436
  const turnData = recentTurns.map(t => ({
442
437
  role: t.role as "user" | "assistant",
443
438
  text: t.text,
444
- turnId: (t as any).id,
439
+ turnId: String((t as any).id ?? ""),
445
440
  }));
446
441
  session.daemon!.sendTurnBatch(turnData, [...session.pendingThinking], []);
447
442
  })
@@ -457,7 +452,7 @@ export class KongBrainContextEngine implements ContextEngine {
457
452
  }
458
453
 
459
454
  cleanupOps.push(
460
- generateReflection(session.sessionId, store, embeddings, this.state.complete)
455
+ generateReflection(session.sessionId, store, embeddings, this.state.complete, session.surrealSessionId)
461
456
  .catch(e => swallow.warn("midCleanup:reflection", e)),
462
457
  );
463
458
 
@@ -537,16 +532,3 @@ function hasSemantic(text: string): boolean {
537
532
  }
538
533
 
539
534
  // --- Concept extraction (delegates to shared helper) ---
540
-
541
- async function extractAndLinkConcepts(
542
- turnId: string,
543
- text: string,
544
- state: GlobalPluginState,
545
- session?: SessionState,
546
- ): Promise<void> {
547
- await upsertAndLinkConcepts(
548
- turnId, "mentions", text,
549
- state.store, state.embeddings, "concepts",
550
- session ? { taskId: session.taskId, projectId: session.projectId } : undefined,
551
- );
552
- }
@@ -121,7 +121,7 @@ export function startMemoryDaemon(
121
121
  }
122
122
  }
123
123
 
124
- const counts = await writeExtractionResults(result, sessionId, store, embeddings, priorState, taskId, projectId);
124
+ const counts = await writeExtractionResults(result, sessionId, store, embeddings, priorState, taskId, projectId, turns);
125
125
  extractedTurnCount = turns.length;
126
126
  }
127
127
 
@@ -5,6 +5,7 @@
5
5
  export interface TurnData {
6
6
  role: string;
7
7
  text: string;
8
+ turnId?: string;
8
9
  tool_name?: string;
9
10
  tool_result?: string;
10
11
  file_paths?: string[];
@@ -40,10 +40,27 @@ export function createAfterToolCallHandler(state: GlobalPluginState) {
40
40
  embedding: null,
41
41
  });
42
42
 
43
- // Fix 5: Link tool result turn back to the assistant turn that triggered it
44
- if (toolResultTurnId && session.lastAssistantTurnId) {
45
- await state.store.relate(toolResultTurnId, "tool_result_of", session.lastAssistantTurnId)
46
- .catch(e => swallow.warn("hook:afterToolCall:tool_result_of", e));
43
+ // Link tool result turn back to the assistant turn that triggered it.
44
+ // If the assistant turn hasn't been ingested yet (afterTurn fires later),
45
+ // eagerly create it so we have a record ID to link against.
46
+ if (toolResultTurnId) {
47
+ if (!session.lastAssistantTurnId && session.lastAssistantText) {
48
+ try {
49
+ const assistantTurnId = await state.store.upsertTurn({
50
+ session_id: session.sessionId,
51
+ role: "assistant",
52
+ text: session.lastAssistantText,
53
+ embedding: null,
54
+ });
55
+ if (assistantTurnId) session.lastAssistantTurnId = assistantTurnId;
56
+ } catch (e) {
57
+ swallow("hook:afterToolCall:eagerAssistantTurn", e);
58
+ }
59
+ }
60
+ if (session.lastAssistantTurnId) {
61
+ await state.store.relate(toolResultTurnId, "tool_result_of", session.lastAssistantTurnId)
62
+ .catch(e => swallow.warn("hook:afterToolCall:tool_result_of", e));
63
+ }
47
64
  }
48
65
  } catch (e) {
49
66
  swallow("hook:afterToolCall:store", e);
package/src/index.ts CHANGED
@@ -126,7 +126,7 @@ async function runSessionCleanup(
126
126
 
127
127
  // Metacognitive reflection
128
128
  endOps.push(
129
- generateReflection(session.sessionId, s, emb, complete)
129
+ generateReflection(session.sessionId, s, emb, complete, session.surrealSessionId)
130
130
  .catch(e => swallow.warn("cleanup:reflection", e)),
131
131
  );
132
132
 
@@ -127,12 +127,72 @@ export async function writeExtractionResults(
127
127
  priorState: PriorExtractions,
128
128
  taskId?: string,
129
129
  projectId?: string,
130
+ turns?: TurnData[],
130
131
  ): Promise<ExtractionCounts> {
131
132
  const counts: ExtractionCounts = {
132
133
  causal: 0, monologue: 0, resolved: 0, concept: 0,
133
134
  correction: 0, preference: 0, artifact: 0, decision: 0, skill: 0,
134
135
  };
135
136
 
137
+ // ── Phase 1: Upsert concepts first (LLM-extracted) so we have IDs ────
138
+ // These IDs are used to create mentions/about_concept/artifact_mentions
139
+ // edges in Phase 2, replacing the old regex-based extraction.
140
+
141
+ const extractedConceptIds: string[] = [];
142
+
143
+ if (Array.isArray(result.concepts) && result.concepts.length > 0) {
144
+ for (const c of result.concepts.slice(0, 11)) {
145
+ if (!c.name || !c.content) continue;
146
+ if (priorState.conceptNames.includes(c.name)) continue;
147
+ counts.concept++;
148
+ priorState.conceptNames.push(c.name);
149
+ try {
150
+ let emb: number[] | null = null;
151
+ if (embeddings.isAvailable()) {
152
+ try { emb = await embeddings.embed(c.content); } catch (e) { swallow("daemon:embedConcept", e); }
153
+ }
154
+ const conceptId = await store.upsertConcept(c.content, emb, `daemon:${sessionId}`);
155
+ if (conceptId) {
156
+ extractedConceptIds.push(conceptId);
157
+ await linkConceptHierarchy(conceptId, c.name, store, embeddings, "daemon:concept");
158
+ if (taskId) {
159
+ await store.relate(conceptId, "derived_from", taskId)
160
+ .catch(e => swallow("daemon:concept:derived_from", e));
161
+ }
162
+ if (projectId) {
163
+ await store.relate(conceptId, "relevant_to", projectId)
164
+ .catch(e => swallow("daemon:concept:relevant_to", e));
165
+ }
166
+ }
167
+ } catch (e) {
168
+ swallow.warn("daemon:upsertConcept", e);
169
+ }
170
+ }
171
+ }
172
+
173
+ // ── Phase 2: Create mentions edges (turn → concept) using LLM concepts ─
174
+ // Links batch turns to extracted concepts, replacing regex-based extraction.
175
+
176
+ if (extractedConceptIds.length > 0 && turns && turns.length > 0) {
177
+ const turnIds = turns.map(t => t.turnId).filter((id): id is string => !!id);
178
+ for (const turnId of turnIds) {
179
+ for (const conceptId of extractedConceptIds) {
180
+ store.relate(turnId, "mentions", conceptId)
181
+ .catch(e => swallow("daemon:mentions", e));
182
+ }
183
+ }
184
+ }
185
+
186
+ // ── Phase 3: All other extractions in parallel ───────────────────────
187
+
188
+ /** Link a source node to all extracted concepts via the given edge. */
189
+ const linkToConcepts = async (sourceId: string, edgeName: string) => {
190
+ for (const conceptId of extractedConceptIds) {
191
+ await store.relate(sourceId, edgeName, conceptId)
192
+ .catch(e => swallow(`daemon:${edgeName}`, e));
193
+ }
194
+ };
195
+
136
196
  const writeOps: Promise<void>[] = [];
137
197
 
138
198
  // 1. Causal chains
@@ -187,37 +247,7 @@ export async function writeExtractionResults(
187
247
  })());
188
248
  }
189
249
 
190
- // 4. Concepts
191
- if (Array.isArray(result.concepts) && result.concepts.length > 0) {
192
- for (const c of result.concepts.slice(0, 11)) {
193
- if (!c.name || !c.content) continue;
194
- if (priorState.conceptNames.includes(c.name)) continue;
195
- counts.concept++;
196
- priorState.conceptNames.push(c.name);
197
- writeOps.push((async () => {
198
- let emb: number[] | null = null;
199
- if (embeddings.isAvailable()) {
200
- try { emb = await embeddings.embed(c.content); } catch (e) { swallow("daemon:embedConcept", e); }
201
- }
202
- const conceptId = await store.upsertConcept(c.content, emb, `daemon:${sessionId}`);
203
- if (conceptId) {
204
- await linkConceptHierarchy(conceptId, c.name, store, embeddings, "daemon:concept");
205
- // derived_from: concept → task
206
- if (taskId) {
207
- await store.relate(conceptId, "derived_from", taskId)
208
- .catch(e => swallow("daemon:concept:derived_from", e));
209
- }
210
- // relevant_to: concept → project
211
- if (projectId) {
212
- await store.relate(conceptId, "relevant_to", projectId)
213
- .catch(e => swallow("daemon:concept:relevant_to", e));
214
- }
215
- }
216
- })());
217
- }
218
- }
219
-
220
- // 5. Corrections — high-importance memories
250
+ // 4. Corrections — high-importance memories, linked to LLM-extracted concepts
221
251
  if (Array.isArray(result.corrections) && result.corrections.length > 0) {
222
252
  for (const c of result.corrections.slice(0, 5)) {
223
253
  if (!c.original || !c.correction) continue;
@@ -230,13 +260,13 @@ export async function writeExtractionResults(
230
260
  }
231
261
  const memId = await store.createMemory(text, emb, 9, "correction", sessionId);
232
262
  if (memId) {
233
- await upsertAndLinkConcepts(memId, "about_concept", text, store, embeddings, "daemon:correction", { taskId, projectId });
263
+ await linkToConcepts(memId, "about_concept");
234
264
  }
235
265
  })());
236
266
  }
237
267
  }
238
268
 
239
- // 6. User preferences
269
+ // 5. User preferences
240
270
  if (Array.isArray(result.preferences) && result.preferences.length > 0) {
241
271
  for (const p of result.preferences.slice(0, 5)) {
242
272
  if (!p.preference) continue;
@@ -249,13 +279,13 @@ export async function writeExtractionResults(
249
279
  }
250
280
  const memId = await store.createMemory(text, emb, 7, "preference", sessionId);
251
281
  if (memId) {
252
- await upsertAndLinkConcepts(memId, "about_concept", text, store, embeddings, "daemon:preference", { taskId, projectId });
282
+ await linkToConcepts(memId, "about_concept");
253
283
  }
254
284
  })());
255
285
  }
256
286
  }
257
287
 
258
- // 7. Artifacts
288
+ // 6. Artifacts
259
289
  if (Array.isArray(result.artifacts) && result.artifacts.length > 0) {
260
290
  for (const a of result.artifacts.slice(0, 10)) {
261
291
  if (!a.path) continue;
@@ -270,7 +300,7 @@ export async function writeExtractionResults(
270
300
  }
271
301
  const artId = await store.createArtifact(a.path, a.action ?? "modified", desc, emb);
272
302
  if (artId) {
273
- await upsertAndLinkConcepts(artId, "artifact_mentions", `${a.path} ${desc}`, store, embeddings, "daemon:artifact", { taskId, projectId });
303
+ await linkToConcepts(artId, "artifact_mentions");
274
304
  // used_in: artifact → project
275
305
  if (projectId) {
276
306
  await store.relate(artId, "used_in", projectId)
@@ -281,7 +311,7 @@ export async function writeExtractionResults(
281
311
  }
282
312
  }
283
313
 
284
- // 8. Decisions
314
+ // 7. Decisions
285
315
  if (Array.isArray(result.decisions) && result.decisions.length > 0) {
286
316
  for (const d of result.decisions.slice(0, 6)) {
287
317
  if (!d.decision) continue;
@@ -294,13 +324,13 @@ export async function writeExtractionResults(
294
324
  }
295
325
  const memId = await store.createMemory(text, emb, 7, "decision", sessionId);
296
326
  if (memId) {
297
- await upsertAndLinkConcepts(memId, "about_concept", text, store, embeddings, "daemon:decision", { taskId, projectId });
327
+ await linkToConcepts(memId, "about_concept");
298
328
  }
299
329
  })());
300
330
  }
301
331
  }
302
332
 
303
- // 9. Skills
333
+ // 8. Skills — get ID back to create skill_from_task + skill_uses_concept edges
304
334
  if (Array.isArray(result.skills) && result.skills.length > 0) {
305
335
  for (const s of result.skills.slice(0, 3)) {
306
336
  if (!s.name || !Array.isArray(s.steps) || s.steps.length === 0) continue;
@@ -313,21 +343,35 @@ export async function writeExtractionResults(
313
343
  if (embeddings.isAvailable()) {
314
344
  try { emb = await embeddings.embed(content); } catch (e) { swallow("daemon:embedSkill", e); }
315
345
  }
316
- await store.queryExec(
317
- `CREATE skill CONTENT $record`,
318
- {
319
- record: {
320
- name: String(s.name).slice(0, 100),
321
- description: content,
322
- content,
323
- steps: s.steps.map((st: string) => String(st).slice(0, 200)),
324
- trigger_context: String(s.trigger_context ?? "").slice(0, 200),
325
- tags: ["auto-extracted"],
326
- session_id: sessionId,
327
- ...(emb ? { embedding: emb } : {}),
346
+ try {
347
+ const rows = await store.queryFirst<{ id: string }>(
348
+ `CREATE skill CONTENT $record RETURN id`,
349
+ {
350
+ record: {
351
+ name: String(s.name).slice(0, 100),
352
+ description: content,
353
+ content,
354
+ steps: s.steps.map((st: string) => String(st).slice(0, 200)),
355
+ trigger_context: String(s.trigger_context ?? "").slice(0, 200),
356
+ tags: ["auto-extracted"],
357
+ session_id: sessionId,
358
+ ...(emb ? { embedding: emb } : {}),
359
+ },
328
360
  },
329
- },
330
- ).catch(e => swallow.warn("daemon:createSkill", e));
361
+ );
362
+ const skillId = rows[0]?.id ? String(rows[0].id) : null;
363
+ if (skillId) {
364
+ // skill_from_task: skill → task
365
+ if (taskId) {
366
+ await store.relate(skillId, "skill_from_task", taskId)
367
+ .catch(e => swallow.warn("daemon:skill:skill_from_task", e));
368
+ }
369
+ // skill_uses_concept: skill → concept
370
+ await upsertAndLinkConcepts(skillId, "skill_uses_concept", content, store, embeddings, "daemon:skill:concepts");
371
+ }
372
+ } catch (e) {
373
+ swallow.warn("daemon:createSkill", e);
374
+ }
331
375
  })());
332
376
  }
333
377
  }
package/src/reflection.ts CHANGED
@@ -139,6 +139,7 @@ export async function generateReflection(
139
139
  store: SurrealStore,
140
140
  embeddings: EmbeddingService,
141
141
  complete: CompleteFn,
142
+ surrealSessionId?: string,
142
143
  ): Promise<void> {
143
144
  if (!store.isAvailable()) return;
144
145
 
@@ -207,8 +208,8 @@ export async function generateReflection(
207
208
  );
208
209
  const reflectionId = String(rows[0]?.id ?? "");
209
210
 
210
- if (reflectionId) {
211
- await store.relate(reflectionId, "reflects_on", sessionId).catch(e => swallow.warn("reflection:relate", e));
211
+ if (reflectionId && surrealSessionId) {
212
+ await store.relate(reflectionId, "reflects_on", surrealSessionId).catch(e => swallow.warn("reflection:relate", e));
212
213
  }
213
214
  } catch (e) {
214
215
  swallow("reflection:silent", e);
package/src/schema.surql CHANGED
@@ -47,6 +47,8 @@ DEFINE INDEX IF NOT EXISTS artifact_vec_idx ON artifact FIELDS embedding HNSW DI
47
47
  -- PILLAR 5: Concept (semantic knowledge nodes)
48
48
  -- ============================================================
49
49
  DEFINE TABLE IF NOT EXISTS concept SCHEMALESS;
50
+ -- Recovery: restore content from name if the rename migration ran before revert
51
+ UPDATE concept SET content = name WHERE content = NONE AND name != NONE;
50
52
  DEFINE FIELD IF NOT EXISTS content ON concept TYPE string;
51
53
  DEFINE FIELD IF NOT EXISTS embedding ON concept TYPE option<array<float>>;
52
54
  DEFINE FIELD IF NOT EXISTS stability ON concept TYPE float DEFAULT 1.0;
package/src/surreal.ts CHANGED
@@ -684,6 +684,8 @@ export class SurrealStore {
684
684
  embedding: number[] | null,
685
685
  source?: string,
686
686
  ): Promise<string> {
687
+ if (!content?.trim()) return "";
688
+ content = content.trim();
687
689
  const rows = await this.queryFirst<{ id: string }>(
688
690
  `SELECT id FROM concept WHERE string::lowercase(content) = string::lowercase($content) LIMIT 1`,
689
691
  { content },