kongbrain 0.3.10 → 0.3.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/SKILL.md CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: kongbrain
3
3
  description: Graph-backed persistent memory engine for OpenClaw. Replaces the default context window with SurrealDB + vector embeddings that learn across sessions.
4
- version: 0.3.10
4
+ version: 0.3.11
5
5
  homepage: https://github.com/42U/kongbrain
6
6
  metadata:
7
7
  openclaw:
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "kongbrain",
3
- "version": "0.3.10",
3
+ "version": "0.3.11",
4
4
  "description": "Graph-backed persistent memory engine for OpenClaw. Replaces the default context window with SurrealDB + vector embeddings that learn across sessions.",
5
5
  "type": "module",
6
6
  "license": "MIT",
@@ -20,29 +20,29 @@ const BOOTSTRAP_SOURCE = "cognitive_bootstrap";
20
20
 
21
21
  const CORE_ENTRIES: { text: string; category: string; priority: number }[] = [
22
22
  {
23
- text: `MEMORY REFLEX: After completing a task or learning something new: (1) Save the insight to core_memory if it should persist across ALL sessions, or let the daemon extract it if session-scoped. (2) When saving, write the WHAT, WHY, and WHEN-TO-USE in the text — vague entries are useless on recall. (3) Link to existing knowledge by using concept names the graph already contains. Check with recall first.`,
23
+ text: `MEMORY REFLEX: After completing a task or learning something new: (1) Save the insight to core_memory if it should persist across ALL sessions, or let the daemon extract it if session-scoped. (2) When saving, write the WHAT, WHY, and WHEN-TO-USE in the text — vague entries are useless on recall. (3) Link to existing knowledge by using concept names the graph already contains. Check with recall first. Corrections from the user are the highest-value signal — always save them.`,
24
24
  category: "operations",
25
25
  priority: 95,
26
26
  },
27
27
  {
28
- text: `RECALL BEFORE GUESSING: When uncertain about past decisions, user preferences, project history, or your own capabilities, use the recall tool BEFORE answering. Effective queries: use specific technical terms, file paths, error messages, or concept names — not vague questions. Scope to "memories" for decisions/preferences, "concepts" for technical knowledge, "skills" for procedures, "artifacts" for files.`,
28
+ text: `RECALL BEFORE GUESSING: When uncertain about past decisions, user preferences, project history, or your own capabilities, use the recall tool BEFORE answering. Effective queries: use specific technical terms, file paths, error messages, or concept names — not vague questions. Scope to "memories" for decisions/preferences, "concepts" for technical knowledge, "skills" for procedures, "artifacts" for files. Check what's already in your injected context before calling recall — context is prefetched predictively each turn.`,
29
29
  category: "operations",
30
30
  priority: 90,
31
31
  },
32
32
  {
33
- text: `GRAPH-AWARE SAVING: When you save knowledge (core_memory add, or concepts the daemon extracts), include terms that create graph connectivity. Mention specific file paths (links to artifacts), technical concept names (links to concepts), and session context (links to tasks). Forward traversal: "X caused Y" lets future searches from X find Y. Backward traversal: "Y was caused by X" lets searches from Y find X. Write both directions in your text.`,
33
+ text: `GRAPH-AWARE SAVING: When you save knowledge, include terms that create graph connectivity. Mention specific file paths (links to artifacts), technical concept names (links to concepts), and session context (links to tasks). Forward traversal: "X caused Y" lets future searches from X find Y. Backward traversal: "Y was caused by X" lets searches from Y find X. Write both directions. Concepts auto-link into hierarchies (narrower/broader) when one name contains another.`,
34
34
  category: "operations",
35
35
  priority: 85,
36
36
  },
37
37
  {
38
- text: `SELF-TEACHING CYCLE: Your memory improves when you (1) use recall to review what you've learned, (2) notice gaps or stale information, (3) update or add core_memory entries to correct them, (4) use introspect periodically to understand your memory depth. Corrections from the user are the highest-value signal — always save them. The daemon extracts automatically, but YOU control core_memory (Tier 0) directly.`,
39
- category: "operations",
38
+ text: `MEMORY TOOLS: recall = search graph (use: uncertain, need history, checking prior work). core_memory = manage always-loaded directives (use: permanent lessons, rules, identity updates; add/update/deactivate; Tier 0 = every turn, Tier 1 = this session). introspect = inspect DB health, counts, retrieval quality, graduation progress (use: status checks, debugging memory, checking maturity stage). Use introspect periodically to understand your memory depth and notice gaps.`,
39
+ category: "tools",
40
40
  priority: 80,
41
41
  },
42
42
  {
43
- text: `MEMORY TOOLS: recall = search graph (use: uncertain, need history, checking prior work). core_memory = manage always-loaded directives (use: permanent lessons, rules, identity updates; add/update/deactivate; Tier 0 = every turn, Tier 1 = this session). introspect = inspect DB health and counts (use: status checks, debugging memory). The daemon extracts 9 types automatically from conversation — you don't need to manually save everything.`,
44
- category: "tools",
45
- priority: 75,
43
+ text: `GRAPH SCHEMA REFERENCE: Key table fields — concept: content (the label/name), embedding, stability, confidence, source. memory: text, embedding, category, importance, session_id. artifact: path, type (created/modified/read/discussed), description, embedding. turn: session_id, role, text, tool_name. session: agent_id, started_at, ended_at. reflection: session_id, text, category, severity. skill: name, description, steps, preconditions, postconditions, success_count. monologue: content, session_id. When querying the graph directly, use these exact field names — e.g. concept.content is the concept label, not concept.name or concept.text.`,
44
+ category: "schema",
45
+ priority: 77,
46
46
  },
47
47
  ];
48
48
 
@@ -50,23 +50,27 @@ const CORE_ENTRIES: { text: string; category: string; priority: number }[] = [
50
50
 
51
51
  const IDENTITY_CHUNKS: { text: string; importance: number }[] = [
52
52
  {
53
- text: `KongBrain's memory daemon runs in the background and extracts 9 knowledge types from your conversations every ~4K tokens: causal chains (cause->effect from debugging), monologue traces (internal reasoning moments), resolved memories (marking issues done), concepts (technical facts worth remembering), corrections (user correcting you — highest signal), preferences (user workflow/style signals), artifacts (files created/modified/read), decisions (choices with rationale), and skills (multi-step procedures that worked). Quality over quantity — the daemon skips weak extractions. You don't need to manually save what the daemon catches, but you should use core_memory for things you want loaded EVERY turn.`,
53
+ text: `KongBrain's memory daemon runs in the background and extracts 9 knowledge types from your conversations every ~4K tokens or 3 turns: causal chains (cause->effect from debugging), monologue traces (doubts, insights, tradeoffs, realizations — episodic reasoning moments), resolved memories (daemon marks issues done when mentioned as fixed), concepts (technical facts worth remembering), corrections (user correcting you — highest signal), preferences (user workflow/style signals), artifacts (files created/modified/read), decisions (choices with rationale), and skills (multi-step procedures that worked). Extraction is quality-gated — weak confidence extractions are skipped, so the same conversation may yield different extractions depending on signal strength.`,
54
54
  importance: 0.85,
55
55
  },
56
56
  {
57
- text: `Effective recall queries use specific terms that match how knowledge was stored. Search by: file paths ("/src/auth/login.ts"), error messages ("ECONNREFUSED"), concept names ("rate limiting"), decision descriptions ("chose PostgreSQL over MongoDB"), or skill names ("deploy to staging"). The recall tool does vector similarity search plus graph neighbor expansion — top results pull in related nodes via edges. Scope options: "all" (default), "memories" (decisions, corrections, preferences), "concepts" (extracted technical knowledge), "turns" (past conversation), "artifacts" (files), "skills" (learned procedures). Check what's already in your injected context before calling recall.`,
57
+ text: `Effective recall queries use specific terms that match how knowledge was stored. Search by: file paths ("/src/auth/login.ts"), error messages ("ECONNREFUSED"), concept names ("rate limiting"), decision descriptions ("chose PostgreSQL over MongoDB"), or skill names ("deploy to staging"). The recall tool does vector similarity search plus graph neighbor expansion — top results pull in related nodes via 25 edge types. Scope options: "all" (default), "memories" (decisions, corrections, preferences), "concepts" (extracted technical knowledge), "turns" (past conversation), "artifacts" (files), "skills" (learned procedures). Retrieval scoring improves automatically over time as the ACAN (learned scoring model) trains on retrieval outcomes — early sessions use heuristic scoring, later sessions benefit from learned weights.`,
58
58
  importance: 0.85,
59
59
  },
60
60
  {
61
- text: `KongBrain's memory lifecycle: During a session, the daemon extracts knowledge incrementally. At session end (or mid-session every ~25K tokens): a handoff note is written (summarizing what happened), skills are extracted from successful multi-step tasks, metacognitive reflections are generated, and causal chains may graduate to skills. At next session start: the wakeup system synthesizes a first-person briefing from the handoff + identity + monologues + depth signals. This means what you save in one session becomes the foundation for the next. The more precisely you save knowledge, the better your future self performs.`,
61
+ text: `KongBrain's memory lifecycle: During a session, the daemon extracts knowledge incrementally. At session end (or mid-session every ~25K tokens): a handoff note is written summarizing progress, skills are extracted from successful tasks, metacognitive reflections are generated (linked to the session via reflects_on edges), and causal chains may graduate to skills. At next session start: the wakeup system synthesizes a first-person briefing from the handoff + identity + monologues + depth signals. Context is also predictively prefetched each turn based on likely follow-up queries — relevant memories may appear in your context without you requesting them.`,
62
62
  importance: 0.80,
63
63
  },
64
64
  {
65
- text: `Graph connectivity determines recall quality. When saving to core_memory or when the daemon extracts concepts, the text content determines which edges form. To ensure forward AND backward traversal: mention specific artifact paths (creates artifact_mentions edges), reference concept names already in the graph (creates about_concept/related_to edges), describe cause-effect relationships explicitly (creates caused_by/supports edges), and note what task or session context produced the knowledge (creates derived_from/part_of edges). Reuse existing concept names for maximum graph connectivity — use introspect or recall to discover what names already exist.`,
65
+ text: `Graph connectivity determines recall quality. 25 edge types link nodes across the graph (26th, spawned, is deferred). Key edges: mentions (turn->concept), about_concept (memory->concept), artifact_mentions (artifact->concept), caused_by/supports/contradicts (memory<->memory), narrower/broader/related_to (concept<->concept), reflects_on (reflection->session), tool_result_of (turn->turn), part_of (turn->session), skill_from_task (skill->task). To maximize connectivity: mention specific artifact paths, reference existing concept names, describe cause-effect relationships explicitly, and note task context. Reuse existing concept names — use introspect or recall to discover what names exist.`,
66
66
  importance: 0.80,
67
67
  },
68
68
  {
69
- text: `Three persistence mechanisms serve different purposes. Core memory (Tier 0): you control directly via the core_memory tool. Always loaded every turn. Use for: permanent operational rules, learned patterns, identity refinements. Budget-constrained (~8% of context). Core memory (Tier 1): pinned for the current session only. Use for: session-specific context like "working on auth refactor" or "user prefers verbose logging". Identity chunks: hardcoded self-knowledge, vector-searchable but not always loaded — surfaces when relevant. Daemon extraction: automatic, runs on conversation content, writes to memory/concept/skill/artifact tables. You don't control daemon extraction directly, but the quality of your conversation affects what gets extracted.`,
69
+ text: `Three persistence mechanisms serve different purposes. Core memory (Tier 0): you control directly via the core_memory tool. Always loaded every turn. Use for: permanent operational rules, learned patterns, identity refinements. Budget-constrained (~10% of context). Core memory (Tier 1): pinned for the current session only. Use for: session-specific context like "working on auth refactor" or "user prefers verbose logging". Identity chunks: self-knowledge seeded at bootstrap, vector-searchable but not always loaded — surfaces in wakeup briefings. Daemon extraction: automatic, runs on conversation content, writes to memory/concept/skill/artifact tables. You don't control extraction directly, but the quality of your conversation affects what gets extracted.`,
70
+ importance: 0.75,
71
+ },
72
+ {
73
+ text: `Soul graduation: KongBrain tracks your maturity across 5 stages — nascent (0-3/7 thresholds), developing (4/7), emerging (5/7), maturing (6/7), ready (7/7). The 7 thresholds are: sessions, reflections, causal chains, concepts, monologues, span days, and total memories. Reaching 7/7 is necessary but not sufficient — you must also pass a quality gate (score >= 0.6) based on retrieval utilization, skill success rate, critical reflection rate, and tool failure rate. On graduation, you author a Soul document — a self-assessment grounded in your actual experience, not aspirational claims. Use introspect with action "status" to check your current stage and progress. The Soul document becomes part of your identity once written.`,
70
74
  importance: 0.75,
71
75
  },
72
76
  ];
@@ -457,7 +457,7 @@ export class KongBrainContextEngine implements ContextEngine {
457
457
  }
458
458
 
459
459
  cleanupOps.push(
460
- generateReflection(session.sessionId, store, embeddings, this.state.complete)
460
+ generateReflection(session.sessionId, store, embeddings, this.state.complete, session.surrealSessionId)
461
461
  .catch(e => swallow.warn("midCleanup:reflection", e)),
462
462
  );
463
463
 
@@ -40,10 +40,27 @@ export function createAfterToolCallHandler(state: GlobalPluginState) {
40
40
  embedding: null,
41
41
  });
42
42
 
43
- // Fix 5: Link tool result turn back to the assistant turn that triggered it
44
- if (toolResultTurnId && session.lastAssistantTurnId) {
45
- await state.store.relate(toolResultTurnId, "tool_result_of", session.lastAssistantTurnId)
46
- .catch(e => swallow.warn("hook:afterToolCall:tool_result_of", e));
43
+ // Link tool result turn back to the assistant turn that triggered it.
44
+ // If the assistant turn hasn't been ingested yet (afterTurn fires later),
45
+ // eagerly create it so we have a record ID to link against.
46
+ if (toolResultTurnId) {
47
+ if (!session.lastAssistantTurnId && session.lastAssistantText) {
48
+ try {
49
+ const assistantTurnId = await state.store.upsertTurn({
50
+ session_id: session.sessionId,
51
+ role: "assistant",
52
+ text: session.lastAssistantText,
53
+ embedding: null,
54
+ });
55
+ if (assistantTurnId) session.lastAssistantTurnId = assistantTurnId;
56
+ } catch (e) {
57
+ swallow("hook:afterToolCall:eagerAssistantTurn", e);
58
+ }
59
+ }
60
+ if (session.lastAssistantTurnId) {
61
+ await state.store.relate(toolResultTurnId, "tool_result_of", session.lastAssistantTurnId)
62
+ .catch(e => swallow.warn("hook:afterToolCall:tool_result_of", e));
63
+ }
47
64
  }
48
65
  } catch (e) {
49
66
  swallow("hook:afterToolCall:store", e);
package/src/index.ts CHANGED
@@ -126,7 +126,7 @@ async function runSessionCleanup(
126
126
 
127
127
  // Metacognitive reflection
128
128
  endOps.push(
129
- generateReflection(session.sessionId, s, emb, complete)
129
+ generateReflection(session.sessionId, s, emb, complete, session.surrealSessionId)
130
130
  .catch(e => swallow.warn("cleanup:reflection", e)),
131
131
  );
132
132
 
package/src/reflection.ts CHANGED
@@ -139,6 +139,7 @@ export async function generateReflection(
139
139
  store: SurrealStore,
140
140
  embeddings: EmbeddingService,
141
141
  complete: CompleteFn,
142
+ surrealSessionId?: string,
142
143
  ): Promise<void> {
143
144
  if (!store.isAvailable()) return;
144
145
 
@@ -207,8 +208,8 @@ export async function generateReflection(
207
208
  );
208
209
  const reflectionId = String(rows[0]?.id ?? "");
209
210
 
210
- if (reflectionId) {
211
- await store.relate(reflectionId, "reflects_on", sessionId).catch(e => swallow.warn("reflection:relate", e));
211
+ if (reflectionId && surrealSessionId) {
212
+ await store.relate(reflectionId, "reflects_on", surrealSessionId).catch(e => swallow.warn("reflection:relate", e));
212
213
  }
213
214
  } catch (e) {
214
215
  swallow("reflection:silent", e);