kongbrain 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -5
- package/package.json +1 -1
- package/src/context-engine.ts +2 -4
- package/src/index.ts +11 -18
- package/src/memory-daemon.ts +5 -2
- package/src/reflection.ts +1 -1
package/README.md
CHANGED
|
@@ -4,8 +4,9 @@
|
|
|
4
4
|
|
|
5
5
|

|
|
6
6
|
|
|
7
|
+
[](https://www.npmjs.com/package/kongbrain)
|
|
7
8
|
[](https://github.com/42U/kongbrain)
|
|
8
|
-
[](https://opensource.org/licenses/MIT)
|
|
9
10
|
[](https://nodejs.org)
|
|
10
11
|
[](https://surrealdb.com)
|
|
11
12
|
[](https://github.com/openclaw/openclaw)
|
|
@@ -270,7 +271,7 @@ Triggers at session end when metrics indicate problems:
|
|
|
270
271
|
| Steering candidates | any detected |
|
|
271
272
|
| Context waste | > 0.5% of context window |
|
|
272
273
|
|
|
273
|
-
|
|
274
|
+
The LLM generates a 2-4 sentence reflection: root cause, error pattern, what to do differently. Stored with importance 7.0, deduped at 0.85 cosine similarity.
|
|
274
275
|
|
|
275
276
|
</details>
|
|
276
277
|
|
|
@@ -297,7 +298,7 @@ Context Injection ─ Vector search -> graph expand -> 6-signal scoring -> budge
|
|
|
297
298
|
| Scores: similarity, recency, importance, access, neighbor, utility
|
|
298
299
|
| Budget: 21% of context window reserved for retrieval
|
|
299
300
|
v
|
|
300
|
-
Agent Loop ────────
|
|
301
|
+
Agent Loop ──────── LLM + tool execution
|
|
301
302
|
| Planning gate: announces plan before touching tools
|
|
302
303
|
| Smart truncation: preserves tail of large tool outputs
|
|
303
304
|
v
|
|
@@ -307,7 +308,7 @@ Turn Storage ────── Every message embedded + stored + linked via gra
|
|
|
307
308
|
Quality Eval ────── Measures retrieval utilization (text overlap, trigrams, unigrams)
|
|
308
309
|
| Tracks tool success, context waste, feeds ACAN training
|
|
309
310
|
v
|
|
310
|
-
Memory Daemon ───── Worker thread extracts 9 knowledge types via
|
|
311
|
+
Memory Daemon ───── Worker thread extracts 9 knowledge types via LLM:
|
|
311
312
|
| causal chains, monologues, concepts, corrections,
|
|
312
313
|
| preferences, artifacts, decisions, skills, resolved memories
|
|
313
314
|
v
|
|
@@ -323,7 +324,7 @@ At session start, a wake-up briefing is synthesized from the handoff, recent mon
|
|
|
323
324
|
<details>
|
|
324
325
|
<summary><strong>Memory Daemon</strong>: background knowledge extraction</summary>
|
|
325
326
|
|
|
326
|
-
A worker thread running throughout the session. Batches turns every ~12K tokens, calls
|
|
327
|
+
A worker thread running throughout the session. Batches turns every ~12K tokens, calls the configured LLM to extract:
|
|
327
328
|
|
|
328
329
|
- **Causal chains**: trigger/outcome sequences with success/confidence
|
|
329
330
|
- **Monologue traces**: thinking blocks that reveal problem-solving approach
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "kongbrain",
|
|
3
|
-
"version": "0.1.0",
|
|
3
|
+
"version": "0.1.2",
|
|
4
4
|
"description": "Graph-backed persistent memory engine for OpenClaw. Replaces the default context window with SurrealDB + vector embeddings that learn across sessions.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"license": "MIT",
|
package/src/context-engine.ts
CHANGED
|
@@ -53,7 +53,7 @@ export class KongBrainContextEngine implements ContextEngine {
|
|
|
53
53
|
readonly info: ContextEngineInfo = {
|
|
54
54
|
id: "kongbrain",
|
|
55
55
|
name: "KongBrain",
|
|
56
|
-
version: "0.1.0",
|
|
56
|
+
version: "0.1.2",
|
|
57
57
|
ownsCompaction: true,
|
|
58
58
|
};
|
|
59
59
|
|
|
@@ -227,10 +227,9 @@ export class KongBrainContextEngine implements ContextEngine {
|
|
|
227
227
|
|
|
228
228
|
try {
|
|
229
229
|
const role = (msg as any).role as string;
|
|
230
|
-
console.log(`[kongbrain:ingest] role=${role} sessionId=${params.sessionId}`);
|
|
231
230
|
if (role === "user" || role === "assistant") {
|
|
232
231
|
const text = extractMessageText(msg);
|
|
233
|
-
if (!text)
|
|
232
|
+
if (!text) return { ingested: false };
|
|
234
233
|
|
|
235
234
|
const worthEmbedding = hasSemantic(text);
|
|
236
235
|
let embedding: number[] | null = null;
|
|
@@ -248,7 +247,6 @@ export class KongBrainContextEngine implements ContextEngine {
|
|
|
248
247
|
embedding,
|
|
249
248
|
});
|
|
250
249
|
|
|
251
|
-
console.log(`[kongbrain:ingest] turnId=${turnId} role=${role} textLen=${text.length}`);
|
|
252
250
|
if (turnId) {
|
|
253
251
|
await store.relate(turnId, "part_of", session.sessionId)
|
|
254
252
|
.catch(e => swallow.warn("ingest:relate", e));
|
package/src/index.ts
CHANGED
|
@@ -92,7 +92,7 @@ async function runSessionCleanup(
|
|
|
92
92
|
.catch(e => { swallow.warn("cleanup:soulGraduation", e); return null; });
|
|
93
93
|
endOps.push(graduationPromise);
|
|
94
94
|
|
|
95
|
-
// The session-end
|
|
95
|
+
// The session-end LLM call is critical and needs the full 45s.
|
|
96
96
|
await Promise.race([
|
|
97
97
|
Promise.allSettled(endOps),
|
|
98
98
|
new Promise(resolve => setTimeout(resolve, 45_000)),
|
|
@@ -291,6 +291,11 @@ export default definePluginEntry({
|
|
|
291
291
|
logger.warn(`Embeddings init failed — running in degraded mode: ${e}`);
|
|
292
292
|
}
|
|
293
293
|
|
|
294
|
+
// Seed identity chunks (idempotent, requires embeddings ready)
|
|
295
|
+
seedIdentity(store, embeddings)
|
|
296
|
+
.then(n => { if (n > 0) logger.info(`Seeded ${n} identity chunks`); })
|
|
297
|
+
.catch(e => swallow.warn("factory:seedIdentity", e));
|
|
298
|
+
|
|
294
299
|
return new KongBrainContextEngine(globalState!);
|
|
295
300
|
});
|
|
296
301
|
|
|
@@ -338,10 +343,6 @@ export default definePluginEntry({
|
|
|
338
343
|
swallow.warn("index:startDaemon", e);
|
|
339
344
|
}
|
|
340
345
|
|
|
341
|
-
// Seed identity chunks (idempotent — skips if already seeded)
|
|
342
|
-
seedIdentity(store, embeddings)
|
|
343
|
-
.catch(e => swallow.warn("index:seedIdentity", e));
|
|
344
|
-
|
|
345
346
|
// Check for workspace .md files from the default context engine
|
|
346
347
|
if (globalState!.workspaceDir) {
|
|
347
348
|
hasMigratableFiles(globalState!.workspaceDir)
|
|
@@ -362,26 +363,18 @@ export default definePluginEntry({
|
|
|
362
363
|
|
|
363
364
|
// Synthesize wakeup briefing (background, non-blocking)
|
|
364
365
|
// The briefing is stored and later injected via assemble()'s systemPromptAddition
|
|
365
|
-
console.log("[kongbrain:wakeup] starting synthesis...");
|
|
366
366
|
synthesizeWakeup(store, globalState!.complete, session.sessionId)
|
|
367
367
|
.then(briefing => {
|
|
368
|
-
|
|
369
|
-
if (briefing) {
|
|
370
|
-
(session as any)._wakeupBriefing = briefing;
|
|
371
|
-
}
|
|
368
|
+
if (briefing) (session as any)._wakeupBriefing = briefing;
|
|
372
369
|
})
|
|
373
|
-
.catch(e =>
|
|
370
|
+
.catch(e => swallow.warn("index:wakeup", e));
|
|
374
371
|
|
|
375
372
|
// Startup cognition (background)
|
|
376
|
-
console.log("[kongbrain:cognition] starting synthesis...");
|
|
377
373
|
synthesizeStartupCognition(store, globalState!.complete)
|
|
378
374
|
.then(cognition => {
|
|
379
|
-
|
|
380
|
-
if (cognition) {
|
|
381
|
-
(session as any)._startupCognition = cognition;
|
|
382
|
-
}
|
|
375
|
+
if (cognition) (session as any)._startupCognition = cognition;
|
|
383
376
|
})
|
|
384
|
-
.catch(e =>
|
|
377
|
+
.catch(e => swallow.warn("index:startupCognition", e));
|
|
385
378
|
});
|
|
386
379
|
|
|
387
380
|
api.on("session_end", async (event) => {
|
|
@@ -398,7 +391,7 @@ export default definePluginEntry({
|
|
|
398
391
|
});
|
|
399
392
|
|
|
400
393
|
// OpenClaw's session_end is fire-and-forget and doesn't fire on CLI exit.
|
|
401
|
-
// Register a process exit handler to ensure the critical
|
|
394
|
+
// Register a process exit handler to ensure the critical extraction
|
|
402
395
|
// completes even when the user exits with Ctrl+D or /exit.
|
|
403
396
|
// Clean up previous listeners first (register() can be called multiple times).
|
|
404
397
|
if (registeredExitHandler) {
|
package/src/memory-daemon.ts
CHANGED
|
@@ -185,8 +185,11 @@ async function processExtraction(msg: DaemonMessage & { type: "turn_batch" }): P
|
|
|
185
185
|
const systemPrompt = buildSystemPrompt(thinking.length > 0, retrievedMemories.length > 0, priorState);
|
|
186
186
|
|
|
187
187
|
const { completeSimple, getModel } = await import("@mariozechner/pi-ai");
|
|
188
|
-
const provider = config.llmProvider
|
|
189
|
-
const modelId = config.llmModel
|
|
188
|
+
const provider = config.llmProvider;
|
|
189
|
+
const modelId = config.llmModel;
|
|
190
|
+
if (!provider || !modelId) {
|
|
191
|
+
throw new Error("Memory daemon requires llmProvider and llmModel from host config");
|
|
192
|
+
}
|
|
190
193
|
// getModel is heavily typed for known providers; cast needed for runtime-configured values
|
|
191
194
|
const model = (getModel as any)(provider, modelId);
|
|
192
195
|
|
package/src/reflection.ts
CHANGED
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
*
|
|
4
4
|
* At session end, reviews own performance: tool failures, runaway detections,
|
|
5
5
|
* low retrieval utilization, wasted tokens. If problems exceeded thresholds,
|
|
6
|
-
* generates a structured reflection via
|
|
6
|
+
* generates a structured reflection via the configured LLM, stored as high-importance memory.
|
|
7
7
|
* Retrieved when similar situations arise in future sessions.
|
|
8
8
|
*
|
|
9
9
|
* Ported from kongbrain — takes SurrealStore/EmbeddingService as params.
|