suemo 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +674 -0
- package/README.md +248 -0
- package/package.json +58 -0
- package/src/cli/commands/believe.ts +27 -0
- package/src/cli/commands/consolidate.ts +24 -0
- package/src/cli/commands/export-import.ts +91 -0
- package/src/cli/commands/goal.ts +71 -0
- package/src/cli/commands/health.ts +43 -0
- package/src/cli/commands/init.ts +90 -0
- package/src/cli/commands/observe.ts +40 -0
- package/src/cli/commands/query.ts +31 -0
- package/src/cli/commands/recall.ts +17 -0
- package/src/cli/commands/serve.ts +19 -0
- package/src/cli/commands/shared.ts +20 -0
- package/src/cli/commands/sync.ts +23 -0
- package/src/cli/commands/timeline.ts +37 -0
- package/src/cli/commands/wander.ts +34 -0
- package/src/cli/index.ts +41 -0
- package/src/cli/shared.ts +9 -0
- package/src/cognitive/consolidate.ts +349 -0
- package/src/cognitive/contradiction.ts +50 -0
- package/src/cognitive/health.ts +123 -0
- package/src/config.ts +114 -0
- package/src/db/client.ts +59 -0
- package/src/db/preflight.ts +152 -0
- package/src/db/schema.surql +109 -0
- package/src/db/schema.ts +24 -0
- package/src/env.d.ts +4 -0
- package/src/goal.ts +39 -0
- package/src/index.ts +13 -0
- package/src/logger.ts +60 -0
- package/src/mcp/server.ts +23 -0
- package/src/mcp/tools.ts +100 -0
- package/src/memory/episode.ts +66 -0
- package/src/memory/read.ts +223 -0
- package/src/memory/write.ts +134 -0
- package/src/sync.ts +120 -0
- package/src/types.ts +144 -0
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { connect, disconnect } from '../../db/client.ts'
|
|
3
|
+
import { initLogger } from '../../logger.ts'
|
|
4
|
+
import { observe } from '../../memory/write.ts'
|
|
5
|
+
import { MemoryKindSchema } from '../../types.ts'
|
|
6
|
+
import { app } from '../shared.ts'
|
|
7
|
+
|
|
8
|
+
export const observeCmd = app.sub('observe')
|
|
9
|
+
.meta({ description: 'Store a new observation in memory' })
|
|
10
|
+
.args([{ name: 'content', type: 'string', required: true }])
|
|
11
|
+
.flags({
|
|
12
|
+
scope: { type: 'string', short: 's', description: 'Scope label' },
|
|
13
|
+
tags: { type: 'string', short: 't', description: 'Comma-separated tags' },
|
|
14
|
+
kind: { type: 'string', short: 'k', description: 'Memory kind', default: 'observation' },
|
|
15
|
+
confidence: { type: 'number', description: 'Confidence 0.0–1.0', default: 1.0 },
|
|
16
|
+
source: { type: 'string', description: 'Source label' },
|
|
17
|
+
session: { type: 'string', description: 'Session ID (attach to open episode)' },
|
|
18
|
+
})
|
|
19
|
+
.run(async ({ args, flags }) => {
|
|
20
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
21
|
+
|
|
22
|
+
const kindParse = MemoryKindSchema.safeParse(flags.kind)
|
|
23
|
+
if (!kindParse.success) {
|
|
24
|
+
console.error(`Invalid kind "${flags.kind}". Valid: ${MemoryKindSchema.options.join(', ')}`)
|
|
25
|
+
process.exit(1)
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
29
|
+
const db = await connect(config.surreal)
|
|
30
|
+
const node = await observe(db, {
|
|
31
|
+
content: args.content,
|
|
32
|
+
kind: kindParse.data,
|
|
33
|
+
tags: flags.tags ? flags.tags.split(',').map((t) => t.trim()) : [],
|
|
34
|
+
scope: flags.scope,
|
|
35
|
+
confidence: flags.confidence,
|
|
36
|
+
source: flags.source,
|
|
37
|
+
})
|
|
38
|
+
await disconnect()
|
|
39
|
+
console.log(JSON.stringify({ id: node.id, kind: node.kind, valid_from: node.valid_from }, null, 2))
|
|
40
|
+
})
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { connect, disconnect } from '../../db/client.ts'
|
|
3
|
+
import { initLogger } from '../../logger.ts'
|
|
4
|
+
import { query } from '../../memory/read.ts'
|
|
5
|
+
import { app } from '../shared.ts'
|
|
6
|
+
|
|
7
|
+
export const queryCmd = app.sub('query')
|
|
8
|
+
.meta({ description: 'Hybrid semantic search over memories' })
|
|
9
|
+
.args([{ name: 'input', type: 'string', required: true }])
|
|
10
|
+
.flags({
|
|
11
|
+
scope: { type: 'string', short: 's', description: 'Filter by scope' },
|
|
12
|
+
top: { type: 'number', short: 'n', description: 'Number of results', default: 5 },
|
|
13
|
+
json: { type: 'boolean', short: 'j', description: 'Output full JSON nodes' },
|
|
14
|
+
})
|
|
15
|
+
.run(async ({ args, flags }) => {
|
|
16
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
17
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
18
|
+
const db = await connect(config.surreal)
|
|
19
|
+
const nodes = await query(db, { input: args.input, scope: flags.scope, topK: flags.top })
|
|
20
|
+
await disconnect()
|
|
21
|
+
if (flags.json) {
|
|
22
|
+
console.log(JSON.stringify(nodes, null, 2))
|
|
23
|
+
} else {
|
|
24
|
+
for (const n of nodes) {
|
|
25
|
+
console.log(`[${n.kind}] ${n.id}`)
|
|
26
|
+
console.log(` ${n.content.slice(0, 120)}`)
|
|
27
|
+
if (n.scope) console.log(` scope: ${n.scope}`)
|
|
28
|
+
console.log()
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
})
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { connect, disconnect } from '../../db/client.ts'
|
|
3
|
+
import { initLogger } from '../../logger.ts'
|
|
4
|
+
import { recall } from '../../memory/read.ts'
|
|
5
|
+
import { app } from '../shared.ts'
|
|
6
|
+
|
|
7
|
+
export const recallCmd = app.sub('recall')
|
|
8
|
+
.meta({ description: 'Fetch a single node + its neighbours (ticks FSRS)' })
|
|
9
|
+
.args([{ name: 'nodeId', type: 'string', required: true }])
|
|
10
|
+
.run(async ({ args, flags }) => {
|
|
11
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
12
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
13
|
+
const db = await connect(config.surreal)
|
|
14
|
+
const result = await recall(db, args.nodeId)
|
|
15
|
+
await disconnect()
|
|
16
|
+
console.log(JSON.stringify(result, null, 2))
|
|
17
|
+
})
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { initLogger } from '../../logger.ts'
|
|
3
|
+
import { startMcpServer } from '../../mcp/server.ts'
|
|
4
|
+
import { app } from '../shared.ts'
|
|
5
|
+
|
|
6
|
+
export const serveCmd = app.sub('serve')
|
|
7
|
+
.meta({ description: 'Start the MCP server' })
|
|
8
|
+
.flags({
|
|
9
|
+
port: { type: 'number', short: 'p', description: 'Port to listen on (overrides config)' },
|
|
10
|
+
host: { type: 'string', description: 'Host to bind to (overrides config)' },
|
|
11
|
+
})
|
|
12
|
+
.run(async ({ flags }) => {
|
|
13
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
14
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
15
|
+
if (flags.port) config.mcp.port = flags.port
|
|
16
|
+
if (flags.host) config.mcp.host = flags.host
|
|
17
|
+
await startMcpServer(config)
|
|
18
|
+
// Server runs indefinitely — no disconnect
|
|
19
|
+
})
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { Crust } from '@crustjs/core'
|
|
2
|
+
|
|
3
|
+
// Root CLI — shared instance all commands are built off via .sub()
|
|
4
|
+
// The two flags here are inherited by every subcommand.
|
|
5
|
+
export const app = new Crust('suemo')
|
|
6
|
+
.meta({ description: 'Persistent semantic memory for AI agents — SurrealDB backend' })
|
|
7
|
+
.flags({
|
|
8
|
+
config: {
|
|
9
|
+
type: 'string',
|
|
10
|
+
short: 'c',
|
|
11
|
+
description: 'Path to suemo config file',
|
|
12
|
+
inherit: true,
|
|
13
|
+
},
|
|
14
|
+
debug: {
|
|
15
|
+
type: 'boolean',
|
|
16
|
+
short: 'd',
|
|
17
|
+
description: 'Enable verbose debug logging',
|
|
18
|
+
inherit: true,
|
|
19
|
+
},
|
|
20
|
+
})
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { connect, disconnect } from '../../db/client.ts'
|
|
3
|
+
import { initLogger } from '../../logger.ts'
|
|
4
|
+
import { syncTo } from '../../sync.ts'
|
|
5
|
+
import { app } from '../shared.ts'
|
|
6
|
+
|
|
7
|
+
export const syncCmd = app.sub('sync')
|
|
8
|
+
.meta({ description: 'Push memories to remote SurrealDB (append-only)' })
|
|
9
|
+
.flags({
|
|
10
|
+
'dry-run': { type: 'boolean', description: 'Show what would be pushed without writing', default: false },
|
|
11
|
+
})
|
|
12
|
+
.run(async ({ flags }) => {
|
|
13
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
14
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
15
|
+
if (!config.sync) {
|
|
16
|
+
console.error('No sync.remote configured in suemo config.')
|
|
17
|
+
process.exit(1)
|
|
18
|
+
}
|
|
19
|
+
const db = await connect(config.surreal)
|
|
20
|
+
const result = await syncTo(db, config.sync.remote, { dryRun: flags['dry-run'] })
|
|
21
|
+
await disconnect()
|
|
22
|
+
console.log(JSON.stringify(result, null, 2))
|
|
23
|
+
})
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { connect, disconnect } from '../../db/client.ts'
|
|
3
|
+
import { initLogger } from '../../logger.ts'
|
|
4
|
+
import { timeline } from '../../memory/read.ts'
|
|
5
|
+
import { app } from '../shared.ts'
|
|
6
|
+
|
|
7
|
+
export const timelineCmd = app.sub('timeline')
|
|
8
|
+
.meta({ description: 'Chronological view of memories' })
|
|
9
|
+
.flags({
|
|
10
|
+
scope: { type: 'string', short: 's', description: 'Filter by scope' },
|
|
11
|
+
from: { type: 'string', description: 'Start datetime (ISO)' },
|
|
12
|
+
until: { type: 'string', description: 'End datetime (ISO)' },
|
|
13
|
+
limit: { type: 'number', short: 'n', description: 'Max results', default: 20 },
|
|
14
|
+
json: { type: 'boolean', short: 'j', description: 'Output full JSON' },
|
|
15
|
+
})
|
|
16
|
+
.run(async ({ flags }) => {
|
|
17
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
18
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
19
|
+
const db = await connect(config.surreal)
|
|
20
|
+
const nodes = await timeline(db, {
|
|
21
|
+
...(flags.scope ? { scope: flags.scope } : {}),
|
|
22
|
+
...(flags.from ? { from: flags.from } : {}),
|
|
23
|
+
...(flags.until ? { until: flags.until } : {}),
|
|
24
|
+
...(flags.limit ? { limit: flags.limit } : {}),
|
|
25
|
+
})
|
|
26
|
+
await disconnect()
|
|
27
|
+
if (flags.json) {
|
|
28
|
+
console.log(JSON.stringify(nodes, null, 2))
|
|
29
|
+
} else {
|
|
30
|
+
for (const n of nodes) {
|
|
31
|
+
const ts = new Date(n.created_at).toLocaleString()
|
|
32
|
+
console.log(`${ts} [${n.kind}] ${n.id}`)
|
|
33
|
+
console.log(` ${n.content.slice(0, 120)}`)
|
|
34
|
+
console.log()
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
})
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import { loadConfig } from '../../config.ts'
|
|
2
|
+
import { connect, disconnect } from '../../db/client.ts'
|
|
3
|
+
import { initLogger } from '../../logger.ts'
|
|
4
|
+
import { wander } from '../../memory/read.ts'
|
|
5
|
+
import { app } from '../shared.ts'
|
|
6
|
+
|
|
7
|
+
export const wanderCmd = app.sub('wander')
|
|
8
|
+
.meta({ description: 'Spreading-activation walk through the memory graph' })
|
|
9
|
+
.flags({
|
|
10
|
+
from: { type: 'string', description: 'Anchor node ID (random if omitted)' },
|
|
11
|
+
hops: { type: 'number', description: 'Traversal depth', default: 3 },
|
|
12
|
+
scope: { type: 'string', short: 's', description: 'Filter by scope' },
|
|
13
|
+
json: { type: 'boolean', short: 'j', description: 'Output full JSON nodes' },
|
|
14
|
+
})
|
|
15
|
+
.run(async ({ flags }) => {
|
|
16
|
+
await initLogger({ level: flags.debug ? 'debug' : 'info' })
|
|
17
|
+
const config = await loadConfig(process.cwd(), flags.config)
|
|
18
|
+
const db = await connect(config.surreal)
|
|
19
|
+
const nodes = await wander(db, {
|
|
20
|
+
...(flags.from ? { anchor: flags.from } : {}),
|
|
21
|
+
...(flags.hops ? { hops: flags.hops } : {}),
|
|
22
|
+
...(flags.scope ? { scope: flags.scope } : {}),
|
|
23
|
+
})
|
|
24
|
+
await disconnect()
|
|
25
|
+
if (flags.json) {
|
|
26
|
+
console.log(JSON.stringify(nodes, null, 2))
|
|
27
|
+
} else {
|
|
28
|
+
for (const n of nodes) {
|
|
29
|
+
console.log(`[${n.kind}] ${n.id} salience=${n.salience.toFixed(2)}`)
|
|
30
|
+
console.log(` ${n.content.slice(0, 120)}`)
|
|
31
|
+
console.log()
|
|
32
|
+
}
|
|
33
|
+
}
|
|
34
|
+
})
|
package/src/cli/index.ts
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
#!/usr/bin/env bun

// CLI entry point: wires every subcommand onto the shared root instance
// and dispatches on process.argv via .execute().

import { helpPlugin, versionPlugin } from '@crustjs/plugins'
import { initLogger } from '../logger.ts'
import { believeCmd } from './commands/believe.ts'
import { consolidateCmd } from './commands/consolidate.ts'
import { exportCmd, importCmd } from './commands/export-import.ts'
import { goalCmd } from './commands/goal.ts'
import { healthCmd } from './commands/health.ts'
import { initCmd } from './commands/init.ts'
import { observeCmd } from './commands/observe.ts'
import { queryCmd } from './commands/query.ts'
import { recallCmd } from './commands/recall.ts'
import { serveCmd } from './commands/serve.ts'
import { syncCmd } from './commands/sync.ts'
import { timelineCmd } from './commands/timeline.ts'
import { wanderCmd } from './commands/wander.ts'
import { app } from './shared.ts'

// Import attribute required so the JSON module resolves under Bun/TS
// with verbatim module syntax.
import packageJson from '../../package.json' with { type: 'json' }

// Default logger before any command runs; individual command handlers
// re-initialise with the --debug level themselves.
await initLogger({ level: 'info' })

// Register plugins first (version/help), then every subcommand, then
// parse argv and run the matched command.
await app
  .use(versionPlugin(packageJson.version ?? '0.0.0'))
  .use(helpPlugin())
  .command(initCmd)
  .command(serveCmd)
  .command(observeCmd)
  .command(believeCmd)
  .command(queryCmd)
  .command(recallCmd)
  .command(wanderCmd)
  .command(timelineCmd)
  .command(goalCmd) // goal is a container: goal set / goal list / goal resolve
  .command(consolidateCmd)
  .command(healthCmd)
  .command(syncCmd)
  .command(exportCmd)
  .command(importCmd)
  .execute()
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
// src/cli/shared.ts — single shared root, holds inheritable flags
|
|
2
|
+
import { Crust } from '@crustjs/core'
|
|
3
|
+
|
|
4
|
+
export const app = new Crust('suemo')
|
|
5
|
+
.meta({ description: 'Persistent semantic memory for AI agents' })
|
|
6
|
+
.flags({
|
|
7
|
+
config: { type: 'string', short: 'c', description: 'Path to config file', inherit: true },
|
|
8
|
+
debug: { type: 'boolean', short: 'd', description: 'Verbose debug logging', inherit: true },
|
|
9
|
+
})
|
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
import type { Surreal } from 'surrealdb'
|
|
2
|
+
import type { LLMConfig } from '../config.ts'
|
|
3
|
+
import { getLogger } from '../logger.ts'
|
|
4
|
+
import type { ConsolidationRun, MemoryNode } from '../types.ts'
|
|
5
|
+
|
|
6
|
+
const log = getLogger(['suemo', 'cognitive', 'consolidate'])
|
|
7
|
+
|
|
8
|
+
// ── LLM helper — OpenAI-compatible chat completions ────────────────────────
|
|
9
|
+
async function llmComplete(
|
|
10
|
+
llm: LLMConfig,
|
|
11
|
+
systemPrompt: string,
|
|
12
|
+
userContent: string,
|
|
13
|
+
): Promise<string> {
|
|
14
|
+
const res = await fetch(`${llm.url}/chat/completions`, {
|
|
15
|
+
method: 'POST',
|
|
16
|
+
headers: {
|
|
17
|
+
'Content-Type': 'application/json',
|
|
18
|
+
'Authorization': `Bearer ${llm.apiKey}`,
|
|
19
|
+
},
|
|
20
|
+
body: JSON.stringify({
|
|
21
|
+
model: llm.model,
|
|
22
|
+
messages: [
|
|
23
|
+
{ role: 'system', content: systemPrompt },
|
|
24
|
+
{ role: 'user', content: userContent },
|
|
25
|
+
],
|
|
26
|
+
max_tokens: 512,
|
|
27
|
+
temperature: 0.2,
|
|
28
|
+
}),
|
|
29
|
+
})
|
|
30
|
+
if (!res.ok) {
|
|
31
|
+
const text = await res.text()
|
|
32
|
+
throw new Error(`LLM error ${res.status}: ${text}`)
|
|
33
|
+
}
|
|
34
|
+
const json = await res.json() as { choices: { message: { content: string } }[] }
|
|
35
|
+
return json.choices[0]?.message.content ?? ''
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// ── NREM — compress clusters of similar unconsolidated nodes ───────────────
|
|
39
|
+
/**
 * NREM phase: compress clusters of semantically similar, unconsolidated
 * memory nodes into single summary nodes.
 *
 * Flow: fetch up to 500 unconsolidated non-goal nodes, greedily cluster
 * them by DB-side cosine similarity, then for each multi-node cluster ask
 * the LLM for a compressed summary, CREATE the summary node, mark the
 * originals consolidated, and RELATE summary->original as 'derived_from'.
 *
 * @param similarityThreshold minimum cosine score for a node to join a cluster
 * @returns nodesIn = nodes fetched; nodesOut = summaries created plus
 *          nodes passed through unmerged (singletons and LLM-failure clusters)
 */
async function runNREM(
  db: Surreal,
  llm: LLMConfig,
  similarityThreshold: number,
): Promise<{ nodesIn: number; nodesOut: number }> {
  log.info('NREM phase starting')

  // Fetch all unconsolidated nodes (oldest first, capped at 500 per run).
  const result = await db.query<[MemoryNode[]]>(`
    SELECT * FROM memory
    WHERE consolidated = false
    AND kind != 'goal'
    AND (valid_until = NONE OR valid_until > time::now())
    ORDER BY created_at ASC
    LIMIT 500
  `)
  const nodes = result[0] ?? []

  if (nodes.length === 0) {
    log.info('NREM: no unconsolidated nodes')
    return { nodesIn: 0, nodesOut: 0 }
  }

  log.info(`NREM: processing ${nodes.length} nodes`)

  // Simple greedy clustering: group nodes by cosine similarity
  // to the first unassigned node in each cluster.
  // Production would use k-means or DBSCAN; this is correct for v1.
  const clusters: MemoryNode[][] = []
  const assigned = new Set<string>()

  for (const node of nodes) {
    if (assigned.has(node.id)) continue
    const cluster: MemoryNode[] = [node]
    assigned.add(node.id)

    // Find similar unassigned nodes using DB-side cosine.
    // NOTE(review): the KNN operator <|K,EF|> normally appears in a WHERE
    // clause in SurrealQL — confirm ORDER BY usage is supported by the
    // deployed SurrealDB version.
    const similarResult = await db.query<[{ id: string; score: number }[]]>(
      `
      SELECT id, vector::similarity::cosine(embedding, $emb) AS score
      FROM memory
      WHERE consolidated = false
      AND id != $self
      AND (valid_until = NONE OR valid_until > time::now())
      ORDER BY embedding <|10, 20|> $emb
      LIMIT 10
      `,
      { emb: node.embedding, self: node.id },
    )

    // Keep only candidates above the threshold that are in this run's
    // working set (the similarity query scans ALL unconsolidated rows,
    // so a hit may fall outside the 500 fetched above).
    for (const candidate of similarResult[0] ?? []) {
      if (!assigned.has(candidate.id) && candidate.score >= similarityThreshold) {
        const full = nodes.find((n) => n.id === candidate.id)
        if (full) {
          cluster.push(full)
          assigned.add(candidate.id)
        }
      }
    }
    clusters.push(cluster)
  }

  let nodesOut = 0

  for (const cluster of clusters) {
    if (cluster.length === 1) {
      // Singleton — mark consolidated without compression.
      // NOTE(review): passing the record via $id param — confirm SurrealDB
      // binds record ids in UPDATE targets as intended.
      await db.query('UPDATE $id SET consolidated = true', { id: cluster[0]!.id })
      nodesOut++
      continue
    }

    // Multi-node cluster: LLM compression of the numbered contents.
    const combinedContent = cluster.map((n, i) => `${i + 1}. ${n.content}`).join('\n')
    let summary: string
    try {
      summary = await llmComplete(
        llm,
        'You are a memory compression assistant. Given a list of related observations, '
        + 'produce a single concise summary that retains all unique facts. '
        + 'Reply with only the summary — no preamble.',
        combinedContent,
      )
    } catch (e) {
      // Best-effort degradation: when the LLM is unavailable the cluster's
      // nodes are marked consolidated individually, so the run still makes
      // progress (they just don't get compressed this time).
      log.error('LLM compression failed — marking cluster nodes individually', { error: String(e) })
      for (const n of cluster) {
        await db.query('UPDATE $id SET consolidated = true', { id: n.id })
      }
      nodesOut += cluster.length
      continue
    }

    // Create the compressed summary node; embedding is computed DB-side
    // via fn::embed. FSRS fields start empty (summary nodes are not yet
    // scheduled for review).
    const created = await db.query<[MemoryNode[]]>(
      `
      CREATE memory CONTENT {
        kind: 'observation',
        content: $summary,
        summary: $summary,
        tags: $tags,
        scope: $scope,
        embedding: fn::embed($summary),
        confidence: 1.0,
        salience: 0.7,
        source: 'consolidation:nrem',
        consolidated: true,
        consolidated_into: NONE,
        fsrs_stability: NONE,
        fsrs_difficulty: NONE,
        fsrs_next_review: NONE
      }
      `,
      {
        // Union of all member tags, deduplicated; scope inherited from the
        // cluster seed (members may disagree — first wins).
        summary,
        tags: [...new Set(cluster.flatMap((n) => n.tags))],
        scope: cluster[0]!.scope ?? null,
      },
    )
    const summaryNode = created[0]?.[0]
    if (!summaryNode) continue // CREATE returned nothing — skip linking

    // Mark originals as consolidated, point to summary, and add a
    // derived_from edge from summary to each original.
    for (const n of cluster) {
      await db.query(
        `
        UPDATE $id SET
        consolidated = true,
        consolidated_into = $into,
        updated_at = time::now()
        `,
        { id: n.id, into: summaryNode.id },
      )

      await db.query(
        `
        RELATE $from->relates_to->$to CONTENT {
        kind: 'derived_from',
        strength: 1.0
        }
        `,
        { from: summaryNode.id, to: n.id },
      )
    }
    nodesOut++
    log.debug('NREM cluster compressed', { clusterSize: cluster.length, summaryId: summaryNode.id })
  }

  return { nodesIn: nodes.length, nodesOut }
}
|
|
188
|
+
|
|
189
|
+
// ── REM — connect newly consolidated nodes to broader graph ───────────────
|
|
190
|
+
/**
 * REM phase: integrate recently created NREM summary nodes into the wider
 * memory graph.
 *
 * For each of the 50 most recent 'consolidation:nrem' nodes: fetch the 10
 * nearest consolidated neighbours by embedding, ask the LLM to score and
 * type the relations, parse the JSON reply, RELATE every relation whose
 * score clears the threshold, and bump the node's salience by 0.1.
 * All failures (LLM, JSON parse, RELATE) are logged and skipped — this
 * phase never throws per-node.
 */
async function runREM(
  db: Surreal,
  llm: LLMConfig,
  relationThreshold: number,
): Promise<void> {
  log.info('REM phase starting')

  // Get recently consolidated summary nodes (no incoming relations yet).
  // NOTE(review): the filter is source-based only; nodes already related
  // in a previous REM pass are re-processed — confirm this is intended.
  const result = await db.query<[MemoryNode[]]>(`
    SELECT * FROM memory
    WHERE consolidated = true
    AND source = 'consolidation:nrem'
    AND (valid_until = NONE OR valid_until > time::now())
    ORDER BY created_at DESC
    LIMIT 50
  `)
  const newNodes = result[0] ?? []
  if (newNodes.length === 0) {
    log.info('REM: no new nodes to integrate')
    return
  }

  for (const node of newNodes) {
    // Find top candidates from the full graph by vector proximity.
    // NOTE(review): KNN operator in ORDER BY — same caveat as runNREM.
    const candidates = await db.query<[MemoryNode[]]>(
      `
      SELECT * FROM memory
      WHERE id != $self
      AND consolidated = true
      AND (valid_until = NONE OR valid_until > time::now())
      ORDER BY embedding <|10, 40|> $emb
      LIMIT 10
      `,
      { self: node.id, emb: node.embedding },
    )

    const topCandidates = candidates[0] ?? []
    if (topCandidates.length === 0) continue

    // Numbered candidate listing handed to the LLM, id included so the
    // reply can reference candidates directly.
    const candidateList = topCandidates
      .map((c, i) => `${i + 1}. [${c.id}] ${c.content}`)
      .join('\n')

    let rawScores: string
    try {
      rawScores = await llmComplete(
        llm,
        'You are a knowledge graph assistant. For each candidate memory, score how strongly '
        + 'it relates to the source memory. Valid relation types: supports, contradicts, '
        + 'derived_from, caused_by, similar_to. '
        + 'Reply ONLY with JSON array: [{"id":"...","kind":"...","score":0.0}, ...]. '
        + 'Omit entries with score < 0.3.',
        `Source: ${node.content}\n\nCandidates:\n${candidateList}`,
      )
    } catch (e) {
      log.error('LLM relation scoring failed', { nodeId: node.id, error: String(e) })
      continue
    }

    // Strip markdown fences the model may wrap around the JSON, then parse.
    // NOTE(review): the parsed array is not schema-validated — a malformed
    // but parseable reply (wrong keys, non-numeric score) flows through.
    let relations: { id: string; kind: string; score: number }[]
    try {
      const cleaned = rawScores.replace(/```json|```/g, '').trim()
      relations = JSON.parse(cleaned)
    } catch {
      log.error('Failed to parse LLM relation JSON', { raw: rawScores.slice(0, 200) })
      continue
    }

    // Materialise each sufficiently strong relation as a graph edge.
    for (const rel of relations) {
      if (rel.score < relationThreshold) continue
      try {
        await db.query(
          `
          RELATE $from->relates_to->$to CONTENT {
          kind: $kind,
          strength: $strength
          }
          `,
          { from: node.id, to: rel.id, kind: rel.kind, strength: rel.score },
        )
      } catch (e) {
        log.error('Failed to create relation', { from: node.id, to: rel.id, error: String(e) })
      }
    }

    // Bump salience on newly connected node (clamped at 1.0).
    await db.query(
      'UPDATE $id SET salience = math::min(salience + 0.1, 1.0)',
      { id: node.id },
    )
  }
}
|
|
282
|
+
|
|
283
|
+
// ── Public entry point ─────────────────────────────────────────────────────
|
|
284
|
+
export async function consolidate(
|
|
285
|
+
db: Surreal,
|
|
286
|
+
opts: {
|
|
287
|
+
nremOnly?: boolean
|
|
288
|
+
nremSimilarityThreshold?: number
|
|
289
|
+
remRelationThreshold?: number
|
|
290
|
+
llm: LLMConfig
|
|
291
|
+
},
|
|
292
|
+
): Promise<ConsolidationRun> {
|
|
293
|
+
const phase = opts.nremOnly ? 'nrem' : 'full'
|
|
294
|
+
log.info('consolidate()', { phase })
|
|
295
|
+
|
|
296
|
+
// Create run log entry
|
|
297
|
+
const runResult = await db.query<[ConsolidationRun[]]>(
|
|
298
|
+
`
|
|
299
|
+
CREATE consolidation_run CONTENT {
|
|
300
|
+
phase: $phase,
|
|
301
|
+
nodes_in: 0,
|
|
302
|
+
nodes_out: 0,
|
|
303
|
+
status: 'running',
|
|
304
|
+
error: NONE
|
|
305
|
+
}
|
|
306
|
+
`,
|
|
307
|
+
{ phase },
|
|
308
|
+
)
|
|
309
|
+
const run = runResult[0]![0]!
|
|
310
|
+
|
|
311
|
+
try {
|
|
312
|
+
const { nodesIn, nodesOut } = await runNREM(
|
|
313
|
+
db,
|
|
314
|
+
opts.llm,
|
|
315
|
+
opts.nremSimilarityThreshold ?? 0.85,
|
|
316
|
+
)
|
|
317
|
+
|
|
318
|
+
if (!opts.nremOnly) {
|
|
319
|
+
await runREM(db, opts.llm, opts.remRelationThreshold ?? 0.4)
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
await db.query(
|
|
323
|
+
`
|
|
324
|
+
UPDATE $id SET
|
|
325
|
+
completed_at = time::now(),
|
|
326
|
+
status = 'done',
|
|
327
|
+
nodes_in = $nodesIn,
|
|
328
|
+
nodes_out = $nodesOut
|
|
329
|
+
`,
|
|
330
|
+
{ id: run.id, nodesIn, nodesOut },
|
|
331
|
+
)
|
|
332
|
+
|
|
333
|
+
log.info('consolidate() complete', { nodesIn, nodesOut, phase })
|
|
334
|
+
return { ...run, status: 'done', nodes_in: nodesIn, nodes_out: nodesOut }
|
|
335
|
+
} catch (e) {
|
|
336
|
+
const errMsg = String(e)
|
|
337
|
+
log.error('consolidate() failed', { error: errMsg })
|
|
338
|
+
await db.query(
|
|
339
|
+
`
|
|
340
|
+
UPDATE $id SET
|
|
341
|
+
completed_at = time::now(),
|
|
342
|
+
status = 'failed',
|
|
343
|
+
error = $err
|
|
344
|
+
`,
|
|
345
|
+
{ id: run.id, err: errMsg },
|
|
346
|
+
)
|
|
347
|
+
throw e
|
|
348
|
+
}
|
|
349
|
+
}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import type { Surreal } from 'surrealdb'
|
|
2
|
+
import { getLogger } from '../logger.ts'
|
|
3
|
+
import type { MemoryNode } from '../types.ts'
|
|
4
|
+
|
|
5
|
+
const log = getLogger(['suemo', 'cognitive', 'contradiction'])
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Given a newly created belief node, find the most semantically similar
|
|
9
|
+
* existing active belief. If similarity > threshold AND an LLM call (or
|
|
10
|
+
* heuristic) determines they conflict, return the old node. Otherwise null.
|
|
11
|
+
*
|
|
12
|
+
* In v1, we use cosine similarity threshold only (no LLM call).
|
|
13
|
+
* The LLM contradiction scoring is a v2 enhancement.
|
|
14
|
+
*/
|
|
15
|
+
export async function detectContradiction(
|
|
16
|
+
db: Surreal,
|
|
17
|
+
newNode: MemoryNode,
|
|
18
|
+
similarityThreshold = 0.80,
|
|
19
|
+
): Promise<MemoryNode | null> {
|
|
20
|
+
if (newNode.kind !== 'belief') return null
|
|
21
|
+
|
|
22
|
+
log.debug('detectContradiction()', { nodeId: newNode.id })
|
|
23
|
+
|
|
24
|
+
const candidates = await db.query<[{ id: string; score: number }[]]>(
|
|
25
|
+
`
|
|
26
|
+
SELECT id, vector::similarity::cosine(embedding, $emb) AS score
|
|
27
|
+
FROM memory
|
|
28
|
+
WHERE kind = 'belief'
|
|
29
|
+
AND id != $self
|
|
30
|
+
AND (valid_until = NONE OR valid_until > time::now())
|
|
31
|
+
ORDER BY embedding <|3, 20|> $emb
|
|
32
|
+
LIMIT 3
|
|
33
|
+
`,
|
|
34
|
+
{ emb: newNode.embedding, self: newNode.id },
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
const top = candidates[0]
|
|
38
|
+
if (!top || top.length === 0) return null
|
|
39
|
+
|
|
40
|
+
const best = top[0]!
|
|
41
|
+
if (best.score < similarityThreshold) return null
|
|
42
|
+
|
|
43
|
+
// Fetch full node
|
|
44
|
+
const result = await db.query<[MemoryNode[]]>('SELECT * FROM $id', { id: best.id })
|
|
45
|
+
const existing = result[0]?.[0]
|
|
46
|
+
if (!existing) return null
|
|
47
|
+
|
|
48
|
+
log.info('Potential contradiction found', { old: existing.id, score: best.score })
|
|
49
|
+
return existing
|
|
50
|
+
}
|