@simbimbo/memory-ocmemog 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/LICENSE +21 -0
- package/README.md +223 -0
- package/brain/__init__.py +1 -0
- package/brain/runtime/__init__.py +13 -0
- package/brain/runtime/config.py +21 -0
- package/brain/runtime/inference.py +83 -0
- package/brain/runtime/instrumentation.py +17 -0
- package/brain/runtime/memory/__init__.py +13 -0
- package/brain/runtime/memory/api.py +152 -0
- package/brain/runtime/memory/artifacts.py +33 -0
- package/brain/runtime/memory/candidate.py +89 -0
- package/brain/runtime/memory/context_builder.py +87 -0
- package/brain/runtime/memory/conversation_state.py +1825 -0
- package/brain/runtime/memory/distill.py +198 -0
- package/brain/runtime/memory/embedding_engine.py +94 -0
- package/brain/runtime/memory/freshness.py +91 -0
- package/brain/runtime/memory/health.py +42 -0
- package/brain/runtime/memory/integrity.py +170 -0
- package/brain/runtime/memory/interaction_memory.py +57 -0
- package/brain/runtime/memory/memory_consolidation.py +60 -0
- package/brain/runtime/memory/memory_gate.py +38 -0
- package/brain/runtime/memory/memory_graph.py +54 -0
- package/brain/runtime/memory/memory_links.py +109 -0
- package/brain/runtime/memory/memory_salience.py +235 -0
- package/brain/runtime/memory/memory_synthesis.py +33 -0
- package/brain/runtime/memory/memory_taxonomy.py +35 -0
- package/brain/runtime/memory/person_identity.py +83 -0
- package/brain/runtime/memory/person_memory.py +138 -0
- package/brain/runtime/memory/pondering_engine.py +577 -0
- package/brain/runtime/memory/promote.py +237 -0
- package/brain/runtime/memory/provenance.py +356 -0
- package/brain/runtime/memory/reinforcement.py +73 -0
- package/brain/runtime/memory/retrieval.py +153 -0
- package/brain/runtime/memory/semantic_search.py +66 -0
- package/brain/runtime/memory/sentiment_memory.py +67 -0
- package/brain/runtime/memory/store.py +400 -0
- package/brain/runtime/memory/tool_catalog.py +68 -0
- package/brain/runtime/memory/unresolved_state.py +93 -0
- package/brain/runtime/memory/vector_index.py +270 -0
- package/brain/runtime/model_roles.py +11 -0
- package/brain/runtime/model_router.py +22 -0
- package/brain/runtime/providers.py +59 -0
- package/brain/runtime/security/__init__.py +3 -0
- package/brain/runtime/security/redaction.py +14 -0
- package/brain/runtime/state_store.py +25 -0
- package/brain/runtime/storage_paths.py +41 -0
- package/docs/architecture/memory.md +118 -0
- package/docs/release-checklist.md +34 -0
- package/docs/reports/ocmemog-code-audit-2026-03-14.md +155 -0
- package/docs/usage.md +223 -0
- package/index.ts +726 -0
- package/ocmemog/__init__.py +1 -0
- package/ocmemog/sidecar/__init__.py +1 -0
- package/ocmemog/sidecar/app.py +1068 -0
- package/ocmemog/sidecar/compat.py +74 -0
- package/ocmemog/sidecar/transcript_watcher.py +425 -0
- package/openclaw.plugin.json +18 -0
- package/package.json +60 -0
- package/scripts/install-ocmemog.sh +277 -0
- package/scripts/launchagents/com.openclaw.ocmemog.guard.plist +22 -0
- package/scripts/launchagents/com.openclaw.ocmemog.ponder.plist +22 -0
- package/scripts/launchagents/com.openclaw.ocmemog.sidecar.plist +27 -0
- package/scripts/ocmemog-context.sh +15 -0
- package/scripts/ocmemog-continuity-benchmark.py +178 -0
- package/scripts/ocmemog-demo.py +122 -0
- package/scripts/ocmemog-failover-test.sh +17 -0
- package/scripts/ocmemog-guard.sh +11 -0
- package/scripts/ocmemog-install.sh +93 -0
- package/scripts/ocmemog-load-test.py +106 -0
- package/scripts/ocmemog-ponder.sh +30 -0
- package/scripts/ocmemog-recall-test.py +58 -0
- package/scripts/ocmemog-reindex-vectors.py +14 -0
- package/scripts/ocmemog-reliability-soak.py +177 -0
- package/scripts/ocmemog-sidecar.sh +46 -0
- package/scripts/ocmemog-soak-report.py +58 -0
- package/scripts/ocmemog-soak-test.py +44 -0
- package/scripts/ocmemog-test-rig.py +345 -0
- package/scripts/ocmemog-transcript-append.py +45 -0
- package/scripts/ocmemog-transcript-watcher.py +8 -0
- package/scripts/ocmemog-transcript-watcher.sh +7 -0
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List, Tuple
|
|
4
|
+
|
|
5
|
+
from brain.runtime import state_store
|
|
6
|
+
from brain.runtime.instrumentation import emit_event
|
|
7
|
+
from brain.runtime.memory import memory_taxonomy
|
|
8
|
+
|
|
9
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _cluster_key(record: Dict[str, object]) -> Tuple[str, str]:
    """Build the (memory_type, content-anchor) key used to group records.

    The anchor is the first 48 characters of the content, lowercased, so
    records with near-identical openings fall into the same cluster.
    """
    text = str(record.get("content") or "")
    return memory_taxonomy.classify_memory_type(record), text[:48].lower()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def consolidate_memories(records: List[Dict[str, object]], max_clusters: int = 5) -> Dict[str, object]:
    """Cluster memory records and derive per-cluster summaries.

    Records with empty content are skipped. At most ``max_clusters`` distinct
    clusters are created; once the cap is reached, remaining records are still
    folded into clusters that already exist, but no new cluster is opened.
    (The previous implementation stopped scanning entirely at the cap, which
    silently dropped later records that belonged to existing clusters.)

    Returns a dict with:
      - ``"consolidated"``: one summary entry per cluster
      - ``"reinforcement"``: a weight update per cluster (weight saturates
        at 1.0 once a cluster holds 5 items)
    """
    emit_event(LOGFILE, "brain_memory_consolidation_start", status="ok")
    clusters: Dict[Tuple[str, str], List[Dict[str, object]]] = {}
    for record in records:
        content = str(record.get("content") or "").strip()
        if not content:
            continue
        key = _cluster_key(record)
        if key not in clusters and len(clusters) >= max_clusters:
            # Cap only the number of *new* clusters; existing ones keep growing.
            continue
        clusters.setdefault(key, []).append(record)
    consolidated: List[Dict[str, object]] = []
    reinforcement_updates: List[Dict[str, object]] = []
    for key, items in clusters.items():
        mem_type, anchor = key
        summary = f"{mem_type} cluster: {anchor}"
        references = [str(item.get("reference") or "") for item in items if str(item.get("reference") or "")]
        consolidated.append(
            {
                "memory_type": mem_type,
                "summary": summary,
                "count": len(items),
                "references": references,
                "candidate_kinds": sorted({str(item.get("candidate_kind") or "memory") for item in items}),
            }
        )
        reinforcement_updates.append(
            {
                "memory_type": mem_type,
                # Saturates at 1.0 for clusters of 5+ items.
                "weight": min(1.0, len(items) / 5.0),
                "references": references,
            }
        )
        emit_event(
            LOGFILE,
            "brain_memory_consolidation_cluster",
            status="ok",
            memory_type=mem_type,
            count=len(items),
        )
    emit_event(LOGFILE, "brain_memory_consolidation_complete", status="ok", cluster_count=len(consolidated))
    return {"consolidated": consolidated, "reinforcement": reinforcement_updates}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import Dict
|
|
5
|
+
|
|
6
|
+
from brain.runtime import state_store
|
|
7
|
+
from brain.runtime.instrumentation import emit_event
|
|
8
|
+
|
|
9
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
10
|
+
|
|
11
|
+
DIRECT_THRESHOLD = float(os.environ.get("BRAIN_MEMORY_GATE_DIRECT", 1.5))
|
|
12
|
+
ASSIST_THRESHOLD = float(os.environ.get("BRAIN_MEMORY_GATE_ASSIST", 0.8))
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def decide_gate(result: Dict[str, float]) -> Dict[str, float | str]:
    """Route a retrieval result to direct recall, assisted recall, or escalation.

    The routing score is the plain sum of similarity (falling back to the
    legacy ``"score"`` key when ``"similarity"`` is absent), reinforcement
    weight, freshness, and promotion confidence, compared against the
    env-tunable DIRECT/ASSIST thresholds.
    """
    if "similarity" in result:
        similarity = float(result["similarity"])
    else:
        similarity = float(result.get("score", 0.0))
    reinforcement = float(result.get("reinforcement_weight", 0.0))
    freshness = float(result.get("freshness", 0.0))
    promotion = float(result.get("promotion_confidence", 0.0))
    score = similarity + reinforcement + freshness + promotion
    if score >= DIRECT_THRESHOLD:
        decision = "memory_direct"
    elif score >= ASSIST_THRESHOLD:
        decision = "memory_assisted"
    else:
        decision = "model_escalation"
    rounded = round(score, 3)
    payload: Dict[str, float | str] = {
        "decision": decision,
        "score": rounded,
        "similarity": similarity,
        "reinforcement_weight": reinforcement,
        "freshness": freshness,
        "promotion_confidence": promotion,
        "salience_score": float(result.get("salience_score", 0.0)),
    }
    emit_event(LOGFILE, "brain_memory_gate_decision", status="ok", decision=decision, score=rounded)
    emit_event(LOGFILE, "brain_memory_gate_score", status="ok", score=rounded)
    return payload
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import sqlite3
|
|
4
|
+
from typing import Dict, List
|
|
5
|
+
|
|
6
|
+
from brain.runtime import state_store
|
|
7
|
+
from brain.runtime.instrumentation import emit_event
|
|
8
|
+
|
|
9
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _connect() -> sqlite3.Connection:
    """Open the memory-graph database, creating the file and table on first use."""
    db_path = state_store.data_dir() / "memory_graph.db"
    db_path.parent.mkdir(parents=True, exist_ok=True)
    connection = sqlite3.connect(str(db_path))
    # Row factory lets callers address columns by name as well as index.
    connection.row_factory = sqlite3.Row
    connection.execute(
        """
        CREATE TABLE IF NOT EXISTS memory_graph (
            source_reference TEXT NOT NULL,
            edge_type TEXT NOT NULL,
            target_reference TEXT NOT NULL,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            UNIQUE(source_reference, edge_type, target_reference)
        )
        """
    )
    return connection
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def add_memory_edge(source_reference: str, edge_type: str, target_reference: str) -> None:
    """Insert a directed graph edge; idempotent via INSERT OR IGNORE.

    Emits a telemetry event after the connection is closed. The connection
    is now closed in a ``finally`` so it is not leaked when execute/commit
    raises (the original left the handle open on error).
    """
    conn = _connect()
    try:
        conn.execute(
            "INSERT OR IGNORE INTO memory_graph (source_reference, edge_type, target_reference) VALUES (?, ?, ?)",
            (source_reference, edge_type, target_reference),
        )
        conn.commit()
    finally:
        conn.close()
    emit_event(LOGFILE, "brain_memory_graph_edge_created", status="ok", edge_type=edge_type)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def get_neighbors(source_reference: str) -> List[Dict[str, str]]:
    """Return outgoing edges of *source_reference* as edge_type/target dicts.

    The connection is closed in a ``finally`` so a failing query no longer
    leaks the handle (the original only closed on the success path).
    """
    conn = _connect()
    try:
        rows = conn.execute(
            "SELECT edge_type, target_reference FROM memory_graph WHERE source_reference=?",
            (source_reference,),
        ).fetchall()
    finally:
        conn.close()
    return [{"edge_type": row[0], "target_reference": row[1]} for row in rows]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def get_cluster(source_reference: str, limit: int = 5) -> List[str]:
    """Return the target references of up to *limit* neighbors of the source."""
    return [edge["target_reference"] for edge in get_neighbors(source_reference)[:limit]]
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import sqlite3
|
|
4
|
+
from typing import Dict, List
|
|
5
|
+
|
|
6
|
+
from brain.runtime import state_store
|
|
7
|
+
from brain.runtime.instrumentation import emit_event
|
|
8
|
+
from brain.runtime.memory import store
|
|
9
|
+
|
|
10
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _dedupe_memory_links(conn) -> None:
    """Delete duplicate rows from memory_links, keeping the oldest per triple.

    For each (source_reference, link_type, target_reference) group only the
    row with the smallest rowid (the earliest insert) survives. Used to make
    the table clean enough for the unique index in _ensure_table to build.
    Does not commit; the caller owns the transaction.
    """
    conn.execute(
        """
        DELETE FROM memory_links
        WHERE rowid NOT IN (
            SELECT MIN(rowid)
            FROM memory_links
            GROUP BY source_reference, link_type, target_reference
        )
        """
    )
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _ensure_table(conn) -> None:
    """Create the memory_links table and its unique index if missing.

    If building the unique index fails with IntegrityError (pre-existing
    duplicate triples from before the constraint existed), the duplicates
    are removed, the partial index is dropped, and the index is rebuilt.
    Commits at the end so the schema is durable before callers write.
    """
    conn.execute(
        """
        CREATE TABLE IF NOT EXISTS memory_links (
            source_reference TEXT NOT NULL,
            link_type TEXT NOT NULL,
            target_reference TEXT NOT NULL,
            created_at TEXT NOT NULL DEFAULT (datetime('now')),
            UNIQUE(source_reference, link_type, target_reference)
        )
        """
    )
    try:
        conn.execute(
            "CREATE UNIQUE INDEX IF NOT EXISTS idx_memory_links_unique ON memory_links(source_reference, link_type, target_reference)"
        )
    except sqlite3.IntegrityError:
        # Legacy rows may violate uniqueness; clean up and retry the index.
        _dedupe_memory_links(conn)
        conn.execute("DROP INDEX IF EXISTS idx_memory_links_unique")
        conn.execute(
            "CREATE UNIQUE INDEX IF NOT EXISTS idx_memory_links_unique ON memory_links(source_reference, link_type, target_reference)"
        )
    conn.commit()
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def add_memory_link(source_reference: str, link_type: str, target_reference: str) -> None:
    """Insert a memory link; idempotent via INSERT OR IGNORE.

    The connection is closed in a ``finally`` so it is not leaked when
    table creation, the insert, or the commit raises (the original only
    closed on the success path).
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        conn.execute(
            "INSERT OR IGNORE INTO memory_links (source_reference, link_type, target_reference) VALUES (?, ?, ?)",
            (source_reference, link_type, target_reference),
        )
        conn.commit()
    finally:
        conn.close()
    emit_event(LOGFILE, "brain_memory_link_created", status="ok", link_type=link_type)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_memory_links(source_reference: str) -> List[Dict[str, str]]:
    """Return outgoing links of *source_reference* as link_type/target dicts.

    Connection closed in ``finally`` to avoid leaking the handle when the
    query raises (the original only closed on the success path).
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        rows = conn.execute(
            "SELECT link_type, target_reference FROM memory_links WHERE source_reference=?",
            (source_reference,),
        ).fetchall()
    finally:
        conn.close()
    return [{"link_type": row[0], "target_reference": row[1]} for row in rows]
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def get_memory_links_for_target(target_reference: str) -> List[Dict[str, str]]:
    """Return links pointing at *target_reference*, newest first.

    Connection closed in ``finally`` to avoid leaking the handle when the
    query raises (the original only closed on the success path).
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        rows = conn.execute(
            "SELECT source_reference, link_type, target_reference FROM memory_links WHERE target_reference=? ORDER BY created_at DESC",
            (target_reference,),
        ).fetchall()
    finally:
        conn.close()
    return [
        {
            "source_reference": row[0],
            "link_type": row[1],
            "target_reference": row[2],
        }
        for row in rows
    ]
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def get_memory_links_for_thread(thread_id: str) -> List[Dict[str, str]]:
    """Links whose target is the given thread (``thread:<id>`` reference)."""
    reference = f"thread:{thread_id}"
    return get_memory_links_for_target(reference)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def get_memory_links_for_session(session_id: str) -> List[Dict[str, str]]:
    """Links whose target is the given session (``session:<id>`` reference)."""
    reference = f"session:{session_id}"
    return get_memory_links_for_target(reference)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def get_memory_links_for_conversation(conversation_id: str) -> List[Dict[str, str]]:
    """Links whose target is the given conversation (``conversation:<id>``)."""
    reference = f"conversation:{conversation_id}"
    return get_memory_links_for_target(reference)
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def count_memory_links() -> int:
    """Return the total number of rows in memory_links (0 when empty).

    Connection closed in ``finally`` so a failing COUNT query no longer
    leaks the handle.
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        row = conn.execute("SELECT COUNT(*) FROM memory_links").fetchone()
    finally:
        conn.close()
    return int(row[0]) if row else 0
|
|
@@ -0,0 +1,235 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List, Mapping, Sequence
|
|
4
|
+
|
|
5
|
+
from brain.runtime import state_store
|
|
6
|
+
from brain.runtime.instrumentation import emit_event
|
|
7
|
+
from brain.runtime.memory import freshness
|
|
8
|
+
|
|
9
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def score_salience(record: Mapping[str, float]) -> Dict[str, float | bool]:
    """Combine salience signals into a bounded score with derived fields.

    Each signal is read from *record* with a per-signal default; the raw
    sum is clamped to [0.0, 3.0]. ``activation_strength`` is the score
    normalized to [0, 1]; ``attention_trigger`` fires at score >= 1.5.
    """
    signal_defaults = (
        ("importance", 0.2),
        ("novelty", 0.1),
        ("uncertainty", 0.1),
        ("risk", 0.0),
        ("goal_alignment", 0.1),
        ("reinforcement", 0.0),
        ("user_interest", 0.0),
        ("freshness", 0.0),
        ("signal_priority", 0.0),
    )
    raw_total = sum(float(record.get(name, fallback)) for name, fallback in signal_defaults)
    salience_score = max(0.0, min(3.0, raw_total))
    activation_strength = min(1.0, salience_score / 3.0)
    attention_trigger = salience_score >= 1.5
    emit_event(LOGFILE, "brain_memory_salience_scored", status="ok", score=salience_score)
    emit_event(LOGFILE, "brain_memory_salience_updated", status="ok", score=salience_score)
    return {
        "salience_score": round(salience_score, 3),
        "activation_strength": round(activation_strength, 3),
        "attention_trigger": attention_trigger,
    }
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _as_float(value: Any, default: float = 0.0) -> float:
|
|
38
|
+
try:
|
|
39
|
+
return float(value)
|
|
40
|
+
except Exception:
|
|
41
|
+
return default
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _content_text(record: Mapping[str, Any]) -> str:
|
|
45
|
+
return str(record.get("effective_content") or record.get("content") or "").strip()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _normalized_position(record_id: int, *, latest_id: int, earliest_id: int) -> float:
|
|
49
|
+
if latest_id <= earliest_id:
|
|
50
|
+
return 1.0
|
|
51
|
+
return max(0.0, min(1.0, (record_id - earliest_id) / float(latest_id - earliest_id)))
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def score_turn_salience(
    turn: Mapping[str, Any],
    *,
    latest_turn_id: int | None = None,
    earliest_turn_id: int | None = None,
    active_branch_id: str | None = None,
    reply_chain_turn_ids: Sequence[int] | None = None,
) -> Dict[str, Any]:
    """Score one conversation turn's salience and return it with turn context.

    Heuristic weights favor user turns, questions, resolutions, and turns on
    the active branch or reply chain; freshness is the turn's normalized
    position within [earliest_turn_id, latest_turn_id]. The returned dict
    merges score_salience's output with reference/id/role/content/branch
    fields from the turn.
    """
    # Tolerate missing/non-dict metadata and resolution payloads.
    metadata = turn.get("metadata") if isinstance(turn.get("metadata"), dict) else {}
    resolution = metadata.get("resolution") if isinstance(metadata.get("resolution"), dict) else {}
    role = str(turn.get("role") or "")
    content = _content_text(turn)
    turn_id = int(turn.get("id") or 0)
    branch_id = str(metadata.get("branch_id") or "")
    # Default both bounds to this turn's id (or 1) so a lone turn scores as newest.
    latest_turn_id = int(latest_turn_id or turn_id or 1)
    earliest_turn_id = int(earliest_turn_id or turn_id or 1)
    reply_chain_ids = {int(item) for item in (reply_chain_turn_ids or []) if int(item or 0) > 0}
    freshness_score = _normalized_position(turn_id or earliest_turn_id, latest_id=latest_turn_id, earliest_id=earliest_turn_id)

    # Signal weights (heuristic constants chosen by the authors):
    importance = 0.55 if role == "user" else 0.35
    # Branch roots (depth 0 with an explicit branch id) count as novel.
    novelty = 0.25 if metadata.get("branch_depth") == 0 and branch_id else 0.1
    uncertainty = 0.45 if "?" in content else 0.0
    risk = 0.35 if resolution.get("decision") == "decline" else 0.0
    goal_alignment = 0.45 if role == "user" else 0.0
    if resolution:
        goal_alignment += 0.2
    # Commitment/request phrasing bumps goal alignment.
    if any(token in content.lower() for token in ("i will", "i'll", "let me", "next", "need to", "please", "can you")):
        goal_alignment += 0.2
    reinforcement = 0.2 if resolution.get("decision") == "confirm" else 0.0
    user_interest = 0.3 if role == "user" else 0.0
    signal_priority = 0.0
    if active_branch_id and branch_id and branch_id == active_branch_id:
        signal_priority += 0.45
    if turn_id and turn_id in reply_chain_ids:
        signal_priority += 0.35
    if metadata.get("reply_to_turn_id"):
        signal_priority += 0.1

    # goal_alignment and signal_priority are capped before aggregation.
    scored = score_salience(
        {
            "importance": importance,
            "novelty": novelty,
            "uncertainty": uncertainty,
            "risk": risk,
            "goal_alignment": min(goal_alignment, 0.8),
            "reinforcement": reinforcement,
            "user_interest": user_interest,
            "freshness": freshness_score,
            "signal_priority": min(signal_priority, 0.9),
        }
    )
    return {
        **dict(scored),
        "reference": turn.get("reference"),
        "id": turn.get("id"),
        "role": role,
        "content": content,
        "branch_id": branch_id or None,
        "resolution": resolution or None,
    }
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def score_checkpoint_salience(
    checkpoint: Mapping[str, Any],
    *,
    latest_checkpoint_id: int | None = None,
    active_branch_id: str | None = None,
) -> Dict[str, Any]:
    """Score one conversation checkpoint's salience and return it with context.

    Weights favor checkpoints with open loops, pending actions, a latest
    user ask, and membership on the active branch; freshness is the ratio
    of this checkpoint's id to the latest id. The returned dict merges
    score_salience's output with reference/id/summary/branch fields.
    """
    # Tolerate missing/non-dict metadata and active-branch payloads.
    metadata = checkpoint.get("metadata") if isinstance(checkpoint.get("metadata"), dict) else {}
    active_branch = metadata.get("active_branch") if isinstance(metadata.get("active_branch"), dict) else {}
    checkpoint_id = int(checkpoint.get("id") or 0)
    # Default the latest id to this checkpoint (or 1) so a lone checkpoint is newest.
    latest_checkpoint_id = int(latest_checkpoint_id or checkpoint_id or 1)
    freshness_score = 1.0 if latest_checkpoint_id <= 0 else max(0.0, min(1.0, checkpoint_id / float(latest_checkpoint_id or 1)))
    open_loops = checkpoint.get("open_loops") if isinstance(checkpoint.get("open_loops"), list) else []
    pending_actions = checkpoint.get("pending_actions") if isinstance(checkpoint.get("pending_actions"), list) else []
    latest_user_ask = str(checkpoint.get("latest_user_ask") or "").strip()
    commitment = str(checkpoint.get("last_assistant_commitment") or "").strip()

    # Signal weights (heuristic constants chosen by the authors):
    importance = 0.45 + min(0.3, len(open_loops) * 0.08)
    novelty = 0.1 + (0.15 if int(checkpoint.get("depth") or 0) == 0 else 0.0)
    uncertainty = 0.3 if "?" in latest_user_ask else 0.0
    risk = min(0.45, len(pending_actions) * 0.08)
    goal_alignment = 0.25 + (0.2 if latest_user_ask else 0.0) + (0.15 if commitment else 0.0)
    reinforcement = 0.0
    user_interest = 0.25 if latest_user_ask else 0.0
    signal_priority = 0.0
    if active_branch_id and str(active_branch.get("branch_id") or "") == active_branch_id:
        signal_priority += 0.35
    if open_loops:
        signal_priority += 0.25

    # importance, goal_alignment and signal_priority are capped before aggregation.
    scored = score_salience(
        {
            "importance": min(importance, 0.8),
            "novelty": novelty,
            "uncertainty": uncertainty,
            "risk": risk,
            "goal_alignment": min(goal_alignment, 0.8),
            "reinforcement": reinforcement,
            "user_interest": user_interest,
            "freshness": freshness_score,
            "signal_priority": min(signal_priority, 0.8),
        }
    )
    return {
        **dict(scored),
        "reference": checkpoint.get("reference"),
        "id": checkpoint.get("id"),
        "summary": str(checkpoint.get("summary") or "").strip(),
        "active_branch_id": active_branch.get("branch_id"),
    }
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def rank_turns_by_salience(
    turns: Sequence[Mapping[str, Any]],
    *,
    active_branch_id: str | None = None,
    reply_chain_turn_ids: Sequence[int] | None = None,
    limit: int | None = None,
) -> List[Dict[str, Any]]:
    """Score every turn and return them sorted by salience (ties: higher id first).

    Each result is ``{"turn": <copy of turn>, "salience": <score dict>}``.
    A falsy *limit* returns the full ranking.
    """
    pool = list(turns)
    if not pool:
        return []
    positive_ids = [int(entry.get("id") or 0) for entry in pool]
    positive_ids = [value for value in positive_ids if value > 0]
    newest = max(positive_ids) if positive_ids else 1
    oldest = min(positive_ids) if positive_ids else newest
    ranked: List[Dict[str, Any]] = [
        {
            "turn": dict(entry),
            "salience": score_turn_salience(
                entry,
                latest_turn_id=newest,
                earliest_turn_id=oldest,
                active_branch_id=active_branch_id,
                reply_chain_turn_ids=reply_chain_turn_ids,
            ),
        }
        for entry in pool
    ]
    ranked.sort(
        key=lambda item: (
            _as_float(item["salience"].get("salience_score")),
            _as_float((item["turn"] or {}).get("id")),
        ),
        reverse=True,
    )
    return ranked[: limit or len(ranked)]
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def rank_checkpoints_by_salience(
    checkpoints: Sequence[Mapping[str, Any]],
    *,
    active_branch_id: str | None = None,
    limit: int | None = None,
) -> List[Dict[str, Any]]:
    """Score every checkpoint and return them sorted by salience (ties: higher id).

    Each result is ``{"checkpoint": <copy>, "salience": <score dict>}``.
    A falsy *limit* returns the full ranking.
    """
    pool = list(checkpoints)
    if not pool:
        return []
    newest = max(int(entry.get("id") or 0) for entry in pool) or 1
    ranked: List[Dict[str, Any]] = [
        {
            "checkpoint": dict(entry),
            "salience": score_checkpoint_salience(
                entry,
                latest_checkpoint_id=newest,
                active_branch_id=active_branch_id,
            ),
        }
        for entry in pool
    ]
    ranked.sort(
        key=lambda item: (
            _as_float(item["salience"].get("salience_score")),
            _as_float((item["checkpoint"] or {}).get("id")),
        ),
        reverse=True,
    )
    return ranked[: limit or len(ranked)]
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def scan_salient_memories(limit: int = 5) -> List[Dict[str, float | bool]]:
    """Score freshness advisories and keep those that trip the attention trigger."""
    advisories = freshness.scan_freshness(limit=limit).get("advisories", [])
    triggered: List[Dict[str, float | bool]] = []
    for entry in advisories:
        scored = score_salience({"freshness": float(entry.get("freshness_score", 0.0))})
        if scored.get("attention_trigger"):
            triggered.append(scored)
    return triggered[:limit]
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List
|
|
4
|
+
|
|
5
|
+
from brain.runtime import state_store
|
|
6
|
+
from brain.runtime.instrumentation import emit_event
|
|
7
|
+
from brain.runtime.memory import reinforcement
|
|
8
|
+
|
|
9
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
SYNTHESIS_TYPES = [
|
|
13
|
+
"theme_summary",
|
|
14
|
+
"user_preference",
|
|
15
|
+
"candidate_procedure",
|
|
16
|
+
"recurring_pattern",
|
|
17
|
+
"contradiction_candidate",
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def synthesize_memory_patterns(limit: int = 5) -> List[Dict[str, str]]:
    """Turn recent experience counts into recurring-pattern summaries.

    Emits start/complete telemetry and returns at most *limit* entries.
    """
    emit_event(LOGFILE, "brain_memory_synthesis_start", status="ok")
    stats = reinforcement.list_recent_experiences(limit=limit)
    results: List[Dict[str, str]] = [
        {
            "type": "recurring_pattern",
            "summary": f"{key} occurred {count} times",
        }
        for key, count in stats.items()
    ]
    emit_event(LOGFILE, "brain_memory_synthesis_complete", status="ok", count=len(results))
    return results[:limit]
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List
|
|
4
|
+
|
|
5
|
+
from brain.runtime import state_store
|
|
6
|
+
from brain.runtime.instrumentation import emit_event
|
|
7
|
+
|
|
8
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
9
|
+
|
|
10
|
+
MEMORY_TYPES = [
|
|
11
|
+
"episodic",
|
|
12
|
+
"semantic",
|
|
13
|
+
"procedural",
|
|
14
|
+
"relationship",
|
|
15
|
+
"working",
|
|
16
|
+
]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def list_memory_types() -> List[str]:
    """Return a defensive copy of the supported memory type names."""
    return MEMORY_TYPES.copy()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def classify_memory_type(record: Dict[str, object]) -> str:
    """Classify a record into one of MEMORY_TYPES via content heuristics.

    Content keywords take precedence over any explicit ``memory_type`` field;
    records sourced from working memory fall back to "working"; everything
    else defaults to "semantic". Emits a telemetry event per classification.
    """
    # Lowercase once instead of per-comparison.
    lowered = str(record.get("content") or "").lower()
    if "how to" in lowered or "step" in lowered:
        memory_type = "procedural"
    elif "met" in lowered or "relationship" in lowered:
        memory_type = "relationship"
    elif record.get("memory_type") in MEMORY_TYPES:
        memory_type = str(record.get("memory_type"))
    elif record.get("source") == "working":
        memory_type = "working"
    else:
        memory_type = "semantic"
    emit_event(LOGFILE, "brain_memory_taxonomy_assigned", status="ok", memory_type=memory_type)
    return memory_type
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, Tuple
|
|
4
|
+
import re
|
|
5
|
+
|
|
6
|
+
from brain.runtime import state_store
|
|
7
|
+
from brain.runtime.instrumentation import emit_event
|
|
8
|
+
from brain.runtime.memory import person_memory
|
|
9
|
+
|
|
10
|
+
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def extract_intro_candidate(text: str) -> str | None:
    """Extract a self-introduced name from free text, or None.

    Handles "you can call me X" / "call me X" anywhere in the text (single
    token only), and "my name is ...", "i am ...", "i'm ..." when the whole
    message starts with them. Returns the candidate in Title Case.
    """
    if not isinstance(text, str):
        return None
    cleaned = text.strip()
    if not cleaned:
        return None
    # Normalize case and trim terminal punctuation before matching.
    lowered = cleaned.lower().strip(".!")
    for call_pattern in (r"\byou can call me\s+([a-z][a-z\-']*)", r"\bcall me\s+([a-z][a-z\-']*)"):
        found = re.search(call_pattern, lowered)
        if found:
            return found.group(1).strip(" .!").title()
    for intro_pattern in (r"^my name is (.+)$", r"^i am (.+)$", r"^i'm (.+)$"):
        found = re.match(intro_pattern, lowered)
        if found:
            # Keep only the clause before any "but ..." continuation.
            candidate = found.group(1).split(" but ")[0].strip(" .!")
            return candidate.title() if candidate else None
    return None
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def extract_name_candidate(text: str) -> str | None:
    """Return an intro-pattern name if present, else treat short text as a name.

    Falls back to accepting 1–3 whitespace-separated tokens (periods removed)
    as a literal name; longer or empty input yields None.
    """
    intro = extract_intro_candidate(text)
    if intro:
        return intro
    if not isinstance(text, str):
        return None
    stripped = text.strip()
    if not stripped:
        return None
    words = stripped.replace(".", "").split()
    return " ".join(words) if 1 <= len(words) <= 3 else None
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def extract_operator_name(text: str) -> str | None:
    """Resolve an operator name, preferring the LLM parser over regex heuristics."""
    # Imported lazily to avoid a circular import at module load.
    from brain.runtime import inference

    parsed = inference.parse_operator_name(text)
    candidate = parsed.get("name") if isinstance(parsed, dict) else ""
    if isinstance(candidate, str):
        trimmed = candidate.strip()
        if trimmed:
            return trimmed
    # LLM gave nothing usable; fall back to the pattern-based extractor.
    return extract_name_candidate(text)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def resolve_interaction_person(metadata: Dict[str, str]) -> Tuple[Dict[str, object] | None, float, bool]:
    """Resolve the person behind an interaction from name, email, or phone.

    Resolution order: a name candidate extracted from metadata["name"]
    (creating a new person record if none exists, confidence 0.7), then
    email lookup, then phone lookup (confidence 0.6 each when found).
    Returns (person-or-None, confidence, ask_name_required) where
    ask_name_required is True whenever confidence < 0.5. Emits a resolved
    or uncertain telemetry event either way.
    """
    person = None
    confidence = 0.0
    name_input = metadata.get("name") or ""
    name_candidate = extract_name_candidate(name_input) if name_input else None
    if name_candidate:
        # Name wins: fetch the existing person or create one on the spot.
        person = person_memory.get_person(name_candidate) or person_memory.create_person(name_candidate, name_candidate)
        confidence = 0.7
    if not person and metadata.get("email"):
        person = person_memory.find_person_by_email(metadata["email"])
        confidence = 0.6 if person else 0.0
    if not person and metadata.get("phone"):
        person = person_memory.find_person_by_phone(metadata["phone"])
        confidence = 0.6 if person else 0.0
    # Below 0.5 confidence the caller should ask the user for their name.
    ask_name_required = confidence < 0.5
    if person:
        emit_event(LOGFILE, "brain_person_identity_resolved", status="ok", person_id=person.get("person_id"))
    else:
        emit_event(LOGFILE, "brain_person_identity_uncertain", status="ok")
    return person, confidence, ask_name_required
|