@simbimbo/memory-ocmemog 0.1.11 → 0.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. package/CHANGELOG.md +30 -0
  2. package/README.md +83 -18
  3. package/brain/runtime/__init__.py +2 -12
  4. package/brain/runtime/config.py +1 -24
  5. package/brain/runtime/inference.py +1 -151
  6. package/brain/runtime/instrumentation.py +1 -15
  7. package/brain/runtime/memory/__init__.py +3 -13
  8. package/brain/runtime/memory/api.py +1 -1219
  9. package/brain/runtime/memory/candidate.py +1 -185
  10. package/brain/runtime/memory/conversation_state.py +1 -1823
  11. package/brain/runtime/memory/distill.py +1 -344
  12. package/brain/runtime/memory/embedding_engine.py +1 -92
  13. package/brain/runtime/memory/freshness.py +1 -112
  14. package/brain/runtime/memory/health.py +1 -40
  15. package/brain/runtime/memory/integrity.py +1 -186
  16. package/brain/runtime/memory/memory_consolidation.py +1 -58
  17. package/brain/runtime/memory/memory_links.py +1 -107
  18. package/brain/runtime/memory/memory_salience.py +1 -233
  19. package/brain/runtime/memory/memory_synthesis.py +1 -31
  20. package/brain/runtime/memory/memory_taxonomy.py +1 -33
  21. package/brain/runtime/memory/pondering_engine.py +1 -654
  22. package/brain/runtime/memory/promote.py +1 -277
  23. package/brain/runtime/memory/provenance.py +1 -406
  24. package/brain/runtime/memory/reinforcement.py +1 -71
  25. package/brain/runtime/memory/retrieval.py +1 -210
  26. package/brain/runtime/memory/semantic_search.py +1 -64
  27. package/brain/runtime/memory/store.py +1 -429
  28. package/brain/runtime/memory/unresolved_state.py +1 -91
  29. package/brain/runtime/memory/vector_index.py +1 -323
  30. package/brain/runtime/model_roles.py +1 -9
  31. package/brain/runtime/model_router.py +1 -22
  32. package/brain/runtime/providers.py +1 -66
  33. package/brain/runtime/security/redaction.py +1 -12
  34. package/brain/runtime/state_store.py +1 -23
  35. package/brain/runtime/storage_paths.py +1 -39
  36. package/docs/architecture/memory.md +20 -24
  37. package/docs/release-checklist.md +19 -6
  38. package/docs/usage.md +33 -17
  39. package/index.ts +8 -1
  40. package/ocmemog/__init__.py +11 -0
  41. package/ocmemog/doctor.py +1255 -0
  42. package/ocmemog/runtime/__init__.py +18 -0
  43. package/ocmemog/runtime/_compat_bridge.py +28 -0
  44. package/ocmemog/runtime/config.py +34 -0
  45. package/ocmemog/runtime/identity.py +115 -0
  46. package/ocmemog/runtime/inference.py +163 -0
  47. package/ocmemog/runtime/instrumentation.py +20 -0
  48. package/ocmemog/runtime/memory/__init__.py +91 -0
  49. package/ocmemog/runtime/memory/api.py +1594 -0
  50. package/ocmemog/runtime/memory/candidate.py +192 -0
  51. package/ocmemog/runtime/memory/conversation_state.py +1831 -0
  52. package/ocmemog/runtime/memory/distill.py +282 -0
  53. package/ocmemog/runtime/memory/embedding_engine.py +151 -0
  54. package/ocmemog/runtime/memory/freshness.py +114 -0
  55. package/ocmemog/runtime/memory/health.py +93 -0
  56. package/ocmemog/runtime/memory/integrity.py +208 -0
  57. package/ocmemog/runtime/memory/memory_consolidation.py +60 -0
  58. package/ocmemog/runtime/memory/memory_links.py +109 -0
  59. package/ocmemog/runtime/memory/memory_salience.py +235 -0
  60. package/ocmemog/runtime/memory/memory_synthesis.py +33 -0
  61. package/ocmemog/runtime/memory/memory_taxonomy.py +35 -0
  62. package/ocmemog/runtime/memory/pondering_engine.py +681 -0
  63. package/ocmemog/runtime/memory/promote.py +279 -0
  64. package/ocmemog/runtime/memory/provenance.py +408 -0
  65. package/ocmemog/runtime/memory/reinforcement.py +73 -0
  66. package/ocmemog/runtime/memory/retrieval.py +224 -0
  67. package/ocmemog/runtime/memory/semantic_search.py +66 -0
  68. package/ocmemog/runtime/memory/store.py +433 -0
  69. package/ocmemog/runtime/memory/unresolved_state.py +93 -0
  70. package/ocmemog/runtime/memory/vector_index.py +411 -0
  71. package/ocmemog/runtime/model_roles.py +15 -0
  72. package/ocmemog/runtime/model_router.py +28 -0
  73. package/ocmemog/runtime/providers.py +78 -0
  74. package/ocmemog/runtime/roles.py +92 -0
  75. package/ocmemog/runtime/security/__init__.py +8 -0
  76. package/ocmemog/runtime/security/redaction.py +17 -0
  77. package/ocmemog/runtime/state_store.py +32 -0
  78. package/ocmemog/runtime/storage_paths.py +70 -0
  79. package/ocmemog/sidecar/app.py +421 -60
  80. package/ocmemog/sidecar/compat.py +50 -13
  81. package/ocmemog/sidecar/transcript_watcher.py +327 -242
  82. package/openclaw.plugin.json +4 -0
  83. package/package.json +1 -1
  84. package/scripts/ocmemog-backfill-vectors.py +5 -3
  85. package/scripts/ocmemog-continuity-benchmark.py +1 -1
  86. package/scripts/ocmemog-demo.py +1 -1
  87. package/scripts/ocmemog-doctor.py +15 -0
  88. package/scripts/ocmemog-install.sh +29 -7
  89. package/scripts/ocmemog-integrated-proof.py +374 -0
  90. package/scripts/ocmemog-reindex-vectors.py +5 -3
  91. package/scripts/ocmemog-release-check.sh +330 -0
  92. package/scripts/ocmemog-sidecar.sh +4 -2
  93. package/scripts/ocmemog-test-rig.py +5 -3
  94. package/brain/runtime/memory/artifacts.py +0 -33
  95. package/brain/runtime/memory/context_builder.py +0 -112
  96. package/brain/runtime/memory/interaction_memory.py +0 -57
  97. package/brain/runtime/memory/memory_gate.py +0 -38
  98. package/brain/runtime/memory/memory_graph.py +0 -54
  99. package/brain/runtime/memory/person_identity.py +0 -83
  100. package/brain/runtime/memory/person_memory.py +0 -138
  101. package/brain/runtime/memory/sentiment_memory.py +0 -67
  102. package/brain/runtime/memory/tool_catalog.py +0 -68
@@ -0,0 +1,208 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict, List
4
+
5
+ from ocmemog.runtime.instrumentation import emit_event
6
+ from ocmemog.runtime import state_store
7
+ from ocmemog.runtime.memory import store
8
+
9
+ EMBED_TABLES = tuple(store.MEMORY_TABLES)
10
+
11
+
12
def run_integrity_check() -> Dict[str, Any]:
    """Audit the memory store for structural and referential problems.

    Checks performed (each is best-effort: a missing table or failed query
    never aborts the overall scan):
      * all required tables exist,
      * candidates reference existing experiences,
      * promotions are not duplicated per (source, content),
      * experiences carry a non-empty memory_reference (repairable),
      * every embeddable row has a vector and every vector has a source row
        (orphan vectors are repairable),
      * SQLite's own ``PRAGMA quick_check`` passes.

    Returns a report dict with keys ``issues``, ``ok``, ``warning_type``,
    ``warning_summary``, ``repairable_issues`` and ``sqlite_ok``.
    """
    # Hoisted: the same log path is used for every event in this run.
    log_path = state_store.report_log_path()
    emit_event(log_path, "brain_memory_integrity_start", status="ok")
    conn = store.connect()
    issues: List[str] = []
    repairable: List[str] = []
    sqlite_ok = True
    try:
        required = {
            "experiences",
            "knowledge",
            "preferences",
            "identity",
            "reflections",
            "tasks",
            "directives",
            "promotions",
            "candidates",
            "memory_index",
            "vector_embeddings",
            "runbooks",
            "lessons",
        }
        tables = {row[0] for row in conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()}
        missing = required - tables
        if missing:
            issues.append(f"missing_tables:{','.join(sorted(missing))}")
            emit_event(log_path, "brain_memory_integrity_issue", status="warn")

        # Candidates whose source experience no longer exists.
        try:
            orphan = conn.execute(
                "SELECT COUNT(*) FROM candidates WHERE source_event_id NOT IN (SELECT id FROM experiences)",
            ).fetchone()[0]
            if orphan:
                issues.append(f"orphan_candidates:{orphan}")
                emit_event(log_path, "brain_memory_integrity_issue", status="warn")
        except Exception:
            pass  # best-effort: table may not exist yet

        # Promotions duplicated for the same (source, content) pair.
        try:
            dup_groups = conn.execute(
                "SELECT COUNT(*) FROM (SELECT 1 FROM promotions GROUP BY source, content HAVING COUNT(*) > 1)",
            ).fetchone()[0]
            if dup_groups:
                issues.append(f"duplicate_promotions:{dup_groups}")
                emit_event(log_path, "brain_memory_integrity_issue", status="warn")
        except Exception:
            pass

        # Experiences missing their memory_reference (repair_integrity can fix these).
        try:
            missing_ref = conn.execute(
                "SELECT COUNT(*) FROM experiences WHERE memory_reference IS NULL OR memory_reference = ''",
            ).fetchone()[0]
            if missing_ref:
                issues.append(f"missing_memory_reference:{missing_ref}")
                repairable.append("missing_memory_reference")
                emit_event(log_path, "brain_memory_integrity_issue", status="warn")
        except Exception:
            pass

        missing_vectors = 0
        orphan_vectors = 0
        # NOTE: the table name is interpolated from the fixed EMBED_TABLES tuple,
        # never from user input, so the f-strings below are not an injection risk.
        # (Previously one query used f-string interpolation and the other used
        # %-formatting; unified to one style.)
        try:
            for table in EMBED_TABLES:
                missing_vectors += conn.execute(
                    f"""
                    SELECT COUNT(*) FROM {table} AS source
                    WHERE NOT EXISTS (
                        SELECT 1
                        FROM vector_embeddings AS embeddings
                        WHERE embeddings.source_type = ?
                        AND CAST(embeddings.source_id AS TEXT) = CAST(source.id AS TEXT)
                    )
                    """,
                    (table,),
                ).fetchone()[0]
        except Exception:
            pass

        try:
            for table in EMBED_TABLES:
                orphan_vectors += conn.execute(
                    f"""
                    SELECT COUNT(*) FROM vector_embeddings AS embeddings
                    WHERE embeddings.source_type = ?
                    AND NOT EXISTS (
                        SELECT 1 FROM {table} AS source
                        WHERE CAST(source.id AS TEXT) = CAST(embeddings.source_id AS TEXT)
                    )
                    """,
                    (table,),
                ).fetchone()[0]
        except Exception:
            pass

        if missing_vectors:
            issues.append(f"vector_missing:{missing_vectors}")
            emit_event(log_path, "brain_memory_vector_integrity_issue", status="warn")

        if orphan_vectors:
            issues.append(f"vector_orphan:{orphan_vectors}")
            repairable.append("vector_orphan")
            emit_event(log_path, "brain_memory_vector_integrity_issue", status="warn")

        # SQLite's structural self-check; a single-row result of "ok" means healthy.
        try:
            quick_check = conn.execute("PRAGMA quick_check(1)").fetchone()
            quick_value = str((quick_check or ["ok"])[0] or "ok")
            if quick_value.lower() != "ok":
                sqlite_ok = False
                issues.append(f"sqlite_quick_check:{quick_value}")
                emit_event(log_path, "brain_memory_integrity_issue", status="warn")
        except Exception:
            sqlite_ok = False
    finally:
        # Fix: close the connection even when a check raises (it previously
        # leaked on any unexpected exception).
        conn.close()

    # Surface the first vector-related issue as the headline warning.
    warning_type = ""
    warning_summary = ""
    for issue in issues:
        if issue.startswith("vector_missing"):
            warning_type = "vector_missing"
            warning_summary = "Vector embeddings missing entries"
            break
        if issue.startswith("vector_orphan"):
            warning_type = "vector_orphan"
            warning_summary = "Vector embeddings have orphan entries"
            break

    emit_event(log_path, "brain_memory_integrity_complete", status="ok")
    return {
        "issues": issues,
        "ok": len(issues) == 0 and sqlite_ok,
        "warning_type": warning_type,
        "warning_summary": warning_summary,
        "repairable_issues": repairable,
        "sqlite_ok": sqlite_ok,
    }
147
+
148
+
149
def repair_integrity() -> Dict[str, Any]:
    """Fix the repairable issues reported by run_integrity_check().

    Two repairs run inside a single serialized write:
      * delete vector_embeddings rows whose source row no longer exists
        ("vector_orphan"),
      * backfill empty experiences.memory_reference values with a synthetic
        'legacy:<experience_type>:<id>' reference ("missing_memory_reference").

    Returns ``{"ok": True, "repaired": [...]}`` merged with the per-repair
    counts produced by the write closure.
    """
    repaired: List[str] = []

    def _write() -> Dict[str, Any]:
        # Executed by the store's write scheduler; opens and closes its own
        # connection so it is self-contained regardless of caller state.
        conn = store.connect()
        removed_orphans = 0
        repaired_missing_refs = 0
        try:
            tables = {row[0] for row in conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()}
            if "vector_embeddings" in tables:
                for table in EMBED_TABLES:
                    # Skip source tables that do not exist in this database.
                    if table not in tables:
                        continue
                    # Table name comes from the fixed EMBED_TABLES tuple, not
                    # user input, so f-string interpolation is safe here.
                    removed_orphans += conn.execute(
                        f"""
                        DELETE FROM vector_embeddings
                        WHERE source_type = ?
                        AND NOT EXISTS (
                            SELECT 1 FROM {table} AS source
                            WHERE CAST(source.id AS TEXT) = CAST(vector_embeddings.source_id AS TEXT)
                        )
                        """,
                        (table,),
                    ).rowcount
            if "experiences" in tables:
                repaired_missing_refs += conn.execute(
                    """
                    UPDATE experiences
                    SET memory_reference = 'legacy:' || COALESCE(experience_type, 'unknown') || ':' || id
                    WHERE memory_reference IS NULL OR memory_reference = ''
                    """,
                ).rowcount
            conn.commit()
            return {
                "removed_orphan_vectors": int(removed_orphans),
                "repaired_missing_memory_references": int(repaired_missing_refs),
            }
        finally:
            conn.close()

    result = store.submit_write(_write, timeout=30.0)
    # Emit one audit event per repair category that actually changed rows.
    if int(result.get("removed_orphan_vectors") or 0) > 0:
        repaired.append(f"vector_orphan:{int(result['removed_orphan_vectors'])}")
        emit_event(
            state_store.report_log_path(),
            "brain_memory_integrity_repair",
            status="ok",
            repaired="vector_orphan",
            count=int(result["removed_orphan_vectors"]),
        )
    if int(result.get("repaired_missing_memory_references") or 0) > 0:
        repaired.append(f"missing_memory_reference:{int(result['repaired_missing_memory_references'])}")
        emit_event(
            state_store.report_log_path(),
            "brain_memory_integrity_repair",
            status="ok",
            repaired="missing_memory_reference",
            count=int(result["repaired_missing_memory_references"]),
        )
    return {"ok": True, "repaired": repaired, **result}
@@ -0,0 +1,60 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Dict, List, Tuple
4
+
5
+ from ocmemog.runtime import state_store
6
+ from ocmemog.runtime.instrumentation import emit_event
7
+ from ocmemog.runtime.memory import memory_taxonomy
8
+
9
+ LOGFILE = state_store.report_log_path()
10
+
11
+
12
def _cluster_key(record: Dict[str, object]) -> Tuple[str, str]:
    """Build the (memory_type, anchor) bucket key for *record*.

    The anchor is the first 48 characters of the record content, lower-cased,
    so records that open with the same text land in the same cluster.
    """
    kind = memory_taxonomy.classify_memory_type(record)
    text = str(record.get("content") or "")
    return kind, text[:48].lower()
17
+
18
+
19
def consolidate_memories(records: List[Dict[str, object]], max_clusters: int = 5) -> Dict[str, object]:
    """Bucket *records* into clusters and derive reinforcement weights.

    Records with empty content are skipped. Clustering stops entirely once
    ``max_clusters`` buckets exist — any later records are dropped, even ones
    that would have joined an existing bucket.

    Returns ``{"consolidated": [...], "reinforcement": [...]}``.
    """
    emit_event(LOGFILE, "brain_memory_consolidation_start", status="ok")

    clusters: Dict[Tuple[str, str], List[Dict[str, object]]] = {}
    for record in records:
        if not str(record.get("content") or "").strip():
            continue
        clusters.setdefault(_cluster_key(record), []).append(record)
        if len(clusters) >= max_clusters:
            break

    consolidated: List[Dict[str, object]] = []
    reinforcement_updates: List[Dict[str, object]] = []
    for (mem_type, anchor), items in clusters.items():
        refs = [ref for ref in (str(item.get("reference") or "") for item in items) if ref]
        consolidated.append(
            {
                "memory_type": mem_type,
                "summary": f"{mem_type} cluster: {anchor}",
                "count": len(items),
                "references": refs,
                "candidate_kinds": sorted({str(item.get("candidate_kind") or "memory") for item in items}),
            }
        )
        # Reinforcement weight scales with cluster size, capped at 1.0.
        reinforcement_updates.append(
            {
                "memory_type": mem_type,
                "weight": min(1.0, len(items) / 5.0),
                "references": refs,
            }
        )
        emit_event(
            LOGFILE,
            "brain_memory_consolidation_cluster",
            status="ok",
            memory_type=mem_type,
            count=len(items),
        )

    emit_event(LOGFILE, "brain_memory_consolidation_complete", status="ok", cluster_count=len(consolidated))
    return {"consolidated": consolidated, "reinforcement": reinforcement_updates}
@@ -0,0 +1,109 @@
1
+ from __future__ import annotations
2
+
3
+ import sqlite3
4
+ from typing import Dict, List
5
+
6
+ from ocmemog.runtime import state_store
7
+ from ocmemog.runtime.instrumentation import emit_event
8
+ from . import store
9
+
10
+ LOGFILE = state_store.report_log_path()
11
+
12
+
13
+ def _dedupe_memory_links(conn) -> None:
14
+ conn.execute(
15
+ """
16
+ DELETE FROM memory_links
17
+ WHERE rowid NOT IN (
18
+ SELECT MIN(rowid)
19
+ FROM memory_links
20
+ GROUP BY source_reference, link_type, target_reference
21
+ )
22
+ """
23
+ )
24
+
25
+
26
+ def _ensure_table(conn) -> None:
27
+ conn.execute(
28
+ """
29
+ CREATE TABLE IF NOT EXISTS memory_links (
30
+ source_reference TEXT NOT NULL,
31
+ link_type TEXT NOT NULL,
32
+ target_reference TEXT NOT NULL,
33
+ created_at TEXT NOT NULL DEFAULT (datetime('now')),
34
+ UNIQUE(source_reference, link_type, target_reference)
35
+ )
36
+ """
37
+ )
38
+ try:
39
+ conn.execute(
40
+ "CREATE UNIQUE INDEX IF NOT EXISTS idx_memory_links_unique ON memory_links(source_reference, link_type, target_reference)"
41
+ )
42
+ except sqlite3.IntegrityError:
43
+ _dedupe_memory_links(conn)
44
+ conn.execute("DROP INDEX IF EXISTS idx_memory_links_unique")
45
+ conn.execute(
46
+ "CREATE UNIQUE INDEX IF NOT EXISTS idx_memory_links_unique ON memory_links(source_reference, link_type, target_reference)"
47
+ )
48
+ conn.commit()
49
+
50
+
51
def add_memory_link(source_reference: str, link_type: str, target_reference: str) -> None:
    """Persist a directed link between two memory references (idempotent).

    Duplicate (source, type, target) triples are ignored via INSERT OR IGNORE.
    Emits a ``brain_memory_link_created`` event on completion.
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        conn.execute(
            "INSERT OR IGNORE INTO memory_links (source_reference, link_type, target_reference) VALUES (?, ?, ?)",
            (source_reference, link_type, target_reference),
        )
        conn.commit()
    finally:
        # Fix: the connection previously leaked if the insert raised.
        conn.close()
    emit_event(LOGFILE, "brain_memory_link_created", status="ok", link_type=link_type)
61
+
62
+
63
def get_memory_links(source_reference: str) -> List[Dict[str, str]]:
    """Return outgoing links for *source_reference*.

    Each item is ``{"link_type": ..., "target_reference": ...}``.
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        rows = conn.execute(
            "SELECT link_type, target_reference FROM memory_links WHERE source_reference=?",
            (source_reference,),
        ).fetchall()
    finally:
        # Fix: the connection previously leaked if the query raised.
        conn.close()
    return [{"link_type": row[0], "target_reference": row[1]} for row in rows]
72
+
73
+
74
def get_memory_links_for_target(target_reference: str) -> List[Dict[str, str]]:
    """Return incoming links pointing at *target_reference*, newest first.

    Each item carries ``source_reference``, ``link_type`` and
    ``target_reference``.
    """
    conn = store.connect()
    try:
        _ensure_table(conn)
        rows = conn.execute(
            "SELECT source_reference, link_type, target_reference FROM memory_links WHERE target_reference=? ORDER BY created_at DESC",
            (target_reference,),
        ).fetchall()
    finally:
        # Fix: the connection previously leaked if the query raised.
        conn.close()
    return [
        {
            "source_reference": row[0],
            "link_type": row[1],
            "target_reference": row[2],
        }
        for row in rows
    ]
90
+
91
+
92
def get_memory_links_for_thread(thread_id: str) -> List[Dict[str, str]]:
    """Return links targeting the thread identified by *thread_id*."""
    return get_memory_links_for_target("thread:" + thread_id)
94
+
95
+
96
def get_memory_links_for_session(session_id: str) -> List[Dict[str, str]]:
    """Return links targeting the session identified by *session_id*."""
    return get_memory_links_for_target("session:" + session_id)
98
+
99
+
100
def get_memory_links_for_conversation(conversation_id: str) -> List[Dict[str, str]]:
    """Return links targeting the conversation identified by *conversation_id*."""
    return get_memory_links_for_target("conversation:" + conversation_id)
102
+
103
+
104
def count_memory_links() -> int:
    """Return the total number of stored memory links (0 when empty)."""
    conn = store.connect()
    try:
        _ensure_table(conn)
        row = conn.execute("SELECT COUNT(*) FROM memory_links").fetchone()
    finally:
        # Fix: the connection previously leaked if the query raised.
        conn.close()
    return int(row[0]) if row else 0
@@ -0,0 +1,235 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict, List, Mapping, Sequence
4
+
5
+ from ocmemog.runtime import state_store
6
+ from ocmemog.runtime.instrumentation import emit_event
7
+ from ocmemog.runtime.memory import freshness
8
+
9
+ LOGFILE = state_store.report_log_path()
10
+
11
+
12
def score_salience(record: Mapping[str, float]) -> Dict[str, float | bool]:
    """Fold the record's salience signals into one clamped score.

    Each signal falls back to its own default when absent. The summed score
    is clamped to [0, 3]; activation strength is the score normalized to
    [0, 1]; the attention trigger fires at score >= 1.5. Emits scored and
    updated events as side effects.
    """
    # Signal name -> default value when missing from the record.
    defaults = {
        "importance": 0.2,
        "novelty": 0.1,
        "uncertainty": 0.1,
        "risk": 0.0,
        "goal_alignment": 0.1,
        "reinforcement": 0.0,
        "user_interest": 0.0,
        "freshness": 0.0,
        "signal_priority": 0.0,
    }
    total = sum(float(record.get(name, fallback)) for name, fallback in defaults.items())
    salience_score = max(0.0, min(3.0, total))
    activation_strength = min(1.0, salience_score / 3.0)
    attention_trigger = salience_score >= 1.5
    emit_event(LOGFILE, "brain_memory_salience_scored", status="ok", score=salience_score)
    emit_event(LOGFILE, "brain_memory_salience_updated", status="ok", score=salience_score)
    return {
        "salience_score": round(salience_score, 3),
        "activation_strength": round(activation_strength, 3),
        "attention_trigger": attention_trigger,
    }
35
+
36
+
37
+ def _as_float(value: Any, default: float = 0.0) -> float:
38
+ try:
39
+ return float(value)
40
+ except Exception:
41
+ return default
42
+
43
+
44
+ def _content_text(record: Mapping[str, Any]) -> str:
45
+ return str(record.get("effective_content") or record.get("content") or "").strip()
46
+
47
+
48
+ def _normalized_position(record_id: int, *, latest_id: int, earliest_id: int) -> float:
49
+ if latest_id <= earliest_id:
50
+ return 1.0
51
+ return max(0.0, min(1.0, (record_id - earliest_id) / float(latest_id - earliest_id)))
52
+
53
+
54
def score_turn_salience(
    turn: Mapping[str, Any],
    *,
    latest_turn_id: int | None = None,
    earliest_turn_id: int | None = None,
    active_branch_id: str | None = None,
    reply_chain_turn_ids: Sequence[int] | None = None,
) -> Dict[str, Any]:
    """Heuristically score one conversation turn's salience.

    Derives a set of weighted signals from the turn's role, content, branch
    metadata and resolution decision, then delegates the final scoring to
    score_salience(). Returns the score dict merged with the turn's
    identifying fields (reference, id, role, content, branch_id, resolution).

    Keyword args bound the recency window (latest/earliest turn ids), name
    the currently active branch, and list ids on the active reply chain —
    both boost ``signal_priority``.
    """
    # Defensive reads: metadata/resolution may be absent or non-dict shaped.
    metadata = turn.get("metadata") if isinstance(turn.get("metadata"), dict) else {}
    resolution = metadata.get("resolution") if isinstance(metadata.get("resolution"), dict) else {}
    role = str(turn.get("role") or "")
    content = _content_text(turn)
    turn_id = int(turn.get("id") or 0)
    branch_id = str(metadata.get("branch_id") or "")
    # Fall back to this turn's own id (or 1) when window bounds are not given.
    latest_turn_id = int(latest_turn_id or turn_id or 1)
    earliest_turn_id = int(earliest_turn_id or turn_id or 1)
    reply_chain_ids = {int(item) for item in (reply_chain_turn_ids or []) if int(item or 0) > 0}
    # Recency: position of this turn between the earliest and latest ids, in [0, 1].
    freshness_score = _normalized_position(turn_id or earliest_turn_id, latest_id=latest_turn_id, earliest_id=earliest_turn_id)

    # Heuristic signal weights; user turns are weighted higher throughout.
    importance = 0.55 if role == "user" else 0.35
    # Depth-0 turns that opened a branch count as novel.
    novelty = 0.25 if metadata.get("branch_depth") == 0 and branch_id else 0.1
    uncertainty = 0.45 if "?" in content else 0.0
    risk = 0.35 if resolution.get("decision") == "decline" else 0.0
    goal_alignment = 0.45 if role == "user" else 0.0
    if resolution:
        goal_alignment += 0.2
    # Commitment/request phrasing signals goal-directed content.
    if any(token in content.lower() for token in ("i will", "i'll", "let me", "next", "need to", "please", "can you")):
        goal_alignment += 0.2
    reinforcement = 0.2 if resolution.get("decision") == "confirm" else 0.0
    user_interest = 0.3 if role == "user" else 0.0
    # Priority boosts: active branch, membership in the reply chain, explicit reply.
    signal_priority = 0.0
    if active_branch_id and branch_id and branch_id == active_branch_id:
        signal_priority += 0.45
    if turn_id and turn_id in reply_chain_ids:
        signal_priority += 0.35
    if metadata.get("reply_to_turn_id"):
        signal_priority += 0.1

    # goal_alignment and signal_priority are capped before final scoring.
    scored = score_salience(
        {
            "importance": importance,
            "novelty": novelty,
            "uncertainty": uncertainty,
            "risk": risk,
            "goal_alignment": min(goal_alignment, 0.8),
            "reinforcement": reinforcement,
            "user_interest": user_interest,
            "freshness": freshness_score,
            "signal_priority": min(signal_priority, 0.9),
        }
    )
    return {
        **dict(scored),
        "reference": turn.get("reference"),
        "id": turn.get("id"),
        "role": role,
        "content": content,
        "branch_id": branch_id or None,
        "resolution": resolution or None,
    }
114
+
115
+
116
def score_checkpoint_salience(
    checkpoint: Mapping[str, Any],
    *,
    latest_checkpoint_id: int | None = None,
    active_branch_id: str | None = None,
) -> Dict[str, Any]:
    """Heuristically score one conversation checkpoint's salience.

    Signals are derived from the checkpoint's open loops, pending actions,
    latest user ask and last assistant commitment; the final scoring is
    delegated to score_salience(). Returns the score dict merged with the
    checkpoint's reference, id, summary and active branch id.
    """
    # Defensive reads: metadata/active_branch may be absent or non-dict shaped.
    metadata = checkpoint.get("metadata") if isinstance(checkpoint.get("metadata"), dict) else {}
    active_branch = metadata.get("active_branch") if isinstance(metadata.get("active_branch"), dict) else {}
    checkpoint_id = int(checkpoint.get("id") or 0)
    latest_checkpoint_id = int(latest_checkpoint_id or checkpoint_id or 1)
    # Recency as a fraction of the latest checkpoint id, clamped to [0, 1].
    freshness_score = 1.0 if latest_checkpoint_id <= 0 else max(0.0, min(1.0, checkpoint_id / float(latest_checkpoint_id or 1)))
    open_loops = checkpoint.get("open_loops") if isinstance(checkpoint.get("open_loops"), list) else []
    pending_actions = checkpoint.get("pending_actions") if isinstance(checkpoint.get("pending_actions"), list) else []
    latest_user_ask = str(checkpoint.get("latest_user_ask") or "").strip()
    commitment = str(checkpoint.get("last_assistant_commitment") or "").strip()

    # More open loops -> more important, capped at +0.3.
    importance = 0.45 + min(0.3, len(open_loops) * 0.08)
    # Depth-0 (root) checkpoints get a novelty bump.
    novelty = 0.1 + (0.15 if int(checkpoint.get("depth") or 0) == 0 else 0.0)
    uncertainty = 0.3 if "?" in latest_user_ask else 0.0
    # Pending actions carry risk, capped at 0.45.
    risk = min(0.45, len(pending_actions) * 0.08)
    goal_alignment = 0.25 + (0.2 if latest_user_ask else 0.0) + (0.15 if commitment else 0.0)
    reinforcement = 0.0
    user_interest = 0.25 if latest_user_ask else 0.0
    # Priority boosts: checkpoint is on the active branch, or has open loops.
    signal_priority = 0.0
    if active_branch_id and str(active_branch.get("branch_id") or "") == active_branch_id:
        signal_priority += 0.35
    if open_loops:
        signal_priority += 0.25

    # importance, goal_alignment and signal_priority are capped before scoring.
    scored = score_salience(
        {
            "importance": min(importance, 0.8),
            "novelty": novelty,
            "uncertainty": uncertainty,
            "risk": risk,
            "goal_alignment": min(goal_alignment, 0.8),
            "reinforcement": reinforcement,
            "user_interest": user_interest,
            "freshness": freshness_score,
            "signal_priority": min(signal_priority, 0.8),
        }
    )
    return {
        **dict(scored),
        "reference": checkpoint.get("reference"),
        "id": checkpoint.get("id"),
        "summary": str(checkpoint.get("summary") or "").strip(),
        "active_branch_id": active_branch.get("branch_id"),
    }
165
+
166
+
167
def rank_turns_by_salience(
    turns: Sequence[Mapping[str, Any]],
    *,
    active_branch_id: str | None = None,
    reply_chain_turn_ids: Sequence[int] | None = None,
    limit: int | None = None,
) -> List[Dict[str, Any]]:
    """Score every turn and return entries ordered by descending salience.

    Each entry is ``{"turn": <dict copy>, "salience": <score dict>}``, sorted
    by (salience_score, turn id) descending. ``limit`` caps the result when
    given a truthy value.
    """
    pool = list(turns)
    if not pool:
        return []
    positive_ids = [tid for tid in (int(t.get("id") or 0) for t in pool) if tid > 0]
    newest = max(positive_ids) if positive_ids else 1
    oldest = min(positive_ids) if positive_ids else newest
    entries = [
        {
            "turn": dict(turn),
            "salience": score_turn_salience(
                turn,
                latest_turn_id=newest,
                earliest_turn_id=oldest,
                active_branch_id=active_branch_id,
                reply_chain_turn_ids=reply_chain_turn_ids,
            ),
        }
        for turn in pool
    ]
    entries.sort(
        key=lambda entry: (
            _as_float(entry["salience"].get("salience_score")),
            _as_float((entry["turn"] or {}).get("id")),
        ),
        reverse=True,
    )
    return entries[: limit or len(entries)]
198
+
199
+
200
def rank_checkpoints_by_salience(
    checkpoints: Sequence[Mapping[str, Any]],
    *,
    active_branch_id: str | None = None,
    limit: int | None = None,
) -> List[Dict[str, Any]]:
    """Score every checkpoint and return entries ordered by descending salience.

    Each entry is ``{"checkpoint": <dict copy>, "salience": <score dict>}``,
    sorted by (salience_score, checkpoint id) descending. ``limit`` caps the
    result when given a truthy value.
    """
    pool = list(checkpoints)
    if not pool:
        return []
    newest = max(int(item.get("id") or 0) for item in pool) or 1
    entries = [
        {
            "checkpoint": dict(checkpoint),
            "salience": score_checkpoint_salience(
                checkpoint,
                latest_checkpoint_id=newest,
                active_branch_id=active_branch_id,
            ),
        }
        for checkpoint in pool
    ]
    entries.sort(
        key=lambda entry: (
            _as_float(entry["salience"].get("salience_score")),
            _as_float((entry["checkpoint"] or {}).get("id")),
        ),
        reverse=True,
    )
    return entries[: limit or len(entries)]
226
+
227
+
228
def scan_salient_memories(limit: int = 5) -> List[Dict[str, float | bool]]:
    """Score freshness advisories and keep those crossing the attention threshold.

    Pulls up to *limit* advisories from the freshness scanner, scores each on
    its freshness signal alone, and returns at most *limit* results whose
    attention trigger fired.
    """
    advisories = freshness.scan_freshness(limit=limit).get("advisories", [])
    scored = (
        score_salience({"freshness": float(advisory.get("freshness_score", 0.0))})
        for advisory in advisories
    )
    return [verdict for verdict in scored if verdict.get("attention_trigger")][:limit]
@@ -0,0 +1,33 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Dict, List
4
+
5
+ from ocmemog.runtime import state_store
6
+ from ocmemog.runtime.instrumentation import emit_event
7
+ from ocmemog.runtime.memory import reinforcement
8
+
9
+ LOGFILE = state_store.report_log_path()
10
+
11
+
12
+ SYNTHESIS_TYPES = [
13
+ "theme_summary",
14
+ "user_preference",
15
+ "candidate_procedure",
16
+ "recurring_pattern",
17
+ "contradiction_candidate",
18
+ ]
19
+
20
+
21
def synthesize_memory_patterns(limit: int = 5) -> List[Dict[str, str]]:
    """Turn recent-experience counts into recurring-pattern summaries.

    Reads up to *limit* recent experience stats from the reinforcement module
    and emits one ``recurring_pattern`` entry per (key, count) pair, capped at
    *limit* results. Start/complete events are emitted as side effects.
    """
    emit_event(LOGFILE, "brain_memory_synthesis_start", status="ok")
    stats = reinforcement.list_recent_experiences(limit=limit)
    results: List[Dict[str, str]] = [
        {"type": "recurring_pattern", "summary": f"{key} occurred {count} times"}
        for key, count in stats.items()
    ]
    emit_event(LOGFILE, "brain_memory_synthesis_complete", status="ok", count=len(results))
    return results[:limit]
@@ -0,0 +1,35 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Dict, List
4
+
5
+ from ocmemog.runtime import state_store
6
+ from ocmemog.runtime.instrumentation import emit_event
7
+
8
+ LOGFILE = state_store.report_log_path()
9
+
10
# Closed set of memory categories recognised by the taxonomy; the order here
# is the order reported by list_memory_types().
MEMORY_TYPES = [
    "episodic",
    "semantic",
    "procedural",
    "relationship",
    "working",
]
17
+
18
+
19
def list_memory_types() -> List[str]:
    """Return a fresh copy of the recognised memory-type names."""
    return MEMORY_TYPES.copy()
21
+
22
+
23
def classify_memory_type(record: Dict[str, object]) -> str:
    """Assign one of MEMORY_TYPES to *record*.

    Precedence: an explicit, valid ``memory_type`` on the record wins; then
    content heuristics (procedural / relationship cues); then the ``working``
    source marker; otherwise ``semantic``. Emits a taxonomy event with the
    chosen type.
    """
    import re  # local import: keeps this fix self-contained within the block

    content = str(record.get("content") or "").lower()
    explicit = record.get("memory_type")
    if explicit in MEMORY_TYPES:
        # Fix: an explicit valid type used to be overridden by content heuristics.
        memory_type = str(explicit)
    elif "how to" in content or re.search(r"\bsteps?\b", content):
        memory_type = "procedural"
    elif re.search(r"\bmet\b", content) or "relationship" in content:
        # Fix: word-boundary match — the old substring test also matched
        # "method", "metadata", etc., misfiling them as relationship memories.
        memory_type = "relationship"
    elif record.get("source") == "working":
        memory_type = "working"
    else:
        memory_type = "semantic"
    emit_event(LOGFILE, "brain_memory_taxonomy_assigned", status="ok", memory_type=memory_type)
    return memory_type