@simbimbo/memory-ocmemog 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81) hide show
  1. package/CHANGELOG.md +59 -0
  2. package/LICENSE +21 -0
  3. package/README.md +223 -0
  4. package/brain/__init__.py +1 -0
  5. package/brain/runtime/__init__.py +13 -0
  6. package/brain/runtime/config.py +21 -0
  7. package/brain/runtime/inference.py +83 -0
  8. package/brain/runtime/instrumentation.py +17 -0
  9. package/brain/runtime/memory/__init__.py +13 -0
  10. package/brain/runtime/memory/api.py +152 -0
  11. package/brain/runtime/memory/artifacts.py +33 -0
  12. package/brain/runtime/memory/candidate.py +89 -0
  13. package/brain/runtime/memory/context_builder.py +87 -0
  14. package/brain/runtime/memory/conversation_state.py +1825 -0
  15. package/brain/runtime/memory/distill.py +198 -0
  16. package/brain/runtime/memory/embedding_engine.py +94 -0
  17. package/brain/runtime/memory/freshness.py +91 -0
  18. package/brain/runtime/memory/health.py +42 -0
  19. package/brain/runtime/memory/integrity.py +170 -0
  20. package/brain/runtime/memory/interaction_memory.py +57 -0
  21. package/brain/runtime/memory/memory_consolidation.py +60 -0
  22. package/brain/runtime/memory/memory_gate.py +38 -0
  23. package/brain/runtime/memory/memory_graph.py +54 -0
  24. package/brain/runtime/memory/memory_links.py +109 -0
  25. package/brain/runtime/memory/memory_salience.py +235 -0
  26. package/brain/runtime/memory/memory_synthesis.py +33 -0
  27. package/brain/runtime/memory/memory_taxonomy.py +35 -0
  28. package/brain/runtime/memory/person_identity.py +83 -0
  29. package/brain/runtime/memory/person_memory.py +138 -0
  30. package/brain/runtime/memory/pondering_engine.py +577 -0
  31. package/brain/runtime/memory/promote.py +237 -0
  32. package/brain/runtime/memory/provenance.py +356 -0
  33. package/brain/runtime/memory/reinforcement.py +73 -0
  34. package/brain/runtime/memory/retrieval.py +153 -0
  35. package/brain/runtime/memory/semantic_search.py +66 -0
  36. package/brain/runtime/memory/sentiment_memory.py +67 -0
  37. package/brain/runtime/memory/store.py +400 -0
  38. package/brain/runtime/memory/tool_catalog.py +68 -0
  39. package/brain/runtime/memory/unresolved_state.py +93 -0
  40. package/brain/runtime/memory/vector_index.py +270 -0
  41. package/brain/runtime/model_roles.py +11 -0
  42. package/brain/runtime/model_router.py +22 -0
  43. package/brain/runtime/providers.py +59 -0
  44. package/brain/runtime/security/__init__.py +3 -0
  45. package/brain/runtime/security/redaction.py +14 -0
  46. package/brain/runtime/state_store.py +25 -0
  47. package/brain/runtime/storage_paths.py +41 -0
  48. package/docs/architecture/memory.md +118 -0
  49. package/docs/release-checklist.md +34 -0
  50. package/docs/reports/ocmemog-code-audit-2026-03-14.md +155 -0
  51. package/docs/usage.md +223 -0
  52. package/index.ts +726 -0
  53. package/ocmemog/__init__.py +1 -0
  54. package/ocmemog/sidecar/__init__.py +1 -0
  55. package/ocmemog/sidecar/app.py +1068 -0
  56. package/ocmemog/sidecar/compat.py +74 -0
  57. package/ocmemog/sidecar/transcript_watcher.py +425 -0
  58. package/openclaw.plugin.json +18 -0
  59. package/package.json +60 -0
  60. package/scripts/install-ocmemog.sh +277 -0
  61. package/scripts/launchagents/com.openclaw.ocmemog.guard.plist +22 -0
  62. package/scripts/launchagents/com.openclaw.ocmemog.ponder.plist +22 -0
  63. package/scripts/launchagents/com.openclaw.ocmemog.sidecar.plist +27 -0
  64. package/scripts/ocmemog-context.sh +15 -0
  65. package/scripts/ocmemog-continuity-benchmark.py +178 -0
  66. package/scripts/ocmemog-demo.py +122 -0
  67. package/scripts/ocmemog-failover-test.sh +17 -0
  68. package/scripts/ocmemog-guard.sh +11 -0
  69. package/scripts/ocmemog-install.sh +93 -0
  70. package/scripts/ocmemog-load-test.py +106 -0
  71. package/scripts/ocmemog-ponder.sh +30 -0
  72. package/scripts/ocmemog-recall-test.py +58 -0
  73. package/scripts/ocmemog-reindex-vectors.py +14 -0
  74. package/scripts/ocmemog-reliability-soak.py +177 -0
  75. package/scripts/ocmemog-sidecar.sh +46 -0
  76. package/scripts/ocmemog-soak-report.py +58 -0
  77. package/scripts/ocmemog-soak-test.py +44 -0
  78. package/scripts/ocmemog-test-rig.py +345 -0
  79. package/scripts/ocmemog-transcript-append.py +45 -0
  80. package/scripts/ocmemog-transcript-watcher.py +8 -0
  81. package/scripts/ocmemog-transcript-watcher.sh +7 -0
@@ -0,0 +1,577 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import re
5
+ import threading
6
+ from queue import Queue
7
+ from typing import Any, Callable, Dict, List, Optional
8
+
9
+ from brain.runtime import config, inference, state_store
10
+ from brain.runtime.instrumentation import emit_event
11
+ from brain.runtime.memory import api, integrity, memory_consolidation, memory_links, provenance, store, unresolved_state, vector_index
12
+
13
+ LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
14
+ _WRITABLE_MEMORY_TABLES = {"knowledge", "reflections", "directives", "tasks", "runbooks", "lessons"}
15
+ _SUMMARY_PREFIX_RE = re.compile(r"^(?:insight|recommendation|lesson)\s*:\s*", re.IGNORECASE)
16
+
17
+
18
def _run_with_timeout(name: str, fn: Callable[[], Any], timeout_s: float, default: Any) -> Any:
    """Execute *fn* on a daemon thread, returning *default* on timeout or error.

    Emits a `brain_ponder_<name>_start` event up front and a matching
    `_complete` event with status ok/timeout/error. A worker that outlives
    *timeout_s* is abandoned (daemon thread), not interrupted.
    """
    emit_event(LOGFILE, f"brain_ponder_{name}_start", status="ok")
    outcome: Queue[tuple[str, Any]] = Queue(maxsize=1)

    def _invoke() -> None:
        try:
            outcome.put(("ok", fn()))
        except Exception as exc:  # pragma: no cover
            outcome.put(("error", exc))

    thread = threading.Thread(target=_invoke, name=f"ocmemog-ponder-{name}", daemon=True)
    thread.start()
    thread.join(timeout_s)

    if thread.is_alive():
        # Worker still running past the deadline: report timeout, keep default.
        emit_event(LOGFILE, f"brain_ponder_{name}_complete", status="timeout")
        return default
    if outcome.empty():
        # Thread finished without enqueueing anything — treat as an error.
        emit_event(LOGFILE, f"brain_ponder_{name}_complete", status="error", error="missing_result")
        return default

    state, value = outcome.get_nowait()
    if state == "error":
        emit_event(LOGFILE, f"brain_ponder_{name}_complete", status="error", error=str(value))
        return default
    emit_event(LOGFILE, f"brain_ponder_{name}_complete", status="ok")
    return value
43
+
44
+
45
def _infer_with_timeout(prompt: str, timeout_s: float = 20.0) -> Dict[str, str]:
    """Run the configured ponder model on *prompt*, bounded by *timeout_s*.

    Falls back to ``{"status": "timeout", "output": ""}`` when inference does
    not finish in time (or fails inside the worker).
    """
    fallback = {"status": "timeout", "output": ""}

    def _call() -> Dict[str, str]:
        return inference.infer(prompt, provider_name=config.OCMEMOG_PONDER_MODEL)

    return _run_with_timeout("infer", _call, timeout_s, fallback)
52
+
53
+
54
def _load_recent(table: str, limit: int) -> List[Dict[str, object]]:
    """Return up to *limit* newest rows of *table* shaped as candidate dicts.

    Tables outside the allowlist yield an empty list; query errors are
    swallowed so a missing table degrades to "no candidates" rather than a
    crash. Each row's metadata_json is parsed best-effort into a dict.
    """
    if table not in _WRITABLE_MEMORY_TABLES:
        return []
    conn = store.connect(ensure_schema=False)
    try:
        # Table name is interpolated only after the allowlist check above.
        query = (
            f"SELECT id, content, confidence, timestamp, source, metadata_json "
            f"FROM {table} ORDER BY id DESC LIMIT ?"
        )
        rows = conn.execute(query, (limit,)).fetchall()
    except Exception:
        rows = []
    finally:
        conn.close()

    candidates: List[Dict[str, object]] = []
    for row in rows:
        try:
            metadata = json.loads(row["metadata_json"] or "{}")
        except Exception:
            metadata = {}
        candidates.append(
            {
                "reference": f"{table}:{row['id']}",
                "content": str(row["content"] or ""),
                "confidence": float(row["confidence"] or 0.0),
                "timestamp": row["timestamp"],
                "source": row["source"],
                "metadata": metadata,
                "candidate_kind": "memory",
                "memory_type": table,
            }
        )
    return candidates
86
+
87
+
88
def _load_continuity_candidates(limit: int) -> List[Dict[str, object]]:
    """Collect recent continuity rows (checkpoints, state, turns) as candidates.

    Each candidate mirrors the shape produced by ``_load_recent`` so the
    downstream pondering/consolidation code treats memory and continuity rows
    uniformly. Query failures are logged and partial results returned; the
    combined list is truncated to *limit* items (checkpoints first).
    """
    conn = store.connect(ensure_schema=False)
    items: List[Dict[str, object]] = []
    try:
        # --- conversation_checkpoints: summary + latest ask + commitment ---
        checkpoint_rows = conn.execute(
            """
            SELECT id, session_id, thread_id, conversation_id, summary, latest_user_ask,
                   last_assistant_commitment, metadata_json, timestamp
            FROM conversation_checkpoints
            ORDER BY id DESC LIMIT ?
            """,
            (limit,),
        ).fetchall()
        for row in checkpoint_rows:
            try:
                metadata = json.loads(row["metadata_json"] or "{}")
            except Exception:
                metadata = {}
            content_parts = [str(row["summary"] or "").strip()]
            latest_user_ask = str(row["latest_user_ask"] or "").strip()
            if latest_user_ask:
                content_parts.append(f"User ask: {latest_user_ask}")
            last_commitment = str(row["last_assistant_commitment"] or "").strip()
            if last_commitment:
                content_parts.append(f"Assistant commitment: {last_commitment}")
            items.append(
                {
                    "reference": f"conversation_checkpoints:{row['id']}",
                    "content": " | ".join(part for part in content_parts if part),
                    "timestamp": row["timestamp"],
                    "source": "continuity",
                    "metadata": {
                        **metadata,
                        "conversation_id": row["conversation_id"],
                        "session_id": row["session_id"],
                        "thread_id": row["thread_id"],
                    },
                    "candidate_kind": "checkpoint",
                    "memory_type": "runbooks",
                }
            )

        # --- conversation_state: open loops / pending actions / unresolved ---
        state_rows = conn.execute(
            """
            SELECT id, scope_type, scope_id, latest_user_ask, last_assistant_commitment,
                   open_loops_json, pending_actions_json, unresolved_state_json, metadata_json, updated_at
            FROM conversation_state
            ORDER BY updated_at DESC, id DESC LIMIT ?
            """,
            (limit,),
        ).fetchall()
        for row in state_rows:
            try:
                open_loops = json.loads(row["open_loops_json"] or "[]")
            except Exception:
                open_loops = []
            try:
                pending_actions = json.loads(row["pending_actions_json"] or "[]")
            except Exception:
                pending_actions = []
            try:
                unresolved_items = json.loads(row["unresolved_state_json"] or "[]")
            except Exception:
                unresolved_items = []
            try:
                metadata = json.loads(row["metadata_json"] or "{}")
            except Exception:
                metadata = {}
            content_parts = [f"Continuity scope {row['scope_type']}:{row['scope_id']}"]
            latest_user_ask = str(row["latest_user_ask"] or "").strip()
            if latest_user_ask:
                content_parts.append(f"Latest user ask: {latest_user_ask}")
            last_commitment = str(row["last_assistant_commitment"] or "").strip()
            if last_commitment:
                content_parts.append(f"Assistant commitment: {last_commitment}")
            # At most two entries per bucket keeps the summary line compact.
            for label, payload in (("Open loop", open_loops), ("Pending action", pending_actions), ("Unresolved", unresolved_items)):
                for item in payload[:2]:
                    summary = str((item or {}).get("summary") or "").strip()
                    if summary:
                        content_parts.append(f"{label}: {summary}")
            items.append(
                {
                    "reference": f"conversation_state:{row['id']}",
                    "content": " | ".join(part for part in content_parts if part),
                    "timestamp": row["updated_at"],
                    "source": "continuity",
                    "metadata": metadata,
                    "candidate_kind": "continuity_state",
                    "memory_type": "runbooks",
                }
            )

        # --- conversation_turns: raw recent utterances ---
        turn_rows = conn.execute(
            """
            SELECT id, role, content, session_id, thread_id, conversation_id, message_id, metadata_json, timestamp
            FROM conversation_turns
            ORDER BY id DESC LIMIT ?
            """,
            (limit,),
        ).fetchall()
        for row in turn_rows:
            try:
                metadata = json.loads(row["metadata_json"] or "{}")
            except Exception:
                metadata = {}
            items.append(
                {
                    "reference": f"conversation_turns:{row['id']}",
                    "content": f"{row['role']}: {str(row['content'] or '').strip()}",
                    "timestamp": row["timestamp"],
                    "source": "continuity",
                    "metadata": {
                        **metadata,
                        "conversation_id": row["conversation_id"],
                        "session_id": row["session_id"],
                        "thread_id": row["thread_id"],
                        "message_id": row["message_id"],
                        # Fix: expose the speaker role. _heuristic_ponder reads
                        # metadata["role"] for "turn" candidates and previously
                        # always fell back to "conversation" because this key
                        # was never populated.
                        "role": row["role"],
                    },
                    "candidate_kind": "turn",
                    "memory_type": "reflections",
                }
            )
    except Exception as exc:
        emit_event(LOGFILE, "brain_ponder_continuity_candidates_failed", status="error", error=str(exc))
    finally:
        conn.close()
    return items[:limit]
215
+
216
+
217
+ def _dedupe_candidates(items: List[Dict[str, object]], limit: int) -> List[Dict[str, object]]:
218
+ deduped: List[Dict[str, object]] = []
219
+ seen: set[str] = set()
220
+ for item in items:
221
+ reference = str(item.get("reference") or "")
222
+ content = str(item.get("content") or "").strip()
223
+ key = reference or content.lower()
224
+ if not key or key in seen or not content:
225
+ continue
226
+ seen.add(key)
227
+ deduped.append(item)
228
+ if len(deduped) >= limit:
229
+ break
230
+ return deduped
231
+
232
+
233
+ def _heuristic_summary(text: str, limit: int = 220) -> str:
234
+ collapsed = re.sub(r"\s+", " ", text or "").strip()
235
+ if len(collapsed) <= limit:
236
+ return collapsed
237
+ return f"{collapsed[: limit - 1].rstrip()}…"
238
+
239
+
240
def _heuristic_ponder(record: Dict[str, object]) -> Dict[str, str]:
    """Produce a rule-based insight/recommendation pair for *record*.

    Used as a fallback when model inference returns nothing usable. The pair
    is chosen by candidate_kind (checkpoint / continuity_state / turn /
    anything else), with the record's content folded in as a short summary.
    """
    content = str(record.get("content") or "").strip()
    kind = str(record.get("candidate_kind") or "memory")
    raw_meta = record.get("metadata")
    metadata = raw_meta if isinstance(raw_meta, dict) else {}
    summary = _heuristic_summary(content)

    if kind == "checkpoint":
        return {
            "insight": f"Checkpoint captured active continuity: {summary}",
            "recommendation": "Promote the checkpoint summary into durable reflections and keep linked open loops hydrated at answer time.",
        }
    if kind == "continuity_state":
        return {
            "insight": f"Conversation continuity still carries unresolved context: {summary}",
            "recommendation": "Hydrate this scope before answering so pending actions and open loops stay visible after restarts.",
        }
    if kind == "turn":
        speaker = str(metadata.get("role") or "conversation")
        return {
            "insight": f"Recent {speaker} turn may shape near-term continuity: {summary}",
            "recommendation": "Retain the turn in short-horizon context and checkpoint if it changes the active branch or next action.",
        }
    return {
        "insight": f"Recent memory worth reinforcing: {summary}",
        "recommendation": "Link the reflection back to its source memory so future retrieval can hydrate it with provenance.",
    }
266
+
267
+
268
def _parse_structured_output(output: str) -> Dict[str, str]:
    """Parse model output into an insight/recommendation pair (each <=280 chars).

    Lines labelled "Insight:" / "Recommendation:" win; otherwise the first two
    non-empty lines (with any leading insight/recommendation/lesson label
    stripped) are used as fallbacks. Lines are stripped before matching so
    indented or padded model output still parses; the original implementation
    only matched labels at column zero and scanned the text twice.
    """
    insight = ""
    recommendation = ""
    cleaned: List[str] = []
    for raw_line in output.splitlines():
        line = raw_line.strip()
        lowered = line.lower()
        if lowered.startswith("insight:"):
            insight = line.split(":", 1)[-1].strip()
        elif lowered.startswith("recommendation:"):
            recommendation = line.split(":", 1)[-1].strip()
        stripped = _SUMMARY_PREFIX_RE.sub("", line).strip()
        if stripped:
            cleaned.append(stripped)
    if not insight and cleaned:
        insight = cleaned[0]
    if not recommendation and len(cleaned) > 1:
        recommendation = cleaned[1]
    return {"insight": insight[:280], "recommendation": recommendation[:280]}
286
+
287
+
288
def _ponder_with_model(record: Dict[str, object]) -> Dict[str, str]:
    """Ask the ponder model for an insight/recommendation about *record*.

    Empty-content records yield empty strings. When the model output is
    missing either field, the heuristic ponderer fills the gap.
    """
    content = str(record.get("content") or "").strip()
    if not content:
        return {"insight": "", "recommendation": ""}

    prompt = (
        "You are the memory pondering engine.\n"
        "Given this memory/context item, return: (1) a concise insight, (2) a concrete recommendation.\n"
        "Keep both actionable and under 220 characters each.\n\n"
        f"Reference: {record.get('reference')}\n"
        f"Kind: {record.get('candidate_kind') or 'memory'}\n"
        f"Memory: {content}\n\n"
        "Format:\nInsight: ...\nRecommendation: ..."
    )
    raw_output = str(_infer_with_timeout(prompt).get("output") or "").strip()
    parsed = _parse_structured_output(raw_output)
    insight = parsed.get("insight")
    recommendation = parsed.get("recommendation")
    if insight and recommendation:
        return parsed

    fallback = _heuristic_ponder(record)
    return {
        "insight": insight or fallback["insight"],
        "recommendation": recommendation or fallback["recommendation"],
    }
311
+
312
+
313
def _extract_lesson(record: Dict[str, object]) -> str | None:
    """Mine a single actionable lesson from *record*, or None when absent.

    The model may decline with "NONE"; any leading insight/recommendation/
    lesson label is stripped and the result is capped at 240 characters.
    """
    content = str(record.get("content") or "").strip()
    if not content:
        return None
    prompt = (
        "Extract a single actionable lesson learned from this memory/context item.\n"
        "If there is no clear lesson, reply with NONE. Keep it under 220 characters.\n\n"
        f"Reference: {record.get('reference')}\n"
        f"Memory: {content}\n\n"
        "Lesson:"
    )
    raw = str(_infer_with_timeout(prompt).get("output") or "").strip()
    if not raw or raw.upper().startswith("NONE"):
        return None
    lesson = _SUMMARY_PREFIX_RE.sub("", raw).strip()
    if not lesson:
        return None
    return lesson[:240]
330
+
331
+
332
def _memory_exists(memory_type: str, content: str, metadata: Optional[Dict[str, object]] = None) -> Optional[int]:
    """Return the id of an existing row with identical content, else None.

    Only allowlisted tables are queried. When *metadata* carries a
    source_reference, a row counts as a duplicate only if its stored metadata
    points at the same reference; otherwise the newest content match wins.
    """
    if memory_type not in _WRITABLE_MEMORY_TABLES:
        return None
    conn = store.connect(ensure_schema=False)
    try:
        matches = conn.execute(
            f"SELECT id, metadata_json FROM {memory_type} WHERE content = ? ORDER BY id DESC LIMIT 25",
            (content,),
        ).fetchall()
    except Exception:
        matches = []
    finally:
        conn.close()
    if not matches:
        return None

    wanted_ref = str((metadata or {}).get("source_reference") or "")
    if not wanted_ref:
        # No reference to discriminate on: the newest content match is enough.
        return int(matches[0]["id"])
    for row in matches:
        try:
            stored_meta = json.loads(row["metadata_json"] or "{}")
        except Exception:
            stored_meta = {}
        if str(stored_meta.get("source_reference") or "") == wanted_ref:
            return int(row["id"])
    return None
358
+
359
+
360
def _link_once(source_reference: str, link_type: str, target_reference: str) -> None:
    """Create a memory link unless an identical one already exists.

    No-op when either reference is empty.
    """
    if not (source_reference and target_reference):
        return
    for link in memory_links.get_memory_links(source_reference):
        if link.get("link_type") == link_type and link.get("target_reference") == target_reference:
            return
    memory_links.add_memory_link(source_reference, link_type, target_reference)
367
+
368
+
369
def _store_reflection(summary: str, *, source_reference: str, recommendation: str = "", metadata: Optional[Dict[str, object]] = None) -> str:
    """Persist a ponder reflection idempotently and link it to its source.

    The recommendation (if any) is appended to the content. Provenance
    references inherited from *source_reference* (depth 2) are recorded in the
    metadata. Returns the "reflections:<id>" reference of the stored — or
    pre-existing — row.
    """
    body = summary.strip()
    recommendation_text = recommendation.strip()
    if recommendation_text:
        body = f"{body}\nRecommendation: {recommendation_text}".strip()

    inherited = provenance.collect_source_references(source_reference, depth=2) if source_reference else []
    refs = [ref for ref in inherited if ref]
    if source_reference and source_reference not in refs:
        refs.insert(0, source_reference)

    meta: Dict[str, object] = dict(metadata or {})
    meta.update(
        source_reference=source_reference,
        source_references=refs,
        kind="ponder_reflection",
        derived_via="ponder",
    )

    duplicate_id = _memory_exists("reflections", body, meta)
    if duplicate_id:
        return f"reflections:{duplicate_id}"
    new_id = api.store_memory("reflections", body, source="ponder", metadata=meta)
    reference = f"reflections:{new_id}"
    _link_once(reference, "derived_from", source_reference)
    return reference
392
+
393
+
394
def _store_lesson_once(lesson: str, *, source_reference: str) -> Optional[str]:
    """Persist a mined lesson exactly once and link it back to its source.

    Returns the "lessons:<id>" reference, or None when *lesson* is blank.
    """
    text = lesson.strip()
    if not text:
        return None

    inherited = provenance.collect_source_references(source_reference, depth=2) if source_reference else []
    lesson_meta = {
        "reference": source_reference,
        "source_reference": source_reference,
        "source_references": inherited or ([source_reference] if source_reference else []),
        "kind": "ponder_lesson",
        "derived_via": "ponder",
    }

    duplicate_id = _memory_exists("lessons", text, lesson_meta)
    if duplicate_id:
        return f"lessons:{duplicate_id}"
    new_id = api.store_memory("lessons", text, source="ponder", metadata=lesson_meta)
    reference = f"lessons:{new_id}"
    _link_once(reference, "derived_from", source_reference)
    return reference
413
+
414
+
415
def _candidate_memories(max_items: int) -> List[Dict[str, object]]:
    """Gather recent memory and continuity candidates, deduped to *max_items*."""
    pool: List[Dict[str, object]] = []
    for source_table in ("reflections", "knowledge", "tasks", "runbooks"):
        pool.extend(_load_recent(source_table, max_items))
    pool.extend(_load_continuity_candidates(max_items))
    return _dedupe_candidates(pool, max_items)
421
+
422
+
423
def _flag_enabled(value: object) -> bool:
    """Return True when a config flag reads as enabled ("1"/"true"/"yes", any case)."""
    return str(value).lower() in {"1", "true", "yes"}


def run_ponder_cycle(max_items: int = 5) -> Dict[str, object]:
    """Run one full pondering pass over recent memory and continuity state.

    Steps, each instrumented and individually time-boxed:
      1. surface unresolved state as reflections;
      2. (if OCMEMOG_PONDER_ENABLED) model-ponder candidates into insights;
      3. (if OCMEMOG_LESSON_MINING_ENABLED) mine lessons from reflections and
         checkpoints;
      4. store consolidation clusters as reflections and link their members;
      5. integrity check, targeted repair, and vector-index rebuild as needed.

    Returns a summary dict with the raw inputs plus everything produced.
    """
    emit_event(LOGFILE, "brain_ponder_cycle_start", status="ok")

    unresolved = _run_with_timeout(
        "unresolved",
        lambda: unresolved_state.list_unresolved_state(limit=max_items),
        5.0,
        [],
    )
    candidates = _candidate_memories(max_items)
    consolidation = _run_with_timeout(
        "consolidation",
        lambda: memory_consolidation.consolidate_memories(candidates, max_clusters=max_items),
        15.0,
        {"consolidated": [], "reinforcement": []},
    )

    # 1) Keep unresolved items visible by reflecting on them.
    insights: List[Dict[str, object]] = []
    for item in unresolved[:max_items]:
        summary = str(item.get("summary") or "").strip()
        if not summary:
            continue
        source_reference = str(item.get("reference") or "") or str(item.get("target_reference") or "")
        reflection_ref = _store_reflection(
            f"Unresolved state remains active: {summary}",
            source_reference=source_reference or "unresolved_state",
            recommendation="Resolve or checkpoint this item so it stays visible during future hydration.",
            metadata={"state_type": item.get("state_type"), "kind": "unresolved"},
        )
        insights.append(
            {
                "type": "unresolved",
                "summary": summary,
                "reference": source_reference,
                "reflection_reference": reflection_ref,
            }
        )
        emit_event(LOGFILE, "brain_ponder_insight_generated", status="ok", kind="unresolved")

    # 2) Model-generated insights for each candidate (optional).
    if _flag_enabled(config.OCMEMOG_PONDER_ENABLED):
        for item in candidates:
            content = str(item.get("content") or "").strip()
            if not content:
                continue
            model_result = _ponder_with_model(item)
            insight = str(model_result.get("insight") or "").strip()
            recommendation = str(model_result.get("recommendation") or "").strip()
            if not insight:
                continue
            reference = str(item.get("reference") or "")
            reflection_ref = _store_reflection(
                insight,
                source_reference=reference or "ponder",
                recommendation=recommendation,
                metadata={
                    "candidate_kind": item.get("candidate_kind"),
                    "memory_type": item.get("memory_type"),
                },
            )
            insights.append(
                {
                    "type": str(item.get("candidate_kind") or "memory"),
                    "reference": reference,
                    "summary": insight,
                    "recommendation": recommendation,
                    "reflection_reference": reflection_ref,
                }
            )
            emit_event(LOGFILE, "brain_ponder_insight_generated", status="ok", kind=str(item.get("candidate_kind") or "memory"))

    # 3) Lesson mining from reflections and checkpoints (optional).
    lessons: List[Dict[str, object]] = []
    if _flag_enabled(config.OCMEMOG_LESSON_MINING_ENABLED):
        for item in candidates:
            reference = str(item.get("reference") or "")
            if not reference:
                continue
            if not (reference.startswith("reflections:") or reference.startswith("conversation_checkpoints:")):
                continue
            lesson = _extract_lesson(item)
            if not lesson:
                continue
            lesson_ref = _store_lesson_once(lesson, source_reference=reference)
            lessons.append({"reference": reference, "lesson": lesson, "lesson_reference": lesson_ref})
            emit_event(LOGFILE, "brain_ponder_lesson_generated", status="ok")

    # 4) Store consolidation clusters as reflections and link members.
    links: List[Dict[str, object]] = []
    for cluster in consolidation.get("consolidated", []):
        summary = str(cluster.get("summary") or "").strip()
        if not summary:
            continue
        cluster_references = cluster.get("references") or []
        # Fix: the previous `cluster.get("references", ["ponder"])[0]` raised
        # IndexError when the key existed but held an empty list — dict.get's
        # default only applies when the key is absent.
        reflection_ref = _store_reflection(
            f"Consolidated pattern: {summary}",
            source_reference=str((cluster_references or ["ponder"])[0]),
            recommendation=f"Review grouped references together ({int(cluster.get('count') or 0)} items).",
            metadata={"kind": "cluster", "cluster_kind": cluster.get("memory_type")},
        )
        for target_reference in cluster_references:
            if isinstance(target_reference, str) and target_reference:
                _link_once(reflection_ref, "conceptual", target_reference)
        links.append(
            {
                "type": "cluster",
                "summary": summary,
                "count": int(cluster.get("count") or 0),
                "references": cluster_references,
                "reflection_reference": reflection_ref,
            }
        )

    # 5) Integrity maintenance: check, targeted repair, vector rebuild.
    maintenance = _run_with_timeout(
        "integrity",
        integrity.run_integrity_check,
        10.0,
        {"issues": []},
    )
    if "vector_orphan" in set(maintenance.get("repairable_issues") or []):
        maintenance["repair"] = _run_with_timeout(
            "integrity_repair",
            integrity.repair_integrity,
            10.0,
            {"ok": False, "repaired": []},
        )
        # Re-check after repairing so the reported issues reflect reality.
        maintenance = _run_with_timeout(
            "integrity_post_repair",
            integrity.run_integrity_check,
            10.0,
            maintenance,
        )
    if any(item.startswith("vector_missing") or item.startswith("vector_orphan") for item in maintenance.get("issues", [])):
        rebuild_count = _run_with_timeout(
            "vector_rebuild",
            vector_index.rebuild_vector_index,
            30.0,
            0,
        )
        maintenance["vector_rebuild"] = rebuild_count

    emit_event(
        LOGFILE,
        "brain_ponder_cycle_complete",
        status="ok",
        candidates=len(candidates),
        insights=len(insights),
        lessons=len(lessons),
        links=len(links),
    )
    return {
        "unresolved": unresolved,
        "candidates": candidates,
        "insights": insights,
        "lessons": lessons,
        "links": links,
        "maintenance": maintenance,
        "consolidation": consolidation,
    }