@simbimbo/memory-ocmemog 0.1.11 → 0.1.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +30 -0
- package/README.md +83 -18
- package/brain/runtime/__init__.py +2 -12
- package/brain/runtime/config.py +1 -24
- package/brain/runtime/inference.py +1 -151
- package/brain/runtime/instrumentation.py +1 -15
- package/brain/runtime/memory/__init__.py +3 -13
- package/brain/runtime/memory/api.py +1 -1219
- package/brain/runtime/memory/candidate.py +1 -185
- package/brain/runtime/memory/conversation_state.py +1 -1823
- package/brain/runtime/memory/distill.py +1 -344
- package/brain/runtime/memory/embedding_engine.py +1 -92
- package/brain/runtime/memory/freshness.py +1 -112
- package/brain/runtime/memory/health.py +1 -40
- package/brain/runtime/memory/integrity.py +1 -186
- package/brain/runtime/memory/memory_consolidation.py +1 -58
- package/brain/runtime/memory/memory_links.py +1 -107
- package/brain/runtime/memory/memory_salience.py +1 -233
- package/brain/runtime/memory/memory_synthesis.py +1 -31
- package/brain/runtime/memory/memory_taxonomy.py +1 -33
- package/brain/runtime/memory/pondering_engine.py +1 -654
- package/brain/runtime/memory/promote.py +1 -277
- package/brain/runtime/memory/provenance.py +1 -406
- package/brain/runtime/memory/reinforcement.py +1 -71
- package/brain/runtime/memory/retrieval.py +1 -210
- package/brain/runtime/memory/semantic_search.py +1 -64
- package/brain/runtime/memory/store.py +1 -429
- package/brain/runtime/memory/unresolved_state.py +1 -91
- package/brain/runtime/memory/vector_index.py +1 -323
- package/brain/runtime/model_roles.py +1 -9
- package/brain/runtime/model_router.py +1 -22
- package/brain/runtime/providers.py +1 -66
- package/brain/runtime/security/redaction.py +1 -12
- package/brain/runtime/state_store.py +1 -23
- package/brain/runtime/storage_paths.py +1 -39
- package/docs/architecture/memory.md +20 -24
- package/docs/release-checklist.md +19 -6
- package/docs/usage.md +33 -17
- package/index.ts +8 -1
- package/ocmemog/__init__.py +11 -0
- package/ocmemog/doctor.py +1255 -0
- package/ocmemog/runtime/__init__.py +18 -0
- package/ocmemog/runtime/_compat_bridge.py +28 -0
- package/ocmemog/runtime/config.py +34 -0
- package/ocmemog/runtime/identity.py +115 -0
- package/ocmemog/runtime/inference.py +163 -0
- package/ocmemog/runtime/instrumentation.py +20 -0
- package/ocmemog/runtime/memory/__init__.py +91 -0
- package/ocmemog/runtime/memory/api.py +1594 -0
- package/ocmemog/runtime/memory/candidate.py +192 -0
- package/ocmemog/runtime/memory/conversation_state.py +1831 -0
- package/ocmemog/runtime/memory/distill.py +282 -0
- package/ocmemog/runtime/memory/embedding_engine.py +151 -0
- package/ocmemog/runtime/memory/freshness.py +114 -0
- package/ocmemog/runtime/memory/health.py +93 -0
- package/ocmemog/runtime/memory/integrity.py +208 -0
- package/ocmemog/runtime/memory/memory_consolidation.py +60 -0
- package/ocmemog/runtime/memory/memory_links.py +109 -0
- package/ocmemog/runtime/memory/memory_salience.py +235 -0
- package/ocmemog/runtime/memory/memory_synthesis.py +33 -0
- package/ocmemog/runtime/memory/memory_taxonomy.py +35 -0
- package/ocmemog/runtime/memory/pondering_engine.py +681 -0
- package/ocmemog/runtime/memory/promote.py +279 -0
- package/ocmemog/runtime/memory/provenance.py +408 -0
- package/ocmemog/runtime/memory/reinforcement.py +73 -0
- package/ocmemog/runtime/memory/retrieval.py +224 -0
- package/ocmemog/runtime/memory/semantic_search.py +66 -0
- package/ocmemog/runtime/memory/store.py +433 -0
- package/ocmemog/runtime/memory/unresolved_state.py +93 -0
- package/ocmemog/runtime/memory/vector_index.py +411 -0
- package/ocmemog/runtime/model_roles.py +15 -0
- package/ocmemog/runtime/model_router.py +28 -0
- package/ocmemog/runtime/providers.py +78 -0
- package/ocmemog/runtime/roles.py +92 -0
- package/ocmemog/runtime/security/__init__.py +8 -0
- package/ocmemog/runtime/security/redaction.py +17 -0
- package/ocmemog/runtime/state_store.py +32 -0
- package/ocmemog/runtime/storage_paths.py +70 -0
- package/ocmemog/sidecar/app.py +421 -60
- package/ocmemog/sidecar/compat.py +50 -13
- package/ocmemog/sidecar/transcript_watcher.py +327 -242
- package/openclaw.plugin.json +4 -0
- package/package.json +1 -1
- package/scripts/ocmemog-backfill-vectors.py +5 -3
- package/scripts/ocmemog-continuity-benchmark.py +1 -1
- package/scripts/ocmemog-demo.py +1 -1
- package/scripts/ocmemog-doctor.py +15 -0
- package/scripts/ocmemog-install.sh +29 -7
- package/scripts/ocmemog-integrated-proof.py +374 -0
- package/scripts/ocmemog-reindex-vectors.py +5 -3
- package/scripts/ocmemog-release-check.sh +330 -0
- package/scripts/ocmemog-sidecar.sh +4 -2
- package/scripts/ocmemog-test-rig.py +5 -3
- package/brain/runtime/memory/artifacts.py +0 -33
- package/brain/runtime/memory/context_builder.py +0 -112
- package/brain/runtime/memory/interaction_memory.py +0 -57
- package/brain/runtime/memory/memory_gate.py +0 -38
- package/brain/runtime/memory/memory_graph.py +0 -54
- package/brain/runtime/memory/person_identity.py +0 -83
- package/brain/runtime/memory/person_memory.py +0 -138
- package/brain/runtime/memory/sentiment_memory.py +0 -67
- package/brain/runtime/memory/tool_catalog.py +0 -68
|
@@ -1,1221 +1,3 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
import
|
|
4
|
-
import os
|
|
5
|
-
from typing import List, Dict, Any, Optional
|
|
6
|
-
|
|
7
|
-
from brain.runtime.memory import provenance, store
|
|
8
|
-
from brain.runtime import inference
|
|
9
|
-
from brain.runtime.instrumentation import emit_event
|
|
10
|
-
from brain.runtime.security import redaction
|
|
11
|
-
|
|
12
|
-
_REVIEW_KIND_METADATA: Dict[str, Dict[str, str]] = {
|
|
13
|
-
"duplicate_candidate": {
|
|
14
|
-
"relationship": "duplicate_of",
|
|
15
|
-
"label": "Duplicate candidate",
|
|
16
|
-
"approve_label": "Approve duplicate merge",
|
|
17
|
-
"reject_label": "Reject duplicate merge",
|
|
18
|
-
},
|
|
19
|
-
"contradiction_candidate": {
|
|
20
|
-
"relationship": "contradicts",
|
|
21
|
-
"label": "Contradiction candidate",
|
|
22
|
-
"approve_label": "Mark as contradiction",
|
|
23
|
-
"reject_label": "Dismiss contradiction",
|
|
24
|
-
},
|
|
25
|
-
"supersession_recommendation": {
|
|
26
|
-
"relationship": "supersedes",
|
|
27
|
-
"label": "Supersession recommendation",
|
|
28
|
-
"approve_label": "Approve supersession",
|
|
29
|
-
"reject_label": "Dismiss supersession",
|
|
30
|
-
},
|
|
31
|
-
}
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
def _sanitize(text: str) -> str:
|
|
35
|
-
redacted, _ = redaction.redact_text(text)
|
|
36
|
-
return redacted
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
def _emit(event: str) -> None:
|
|
40
|
-
emit_event(store.state_store.reports_dir() / "brain_memory.log.jsonl", event, status="ok")
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
def record_event(event_type: str, payload: str, *, source: str | None = None) -> None:
|
|
44
|
-
payload = _sanitize(payload)
|
|
45
|
-
details_json = json.dumps({"payload": payload})
|
|
46
|
-
def _write() -> None:
|
|
47
|
-
conn = store.connect()
|
|
48
|
-
try:
|
|
49
|
-
conn.execute(
|
|
50
|
-
"INSERT INTO memory_events (event_type, source, details_json, schema_version) VALUES (?, ?, ?, ?)",
|
|
51
|
-
(event_type, source, details_json, store.SCHEMA_VERSION),
|
|
52
|
-
)
|
|
53
|
-
conn.commit()
|
|
54
|
-
finally:
|
|
55
|
-
conn.close()
|
|
56
|
-
|
|
57
|
-
store.submit_write(_write, timeout=30.0)
|
|
58
|
-
_emit("record_event")
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
def record_task(task_id: str, status: str, *, source: str | None = None) -> None:
|
|
62
|
-
status = _sanitize(status)
|
|
63
|
-
metadata_json = json.dumps({"task_id": task_id})
|
|
64
|
-
def _write() -> None:
|
|
65
|
-
conn = store.connect()
|
|
66
|
-
try:
|
|
67
|
-
conn.execute(
|
|
68
|
-
"INSERT INTO tasks (source, confidence, metadata_json, content, schema_version) VALUES (?, ?, ?, ?, ?)",
|
|
69
|
-
(source, 1.0, metadata_json, status, store.SCHEMA_VERSION),
|
|
70
|
-
)
|
|
71
|
-
conn.commit()
|
|
72
|
-
finally:
|
|
73
|
-
conn.close()
|
|
74
|
-
|
|
75
|
-
store.submit_write(_write, timeout=30.0)
|
|
76
|
-
_emit("record_task")
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
def _recommend_supersession_from_contradictions(
|
|
80
|
-
reference: str,
|
|
81
|
-
*,
|
|
82
|
-
contradiction_candidates: List[Dict[str, Any]],
|
|
83
|
-
) -> Dict[str, Any]:
|
|
84
|
-
recommendation = {
|
|
85
|
-
"recommended": False,
|
|
86
|
-
"target_reference": None,
|
|
87
|
-
"reason": "no_candidates",
|
|
88
|
-
"signal": 0.0,
|
|
89
|
-
"auto_applied": False,
|
|
90
|
-
}
|
|
91
|
-
if not contradiction_candidates:
|
|
92
|
-
return recommendation
|
|
93
|
-
|
|
94
|
-
signal_threshold = float(os.environ.get("OCMEMOG_GOVERNANCE_SUPERSESSION_RECOMMEND_SIGNAL", "0.9") or 0.9)
|
|
95
|
-
model_conf_threshold = float(os.environ.get("OCMEMOG_GOVERNANCE_SUPERSESSION_MODEL_CONFIDENCE", "0.9") or 0.9)
|
|
96
|
-
ranked = sorted(contradiction_candidates, key=lambda item: float(item.get("signal") or 0.0), reverse=True)
|
|
97
|
-
top = ranked[0]
|
|
98
|
-
signal = float(top.get("signal") or 0.0)
|
|
99
|
-
model_hint = top.get("model_hint") if isinstance(top.get("model_hint"), dict) else {}
|
|
100
|
-
model_contradiction = bool(model_hint.get("contradiction"))
|
|
101
|
-
model_confidence = float(model_hint.get("confidence") or 0.0)
|
|
102
|
-
|
|
103
|
-
if signal < signal_threshold:
|
|
104
|
-
recommendation["reason"] = "signal_below_threshold"
|
|
105
|
-
recommendation["signal"] = signal
|
|
106
|
-
return recommendation
|
|
107
|
-
|
|
108
|
-
if model_hint and (not model_contradiction or model_confidence < model_conf_threshold):
|
|
109
|
-
recommendation["reason"] = "model_hint_not_strong_enough"
|
|
110
|
-
recommendation["signal"] = signal
|
|
111
|
-
return recommendation
|
|
112
|
-
|
|
113
|
-
target = str(top.get("reference") or "")
|
|
114
|
-
if not target:
|
|
115
|
-
recommendation["reason"] = "missing_target"
|
|
116
|
-
recommendation["signal"] = signal
|
|
117
|
-
return recommendation
|
|
118
|
-
|
|
119
|
-
recommendation.update({
|
|
120
|
-
"recommended": True,
|
|
121
|
-
"target_reference": target,
|
|
122
|
-
"reason": "high_confidence_contradiction",
|
|
123
|
-
"signal": signal,
|
|
124
|
-
"model_hint": model_hint,
|
|
125
|
-
})
|
|
126
|
-
|
|
127
|
-
return recommendation
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
def _canonicalize_duplicate_target(reference: str) -> str:
|
|
131
|
-
payload = provenance.fetch_reference(reference) or {}
|
|
132
|
-
metadata = payload.get("metadata") or {}
|
|
133
|
-
prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
|
|
134
|
-
canonical = str(prov.get("canonical_reference") or prov.get("duplicate_of") or reference).strip()
|
|
135
|
-
return canonical or reference
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
def _token_signature(text: str) -> frozenset[str]:
|
|
139
|
-
return frozenset(_tokenize(text))
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
def _auto_promote_duplicate_candidate(
|
|
143
|
-
reference: str,
|
|
144
|
-
*,
|
|
145
|
-
duplicate_candidates: List[Dict[str, Any]],
|
|
146
|
-
contradiction_candidates: List[Dict[str, Any]],
|
|
147
|
-
) -> Dict[str, Any]:
|
|
148
|
-
auto_promote_enabled = os.environ.get("OCMEMOG_GOVERNANCE_AUTOPROMOTE", "true").strip().lower() in {"1", "true", "yes"}
|
|
149
|
-
allow_with_contradictions = os.environ.get("OCMEMOG_GOVERNANCE_AUTOPROMOTE_ALLOW_CONTRADICTIONS", "false").strip().lower() in {"1", "true", "yes"}
|
|
150
|
-
duplicate_threshold = float(os.environ.get("OCMEMOG_GOVERNANCE_DUPLICATE_AUTOPROMOTE_SIMILARITY", "0.98") or 0.98)
|
|
151
|
-
duplicate_margin = float(os.environ.get("OCMEMOG_GOVERNANCE_DUPLICATE_AUTOPROMOTE_MARGIN", "0.02") or 0.02)
|
|
152
|
-
require_exact_tokens = os.environ.get("OCMEMOG_GOVERNANCE_DUPLICATE_AUTOPROMOTE_REQUIRE_EXACT_TOKENS", "true").strip().lower() in {"1", "true", "yes"}
|
|
153
|
-
promoted: Dict[str, Any] = {"duplicate_of": None, "promoted": False, "reason": "disabled" if not auto_promote_enabled else "none"}
|
|
154
|
-
|
|
155
|
-
if not auto_promote_enabled:
|
|
156
|
-
return promoted
|
|
157
|
-
|
|
158
|
-
if contradiction_candidates and not allow_with_contradictions:
|
|
159
|
-
promoted["reason"] = "blocked_by_contradiction_candidates"
|
|
160
|
-
return promoted
|
|
161
|
-
|
|
162
|
-
if not duplicate_candidates:
|
|
163
|
-
promoted["reason"] = "no_duplicate_candidates"
|
|
164
|
-
return promoted
|
|
165
|
-
|
|
166
|
-
payload = provenance.fetch_reference(reference) or {}
|
|
167
|
-
reference_content = str(payload.get("content") or "")
|
|
168
|
-
reference_signature = _token_signature(reference_content)
|
|
169
|
-
ranked = sorted(duplicate_candidates, key=lambda item: float(item.get("similarity") or 0.0), reverse=True)
|
|
170
|
-
top = ranked[0]
|
|
171
|
-
similarity = float(top.get("similarity") or 0.0)
|
|
172
|
-
target = _canonicalize_duplicate_target(str(top.get("reference") or ""))
|
|
173
|
-
if not target or target == reference or similarity < duplicate_threshold:
|
|
174
|
-
promoted["reason"] = "similarity_below_threshold"
|
|
175
|
-
return promoted
|
|
176
|
-
|
|
177
|
-
if len(ranked) > 1:
|
|
178
|
-
runner_up = float(ranked[1].get("similarity") or 0.0)
|
|
179
|
-
if similarity - runner_up < duplicate_margin:
|
|
180
|
-
promoted["reason"] = "ambiguous_duplicate_candidates"
|
|
181
|
-
return promoted
|
|
182
|
-
|
|
183
|
-
target_payload = provenance.fetch_reference(target) or {}
|
|
184
|
-
target_content = str(target_payload.get("content") or "")
|
|
185
|
-
if require_exact_tokens and _token_signature(target_content) != reference_signature:
|
|
186
|
-
promoted["reason"] = "token_signature_mismatch"
|
|
187
|
-
return promoted
|
|
188
|
-
|
|
189
|
-
merged = mark_memory_relationship(reference, relationship="duplicate_of", target_reference=target, status="duplicate")
|
|
190
|
-
promoted.update({
|
|
191
|
-
"duplicate_of": target,
|
|
192
|
-
"promoted": merged is not None,
|
|
193
|
-
"reason": "duplicate_high_confidence" if merged is not None else "promotion_failed",
|
|
194
|
-
"similarity": similarity,
|
|
195
|
-
})
|
|
196
|
-
return promoted
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
def _auto_apply_supersession_recommendation(
|
|
200
|
-
reference: str,
|
|
201
|
-
*,
|
|
202
|
-
contradiction_candidates: List[Dict[str, Any]],
|
|
203
|
-
supersession_recommendation: Dict[str, Any],
|
|
204
|
-
) -> Dict[str, Any]:
|
|
205
|
-
recommendation = dict(supersession_recommendation or {})
|
|
206
|
-
if not recommendation:
|
|
207
|
-
return {"recommended": False, "auto_applied": False, "reason": "missing_recommendation", "target_reference": None, "signal": 0.0}
|
|
208
|
-
|
|
209
|
-
auto_apply = os.environ.get("OCMEMOG_GOVERNANCE_AUTOPROMOTE_SUPERSESSION", "false").strip().lower() in {"1", "true", "yes"}
|
|
210
|
-
allow_with_contradictions = os.environ.get("OCMEMOG_GOVERNANCE_AUTOPROMOTE_ALLOW_CONTRADICTIONS", "false").strip().lower() in {"1", "true", "yes"}
|
|
211
|
-
auto_apply_signal = float(os.environ.get("OCMEMOG_GOVERNANCE_SUPERSESSION_AUTOPROMOTE_SIGNAL", "0.97") or 0.97)
|
|
212
|
-
model_conf_threshold = float(os.environ.get("OCMEMOG_GOVERNANCE_SUPERSESSION_AUTOPROMOTE_MODEL_CONFIDENCE", "0.97") or 0.97)
|
|
213
|
-
|
|
214
|
-
recommendation.setdefault("auto_applied", False)
|
|
215
|
-
if not recommendation.get("recommended"):
|
|
216
|
-
recommendation["reason"] = recommendation.get("reason") or "not_recommended"
|
|
217
|
-
return recommendation
|
|
218
|
-
|
|
219
|
-
if not auto_apply:
|
|
220
|
-
return recommendation
|
|
221
|
-
|
|
222
|
-
if contradiction_candidates and not allow_with_contradictions:
|
|
223
|
-
recommendation["reason"] = "blocked_by_contradiction_candidates"
|
|
224
|
-
return recommendation
|
|
225
|
-
|
|
226
|
-
signal = float(recommendation.get("signal") or 0.0)
|
|
227
|
-
if signal < auto_apply_signal:
|
|
228
|
-
recommendation["reason"] = "signal_below_autopromote_threshold"
|
|
229
|
-
return recommendation
|
|
230
|
-
|
|
231
|
-
model_hint = recommendation.get("model_hint") if isinstance(recommendation.get("model_hint"), dict) else {}
|
|
232
|
-
if not model_hint or not model_hint.get("contradiction") or float(model_hint.get("confidence") or 0.0) < model_conf_threshold:
|
|
233
|
-
recommendation["reason"] = "model_hint_below_autopromote_threshold"
|
|
234
|
-
return recommendation
|
|
235
|
-
|
|
236
|
-
target = str(recommendation.get("target_reference") or "").strip()
|
|
237
|
-
if not target or target == reference:
|
|
238
|
-
recommendation["reason"] = "missing_target"
|
|
239
|
-
return recommendation
|
|
240
|
-
|
|
241
|
-
merged = mark_memory_relationship(reference, relationship="supersedes", target_reference=target, status="active")
|
|
242
|
-
recommendation["auto_applied"] = merged is not None
|
|
243
|
-
recommendation["reason"] = "auto_applied_supersession" if merged is not None else "auto_apply_failed"
|
|
244
|
-
return recommendation
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
def _auto_attach_governance_candidates(reference: str) -> Dict[str, Any]:
|
|
248
|
-
duplicate_candidates = find_duplicate_candidates(reference, limit=5, min_similarity=0.72)
|
|
249
|
-
contradiction_candidates = find_contradiction_candidates(reference, limit=5, min_signal=0.55, use_model=True)
|
|
250
|
-
supersession_recommendation = _recommend_supersession_from_contradictions(
|
|
251
|
-
reference,
|
|
252
|
-
contradiction_candidates=contradiction_candidates,
|
|
253
|
-
)
|
|
254
|
-
auto_promotion = _auto_promote_duplicate_candidate(
|
|
255
|
-
reference,
|
|
256
|
-
duplicate_candidates=duplicate_candidates,
|
|
257
|
-
contradiction_candidates=contradiction_candidates,
|
|
258
|
-
)
|
|
259
|
-
supersession_recommendation = _auto_apply_supersession_recommendation(
|
|
260
|
-
reference,
|
|
261
|
-
contradiction_candidates=contradiction_candidates,
|
|
262
|
-
supersession_recommendation=supersession_recommendation,
|
|
263
|
-
)
|
|
264
|
-
payload = {
|
|
265
|
-
"duplicate_candidates": [item.get("reference") for item in duplicate_candidates if item.get("reference")],
|
|
266
|
-
"contradiction_candidates": [item.get("reference") for item in contradiction_candidates if item.get("reference")],
|
|
267
|
-
"auto_promotion": auto_promotion,
|
|
268
|
-
"supersession_recommendation": supersession_recommendation,
|
|
269
|
-
}
|
|
270
|
-
provenance.update_memory_metadata(reference, payload)
|
|
271
|
-
emit_event(
|
|
272
|
-
store.state_store.reports_dir() / "brain_memory.log.jsonl",
|
|
273
|
-
"store_memory_governance_candidates",
|
|
274
|
-
status="ok",
|
|
275
|
-
reference=reference,
|
|
276
|
-
duplicates=len(payload["duplicate_candidates"]),
|
|
277
|
-
contradictions=len(payload["contradiction_candidates"]),
|
|
278
|
-
auto_promoted=bool(auto_promotion.get("promoted")),
|
|
279
|
-
auto_promotion_reason=str(auto_promotion.get("reason") or "none"),
|
|
280
|
-
supersession_recommended=bool(supersession_recommendation.get("recommended")),
|
|
281
|
-
supersession_auto_applied=bool(supersession_recommendation.get("auto_applied")),
|
|
282
|
-
supersession_reason=str(supersession_recommendation.get("reason") or "none"),
|
|
283
|
-
)
|
|
284
|
-
return payload
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
def store_memory(
|
|
288
|
-
memory_type: str,
|
|
289
|
-
content: str,
|
|
290
|
-
*,
|
|
291
|
-
source: str | None = None,
|
|
292
|
-
metadata: Dict[str, Any] | None = None,
|
|
293
|
-
timestamp: str | None = None,
|
|
294
|
-
) -> int:
|
|
295
|
-
content = _sanitize(content)
|
|
296
|
-
table = memory_type.strip().lower() if memory_type else "knowledge"
|
|
297
|
-
allowed = set(store.MEMORY_TABLES)
|
|
298
|
-
if table not in allowed:
|
|
299
|
-
table = "knowledge"
|
|
300
|
-
normalized_metadata = provenance.normalize_metadata(metadata, source=source)
|
|
301
|
-
|
|
302
|
-
def _write() -> int:
|
|
303
|
-
conn = store.connect()
|
|
304
|
-
try:
|
|
305
|
-
if timestamp:
|
|
306
|
-
cur = conn.execute(
|
|
307
|
-
f"INSERT INTO {table} (source, confidence, metadata_json, content, schema_version, timestamp) VALUES (?, ?, ?, ?, ?, ?)",
|
|
308
|
-
(source, 1.0, json.dumps(normalized_metadata, ensure_ascii=False), content, store.SCHEMA_VERSION, timestamp),
|
|
309
|
-
)
|
|
310
|
-
else:
|
|
311
|
-
cur = conn.execute(
|
|
312
|
-
f"INSERT INTO {table} (source, confidence, metadata_json, content, schema_version) VALUES (?, ?, ?, ?, ?)",
|
|
313
|
-
(source, 1.0, json.dumps(normalized_metadata, ensure_ascii=False), content, store.SCHEMA_VERSION),
|
|
314
|
-
)
|
|
315
|
-
conn.commit()
|
|
316
|
-
return int(cur.lastrowid)
|
|
317
|
-
finally:
|
|
318
|
-
conn.close()
|
|
319
|
-
|
|
320
|
-
last_row_id = store.submit_write(_write, timeout=30.0)
|
|
321
|
-
reference = f"{table}:{last_row_id}"
|
|
322
|
-
provenance.apply_links(reference, normalized_metadata)
|
|
323
|
-
try:
|
|
324
|
-
from brain.runtime.memory import vector_index
|
|
325
|
-
|
|
326
|
-
vector_index.insert_memory(last_row_id, content, 1.0, source_type=table)
|
|
327
|
-
except Exception as exc:
|
|
328
|
-
emit_event(store.state_store.reports_dir() / "brain_memory.log.jsonl", "store_memory_index_failed", status="error", error=str(exc), memory_type=table)
|
|
329
|
-
try:
|
|
330
|
-
_auto_attach_governance_candidates(reference)
|
|
331
|
-
except Exception as exc:
|
|
332
|
-
emit_event(store.state_store.reports_dir() / "brain_memory.log.jsonl", "store_memory_governance_failed", status="error", error=str(exc), reference=reference)
|
|
333
|
-
_emit("store_memory")
|
|
334
|
-
return last_row_id
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
def record_reinforcement(task_id: str, outcome: str, note: str, *, source_module: str | None = None) -> None:
|
|
338
|
-
outcome = _sanitize(outcome)
|
|
339
|
-
note = _sanitize(note)
|
|
340
|
-
memory_reference = f"reinforcement:{task_id or 'unknown'}:{source_module or 'unspecified'}"
|
|
341
|
-
def _write() -> None:
|
|
342
|
-
conn = store.connect()
|
|
343
|
-
try:
|
|
344
|
-
conn.execute(
|
|
345
|
-
"INSERT INTO experiences (task_id, outcome, reward_score, confidence, memory_reference, experience_type, source_module, schema_version) "
|
|
346
|
-
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
|
|
347
|
-
(task_id, outcome, None, 1.0, memory_reference, "reinforcement", source_module, store.SCHEMA_VERSION),
|
|
348
|
-
)
|
|
349
|
-
conn.execute(
|
|
350
|
-
"INSERT INTO memory_events (event_type, source, details_json, schema_version) VALUES (?, ?, ?, ?)",
|
|
351
|
-
("reinforcement_note", source_module, json.dumps({"task_id": task_id, "note": note, "memory_reference": memory_reference}), store.SCHEMA_VERSION),
|
|
352
|
-
)
|
|
353
|
-
conn.commit()
|
|
354
|
-
finally:
|
|
355
|
-
conn.close()
|
|
356
|
-
|
|
357
|
-
store.submit_write(_write, timeout=30.0)
|
|
358
|
-
_emit("record_reinforcement")
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
def _tokenize(text: str) -> List[str]:
|
|
362
|
-
return [token for token in "".join(ch.lower() if ch.isalnum() else " " for ch in (text or "")).split() if token]
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
def _similarity(left: str, right: str) -> float:
|
|
366
|
-
left_tokens = set(_tokenize(left))
|
|
367
|
-
right_tokens = set(_tokenize(right))
|
|
368
|
-
if not left_tokens or not right_tokens:
|
|
369
|
-
return 0.0
|
|
370
|
-
overlap = len(left_tokens & right_tokens)
|
|
371
|
-
union = len(left_tokens | right_tokens)
|
|
372
|
-
return round(overlap / max(1, union), 3)
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
def _extract_literals(text: str) -> List[str]:
|
|
376
|
-
import re
|
|
377
|
-
patterns = [
|
|
378
|
-
r"\b\d{2,6}\b",
|
|
379
|
-
r"\b\d{1,3}(?:\.\d{1,3}){3}\b",
|
|
380
|
-
r"\b\+?1?\d{10,11}\b",
|
|
381
|
-
r"\b[a-zA-Z][a-zA-Z0-9_.-]*:[0-9]{2,5}\b",
|
|
382
|
-
]
|
|
383
|
-
hits: List[str] = []
|
|
384
|
-
for pattern in patterns:
|
|
385
|
-
for match in re.findall(pattern, text or ""):
|
|
386
|
-
value = str(match).strip()
|
|
387
|
-
if value and value not in hits:
|
|
388
|
-
hits.append(value)
|
|
389
|
-
return hits
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
def _contradiction_signal(left: str, right: str) -> float:
|
|
393
|
-
left_tokens = set(_tokenize(left))
|
|
394
|
-
right_tokens = set(_tokenize(right))
|
|
395
|
-
literals_left = set(_extract_literals(left))
|
|
396
|
-
literals_right = set(_extract_literals(right))
|
|
397
|
-
shared_context = len((left_tokens & right_tokens) - literals_left - literals_right)
|
|
398
|
-
different_literals = literals_left.symmetric_difference(literals_right)
|
|
399
|
-
lexical_similarity = _similarity(left, right)
|
|
400
|
-
if not literals_left and not literals_right:
|
|
401
|
-
return 0.0
|
|
402
|
-
if literals_left == literals_right:
|
|
403
|
-
return 0.0
|
|
404
|
-
if shared_context < 2 and lexical_similarity < 0.45:
|
|
405
|
-
return 0.0
|
|
406
|
-
base = min(1.0, 0.35 * lexical_similarity + 0.12 * shared_context + 0.3 * min(2, len(different_literals)))
|
|
407
|
-
return round(base, 3)
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
def _model_contradiction_hint(left: str, right: str) -> Optional[Dict[str, Any]]:
|
|
411
|
-
prompt = (
|
|
412
|
-
"You are checking whether two short memory statements likely contradict each other.\n"
|
|
413
|
-
"Return strict JSON with keys: contradiction (true/false), confidence (0..1), rationale (string).\n"
|
|
414
|
-
f"Statement A: {left}\n"
|
|
415
|
-
f"Statement B: {right}\n"
|
|
416
|
-
)
|
|
417
|
-
result = inference.infer(
|
|
418
|
-
prompt,
|
|
419
|
-
provider_name=os.environ.get("OCMEMOG_PONDER_MODEL", "local-openai:qwen2.5-7b-instruct"),
|
|
420
|
-
)
|
|
421
|
-
if result.get("status") != "ok":
|
|
422
|
-
return None
|
|
423
|
-
try:
|
|
424
|
-
parsed = json.loads(result.get("output") or "{}")
|
|
425
|
-
except Exception:
|
|
426
|
-
return None
|
|
427
|
-
if not isinstance(parsed, dict):
|
|
428
|
-
return None
|
|
429
|
-
return {
|
|
430
|
-
"contradiction": bool(parsed.get("contradiction")),
|
|
431
|
-
"confidence": float(parsed.get("confidence") or 0.0),
|
|
432
|
-
"rationale": str(parsed.get("rationale") or "").strip(),
|
|
433
|
-
}
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
def find_duplicate_candidates(
|
|
437
|
-
reference: str,
|
|
438
|
-
*,
|
|
439
|
-
limit: int = 5,
|
|
440
|
-
min_similarity: float = 0.72,
|
|
441
|
-
) -> List[Dict[str, Any]]:
|
|
442
|
-
payload = provenance.fetch_reference(reference) or {}
|
|
443
|
-
table = str(payload.get("table") or payload.get("type") or "")
|
|
444
|
-
content = str(payload.get("content") or "")
|
|
445
|
-
if table not in set(store.MEMORY_TABLES):
|
|
446
|
-
return []
|
|
447
|
-
row_id = payload.get("id")
|
|
448
|
-
conn = store.connect()
|
|
449
|
-
try:
|
|
450
|
-
rows = conn.execute(
|
|
451
|
-
f"SELECT id, content, metadata_json, timestamp FROM {table} WHERE id != ? ORDER BY id DESC LIMIT ?",
|
|
452
|
-
(int(row_id), max(limit * 10, 50)),
|
|
453
|
-
).fetchall()
|
|
454
|
-
finally:
|
|
455
|
-
conn.close()
|
|
456
|
-
|
|
457
|
-
candidates: List[Dict[str, Any]] = []
|
|
458
|
-
for row in rows:
|
|
459
|
-
candidate_ref = f"{table}:{row['id'] if isinstance(row, dict) else row[0]}"
|
|
460
|
-
candidate_content = row["content"] if isinstance(row, dict) else row[1]
|
|
461
|
-
score = _similarity(content, candidate_content)
|
|
462
|
-
if score < min_similarity:
|
|
463
|
-
continue
|
|
464
|
-
meta_raw = row["metadata_json"] if isinstance(row, dict) else row[2]
|
|
465
|
-
try:
|
|
466
|
-
metadata = json.loads(meta_raw or "{}")
|
|
467
|
-
except Exception:
|
|
468
|
-
metadata = {}
|
|
469
|
-
preview = provenance.preview_from_metadata(metadata)
|
|
470
|
-
candidates.append({
|
|
471
|
-
"reference": candidate_ref,
|
|
472
|
-
"content": candidate_content,
|
|
473
|
-
"similarity": score,
|
|
474
|
-
"timestamp": row["timestamp"] if isinstance(row, dict) else row[3],
|
|
475
|
-
"provenance_preview": preview,
|
|
476
|
-
})
|
|
477
|
-
|
|
478
|
-
candidates.sort(key=lambda item: item["similarity"], reverse=True)
|
|
479
|
-
top = candidates[:limit]
|
|
480
|
-
if top:
|
|
481
|
-
provenance.force_update_memory_metadata(reference, {"duplicate_candidates": [item["reference"] for item in top]})
|
|
482
|
-
_emit("find_duplicate_candidates")
|
|
483
|
-
return top
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
def find_contradiction_candidates(
|
|
487
|
-
reference: str,
|
|
488
|
-
*,
|
|
489
|
-
limit: int = 5,
|
|
490
|
-
min_signal: float = 0.55,
|
|
491
|
-
use_model: bool = True,
|
|
492
|
-
) -> List[Dict[str, Any]]:
|
|
493
|
-
payload = provenance.fetch_reference(reference) or {}
|
|
494
|
-
table = str(payload.get("table") or payload.get("type") or "")
|
|
495
|
-
content = str(payload.get("content") or "")
|
|
496
|
-
if table not in set(store.MEMORY_TABLES):
|
|
497
|
-
return []
|
|
498
|
-
row_id = payload.get("id")
|
|
499
|
-
conn = store.connect()
|
|
500
|
-
try:
|
|
501
|
-
rows = conn.execute(
|
|
502
|
-
f"SELECT id, content, metadata_json, timestamp FROM {table} WHERE id != ? ORDER BY id DESC LIMIT ?",
|
|
503
|
-
(int(row_id), max(limit * 12, 60)),
|
|
504
|
-
).fetchall()
|
|
505
|
-
finally:
|
|
506
|
-
conn.close()
|
|
507
|
-
|
|
508
|
-
candidates: List[Dict[str, Any]] = []
|
|
509
|
-
for row in rows:
|
|
510
|
-
candidate_ref = f"{table}:{row['id'] if isinstance(row, dict) else row[0]}"
|
|
511
|
-
candidate_content = row["content"] if isinstance(row, dict) else row[1]
|
|
512
|
-
signal = _contradiction_signal(content, candidate_content)
|
|
513
|
-
if signal < min_signal:
|
|
514
|
-
continue
|
|
515
|
-
meta_raw = row["metadata_json"] if isinstance(row, dict) else row[2]
|
|
516
|
-
try:
|
|
517
|
-
metadata = json.loads(meta_raw or "{}")
|
|
518
|
-
except Exception:
|
|
519
|
-
metadata = {}
|
|
520
|
-
preview = provenance.preview_from_metadata(metadata)
|
|
521
|
-
item: Dict[str, Any] = {
|
|
522
|
-
"reference": candidate_ref,
|
|
523
|
-
"content": candidate_content,
|
|
524
|
-
"signal": signal,
|
|
525
|
-
"timestamp": row["timestamp"] if isinstance(row, dict) else row[3],
|
|
526
|
-
"provenance_preview": preview,
|
|
527
|
-
"literals": _extract_literals(candidate_content),
|
|
528
|
-
}
|
|
529
|
-
if use_model:
|
|
530
|
-
hint = _model_contradiction_hint(content, candidate_content)
|
|
531
|
-
if hint:
|
|
532
|
-
item["model_hint"] = hint
|
|
533
|
-
if not hint.get("contradiction") and signal < 0.8:
|
|
534
|
-
continue
|
|
535
|
-
item["signal"] = round(max(signal, float(hint.get("confidence") or 0.0)), 3)
|
|
536
|
-
candidates.append(item)
|
|
537
|
-
|
|
538
|
-
candidates.sort(key=lambda item: item["signal"], reverse=True)
|
|
539
|
-
top = candidates[:limit]
|
|
540
|
-
if top:
|
|
541
|
-
provenance.force_update_memory_metadata(reference, {"contradicts": [item["reference"] for item in top], "contradiction_status": "candidate", "contradiction_candidates": [item["reference"] for item in top]})
|
|
542
|
-
_emit("find_contradiction_candidates")
|
|
543
|
-
return top
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
def mark_memory_relationship(
    reference: str,
    *,
    relationship: str,
    target_reference: str,
    status: str | None = None,
) -> Dict[str, Any] | None:
    """Record a governance relationship between two memory references.

    Supported relationships are "supersedes", "duplicate_of", and
    "contradicts"; anything else returns None without touching metadata.
    "supersedes" and "contradicts" also update the target reference so both
    sides of the pair agree. Returns the merged metadata of *reference*.
    """
    rel = (relationship or "").strip().lower()
    if rel == "supersedes":
        source_updates: Dict[str, Any] = {
            "supersedes": target_reference,
            "memory_status": status or "active",
            "canonical_reference": reference,
        }
        # Mirror the relationship onto the superseded target.
        provenance.force_update_memory_metadata(
            target_reference,
            {
                "superseded_by": reference,
                "memory_status": "superseded",
                "canonical_reference": reference,
            },
        )
    elif rel == "duplicate_of":
        source_updates = {
            "duplicate_of": target_reference,
            "memory_status": status or "duplicate",
            "canonical_reference": target_reference,
        }
    elif rel == "contradicts":
        source_updates = {
            "contradicts": [target_reference],
            "contradiction_status": status or "contested",
            "memory_status": "contested",
        }
        # Contradictions are symmetric: mark the other side as contested too.
        provenance.force_update_memory_metadata(
            target_reference,
            {
                "contradicts": [reference],
                "contradiction_status": status or "contested",
                "memory_status": "contested",
            },
        )
    else:
        return None
    result = provenance.force_update_memory_metadata(reference, source_updates)
    _emit(f"mark_memory_relationship_{rel}")
    return result
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
def list_governance_candidates(
    *,
    categories: Optional[List[str]] = None,
    limit: int = 50,
) -> List[Dict[str, Any]]:
    """List recent memories that have pending governance candidates.

    Scans the newest rows of each requested table and keeps rows whose
    provenance records duplicate candidates, contradiction candidates, or a
    supersession recommendation. Results are sorted newest-first and capped
    at *limit*. Unknown category names are silently ignored.
    """
    allowed = set(store.MEMORY_TABLES)
    tables = [table for table in (categories or list(allowed)) if table in allowed]
    conn = store.connect()
    try:
        items: List[Dict[str, Any]] = []
        for table in tables:
            # Over-fetch a little so filtering can still fill *limit*.
            rows = conn.execute(
                f"SELECT id, timestamp, content, metadata_json FROM {table} ORDER BY id DESC LIMIT ?",
                (max(limit, 20),),
            ).fetchall()
            for row in rows:
                # Tolerate corrupt metadata instead of aborting the whole
                # scan — consistent with governance_queue's handling.
                try:
                    metadata = json.loads((row["metadata_json"] if isinstance(row, dict) else row[3]) or "{}")
                except Exception:
                    metadata = {}
                prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
                duplicate_candidates = prov.get("duplicate_candidates") or []
                contradiction_candidates = prov.get("contradiction_candidates") or []
                supersession_recommendation = prov.get("supersession_recommendation") or {}
                if not duplicate_candidates and not contradiction_candidates and not supersession_recommendation:
                    continue
                items.append({
                    "reference": f"{table}:{row['id'] if isinstance(row, dict) else row[0]}",
                    "bucket": table,
                    "timestamp": row["timestamp"] if isinstance(row, dict) else row[1],
                    "content": row["content"] if isinstance(row, dict) else row[2],
                    "memory_status": prov.get("memory_status") or metadata.get("memory_status") or "active",
                    "duplicate_candidates": duplicate_candidates,
                    "contradiction_candidates": contradiction_candidates,
                    "supersession_recommendation": supersession_recommendation,
                })
        items.sort(key=lambda item: str(item.get("timestamp") or ""), reverse=True)
        return items[:limit]
    finally:
        conn.close()
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
def _remove_from_list(values: Any, target: str) -> List[str]:
|
|
630
|
-
return [str(item) for item in (values or []) if str(item) and str(item) != target]
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
def _review_item_context(reference: str, *, depth: int = 1) -> Dict[str, Any]:
    """Hydrate *reference* into the flat context dict used by review items."""
    hydrated = provenance.hydrate_reference(reference, depth=depth) or {"reference": reference}
    raw_metadata = hydrated.get("metadata")
    metadata = raw_metadata if isinstance(raw_metadata, dict) else {}
    raw_prov = metadata.get("provenance")
    prov = raw_prov if isinstance(raw_prov, dict) else {}
    status = prov.get("memory_status") or metadata.get("memory_status") or "active"
    preview = hydrated.get("provenance_preview") or provenance.preview_from_metadata(metadata)
    context: Dict[str, Any] = {
        "reference": reference,
        "bucket": hydrated.get("table"),
        "id": hydrated.get("id"),
        "timestamp": hydrated.get("timestamp"),
        "content": hydrated.get("content"),
        "memory_status": status,
        "provenance_preview": preview,
        "metadata": metadata,
        "links": hydrated.get("links") or [],
        "backlinks": hydrated.get("backlinks") or [],
    }
    return context
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
def _review_item_summary(kind: str, reference: str, target_reference: str) -> str:
|
|
652
|
-
if kind == "duplicate_candidate":
|
|
653
|
-
return f"{reference} may duplicate {target_reference}"
|
|
654
|
-
if kind == "contradiction_candidate":
|
|
655
|
-
return f"{reference} may contradict {target_reference}"
|
|
656
|
-
if kind == "supersession_recommendation":
|
|
657
|
-
return f"{reference} may supersede {target_reference}"
|
|
658
|
-
return f"{reference} requires review against {target_reference}"
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
def _review_actions(kind: str, relationship: str) -> List[Dict[str, Any]]:
    """Return the approve/reject action descriptors for a review item."""
    meta = _REVIEW_KIND_METADATA.get(kind, {})
    specs = (
        ("approve", True, "approve_label", "Approve"),
        ("reject", False, "reject_label", "Reject"),
    )
    actions: List[Dict[str, Any]] = []
    for decision, approved, label_key, fallback in specs:
        actions.append({
            "decision": decision,
            "approved": approved,
            "relationship": relationship,
            # Kind-specific label when configured, generic one otherwise.
            "label": meta.get(label_key) or fallback,
        })
    return actions
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
def _relationship_for_review(kind: str | None = None, relationship: str | None = None) -> str:
    """Resolve the relationship name, preferring an explicit value over the kind mapping."""
    explicit = (relationship or "").strip().lower()
    if explicit:
        return explicit
    kind_key = (kind or "").strip().lower()
    meta = _REVIEW_KIND_METADATA.get(kind_key, {})
    return meta.get("relationship", "")
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
def list_governance_review_items(
    *,
    categories: Optional[List[str]] = None,
    limit: int = 100,
    context_depth: int = 1,
) -> List[Dict[str, Any]]:
    """Shape the raw governance queue into reviewer-facing items.

    Each item carries a label, suggested approve/reject actions, and
    hydrated source/target context so a caller can render an actionable
    decision without further lookups. Queue entries missing a reference,
    target, or mappable relationship are silently dropped.
    """
    items = governance_queue(categories=categories, limit=limit)
    review_items: List[Dict[str, Any]] = []
    for item in items:
        kind = str(item.get("kind") or "")
        relationship = _relationship_for_review(kind=kind)
        reference = str(item.get("reference") or "")
        target_reference = str(item.get("target_reference") or "")
        # Skip entries that cannot be turned into an actionable decision.
        if not reference or not target_reference or not relationship:
            continue
        review_items.append({
            # Deterministic id so the same pair is recognizable across calls.
            "review_id": f"{kind}:{reference}->{target_reference}",
            "kind": kind,
            "kind_label": _REVIEW_KIND_METADATA.get(kind, {}).get("label") or kind.replace("_", " "),
            "relationship": relationship,
            "priority": int(item.get("priority") or 0),
            "timestamp": item.get("timestamp"),
            "bucket": item.get("bucket"),
            "signal": float(item.get("signal") or 0.0),
            "reason": item.get("reason"),
            "reference": reference,
            "target_reference": target_reference,
            "summary": _review_item_summary(kind, reference, target_reference),
            "actions": _review_actions(kind, relationship),
            # Hydrated payloads for both sides of the proposed relationship.
            "source": _review_item_context(reference, depth=context_depth),
            "target": _review_item_context(target_reference, depth=context_depth),
        })
    return review_items
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
def apply_governance_decision(
    reference: str,
    *,
    relationship: str,
    target_reference: str,
    approved: bool = True,
) -> Dict[str, Any] | None:
    """Apply (or reject) a governance decision for one reference pair.

    Approved decisions delegate to mark_memory_relationship and then clear
    the matching pending-candidate entry from provenance. Rejections only
    clear the pending candidate (and, for supersession, the recommendation
    and forward pointer) without marking any relationship. Returns the
    merged metadata of *reference*, or None when the relationship is
    unknown or nothing applies.
    """
    relationship = (relationship or "").strip().lower()
    if approved:
        merged = mark_memory_relationship(reference, relationship=relationship, target_reference=target_reference)
        if merged is None:
            # Unknown relationship: nothing was marked, nothing to clean up.
            return None
        updates: Dict[str, Any] = {}
        if relationship == "duplicate_of":
            current = provenance.fetch_reference(reference) or {}
            metadata = current.get("metadata") or {}
            prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
            # Remove the now-resolved target from the pending duplicate list.
            updates["duplicate_candidates"] = _remove_from_list(prov.get("duplicate_candidates"), target_reference)
        elif relationship == "contradicts":
            current = provenance.fetch_reference(reference) or {}
            metadata = current.get("metadata") or {}
            prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
            updates["contradiction_candidates"] = _remove_from_list(prov.get("contradiction_candidates"), target_reference)
        elif relationship == "supersedes":
            # The recommendation is consumed once approved.
            updates["supersession_recommendation"] = None
        if updates:
            merged = provenance.force_update_memory_metadata(reference, updates) or merged
        _emit(f"apply_governance_decision_{relationship}_approved")
        return merged

    # Rejection path: drop the pending candidate without marking anything.
    current = provenance.fetch_reference(reference) or {}
    metadata = current.get("metadata") or {}
    prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
    updates: Dict[str, Any] = {}
    if relationship == "duplicate_of":
        updates["duplicate_candidates"] = _remove_from_list(prov.get("duplicate_candidates"), target_reference)
    elif relationship == "contradicts":
        updates["contradiction_candidates"] = _remove_from_list(prov.get("contradiction_candidates"), target_reference)
    elif relationship == "supersedes":
        recommendation = prov.get("supersession_recommendation") if isinstance(prov.get("supersession_recommendation"), dict) else {}
        # Only clear when the stored recommendation targets this pair (or is
        # absent), so an unrelated recommendation survives the rejection.
        if not recommendation or str(recommendation.get("target_reference") or "") == target_reference:
            updates["supersession_recommendation"] = None
            updates["supersedes"] = None
    else:
        return None
    merged = provenance.force_update_memory_metadata(reference, updates)
    _emit(f"apply_governance_decision_{relationship}_{'approved' if approved else 'rejected'}")
    return merged
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
def apply_governance_review_decision(
    reference: str,
    *,
    target_reference: str,
    approved: bool = True,
    kind: str | None = None,
    relationship: str | None = None,
    context_depth: int = 1,
) -> Dict[str, Any] | None:
    """Apply a review decision and return a hydrated result envelope.

    Resolves the relationship from *kind* or *relationship*, delegates to
    apply_governance_decision, and wraps the outcome together with fresh
    source/target context. Returns None when the relationship cannot be
    resolved or the underlying decision did not apply.
    """
    resolved_relationship = _relationship_for_review(kind=kind, relationship=relationship)
    if not resolved_relationship:
        return None
    result = apply_governance_decision(
        reference,
        relationship=resolved_relationship,
        target_reference=target_reference,
        approved=approved,
    )
    if result is None:
        return None
    resolved_kind = (kind or "").strip().lower()
    if not resolved_kind:
        # Reverse-map the relationship back to its review kind.
        resolved_kind = next(
            (
                candidate_kind
                for candidate_kind, meta in _REVIEW_KIND_METADATA.items()
                if meta.get("relationship") == resolved_relationship
            ),
            "",
        )
    return {
        "reference": reference,
        "target_reference": target_reference,
        "approved": bool(approved),
        "kind": resolved_kind or None,
        "relationship": resolved_relationship,
        "result": result,
        "source": _review_item_context(reference, depth=context_depth),
        "target": _review_item_context(target_reference, depth=context_depth),
    }
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
def rollback_governance_decision(
    reference: str,
    *,
    relationship: str,
    target_reference: str,
) -> Dict[str, Any] | None:
    """Undo a previously applied governance relationship.

    Restores the affected side(s) to "active" status and clears the fields
    set by mark_memory_relationship / apply_governance_decision. Returns
    the merged metadata of the primary updated reference, or None for an
    unsupported relationship.
    """
    relationship = (relationship or "").strip().lower()
    if relationship not in {"duplicate_of", "supersedes", "contradicts"}:
        return None

    # Fetched up front; only the "contradicts" branch actually reads ref_prov.
    reference_payload = provenance.fetch_reference(reference) or {}
    ref_meta = reference_payload.get("metadata") or {}
    ref_prov = ref_meta.get("provenance") if isinstance(ref_meta.get("provenance"), dict) else {}

    if relationship == "duplicate_of":
        updates = {
            "duplicate_of": None,
            "memory_status": "active",
            "canonical_reference": None,
        }
        merged = provenance.force_update_memory_metadata(reference, updates)
        _emit("rollback_governance_duplicate_of")
        return merged

    if relationship == "supersedes":
        # Clear the forward pointer on the source, then reactivate the target.
        provenance.force_update_memory_metadata(reference, {"supersedes": None})
        target_updates = {
            "superseded_by": None,
            "memory_status": "active",
        }
        merged = provenance.force_update_memory_metadata(target_reference, target_updates)
        _emit("rollback_governance_supersedes")
        return merged

    if relationship == "contradicts":
        # Remove the pairing from both sides' "contradicts" lists and reset
        # their contested status.
        new_list = _remove_from_list(ref_prov.get("contradicts"), target_reference)
        merged = provenance.force_update_memory_metadata(reference, {
            "contradicts": new_list,
            "contradiction_status": None,
            "memory_status": "active",
        })
        target_payload = provenance.fetch_reference(target_reference) or {}
        target_meta = target_payload.get("metadata") or {}
        target_prov = target_meta.get("provenance") if isinstance(target_meta.get("provenance"), dict) else {}
        target_updates = {
            "contradicts": _remove_from_list(target_prov.get("contradicts"), reference),
            "contradiction_status": None,
            "memory_status": "active",
        }
        provenance.force_update_memory_metadata(target_reference, target_updates)
        _emit("rollback_governance_contradicts")
        return merged

    return None
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
def governance_queue(*, categories: Optional[List[str]] = None, limit: int = 100) -> List[Dict[str, Any]]:
    """Collect pending governance work items across memory tables.

    Scans the newest rows of each requested table for provenance-recorded
    duplicate candidates, contradiction candidates, and supersession
    recommendations, and flattens them into a priority-sorted queue
    (supersession 90 > contradiction 70 > duplicate 40), capped at *limit*.
    """
    allowed = set(store.MEMORY_TABLES)
    # Unknown category names are silently dropped.
    tables = [table for table in (categories or list(allowed)) if table in allowed]
    conn = store.connect()
    try:
        items: List[Dict[str, Any]] = []
        for table in tables:
            # Bounded scan of the most recent rows per table.
            rows = conn.execute(
                f"SELECT id, timestamp, content, metadata_json FROM {table} ORDER BY id DESC LIMIT 3000"
            ).fetchall()
            for row in rows:
                reference = f"{table}:{row['id'] if isinstance(row, dict) else row[0]}"
                timestamp = row["timestamp"] if isinstance(row, dict) else row[1]
                content = row["content"] if isinstance(row, dict) else row[2]
                try:
                    metadata = json.loads((row["metadata_json"] if isinstance(row, dict) else row[3]) or "{}")
                except Exception:
                    metadata = {}  # corrupt metadata yields no queue entries
                prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
                duplicate_candidates = [str(x) for x in (prov.get("duplicate_candidates") or []) if x]
                contradiction_candidates = [str(x) for x in (prov.get("contradiction_candidates") or []) if x]
                supersession_recommendation = prov.get("supersession_recommendation") or {}

                for target in duplicate_candidates:
                    items.append({
                        "reference": reference,
                        "target_reference": target,
                        "kind": "duplicate_candidate",
                        "priority": 40,
                        "timestamp": timestamp,
                        "bucket": table,
                        "content": content,
                    })
                for target in contradiction_candidates:
                    items.append({
                        "reference": reference,
                        "target_reference": target,
                        "kind": "contradiction_candidate",
                        "priority": 70,
                        "timestamp": timestamp,
                        "bucket": table,
                        "content": content,
                    })
                # Supersession is a single structured recommendation, not a list.
                if isinstance(supersession_recommendation, dict) and supersession_recommendation.get("recommended"):
                    items.append({
                        "reference": reference,
                        "target_reference": supersession_recommendation.get("target_reference"),
                        "kind": "supersession_recommendation",
                        "priority": 90,
                        "timestamp": timestamp,
                        "bucket": table,
                        "signal": float(supersession_recommendation.get("signal") or 0.0),
                        "reason": supersession_recommendation.get("reason"),
                        "content": content,
                    })
        # Highest priority first; timestamp breaks ties (newest first).
        items.sort(key=lambda item: (int(item.get("priority") or 0), str(item.get("timestamp") or "")), reverse=True)
        return items[:limit]
    finally:
        conn.close()
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
def _resolve_auto_resolve_policy(profile: str | None = None) -> Dict[str, Any]:
|
|
928
|
-
preset = (profile or os.environ.get("OCMEMOG_GOVERNANCE_AUTORESOLVE_PROFILE", "conservative") or "conservative").strip().lower()
|
|
929
|
-
presets = {
|
|
930
|
-
"conservative": {
|
|
931
|
-
"max_apply": 5,
|
|
932
|
-
"allowed_kinds": {"duplicate_candidate", "supersession_recommendation"},
|
|
933
|
-
"min_supersession_signal": 0.95,
|
|
934
|
-
"allowed_buckets": set(),
|
|
935
|
-
},
|
|
936
|
-
"balanced": {
|
|
937
|
-
"max_apply": 10,
|
|
938
|
-
"allowed_kinds": {"duplicate_candidate", "supersession_recommendation"},
|
|
939
|
-
"min_supersession_signal": 0.9,
|
|
940
|
-
"allowed_buckets": set(),
|
|
941
|
-
},
|
|
942
|
-
"aggressive": {
|
|
943
|
-
"max_apply": 20,
|
|
944
|
-
"allowed_kinds": {"duplicate_candidate", "supersession_recommendation"},
|
|
945
|
-
"min_supersession_signal": 0.85,
|
|
946
|
-
"allowed_buckets": set(),
|
|
947
|
-
},
|
|
948
|
-
}
|
|
949
|
-
policy = presets.get(preset, presets["conservative"]).copy()
|
|
950
|
-
|
|
951
|
-
max_apply = os.environ.get("OCMEMOG_GOVERNANCE_AUTORESOLVE_MAX_APPLY")
|
|
952
|
-
if max_apply:
|
|
953
|
-
policy["max_apply"] = int(float(max_apply) or policy["max_apply"])
|
|
954
|
-
allowed_kinds_raw = os.environ.get("OCMEMOG_GOVERNANCE_AUTORESOLVE_ALLOW_KINDS")
|
|
955
|
-
if allowed_kinds_raw:
|
|
956
|
-
policy["allowed_kinds"] = {k.strip() for k in allowed_kinds_raw.split(",") if k.strip()}
|
|
957
|
-
min_supersession_signal = os.environ.get("OCMEMOG_GOVERNANCE_AUTORESOLVE_MIN_SUPERSESSION_SIGNAL")
|
|
958
|
-
if min_supersession_signal:
|
|
959
|
-
policy["min_supersession_signal"] = float(min_supersession_signal or policy["min_supersession_signal"])
|
|
960
|
-
allowed_buckets_raw = os.environ.get("OCMEMOG_GOVERNANCE_AUTORESOLVE_ALLOW_BUCKETS")
|
|
961
|
-
if allowed_buckets_raw is not None and allowed_buckets_raw != "":
|
|
962
|
-
policy["allowed_buckets"] = {k.strip() for k in allowed_buckets_raw.split(",") if k.strip()}
|
|
963
|
-
|
|
964
|
-
policy["profile"] = preset
|
|
965
|
-
return policy
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
def governance_auto_resolve(
    *,
    categories: Optional[List[str]] = None,
    limit: int = 20,
    dry_run: bool = True,
    profile: str | None = None,
) -> Dict[str, Any]:
    """Automatically resolve low-risk governance queue items.

    Walks the governance queue and applies duplicate / supersession
    decisions permitted by the resolved policy (preset profile plus env
    overrides). With dry_run=True (the default) nothing is applied and each
    item reports the action that would have been taken. Returns a summary
    with per-item actions and the effective policy.
    """
    queue = governance_queue(categories=categories, limit=limit)
    actions: List[Dict[str, Any]] = []
    applied = 0
    skipped = 0

    policy = _resolve_auto_resolve_policy(profile)
    max_apply = int(policy["max_apply"])
    allowed_kinds = set(policy["allowed_kinds"])
    min_supersession_signal = float(policy["min_supersession_signal"])
    # An empty bucket set means "all buckets allowed".
    allowed_buckets = set(policy["allowed_buckets"]) if policy["allowed_buckets"] else set()

    for item in queue:
        kind = str(item.get("kind") or "")
        bucket = str(item.get("bucket") or "")
        reference = str(item.get("reference") or "")
        target = str(item.get("target_reference") or "")
        if not reference or not target:
            skipped += 1
            actions.append({"reference": reference, "target_reference": target, "kind": kind, "applied": False, "dry_run": bool(dry_run), "reason": "missing_reference"})
            continue

        if kind not in allowed_kinds:
            skipped += 1
            actions.append({"reference": reference, "target_reference": target, "kind": kind, "applied": False, "dry_run": bool(dry_run), "reason": "kind_not_allowed"})
            continue

        if allowed_buckets and bucket not in allowed_buckets:
            skipped += 1
            actions.append({"reference": reference, "target_reference": target, "kind": kind, "applied": False, "dry_run": bool(dry_run), "reason": "bucket_not_allowed"})
            continue

        relationship = None
        if kind == "supersession_recommendation":
            signal = float(item.get("signal") or 0.0)
            # Supersession is destructive; require a strong signal.
            if signal < min_supersession_signal:
                skipped += 1
                actions.append({"reference": reference, "target_reference": target, "kind": kind, "applied": False, "dry_run": bool(dry_run), "reason": "signal_below_min"})
                continue
            relationship = "supersedes"
        elif kind == "duplicate_candidate":
            relationship = "duplicate_of"
        else:
            # Anything else (e.g. contradictions) always needs a human.
            skipped += 1
            actions.append({"reference": reference, "target_reference": target, "kind": kind, "applied": False, "dry_run": bool(dry_run), "reason": "unsupported_kind"})
            continue

        # The apply budget only constrains real runs, not dry runs.
        if not dry_run and applied >= max_apply:
            skipped += 1
            actions.append({"reference": reference, "target_reference": target, "kind": kind, "relationship": relationship, "applied": False, "dry_run": False, "reason": "max_apply_reached"})
            continue

        if dry_run:
            actions.append({
                "reference": reference,
                "target_reference": target,
                "kind": kind,
                "relationship": relationship,
                "applied": False,
                "dry_run": True,
                "reason": "dry_run",
            })
            continue

        result = apply_governance_decision(
            reference,
            relationship=relationship,
            target_reference=target,
            approved=True,
        )
        ok = result is not None
        if ok:
            applied += 1
        else:
            skipped += 1
        actions.append({
            "reference": reference,
            "target_reference": target,
            "kind": kind,
            "relationship": relationship,
            "applied": ok,
            "dry_run": False,
            "reason": "applied" if ok else "apply_failed",
        })

    # Audit-log the run regardless of dry_run.
    emit_event(
        store.state_store.reports_dir() / "brain_memory.log.jsonl",
        "governance_auto_resolve",
        status="ok",
        dry_run=bool(dry_run),
        considered=len(queue),
        applied=applied,
        skipped=skipped,
        max_apply=max_apply,
        allowed_kinds=",".join(sorted(allowed_kinds)),
        min_supersession_signal=min_supersession_signal,
        allowed_buckets=",".join(sorted(allowed_buckets)) if allowed_buckets else "*",
        profile=str(policy.get("profile") or "conservative"),
    )
    return {
        "considered": len(queue),
        "applied": applied,
        "skipped": skipped,
        "dry_run": bool(dry_run),
        "policy": {
            "profile": policy.get("profile") or "conservative",
            "max_apply": max_apply,
            "allowed_kinds": sorted(allowed_kinds),
            "min_supersession_signal": min_supersession_signal,
            "allowed_buckets": sorted(allowed_buckets) if allowed_buckets else ["*"],
        },
        "actions": actions,
    }
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
def governance_audit(*, limit: int = 100, kinds: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """Return recent governance-related events from the memory log.

    Reads the tail of brain_memory.log.jsonl, keeps entries whose event
    name matches *kinds* (or a default set of governance events), and
    returns up to *limit* entries in chronological order. A missing or
    unreadable log yields an empty list; malformed lines are skipped.
    """
    logfile = store.state_store.reports_dir() / "brain_memory.log.jsonl"
    if not logfile.exists():
        return []
    wanted = {k.strip() for k in (kinds or []) if k.strip()}
    if not wanted:
        # Default filter: everything that changes or audits governance state.
        wanted = {
            "store_memory_governance_candidates",
            "governance_auto_resolve",
            "mark_memory_relationship_supersedes",
            "mark_memory_relationship_duplicate_of",
            "mark_memory_relationship_contradicts",
            "apply_governance_decision_duplicate_of_approved",
            "apply_governance_decision_contradicts_approved",
            "apply_governance_decision_supersedes_approved",
            "apply_governance_decision_duplicate_of_rejected",
            "apply_governance_decision_contradicts_rejected",
            "apply_governance_decision_supersedes_rejected",
        }
    entries: List[Dict[str, Any]] = []
    try:
        with logfile.open("r", encoding="utf-8", errors="ignore") as handle:
            # Tail read: over-sample so filtering can still fill *limit*.
            lines = handle.readlines()[-max(limit * 5, 200):]
    except Exception:
        return []
    for line in reversed(lines):
        line = line.strip()
        if not line:
            continue
        try:
            payload = json.loads(line)
        except Exception:
            continue  # skip malformed JSON lines
        event = str(payload.get("event") or payload.get("name") or "").strip()
        if event not in wanted:
            continue
        payload["event"] = event
        entries.append(payload)
        if len(entries) >= limit:
            break
    # Scanned newest-first; flip back to chronological order.
    return list(reversed(entries))
|
|
1130
|
-
|
|
1131
|
-
|
|
1132
|
-
def governance_summary(*, categories: Optional[List[str]] = None) -> Dict[str, Any]:
    """Aggregate governance status counters per table and in total.

    For each requested table, counts rows by memory_status and tallies
    pending duplicate/contradiction candidates and recommended
    supersessions, scanning at most the 5000 newest rows per table.
    Returns {"tables": {name: stats}, "totals": stats}.
    """
    allowed = set(store.MEMORY_TABLES)
    tables = [table for table in (categories or list(allowed)) if table in allowed]
    conn = store.connect()
    try:
        summary: Dict[str, Any] = {
            "tables": {},
            "totals": {
                "rows": 0,
                "pending_duplicates": 0,
                "pending_contradictions": 0,
                "recommended_supersessions": 0,
                "status_active": 0,
                "status_duplicate": 0,
                "status_superseded": 0,
                "status_contested": 0,
            },
        }
        for table in tables:
            rows = conn.execute(
                f"SELECT id, metadata_json FROM {table} ORDER BY id DESC LIMIT 5000"
            ).fetchall()
            table_stats = {
                "rows": 0,
                "pending_duplicates": 0,
                "pending_contradictions": 0,
                "recommended_supersessions": 0,
                "status_active": 0,
                "status_duplicate": 0,
                "status_superseded": 0,
                "status_contested": 0,
            }
            for row in rows:
                table_stats["rows"] += 1
                try:
                    metadata = json.loads((row["metadata_json"] if isinstance(row, dict) else row[1]) or "{}")
                except Exception:
                    metadata = {}  # corrupt metadata counts as plain "active"
                prov = metadata.get("provenance") if isinstance(metadata.get("provenance"), dict) else {}
                status = str(prov.get("memory_status") or metadata.get("memory_status") or "active").strip().lower()
                # Unknown statuses fold into "active" so counters stay a closed set.
                if status not in {"active", "duplicate", "superseded", "contested"}:
                    status = "active"
                table_stats[f"status_{status}"] += 1

                dup = prov.get("duplicate_candidates") or []
                contra = prov.get("contradiction_candidates") or []
                if dup:
                    table_stats["pending_duplicates"] += 1
                if contra:
                    table_stats["pending_contradictions"] += 1
                rec = prov.get("supersession_recommendation") or {}
                if isinstance(rec, dict) and rec.get("recommended"):
                    table_stats["recommended_supersessions"] += 1

            summary["tables"][table] = table_stats
            # Roll this table's counters into the grand totals.
            for key in summary["totals"].keys():
                summary["totals"][key] += int(table_stats.get(key, 0) or 0)
        return summary
    finally:
        conn.close()
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
def get_recent_events(limit: int = 10) -> List[Dict[str, Any]]:
    """Return the newest *limit* rows from memory_events, newest first."""
    conn = store.connect()
    try:
        # try/finally matches the rest of the module so the connection is
        # not leaked if the query raises.
        rows = conn.execute(
            "SELECT id, timestamp, event_type, source, details_json FROM memory_events ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
    finally:
        conn.close()
    return [dict(row) for row in rows]
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
def get_recent_tasks(limit: int = 10) -> List[Dict[str, Any]]:
    """Return the newest *limit* rows from the tasks table, newest first."""
    conn = store.connect()
    try:
        # try/finally matches the rest of the module so the connection is
        # not leaked if the query raises.
        rows = conn.execute(
            "SELECT id, timestamp, source, confidence, metadata_json, content FROM tasks ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
    finally:
        conn.close()
    return [dict(row) for row in rows]
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
|
-
def get_memories(limit: int = 10) -> List[Dict[str, Any]]:
    """Return the newest *limit* rows from the knowledge table, newest first."""
    conn = store.connect()
    try:
        # try/finally matches the rest of the module so the connection is
        # not leaked if the query raises.
        rows = conn.execute(
            "SELECT id, timestamp, source, confidence, metadata_json, content FROM knowledge ORDER BY id DESC LIMIT ?",
            (limit,),
        ).fetchall()
    finally:
        conn.close()
    return [dict(row) for row in rows]
|
|
3
|
+
from ocmemog.runtime.memory.api import * # noqa: F401,F403
|