nexo-brain 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +241 -0
- package/bin/create-nexo.js +593 -0
- package/package.json +32 -0
- package/scripts/pre-commit-check.sh +55 -0
- package/src/cognitive.py +1224 -0
- package/src/db.py +2283 -0
- package/src/hooks/caffeinate-guard.sh +8 -0
- package/src/hooks/capture-session.sh +19 -0
- package/src/hooks/session-start.sh +27 -0
- package/src/hooks/session-stop.sh +11 -0
- package/src/plugin_loader.py +136 -0
- package/src/plugins/__init__.py +0 -0
- package/src/plugins/agents.py +52 -0
- package/src/plugins/backup.py +103 -0
- package/src/plugins/cognitive_memory.py +305 -0
- package/src/plugins/entities.py +61 -0
- package/src/plugins/episodic_memory.py +391 -0
- package/src/plugins/evolution.py +113 -0
- package/src/plugins/guard.py +346 -0
- package/src/plugins/preferences.py +47 -0
- package/src/scripts/nexo-auto-update.py +213 -0
- package/src/scripts/nexo-catchup.py +179 -0
- package/src/scripts/nexo-cognitive-decay.py +82 -0
- package/src/scripts/nexo-daily-self-audit.py +532 -0
- package/src/scripts/nexo-postmortem-consolidator.py +594 -0
- package/src/scripts/nexo-sleep.py +762 -0
- package/src/scripts/nexo-synthesis.py +537 -0
- package/src/server.py +560 -0
- package/src/tools_coordination.py +102 -0
- package/src/tools_credentials.py +64 -0
- package/src/tools_learnings.py +180 -0
- package/src/tools_menu.py +208 -0
- package/src/tools_reminders.py +80 -0
- package/src/tools_reminders_crud.py +157 -0
- package/src/tools_sessions.py +169 -0
- package/src/tools_task_history.py +57 -0
- package/templates/CLAUDE.md.template +89 -0
|
@@ -0,0 +1,532 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
NEXO Daily Self-Audit
|
|
4
|
+
Proactively scans for common issues before they become problems.
|
|
5
|
+
Runs via launchd at 7:00 AM daily. Results saved to ~/claude/logs/self-audit.log
|
|
6
|
+
"""
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import re
|
|
10
|
+
import sqlite3
|
|
11
|
+
import subprocess
|
|
12
|
+
import sys
|
|
13
|
+
import hashlib
|
|
14
|
+
from datetime import datetime, timedelta
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
# Resolve the NEXO home directory once; every path below hangs off it.
# (The original recomputed Path(os.environ.get("NEXO_HOME", ...)) seven times.)
NEXO_HOME = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))

LOG_DIR = NEXO_HOME / "logs"
LOG_DIR.mkdir(parents=True, exist_ok=True)  # ensure log dir exists at import time
LOG_FILE = LOG_DIR / "self-audit.log"
NEXO_DB = NEXO_HOME / "nexo.db"
# NOTE(review): a project-directory constant for git checks appears to be
# missing here — check_uncommitted_changes references WAZION_DIR, which is
# undefined in this file. Confirm where it was meant to come from.
HASH_REGISTRY = NEXO_HOME / "scripts" / ".watchdog-hashes"
SNAPSHOT_GOLDEN = NEXO_HOME / "snapshots" / "golden" / "files" / "claude"
RUNTIME_PREFLIGHT_SUMMARY = LOG_DIR / "runtime-preflight-summary.json"
WATCHDOG_SMOKE_SUMMARY = LOG_DIR / "watchdog-smoke-summary.json"
RESTORE_LOG = LOG_DIR / "snapshot-restores.log"
CORTEX_LOG_DIR = NEXO_HOME / "cortex" / "logs"

# Accumulated audit results; each entry is {"severity", "area", "msg"}.
findings = []
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def log(msg):
    """Print *msg* with a timestamp and append the same line to LOG_FILE."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{stamp}] {msg}"
    print(entry, flush=True)
    with open(LOG_FILE, "a") as fh:
        fh.write(entry + "\n")
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def finding(severity, area, msg):
    """Record one structured audit finding and echo it through log()."""
    record = {"severity": severity, "area": area, "msg": msg}
    findings.append(record)
    log(f" [{severity}] {area}: {msg}")
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# ── Check 1: Overdue reminders ──────────────────────────────────────────
|
|
46
|
+
# ── Check 1: Overdue reminders ──────────────────────────────────────────
def check_overdue_reminders():
    """WARN when pending reminders have a due date in the past."""
    if not NEXO_DB.exists():
        return
    today = datetime.now().strftime("%Y-%m-%d")
    conn = sqlite3.connect(str(NEXO_DB))
    overdue = conn.execute(
        "SELECT description, date FROM reminders WHERE status='PENDIENTE' AND date < ? AND date != '' ORDER BY date",
        (today,),
    ).fetchall()
    conn.close()
    if not overdue:
        return
    finding("WARN", "reminders", f"{len(overdue)} overdue: {', '.join(r[0][:40] for r in overdue[:5])}")
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# ── Check 2: Overdue followups ──────────────────────────────────────────
|
|
61
|
+
# ── Check 2: Overdue followups ──────────────────────────────────────────
def check_overdue_followups():
    """WARN when pending followups have a due date in the past."""
    if not NEXO_DB.exists():
        return
    today = datetime.now().strftime("%Y-%m-%d")
    conn = sqlite3.connect(str(NEXO_DB))
    overdue = conn.execute(
        "SELECT description, date FROM followups WHERE status='PENDIENTE' AND date < ? AND date != '' ORDER BY date",
        (today,),
    ).fetchall()
    conn.close()
    if not overdue:
        return
    finding("WARN", "followups", f"{len(overdue)} overdue: {', '.join(r[0][:40] for r in overdue[:5])}")
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def check_uncommitted_changes():
    """WARN when the configured project repo has >10 uncommitted changes.

    Bug fix: the original referenced WAZION_DIR, a name never defined in
    this file, so calling the check raised NameError. The project directory
    is now read from the NEXO_PROJECT_DIR environment variable; the check
    is skipped when it is unset or the directory does not exist.
    """
    project = os.environ.get("NEXO_PROJECT_DIR", "")
    if not project:
        return
    project_dir = Path(project)
    if not project_dir.exists():
        return
    result = subprocess.run(
        ["git", "status", "--porcelain"],
        cwd=str(project_dir), capture_output=True, text=True
    )
    # One porcelain line per dirty path; ignore blanks.
    lines = [l for l in result.stdout.strip().split("\n") if l.strip()]
    if len(lines) > 10:
        finding("WARN", "git", f"{len(lines)} uncommitted changes in project repo")
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
# ── Check 4: Cron error logs (last 24h) ────────────────────────────────
|
|
88
|
+
# ── Check 4: Cron error logs (last 24h) ────────────────────────────────
def check_cron_errors():
    """ERROR when cron_error learnings were recorded in the last 24 hours."""
    if not NEXO_DB.exists():
        return
    since = (datetime.now() - timedelta(days=1)).isoformat()
    conn = sqlite3.connect(str(NEXO_DB))
    # NOTE(review): created_at is compared to an ISO string here but to a
    # unix epoch in check_repetition_rate — confirm the column's format.
    errors = conn.execute(
        "SELECT category, title FROM learnings WHERE category='cron_error' AND created_at > ? ORDER BY created_at DESC",
        (since,),
    ).fetchall()
    conn.close()
    if errors:
        finding("ERROR", "crons", f"{len(errors)} cron errors in last 24h")
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
# ── Check 5: Evolution failures ─────────────────────────────────────────
|
|
103
|
+
# ── Check 5: Evolution failures ─────────────────────────────────────────
def check_evolution_health():
    """Surface evolution failure streak / disabled state from its state file."""
    obj_file = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo"))) / "cortex" / "evolution-objective.json"
    if not obj_file.exists():
        return
    state = json.loads(obj_file.read_text())
    streak = state.get("consecutive_failures", 0)
    if streak >= 2:
        finding("WARN", "evolution", f"{streak} consecutive failures — circuit breaker at 3")
    if not state.get("evolution_enabled", True):
        finding("ERROR", "evolution", f"Evolution DISABLED: {state.get('disabled_reason', 'unknown')}")
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
# ── Check 6: Disk space ────────────────────────────────────────────────
|
|
116
|
+
# ── Check 6: Disk space ────────────────────────────────────────────────
def check_disk_space():
    """Alert when the root filesystem is over 80% (WARN) or 90% (ERROR) full."""
    df = subprocess.run(["df", "-h", "/"], capture_output=True, text=True)
    for row in df.stdout.strip().split("\n")[1:]:  # skip df header
        fields = row.split()
        if len(fields) < 5:
            continue
        used = int(fields[4].replace("%", ""))  # "Use%" column
        if used > 90:
            finding("ERROR", "disk", f"Root disk at {used}% capacity")
        elif used > 80:
            finding("WARN", "disk", f"Root disk at {used}% capacity")
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
# ── Check 7: NEXO DB size ──────────────────────────────────────────────
|
|
129
|
+
# ── Check 7: NEXO DB size ──────────────────────────────────────────────
def check_db_size():
    """WARN when nexo.db grows past 100 MB."""
    if not NEXO_DB.exists():
        return
    size_mb = NEXO_DB.stat().st_size / (1024 * 1024)
    if size_mb > 100:
        finding("WARN", "database", f"nexo.db is {size_mb:.1f} MB — consider cleanup")
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
# ── Check 8: Stale sessions ────────────────────────────────────────────
|
|
137
|
+
# ── Check 8: Stale sessions ────────────────────────────────────────────
def check_stale_sessions():
    """INFO for sessions whose last heartbeat is 2–24 hours old."""
    if not NEXO_DB.exists():
        return
    stale_before = (datetime.now() - timedelta(hours=2)).timestamp()
    not_older_than = (datetime.now() - timedelta(days=1)).timestamp()
    conn = sqlite3.connect(str(NEXO_DB))
    stale = conn.execute(
        "SELECT sid, task FROM sessions WHERE last_update_epoch < ? AND last_update_epoch > ?",
        (stale_before, not_older_than),
    ).fetchall()
    conn.close()
    if stale:
        finding("INFO", "sessions", f"{len(stale)} stale sessions (no heartbeat >2h)")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
# ── Check 9: Error repetition rate (Guard) ─────────────────────────────
|
|
153
|
+
# ── Check 9: Error repetition rate (Guard) ─────────────────────────────
def check_repetition_rate():
    """Alert if >30% of learnings in last 3 days are repetitions."""
    if not NEXO_DB.exists():
        return
    conn = sqlite3.connect(str(NEXO_DB))
    # Two representations of the same 3-day cutoff: a datetime string and an epoch.
    cutoff_3d = (datetime.now() - timedelta(days=3)).strftime("%Y-%m-%d %H:%M:%S")
    cutoff_epoch = (datetime.now() - timedelta(days=3)).timestamp()

    # NOTE(review): learnings.created_at is compared against a unix epoch here
    # but against an ISO string in check_cron_errors, and
    # error_repetitions.created_at against a "%Y-%m-%d %H:%M:%S" string —
    # confirm each column's storage format against the schema in db.py.
    new_learnings = conn.execute(
        "SELECT COUNT(*) FROM learnings WHERE created_at > ?", (cutoff_epoch,)
    ).fetchone()[0]
    repetitions = conn.execute(
        "SELECT COUNT(*) FROM error_repetitions WHERE created_at > ?", (cutoff_3d,)
    ).fetchone()[0]
    conn.close()

    if new_learnings > 0:
        # Ratio of repeated errors to newly recorded learnings over the window.
        rate = repetitions / new_learnings
        if rate > 0.30:
            finding("ERROR", "guard", f"Repetition rate {rate:.0%} over last 3 days ({repetitions}/{new_learnings}) — exceeds 30% threshold")
        elif rate > 0.20:
            finding("WARN", "guard", f"Repetition rate {rate:.0%} over last 3 days ({repetitions}/{new_learnings})")
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
# ── Check 10: Unused learnings ─────────────────────────────────────────
|
|
178
|
+
# ── Check 10: Unused learnings ─────────────────────────────────────────
def check_unused_learnings():
    """Find learnings >7 days old never returned by guard_check."""
    if not NEXO_DB.exists():
        return
    week_ago = (datetime.now() - timedelta(days=7)).timestamp()
    conn = sqlite3.connect(str(NEXO_DB))
    old_count = conn.execute(
        "SELECT COUNT(*) FROM learnings WHERE created_at < ?", (week_ago,)
    ).fetchone()[0]
    checks = conn.execute("SELECT COUNT(*) FROM guard_checks").fetchone()[0]
    conn.close()

    if checks == 0 and old_count > 10:
        finding("WARN", "guard", f"Guard never used — {old_count} learnings sitting idle. Call nexo_guard_check before edits.")
    elif 0 < checks < 5:
        finding("INFO", "guard", f"Only {checks} guard checks performed — aim for >5 per session")
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
# ── Check 11: Memory reviews due ────────────────────────────────────────
|
|
198
|
+
# ── Check 11: Memory reviews due ────────────────────────────────────────
def check_memory_reviews():
    """Alert when decisions/learnings are due for review."""
    if not NEXO_DB.exists():
        return
    conn = sqlite3.connect(str(NEXO_DB))
    now_epoch = datetime.now().timestamp()
    now_iso = datetime.now().isoformat(timespec="seconds")

    try:
        # NOTE(review): learnings.review_due_at is compared against a unix
        # epoch while decisions.review_due_at is compared against an ISO
        # string — presumably the two tables store timestamps differently;
        # confirm against the schema.
        due_learnings = conn.execute(
            "SELECT COUNT(*) FROM learnings WHERE review_due_at IS NOT NULL AND status != 'superseded' AND review_due_at <= ?",
            (now_epoch,)
        ).fetchone()[0]
        due_decisions = conn.execute(
            "SELECT COUNT(*) FROM decisions WHERE review_due_at IS NOT NULL AND status != 'reviewed' AND review_due_at <= ?",
            (now_iso,)
        ).fetchone()[0]
    except sqlite3.OperationalError:
        # Presumably older databases lack these columns/tables; skip quietly.
        conn.close()
        return
    conn.close()

    total_due = due_learnings + due_decisions
    if total_due >= 10:
        finding("WARN", "memory", f"{total_due} memory reviews due ({due_decisions} decisions, {due_learnings} learnings)")
    elif total_due > 0:
        finding("INFO", "memory", f"{total_due} memory reviews due ({due_decisions} decisions, {due_learnings} learnings)")
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
def _sha256(path: Path) -> str:
|
|
228
|
+
return hashlib.sha256(path.read_bytes()).hexdigest()
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
# ── Check 12: Watchdog registry sanity ──────────────────────────────────
|
|
232
|
+
# ── Check 12: Watchdog registry sanity ──────────────────────────────────
def check_watchdog_registry():
    """ERROR when files meant to stay mutable appear in the watchdog registry."""
    if not HASH_REGISTRY.exists():
        finding("WARN", "watchdog", "hash registry missing")
        return
    registry = HASH_REGISTRY.read_text(errors="ignore")
    mutable = ["CLAUDE.md", "db.py", "server.py", "plugin_loader.py", "cortex-wrapper.py"]
    offenders = [entry for entry in mutable if entry in registry]
    if offenders:
        finding("ERROR", "watchdog", f"mutable files still protected by watchdog: {', '.join(offenders)}")
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# ── Check 13: Snapshot drift on protected recovery files ────────────────
|
|
244
|
+
# ── Check 13: Snapshot drift on protected recovery files ────────────────
def check_snapshot_sync():
    """WARN when live recovery files differ from (or are missing vs) their golden snapshots."""
    home = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))
    pairs = [
        (home / "db.py", SNAPSHOT_GOLDEN / "nexo-mcp" / "db.py"),
        (home / "cortex" / "cortex-wrapper.py", SNAPSHOT_GOLDEN / "cortex" / "cortex-wrapper.py"),
        (home / "cortex" / "evolution_cycle.py", SNAPSHOT_GOLDEN / "cortex" / "evolution_cycle.py"),
    ]
    drifted = []
    for live, golden in pairs:
        # A missing file on either side counts as drift, same as a hash mismatch.
        if not (live.exists() and golden.exists()) or _sha256(live) != _sha256(golden):
            drifted.append(live.name)
    if drifted:
        finding("WARN", "snapshots", f"golden snapshot drift: {', '.join(drifted)}")
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
# ── Check 14: Recent restore activity ───────────────────────────────────
|
|
262
|
+
# ── Check 14: Recent restore activity ───────────────────────────────────
def check_restore_activity():
    """Count snapshot-restore log entries in the last hour / last 24h and alert on bursts."""
    if not RESTORE_LOG.exists():
        return
    cutoff_day = datetime.now() - timedelta(days=1)
    # "YYYY-mm-dd HH" — 13 chars, matched against line[1:14] below (the
    # character after the opening '[').
    current_hour_prefix = datetime.now().strftime("%Y-%m-%d %H")
    recent_day = 0
    recent_hour = 0
    for line in RESTORE_LOG.read_text(errors="ignore").splitlines():
        # Only lines of the form "[YYYY-mm-dd HH:MM:SS] ..." are restore entries.
        if not line.startswith("["):
            continue
        # Skip codex memory files — presumably routine restores, not incidents;
        # confirm against the restore tooling.
        if "/.codex/memories/nexo-" in line:
            continue
        try:
            ts = datetime.strptime(line[1:20], "%Y-%m-%d %H:%M:%S")
        except ValueError:
            continue
        if ts >= cutoff_day:
            recent_day += 1
        if line[1:14] == current_hour_prefix:
            recent_hour += 1
    if recent_hour > 2:
        finding("ERROR", "restore", f"{recent_hour} snapshot restores in last hour")
    elif recent_day > 5:
        finding("WARN", "restore", f"{recent_day} snapshot restores in last 24h")
    elif recent_day > 0:
        finding("INFO", "restore", f"{recent_day} snapshot restores in last 24h (historical activity)")
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
# ── Check 15: Bad model responses ───────────────────────────────────────
|
|
291
|
+
# ── Check 15: Bad model responses ───────────────────────────────────────
def check_bad_responses():
    """WARN when bad-response-*.json files were written to cortex logs in the last 24h."""
    if not CORTEX_LOG_DIR.exists():
        return
    since = datetime.now() - timedelta(days=1)
    recent = []
    for path in CORTEX_LOG_DIR.glob("bad-response-*.json"):
        if datetime.fromtimestamp(path.stat().st_mtime) >= since:
            recent.append(path)
    if recent:
        finding("WARN", "cortex", f"{len(recent)} bad model responses in last 24h")
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
# ── Check 16: Runtime preflight freshness ───────────────────────────────
|
|
304
|
+
# ── Check 16: Runtime preflight freshness ───────────────────────────────
def check_runtime_preflight():
    """Verify the runtime preflight summary exists, is fresh (<24h), and passed."""
    if not RUNTIME_PREFLIGHT_SUMMARY.exists():
        finding("WARN", "preflight", "runtime preflight summary missing")
        return
    summary = json.loads(RUNTIME_PREFLIGHT_SUMMARY.read_text())
    try:
        # fromisoformat raises on None/garbage, treated as an invalid stamp.
        ran_at = datetime.fromisoformat(summary.get("timestamp"))
    except Exception:
        finding("WARN", "preflight", "runtime preflight timestamp invalid")
        return
    if ran_at < datetime.now() - timedelta(days=1):
        finding("WARN", "preflight", "runtime preflight older than 24h")
    if not summary.get("ok", False):
        finding("ERROR", "preflight", "runtime preflight failing")
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
# ── Check 17: Watchdog smoke freshness ──────────────────────────────────
|
|
322
|
+
# ── Check 17: Watchdog smoke freshness ──────────────────────────────────
def check_watchdog_smoke():
    """Verify the watchdog smoke summary exists, is fresh (<24h), and passed."""
    if not WATCHDOG_SMOKE_SUMMARY.exists():
        finding("WARN", "watchdog", "watchdog smoke summary missing")
        return
    summary = json.loads(WATCHDOG_SMOKE_SUMMARY.read_text())
    try:
        # fromisoformat raises on None/garbage, treated as an invalid stamp.
        ran_at = datetime.fromisoformat(summary.get("timestamp"))
    except Exception:
        finding("WARN", "watchdog", "watchdog smoke timestamp invalid")
        return
    if ran_at < datetime.now() - timedelta(days=1):
        finding("WARN", "watchdog", "watchdog smoke older than 24h")
    if not summary.get("ok", False):
        finding("ERROR", "watchdog", "watchdog smoke failing")
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# ── Check 18: Cognitive memory health ────────────────────────────────
|
|
340
|
+
# ── Check 18: Cognitive memory health ────────────────────────────────
def check_cognitive_health():
    """Check cognitive.db health and run weekly GC on Sundays.

    Reports STM/LTM counts and average STM strength, collects retrieval
    metrics via the project-local `cognitive` module (imported dynamically
    from NEXO_HOME), evaluates phase-upgrade triggers, and — on Sundays —
    runs garbage collection over STM/sensory/dormant-LTM memories.
    """
    cognitive_db = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo"))) / "cognitive.db"
    if not cognitive_db.exists():
        finding("WARN", "cognitive", "cognitive.db not found")
        return

    # Basic memory-store counts straight from cognitive.db.
    conn = sqlite3.connect(str(cognitive_db))
    stm_count = conn.execute("SELECT COUNT(*) FROM stm_memories WHERE promoted_to_ltm = 0").fetchone()[0]
    ltm_active = conn.execute("SELECT COUNT(*) FROM ltm_memories WHERE is_dormant = 0").fetchone()[0]
    ltm_dormant = conn.execute("SELECT COUNT(*) FROM ltm_memories WHERE is_dormant = 1").fetchone()[0]
    # AVG() returns NULL (None) when the table slice is empty; coerce to 0.0.
    avg_stm_str = conn.execute("SELECT AVG(strength) FROM stm_memories WHERE promoted_to_ltm = 0").fetchone()[0] or 0.0
    sensory_count = conn.execute("SELECT COUNT(*) FROM stm_memories WHERE source_type = 'sensory' AND promoted_to_ltm = 0").fetchone()[0]
    conn.close()

    size_mb = cognitive_db.stat().st_size / (1024 * 1024)
    finding("INFO", "cognitive", f"STM: {stm_count} (sensory: {sensory_count}) | LTM: {ltm_active} active, {ltm_dormant} dormant | {size_mb:.1f} MB | avg STM strength: {avg_stm_str:.2f}")

    if avg_stm_str < 0.3 and stm_count > 20:
        finding("WARN", "cognitive", f"STM average strength very low ({avg_stm_str:.2f}) — memories decaying without access")

    # Metrics report (spec section 9)
    try:
        # Make NEXO_HOME importable so the project-local cognitive module resolves.
        sys.path.insert(0, str(Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))))
        import cognitive as cog

        metrics = cog.get_metrics(days=7)
        if metrics["total_retrievals"] > 0:
            finding("INFO", "cognitive-metrics",
                    f"7d: {metrics['total_retrievals']} retrievals, "
                    f"relevance={metrics['retrieval_relevance_pct']}%, "
                    f"avg_score={metrics['avg_top_score']}, "
                    f"{metrics['retrievals_per_day']}/day")

        if metrics["needs_multilingual"]:
            finding("WARN", "cognitive-metrics",
                    f"Retrieval relevance {metrics['retrieval_relevance_pct']}% < 70% — consider switching to multilingual model (spec 13.3)")

        if metrics["retrieval_relevance_pct"] < 50 and metrics["total_retrievals"] >= 5:
            finding("ERROR", "cognitive-metrics",
                    f"Retrieval relevance critically low: {metrics['retrieval_relevance_pct']}%")

        # Repeat error rate
        repeats = cog.check_repeat_errors()
        if repeats["new_count"] > 0:
            finding("INFO", "cognitive-metrics",
                    f"Repeat errors: {repeats['duplicate_count']}/{repeats['new_count']} "
                    f"({repeats['repeat_rate_pct']}%) — target <10%")
            if repeats["repeat_rate_pct"] > 30:
                finding("WARN", "cognitive-metrics",
                        f"Repeat error rate {repeats['repeat_rate_pct']}% exceeds 30% threshold")

        # Write metrics to file for dashboard/tracking
        metrics_file = LOG_DIR / "cognitive-metrics.json"
        metrics_file.write_text(json.dumps({
            "timestamp": datetime.now().isoformat(),
            "retrieval": metrics,
            # "duplicates" is dropped — presumably a bulky detail list; confirm.
            "repeats": {k: v for k, v in repeats.items() if k != "duplicates"},
        }, indent=2))
    except Exception as e:
        # Best-effort: metrics failure must not break the rest of the audit.
        finding("WARN", "cognitive-metrics", f"Metrics collection failed: {e}")

    # Phase triggers monitoring (spec section 10)
    try:
        sys.path.insert(0, str(Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))))
        import cognitive as cog

        db_cog = cog._get_db()

        # v2.0: Procedural memory — trigger: >50 procedural change_logs
        procedural_markers = ['1.', '2.', '3.', 'step ', 'Step ', 'then ', 'first ', 'First ', '→', '->', 'SSH', 'scp', 'git commit', 'deploy']
        changes = db_cog.execute('SELECT content FROM ltm_memories WHERE source_type = "change"').fetchall()
        # A memory counts as "procedural" when it contains >=2 marker substrings.
        procedural_count = sum(1 for r in changes if sum(1 for m in procedural_markers if m in r[0]) >= 2)
        if procedural_count >= 50:
            finding("WARN", "cognitive-phase", f"v2.0 TRIGGER MET: {procedural_count} procedural memories (>50). Implement Store 4 (memoria procedimental).")

        # v2.1: MEMORY.md reduction — trigger: RAG relevance >80% for 30 days
        metrics_file = LOG_DIR / "cognitive-metrics-history.json"
        try:
            history = json.loads(metrics_file.read_text()) if metrics_file.exists() else []
        except Exception:
            history = []

        # Append today's metrics
        m = cog.get_metrics(days=1)
        if m["total_retrievals"] > 0:
            history.append({
                "date": datetime.now().strftime("%Y-%m-%d"),
                "relevance": m["retrieval_relevance_pct"],
                "retrievals": m["total_retrievals"],
            })
        # Keep last 60 days
        history = history[-60:]
        metrics_file.write_text(json.dumps(history, indent=2))

        # Check if last 30 entries all have relevance >80%
        if len(history) >= 30:
            last_30 = history[-30:]
            all_above_80 = all(h["relevance"] >= 80.0 for h in last_30)
            if all_above_80:
                finding("WARN", "cognitive-phase", "v2.1 TRIGGER MET: RAG relevance >80% for 30 consecutive days. Reduce MEMORY.md to ~20 lines.")

        # v2.2: Dashboard — trigger: 30 days of metrics
        if len(history) >= 30:
            finding("INFO", "cognitive-phase", f"v2.2 TRIGGER MET: {len(history)} days of metrics accumulated. Implement HTML dashboard.")

        # v3.0: Clustering — trigger: LTM >1000
        ltm_count = db_cog.execute('SELECT COUNT(*) FROM ltm_memories WHERE is_dormant = 0').fetchone()[0]
        if ltm_count >= 1000:
            finding("WARN", "cognitive-phase", f"v3.0 TRIGGER MET: {ltm_count} LTM vectors (>1000). Implement K-means clustering.")

        # v1.4: Multilingual — already checked in metrics section above

    except Exception as e:
        finding("WARN", "cognitive-phase", f"Phase trigger check failed: {e}")

    # Weekly GC on Sundays
    if datetime.now().weekday() == 6:
        log(" Running weekly cognitive GC (Sunday)...")
        try:
            sys.path.insert(0, str(Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))))
            import cognitive as cog

            # 1. Delete STM with strength < 0.1 and > 30 days
            gc_stm = cog.gc_stm()

            # 2. GC sensory > 48h (should already be cleaned by postmortem, but safety net)
            gc_sensory = cog.gc_sensory(max_age_hours=48)

            # 3. Delete dormant LTM with strength < 0.1 and > 30 days
            gc_ltm = cog.gc_ltm_dormant(min_age_days=30)

            log(f" Weekly GC results: STM removed={gc_stm}, sensory removed={gc_sensory}, LTM dormant removed={gc_ltm}")
            if gc_stm + gc_sensory + gc_ltm > 0:
                finding("INFO", "cognitive", f"Weekly GC cleaned: {gc_stm} STM + {gc_sensory} sensory + {gc_ltm} dormant LTM")
        except Exception as e:
            finding("WARN", "cognitive", f"Weekly GC failed: {e}")
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
# ── Main ────────────────────────────────────────────────────────────────
|
|
480
|
+
# ── Main ────────────────────────────────────────────────────────────────
def main():
    """Run all audit checks, log the tallies, and persist a JSON summary.

    Returns 1 when any ERROR-severity finding was recorded, else 0
    (used as the process exit code by the __main__ guard).
    """
    log("=" * 60)
    log("NEXO Daily Self-Audit starting")

    # Each check appends into the module-level `findings` list via finding().
    check_overdue_reminders()
    check_overdue_followups()
    check_uncommitted_changes()
    check_cron_errors()
    check_evolution_health()
    check_disk_space()
    check_db_size()
    check_stale_sessions()
    check_repetition_rate()
    check_unused_learnings()
    check_memory_reviews()
    check_watchdog_registry()
    check_snapshot_sync()
    check_restore_activity()
    check_bad_responses()
    check_runtime_preflight()
    check_watchdog_smoke()
    check_cognitive_health()

    errors = sum(1 for f in findings if f["severity"] == "ERROR")
    warns = sum(1 for f in findings if f["severity"] == "WARN")
    infos = sum(1 for f in findings if f["severity"] == "INFO")

    log(f"Audit complete: {errors} errors, {warns} warnings, {infos} info")

    # Write summary for NEXO startup to read
    summary_file = LOG_DIR / "self-audit-summary.json"
    summary_file.write_text(json.dumps({
        "timestamp": datetime.now().isoformat(),
        "findings": findings,
        "counts": {"error": errors, "warn": warns, "info": infos}
    }, indent=2))

    # Register successful run for catch-up
    try:
        import json as _json
        _state_file = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo"))) / "operations" / ".catchup-state.json"
        _state = _json.loads(_state_file.read_text()) if _state_file.exists() else {}
        _state["self-audit"] = datetime.now().isoformat()
        _state_file.write_text(_json.dumps(_state, indent=2))
    except Exception:
        # Best-effort bookkeeping — never let it fail the audit run.
        pass

    log("=" * 60)
    return 1 if errors > 0 else 0
|
|
529
|
+
|
|
530
|
+
|
|
531
|
+
# Script entry point: exit code 1 when any ERROR-severity finding was raised.
if __name__ == "__main__":
    sys.exit(main())
|