nexo-brain 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +241 -0
  3. package/bin/create-nexo.js +593 -0
  4. package/package.json +32 -0
  5. package/scripts/pre-commit-check.sh +55 -0
  6. package/src/cognitive.py +1224 -0
  7. package/src/db.py +2283 -0
  8. package/src/hooks/caffeinate-guard.sh +8 -0
  9. package/src/hooks/capture-session.sh +19 -0
  10. package/src/hooks/session-start.sh +27 -0
  11. package/src/hooks/session-stop.sh +11 -0
  12. package/src/plugin_loader.py +136 -0
  13. package/src/plugins/__init__.py +0 -0
  14. package/src/plugins/agents.py +52 -0
  15. package/src/plugins/backup.py +103 -0
  16. package/src/plugins/cognitive_memory.py +305 -0
  17. package/src/plugins/entities.py +61 -0
  18. package/src/plugins/episodic_memory.py +391 -0
  19. package/src/plugins/evolution.py +113 -0
  20. package/src/plugins/guard.py +346 -0
  21. package/src/plugins/preferences.py +47 -0
  22. package/src/scripts/nexo-auto-update.py +213 -0
  23. package/src/scripts/nexo-catchup.py +179 -0
  24. package/src/scripts/nexo-cognitive-decay.py +82 -0
  25. package/src/scripts/nexo-daily-self-audit.py +532 -0
  26. package/src/scripts/nexo-postmortem-consolidator.py +594 -0
  27. package/src/scripts/nexo-sleep.py +762 -0
  28. package/src/scripts/nexo-synthesis.py +537 -0
  29. package/src/server.py +560 -0
  30. package/src/tools_coordination.py +102 -0
  31. package/src/tools_credentials.py +64 -0
  32. package/src/tools_learnings.py +180 -0
  33. package/src/tools_menu.py +208 -0
  34. package/src/tools_reminders.py +80 -0
  35. package/src/tools_reminders_crud.py +157 -0
  36. package/src/tools_sessions.py +169 -0
  37. package/src/tools_task_history.py +57 -0
  38. package/templates/CLAUDE.md.template +89 -0
@@ -0,0 +1,346 @@
1
+ """Guard plugin — Error prevention closed-loop system.
2
+
3
+ Surfaces relevant learnings at the moment of action, tracks repetitions,
4
+ and provides stats on error prevention effectiveness.
5
+ """
6
+ import json
7
+ import os
8
+ from datetime import datetime, timedelta
9
+ from pathlib import Path
10
+ from db import get_db, find_similar_learnings, extract_keywords
11
+
12
+ NEXO_HOME = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))
13
+ SCHEMA_CACHE_PATH = str(NEXO_HOME / "schema_cache.json")
14
+
15
+
16
def _load_schema_cache() -> dict:
    """Return cached DB schemas from schema_cache.json, or {} on any failure."""
    if not os.path.exists(SCHEMA_CACHE_PATH):
        return {}
    try:
        with open(SCHEMA_CACHE_PATH) as fh:
            return json.load(fh)
    except Exception:
        # The cache is best-effort: a corrupt or unreadable file means "empty".
        return {}
25
+
26
+
27
def _get_nexo_table_schema(table_name: str) -> str:
    """Get schema for a nexo.db table via PRAGMA.

    Args:
        table_name: SQLite table name; must be a plain ``\\w+`` identifier.

    Returns:
        A "col(TYPE), ..." summary string, or "" when the table is unknown,
        the name is invalid, or the PRAGMA fails.
    """
    # PRAGMA statements cannot take bound parameters, so the name must be
    # interpolated.  Reject anything that is not a plain identifier to rule
    # out SQL injection when callers pass names that did not come from
    # _extract_table_names() (whose regexes only capture \w+).
    if not table_name or not table_name.replace("_", "").isalnum():
        return ""
    conn = get_db()
    try:
        rows = conn.execute(f"PRAGMA table_info({table_name})").fetchall()
        if rows:
            cols = [f"{r['name']}({r['type']})" for r in rows]
            return ", ".join(cols)
    except Exception:
        pass
    return ""
38
+
39
+
40
+ def _extract_table_names(content: str) -> set:
41
+ """Extract SQL table names from source code."""
42
+ import re
43
+ tables = set()
44
+ patterns = [
45
+ r'(?:FROM|JOIN|INTO|UPDATE)\s+`?(\w+)`?',
46
+ r'CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?`?(\w+)`?',
47
+ r'DESCRIBE\s+`?(\w+)`?',
48
+ r'table_info\([\'"]?(\w+)[\'"]?\)',
49
+ ]
50
+ for pat in patterns:
51
+ for m in re.finditer(pat, content, re.IGNORECASE):
52
+ tables.add(m.group(1))
53
+ sql_keywords = {'SELECT', 'WHERE', 'AND', 'OR', 'NOT', 'NULL', 'SET', 'VALUES', 'INTO', 'AS'}
54
+ return {t for t in tables if t.upper() not in sql_keywords}
55
+
56
+
57
def handle_guard_check(files: str = "", area: str = "", include_schemas: str = "true") -> str:
    """Check learnings relevant to files/area before editing. Call BEFORE any code change.

    Args:
        files: Comma-separated file paths about to be edited
        area: System area (infrastructure, api, database, backend, etc.)
        include_schemas: Include DB table schemas if files touch database code (true/false)

    Returns:
        A formatted report: blocking rules, relevant learnings, universal rules,
        DB schemas, area repetition rate, and optional cognitive matches.
    """
    conn = get_db()
    include_schemas_bool = include_schemas.lower() in ("true", "1", "yes")
    file_list = [f.strip() for f in files.split(",") if f.strip()] if files else []

    result = {
        "learnings": [],
        "universal_rules": [],
        "schemas": {},
        "area_repetition_rate": 0.0,
        "blocking_rules": [],
    }

    seen_ids = set()  # dedupe learnings matched by both file path and area

    # 1. By file path — learnings mentioning the file name or parent directory.
    # (Fixed: Path is imported at module level; it was re-imported every iteration.)
    for filepath in file_list:
        p = Path(filepath)
        filename = p.name
        parent_dir = p.parent.name

        rows = conn.execute(
            "SELECT id, category, title, content FROM learnings WHERE INSTR(content, ?) > 0 OR INSTR(content, ?) > 0",
            (filename, parent_dir)
        ).fetchall()
        for r in rows:
            if r["id"] not in seen_ids:
                seen_ids.add(r["id"])
                result["learnings"].append({"id": r["id"], "category": r["category"], "rule": r["title"]})

    # 2. By area/category
    if area:
        rows = conn.execute(
            "SELECT id, category, title, content FROM learnings WHERE category = ?",
            (area,)
        ).fetchall()
        for r in rows:
            if r["id"] not in seen_ids:
                seen_ids.add(r["id"])
                result["learnings"].append({"id": r["id"], "category": r["category"], "rule": r["title"]})

    # 3. Universal rules (SIEMPRE, NUNCA, ANTES, always, never)
    rows = conn.execute(
        "SELECT id, category, title, content FROM learnings WHERE "
        "content LIKE '%SIEMPRE%' OR content LIKE '%NUNCA%' OR content LIKE '%ANTES%' "
        "OR content LIKE '%always%' OR content LIKE '%never%'"
    ).fetchall()
    for r in rows:
        if r["id"] not in seen_ids:
            result["universal_rules"].append({"id": r["id"], "rule": r["title"]})

    # 4. DB schemas if files contain SQL keywords
    if include_schemas_bool and file_list:
        all_tables = set()
        sql_keywords = ['SELECT', 'INSERT', 'UPDATE', 'DELETE', 'CREATE TABLE']  # hoisted out of the loop
        for filepath in file_list:
            try:
                with open(filepath, 'r', errors='ignore') as f:
                    content = f.read()
                if any(kw in content.upper() for kw in sql_keywords):
                    all_tables.update(_extract_table_names(content))
            except (FileNotFoundError, PermissionError):
                continue

        # Prefer the live nexo.db schema; fall back to the cached cloud_sql snapshot.
        cache = _load_schema_cache()
        for table in all_tables:
            schema = _get_nexo_table_schema(table)
            if schema:
                result["schemas"][table] = schema
            elif "cloud_sql" in cache and table in cache["cloud_sql"]:
                result["schemas"][table] = cache["cloud_sql"][table]

    # 5. Check for blocking rules (5+ repetitions)
    for learning in result["learnings"]:
        lid = learning["id"]
        rep_count = conn.execute(
            "SELECT COUNT(*) as cnt FROM error_repetitions WHERE original_learning_id = ?",
            (lid,)
        ).fetchone()["cnt"]
        if rep_count >= 5:
            result["blocking_rules"].append(
                {"id": lid, "rule": learning["rule"], "repetitions": rep_count}
            )

    # 6. Area repetition rate (repetitions per learning in this area)
    if area:
        total_area = conn.execute(
            "SELECT COUNT(*) as cnt FROM learnings WHERE category = ?", (area,)
        ).fetchone()["cnt"]
        reps_area = conn.execute(
            "SELECT COUNT(*) as cnt FROM error_repetitions WHERE area = ?", (area,)
        ).fetchone()["cnt"]
        if total_area > 0:
            result["area_repetition_rate"] = round(reps_area / total_area, 2)

    # 7. Cognitive metacognition — semantic search for related warnings.
    # The trust score tunes rigor: low trust widens the search (more, looser
    # matches); high trust narrows it.
    cognitive_warnings = []
    trust_note = ""
    try:
        import cognitive
        trust = cognitive.get_trust_score()

        if trust < 40:
            cog_top_k = 6
            cog_min_score = 0.55
            trust_note = f" [RIGOR: PARANOID — trust={trust:.0f}]"
        elif trust > 80:
            cog_top_k = 2
            cog_min_score = 0.75
            trust_note = f" [RIGOR: FLUENT — trust={trust:.0f}]"
        else:
            cog_top_k = 3
            cog_min_score = 0.65

        query_parts = []
        if file_list:
            query_parts.append(f"editing files: {', '.join(file_list[:5])}")
        if area:
            query_parts.append(f"area: {area}")
        if query_parts:
            query_text = ". ".join(query_parts)
            cog_results = cognitive.search(
                query_text, top_k=cog_top_k, min_score=cog_min_score,
                stores="ltm", source_type_filter="learning", rehearse=False
            )
            for r in cog_results:
                cognitive_warnings.append(
                    f"[{r['score']:.2f}]: {r['source_title']} — {r['content'][:200]}"
                )
    except Exception:
        pass  # Cognitive is optional

    # Log the guard check for the stats/effectiveness reports
    conn.execute(
        "INSERT INTO guard_checks (session_id, files, area, learnings_returned, blocking_rules_returned) "
        "VALUES (?, ?, ?, ?, ?)",
        ("", files, area, len(result["learnings"]) + len(result["universal_rules"]),
         len(result["blocking_rules"]))
    )
    conn.commit()

    # Format output
    lines = []
    if result["blocking_rules"]:
        lines.append("BLOCKING RULES (resolve BEFORE writing):")
        for r in result["blocking_rules"]:
            lines.append(f" #{r['id']} ({r['repetitions']}x repeated): {r['rule']}")
        lines.append("")

    if result["learnings"]:
        lines.append(f"RELEVANT LEARNINGS ({len(result['learnings'])}):")
        for l in result["learnings"][:15]:
            lines.append(f" #{l['id']} [{l['category']}] {l['rule']}")
        lines.append("")

    if result["universal_rules"]:
        lines.append(f"UNIVERSAL RULES ({len(result['universal_rules'])}):")
        for r in result["universal_rules"][:10]:
            lines.append(f" #{r['id']} {r['rule']}")
        lines.append("")

    if result["schemas"]:
        lines.append("DB SCHEMAS:")
        for table, schema in result["schemas"].items():
            lines.append(f" {table}: {schema}")
        lines.append("")

    if result["area_repetition_rate"] > 0:
        lines.append(f"Area repetition rate: {result['area_repetition_rate']:.0%}")

    if cognitive_warnings:
        lines.append(f"\nCOGNITIVE SEMANTIC MATCHES{trust_note}:")
        for w in cognitive_warnings:
            lines.append(f" COGNITIVE MATCH {w}")

    if not lines:
        return "No relevant learnings found for these files/area."

    return "\n".join(lines)
244
+
245
+
246
def handle_guard_stats(period_days: int = 7) -> str:
    """Get guard system statistics for the specified period.

    Args:
        period_days: Number of days to look back (default 7)
    """
    conn = get_db()
    now = datetime.now()
    cutoff = (now - timedelta(days=period_days)).strftime("%Y-%m-%d %H:%M:%S")

    def count(sql: str, params: tuple = ()) -> int:
        """Run a COUNT(*) query (aliased `cnt`) and return the integer."""
        return conn.execute(sql, params).fetchone()["cnt"]

    total_learnings = count("SELECT COUNT(*) as cnt FROM learnings")
    total_reps = count(
        "SELECT COUNT(*) as cnt FROM error_repetitions WHERE created_at > ?", (cutoff,)
    )

    # NOTE(review): this query binds a unix timestamp while every other
    # created_at comparison here uses a "%Y-%m-%d %H:%M:%S" string — confirm
    # which format learnings.created_at actually stores.
    new_learnings_period = count(
        "SELECT COUNT(*) as cnt FROM learnings WHERE created_at > ?",
        ((now - timedelta(days=period_days)).timestamp(),)
    )
    rep_rate = round(total_reps / new_learnings_period, 2) if new_learnings_period > 0 else 0.0

    # Compare against the preceding window of equal length to derive a trend.
    prev_cutoff = (now - timedelta(days=period_days * 2)).strftime("%Y-%m-%d %H:%M:%S")
    prev_reps = count(
        "SELECT COUNT(*) as cnt FROM error_repetitions WHERE created_at > ? AND created_at <= ?",
        (prev_cutoff, cutoff)
    )
    if total_reps < prev_reps:
        trend = "improving"
    elif total_reps > prev_reps:
        trend = "worsening"
    else:
        trend = "stable"

    area_rows = conn.execute(
        "SELECT area, COUNT(*) as cnt FROM error_repetitions WHERE created_at > ? GROUP BY area ORDER BY cnt DESC LIMIT 5",
        (cutoff,)
    ).fetchall()

    # Learnings whose IDs appear most often in error_repetitions (all time).
    most_ignored = []
    for rep in conn.execute(
        "SELECT original_learning_id, COUNT(*) as cnt FROM error_repetitions "
        "GROUP BY original_learning_id ORDER BY cnt DESC LIMIT 5"
    ).fetchall():
        learning = conn.execute(
            "SELECT title FROM learnings WHERE id = ?", (rep["original_learning_id"],)
        ).fetchone()
        if learning:
            most_ignored.append({
                "id": rep["original_learning_id"],
                "title": learning["title"],
                "times_repeated": rep["cnt"],
            })

    checks_count = count(
        "SELECT COUNT(*) as cnt FROM guard_checks WHERE created_at > ?", (cutoff,)
    )

    lines = [
        f"GUARD STATS (last {period_days} days):",
        f" Repetition rate: {rep_rate:.0%} ({trend})",
        f" Total learnings: {total_learnings}",
        f" Repetitions in period: {total_reps}",
        f" Guard checks performed: {checks_count}",
    ]

    if area_rows:
        lines.append(" Top areas:")
        for r in area_rows:
            lines.append(f" {r['area']}: {r['cnt']} repetitions")

    if most_ignored:
        lines.append(" Most repeated learnings:")
        for m in most_ignored:
            lines.append(f" #{m['id']} ({m['times_repeated']}x): {m['title'][:60]}")

    return "\n".join(lines)
316
+
317
+
318
def handle_guard_log_repetition(new_learning_id: int, original_learning_id: int, similarity: float = 0.75) -> str:
    """Log that a new learning is similar to an existing one (repetition detected).

    Args:
        new_learning_id: ID of the new learning
        original_learning_id: ID of the original learning it matches
        similarity: Similarity score (0-1)
    """
    conn = get_db()

    # The repetition inherits its area from the new learning's category.
    found = conn.execute(
        "SELECT category FROM learnings WHERE id = ?", (new_learning_id,)
    ).fetchone()
    if found is None:
        return f"ERROR: Learning #{new_learning_id} not found."

    conn.execute(
        "INSERT INTO error_repetitions (new_learning_id, original_learning_id, similarity, area) VALUES (?,?,?,?)",
        (new_learning_id, original_learning_id, similarity, found["category"])
    )
    conn.commit()

    return f"Repetition logged: #{new_learning_id} similar to #{original_learning_id} ({similarity:.0%})"
340
+
341
+
342
# Tool registry: (handler, tool name, description) triples — presumably
# consumed by the plugin loader to expose these as MCP tools; verify there.
TOOLS = [
    (handle_guard_check, "nexo_guard_check", "Check learnings relevant to files/area BEFORE editing code. Call this before any code change."),
    (handle_guard_stats, "nexo_guard_stats", "Get guard system statistics: repetition rate, trends, top problem areas"),
    (handle_guard_log_repetition, "nexo_guard_log_repetition", "Log a learning repetition (new learning matches existing one)"),
]
@@ -0,0 +1,47 @@
1
+ """Preferences plugin — learned behavior patterns and workflow rules."""
2
+ from db import set_preference, get_preference, list_preferences, delete_preference
3
+
4
def handle_preference_get(key: str) -> str:
    """Get a specific preference by key."""
    pref = get_preference(key)
    if not pref:
        return f"Preferencia '{key}' no encontrada."
    return f"{pref['key']} = {pref['value']} (cat: {pref['category']})"
9
+
10
def handle_preference_set(key: str, value: str, category: str = "general") -> str:
    """Set a preference (creates or updates)."""
    set_preference(key, value, category)

    # Mirror the preference into long-term cognitive memory, best-effort only:
    # the cognitive subsystem is optional and must never break the write above.
    try:
        import cognitive

        cognitive.ingest_to_ltm(f"{key}: {value}", "preference", key, key, "")
    except Exception:
        pass

    return f"Preferencia '{key}' = '{value}' ({category})"
19
+
20
def handle_preference_list(category: str = "") -> str:
    """List all preferences, optionally filtered by category.

    Args:
        category: Optional category filter; empty string lists everything.

    Returns:
        A human-readable listing grouped by category, or a "no preferences"
        message when nothing matches.
    """
    prefs = list_preferences(category)
    if not prefs:
        return "Sin preferencias."
    # Group by category; dicts preserve first-seen insertion order.
    grouped = {}
    for p in prefs:
        grouped.setdefault(p["category"], []).append(p)
    lines = ["PREFERENCIAS:"]
    for c, items in grouped.items():
        lines.append(f"\n [{c.upper()}]")
        for p in items:
            lines.append(f" {p['key']} = {p['value']}")
    return "\n".join(lines)
35
+
36
def handle_preference_delete(key: str) -> str:
    """Delete a preference."""
    deleted = delete_preference(key)
    if deleted:
        return f"Preferencia '{key}' eliminada."
    return f"ERROR: Preferencia '{key}' no encontrada."
41
+
42
# Tool registry: (handler, tool name, description) triples — presumably
# consumed by the plugin loader to expose these as MCP tools; verify there.
TOOLS = [
    (handle_preference_get, "nexo_preference_get", "Get a specific preference value"),
    (handle_preference_set, "nexo_preference_set", "Set a preference (creates or updates)"),
    (handle_preference_list, "nexo_preference_list", "List all preferences grouped by category"),
    (handle_preference_delete, "nexo_preference_delete", "Delete a preference"),
]
@@ -0,0 +1,213 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ NEXO Auto-Update — checks for new versions and applies updates.
4
+
5
+ Runs at boot via the catch-up system or manually.
6
+ Compares local version with the latest GitHub release.
7
+ If a new version is available, downloads and applies the update.
8
+ """
9
+
10
+ import json
11
+ import os
12
+ import shutil
13
+ import subprocess
14
+ import sys
15
+ from datetime import datetime
16
+ from pathlib import Path
17
+
18
+ NEXO_HOME = Path(os.environ.get("NEXO_HOME", str(Path.home() / ".nexo")))
19
+ VERSION_FILE = NEXO_HOME / "version.json"
20
+ LOG_FILE = NEXO_HOME / "logs" / "auto-update.log"
21
+ REPO = "wazionapps/nexo"
22
+
23
+
24
def log(msg: str):
    """Print *msg* with a timestamp and append the same line to LOG_FILE."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{stamp}] {msg}"
    print(entry, flush=True)
    # Make sure ~/.nexo/logs/ exists before the first write.
    LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
    with open(LOG_FILE, "a") as fh:
        fh.write(entry + "\n")
31
+
32
+
33
def get_local_version() -> str:
    """Read locally installed version; "0.0.0" when missing or unreadable."""
    try:
        if VERSION_FILE.exists():
            return json.loads(VERSION_FILE.read_text()).get("version", "0.0.0")
    except Exception:
        # Treat a corrupt version.json the same as a fresh install.
        pass
    return "0.0.0"
41
+
42
+
43
def get_remote_version() -> dict | None:
    """Check the latest GitHub release via the `gh` CLI; None on any failure."""
    try:
        proc = subprocess.run(
            ["gh", "api", f"repos/{REPO}/releases/latest"],
            capture_output=True, text=True, timeout=15
        )
        if proc.returncode != 0:
            return None
        payload = json.loads(proc.stdout)
    except Exception:
        # No gh binary, no network, timeout, or malformed JSON — all mean "unknown".
        return None
    return {
        "version": payload.get("tag_name", "").lstrip("v"),
        "tarball_url": payload.get("tarball_url", ""),
        "published_at": payload.get("published_at", ""),
        # Release notes trimmed for logging.
        "body": payload.get("body", "")[:500],
    }
61
+
62
+
63
def version_compare(local: str, remote: str) -> int:
    """Compare semver strings. Returns -1 (local older), 0 (same), 1 (local newer).

    Each dot-separated component is reduced to its leading digits, so a
    pre-release suffix ("1.2.3-beta") compares by its numeric part instead of
    being silently dropped (the old `isdigit()` filter discarded "3-beta"
    entirely).  Components are zero-padded to equal length so "1.0" compares
    equal to "1" (previously "1.0" was reported as newer than "1").
    """
    def parse(v: str) -> list:
        nums = []
        for part in v.split("."):
            # Take the leading run of digits; "3-beta" -> 3, "beta" -> 0.
            i = 0
            while i < len(part) and part[i].isdigit():
                i += 1
            nums.append(int(part[:i]) if i else 0)
        return nums

    l, r = parse(local), parse(remote)
    # Zero-pad so tuples of different lengths compare numerically.
    width = max(len(l), len(r))
    l += [0] * (width - len(l))
    r += [0] * (width - len(r))
    if l < r:
        return -1
    elif l > r:
        return 1
    return 0
75
+
76
+
77
def apply_update(tarball_url: str, new_version: str) -> bool:
    """Download and apply update from GitHub release tarball.

    Downloads with curl, extracts, backs up every file about to be overwritten
    under NEXO_HOME/backups/pre-update-<version>/, then copies the release's
    src/ tree (core .py files, plugins, scripts, hooks) into NEXO_HOME and
    records the new version in version.json.

    Args:
        tarball_url: GitHub release tarball URL.
        new_version: Version being installed (names the backup directory).

    Returns:
        True on success, False on any failure. The temp dir is always removed.
    """
    import tempfile

    log(f"Downloading update v{new_version}...")
    tmp_dir = Path(tempfile.mkdtemp(prefix="nexo-update-"))

    try:
        # Download tarball.  text=True so a failure logs stderr as a string
        # (previously it was logged as a bytes repr).
        tarball = tmp_dir / "release.tar.gz"
        result = subprocess.run(
            ["curl", "-sL", "-o", str(tarball), tarball_url],
            capture_output=True, text=True, timeout=60
        )
        if result.returncode != 0:
            log(f"Download failed: {result.stderr}")
            return False

        # Extract.  (Fixed: the return code was previously ignored, so a
        # corrupt download fell through to a misleading "no directory" error.)
        result = subprocess.run(
            ["tar", "xzf", str(tarball), "-C", str(tmp_dir)],
            capture_output=True, text=True, timeout=30
        )
        if result.returncode != 0:
            log(f"Extraction failed: {result.stderr}")
            return False

        # Find extracted directory (GitHub tarballs have a top-level dir)
        extracted = [d for d in tmp_dir.iterdir() if d.is_dir()]
        if not extracted:
            log("No directory found in tarball")
            return False

        src_dir = extracted[0] / "src"
        if not src_dir.exists():
            log("No src/ directory in release")
            return False

        # Backup current files before overwriting anything
        backup_dir = NEXO_HOME / "backups" / f"pre-update-{new_version}"
        backup_dir.mkdir(parents=True, exist_ok=True)

        files_updated = 0

        # Update core Python files (backed up flat under backup_dir)
        for py_file in src_dir.glob("*.py"):
            dest = NEXO_HOME / py_file.name
            if dest.exists():
                shutil.copy2(dest, backup_dir / py_file.name)
            shutil.copy2(py_file, dest)
            files_updated += 1

        # Update plugins (backups prefixed "plugin_" to avoid name clashes)
        plugins_src = src_dir / "plugins"
        if plugins_src.exists():
            plugins_dest = NEXO_HOME / "plugins"
            plugins_dest.mkdir(exist_ok=True)
            for py_file in plugins_src.glob("*.py"):
                dest = plugins_dest / py_file.name
                if dest.exists():
                    shutil.copy2(dest, backup_dir / f"plugin_{py_file.name}")
                shutil.copy2(py_file, dest)
                files_updated += 1

        # Update scripts (backups prefixed "script_")
        scripts_src = src_dir / "scripts"
        if scripts_src.exists():
            scripts_dest = NEXO_HOME / "scripts"
            scripts_dest.mkdir(exist_ok=True)
            for py_file in scripts_src.glob("*.py"):
                dest = scripts_dest / py_file.name
                if dest.exists():
                    shutil.copy2(dest, backup_dir / f"script_{py_file.name}")
                shutil.copy2(py_file, dest)
                files_updated += 1

        # Update hooks — overwritten without backup and marked executable
        hooks_src = src_dir / "hooks"
        if hooks_src.exists():
            hooks_dest = NEXO_HOME / "hooks"
            hooks_dest.mkdir(exist_ok=True)
            for sh_file in hooks_src.glob("*.sh"):
                dest = hooks_dest / sh_file.name
                shutil.copy2(sh_file, dest)
                os.chmod(dest, 0o755)
                files_updated += 1

        # Save new version
        VERSION_FILE.write_text(json.dumps({
            "version": new_version,
            "updated_at": datetime.now().isoformat(),
            "files_updated": files_updated,
            "backup": str(backup_dir),
        }, indent=2))

        log(f"Update applied: {files_updated} files updated. Backup at {backup_dir}")
        return True

    except Exception as e:
        log(f"Update failed: {e}")
        return False
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)
176
+
177
+
178
def main():
    """Check local vs latest GitHub release and apply the update if newer."""
    log("=== NEXO Auto-Update check ===")

    local_ver = get_local_version()
    log(f"Local version: {local_ver}")

    remote = get_remote_version()
    if remote is None:
        log("Could not check remote version (no network or no releases)")
        return

    remote_ver = remote["version"]
    log(f"Remote version: {remote_ver}")

    # Nothing to do when local is the same or newer.
    if version_compare(local_ver, remote_ver) >= 0:
        log("Already up to date.")
        return

    log(f"Update available: {local_ver} → {remote_ver}")
    log(f"Release notes: {remote['body'][:200]}")

    if not remote["tarball_url"]:
        log("No tarball URL in release — manual update needed")
    elif apply_update(remote["tarball_url"], remote_ver):
        log(f"Successfully updated to v{remote_ver}")
    else:
        log("Update failed — will retry next boot")

    log("=== Done ===")
210
+
211
+
212
# Script entry point: run the update check when invoked directly.
if __name__ == "__main__":
    main()