nexo-brain 0.3.3 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "nexo-brain",
3
- "version": "0.3.3",
3
+ "version": "0.3.5",
4
4
  "mcpName": "io.github.wazionapps/nexo",
5
5
  "description": "NEXO — Cognitive co-operator for Claude Code. Atkinson-Shiffrin memory, semantic RAG, trust scoring, and metacognitive error prevention.",
6
6
  "bin": {
package/src/cognitive.py CHANGED
@@ -30,13 +30,13 @@ DISCRIMINATING_ENTITIES = {
30
30
  # OS / Environment
31
31
  "linux", "mac", "macos", "windows", "darwin", "ubuntu", "debian", "alpine",
32
32
  # Platforms
33
- "shopify", "whatsapp", "chrome", "firefox",
33
+ "nexo", "other", "whatsapp", "chrome", "firefox",
34
34
  # Languages / Runtimes
35
35
  "python", "php", "javascript", "typescript", "node", "deno", "ruby",
36
36
  # Versions
37
37
  "v1", "v2", "v3", "v4", "v5", "5.6", "7.4", "8.0", "8.1", "8.2",
38
38
  # Infrastructure
39
- "cloudrun", "gcloud", "vps", "local", "production", "staging",
39
+ "vps", "local", "production", "staging",
40
40
  # DB
41
41
  "mysql", "sqlite", "postgresql", "postgres", "redis",
42
42
  }
@@ -61,8 +61,8 @@ URGENCY_SIGNALS = {
61
61
  "rápido", "ya", "ahora", "urgente", "asap", "inmediatamente", "corre",
62
62
  }
63
63
 
64
- # Trust score events and their point values
65
- TRUST_EVENTS = {
64
+ # Default trust-score event deltas (overridable via the trust_event_config table)
65
+ _DEFAULT_TRUST_EVENTS = {
66
66
  # Positive
67
67
  "explicit_thanks": +3,
68
68
  "delegation": +2, # the user delegates new task without micromanaging
@@ -77,6 +77,113 @@ TRUST_EVENTS = {
77
77
  "forgot_followup": -4, # Forgot to mark followup or execute it
78
78
  }
79
79
 
80
+ # Lazy-loaded from DB (trust_event_config table overrides defaults)
81
+ _trust_events_cache = None
82
+ _trust_events_cache_ts = 0
83
+
84
+
85
+ def get_trust_events() -> dict:
86
+ """Get trust events with deltas. DB overrides take priority over defaults."""
87
+ global _trust_events_cache, _trust_events_cache_ts
88
+ import time
89
+ now = time.time()
90
+ # Cache for 60s to avoid constant DB reads
91
+ if _trust_events_cache is not None and (now - _trust_events_cache_ts) < 60:
92
+ return _trust_events_cache
93
+
94
+ events = dict(_DEFAULT_TRUST_EVENTS)
95
+ try:
96
+ db = _get_db()
97
+ db.execute("""
98
+ CREATE TABLE IF NOT EXISTS trust_event_config (
99
+ event TEXT PRIMARY KEY,
100
+ delta REAL NOT NULL,
101
+ description TEXT DEFAULT '',
102
+ updated_at TEXT DEFAULT (datetime('now'))
103
+ )
104
+ """)
105
+ rows = db.execute("SELECT event, delta FROM trust_event_config").fetchall()
106
+ for r in rows:
107
+ events[r[0]] = r[1]
108
+ except Exception:
109
+ pass
110
+ _trust_events_cache = events
111
+ _trust_events_cache_ts = now
112
+ return events
113
+
114
+ # Backward compatibility — existing code that reads TRUST_EVENTS directly still works
115
+ TRUST_EVENTS = _DEFAULT_TRUST_EVENTS
116
+
117
+ # Auto-detection patterns for trust events from user text
118
+ # Each entry: event_name -> {"patterns": trigger keywords/phrases, "min_matches": threshold}
119
+ TRUST_AUTO_PATTERNS = {
120
+ "explicit_thanks": {
121
+ "patterns": [
122
+ "gracias", "buen trabajo", "bien hecho", "perfecto", "genial",
123
+ "excelente", "fenomenal", "great job", "nice work", "thank",
124
+ "thanks", "awesome", "amazing", "love it", "me encanta",
125
+ ],
126
+ "min_matches": 1,
127
+ },
128
+ "correction": {
129
+ "patterns": [
130
+ "ya te dije", "ya te lo dije", "otra vez", "te he dicho",
131
+ "no es así", "eso no", "mal", "incorrecto", "equivocado",
132
+ "no no no", "that's wrong", "te aviso", "te avisé",
133
+ "2ª vez", "segunda vez", "te lo repito",
134
+ ],
135
+ "min_matches": 1,
136
+ },
137
+ "repeated_error": {
138
+ "patterns": [
139
+ "otra vez lo mismo", "siempre igual", "ya te lo dije antes",
140
+ "cuántas veces", "no aprendes", "same mistake", "again the same",
141
+ "ya van", "es la 2", "es la 3", "ya te avisé",
142
+ ],
143
+ "min_matches": 1,
144
+ },
145
+ "delegation": {
146
+ "patterns": [
147
+ "encárgate", "hazlo tú", "dale tú", "te lo dejo",
148
+ "manéjalo", "resuélvelo", "handle it", "take care of",
149
+ "you decide", "tú decides", "lo que veas", "como veas",
150
+ ],
151
+ "min_matches": 1,
152
+ },
153
+ }
154
+
155
+
156
+ def auto_detect_trust_events(text: str) -> list[dict]:
157
+ """Detect trust events from user text. Returns list of {event, delta, reason}.
158
+
159
+ Called automatically by heartbeat. Only fires once per event per heartbeat
160
+ to avoid double-counting.
161
+ """
162
+ if not text or len(text.strip()) < 5:
163
+ return []
164
+
165
+ text_lower = text.lower()
166
+ events = get_trust_events()
167
+ detected = []
168
+
169
+ for event_name, config in TRUST_AUTO_PATTERNS.items():
170
+ matches = [p for p in config["patterns"] if p in text_lower]
171
+ if len(matches) >= config["min_matches"]:
172
+ delta = events.get(event_name, _DEFAULT_TRUST_EVENTS.get(event_name, 0))
173
+ detected.append({
174
+ "event": event_name,
175
+ "delta": delta,
176
+ "reason": f"auto-detected: {', '.join(matches[:3])}",
177
+ })
178
+
179
+ # Priority: if repeated_error detected, remove correction (it's a superset)
180
+ event_names = {d["event"] for d in detected}
181
+ if "repeated_error" in event_names and "correction" in event_names:
182
+ detected = [d for d in detected if d["event"] != "correction"]
183
+ # If explicit_thanks and delegation both detected, keep both (they're independent)
184
+
185
+ return detected
186
+
80
187
  _model = None
81
188
  _conn = None
82
189
 
@@ -1956,7 +2063,7 @@ def resolve_dissonance(memory_id: int, resolution: str, context: str = "") -> st
1956
2063
  Args:
1957
2064
  memory_id: The LTM memory that conflicts with the new instruction
1958
2065
  resolution: One of:
1959
- - 'paradigm_shift': the user changed his mind permanently. Decay old memory,
2066
+ - 'paradigm_shift': the user changed their mind permanently. Decay old memory,
1960
2067
  new instruction becomes the standard.
1961
2068
  - 'exception': This is a one-time override. Keep old memory as standard.
1962
2069
  - 'override': Old memory was wrong. Mark as corrupted and decay to dormant.
@@ -2159,7 +2266,8 @@ def adjust_trust(event: str, context: str = "", custom_delta: float = None) -> d
2159
2266
  db = _get_db()
2160
2267
  old_score = get_trust_score()
2161
2268
 
2162
- delta = custom_delta if custom_delta is not None else TRUST_EVENTS.get(event, 0)
2269
+ events = get_trust_events()
2270
+ delta = custom_delta if custom_delta is not None else events.get(event, 0)
2163
2271
  if delta == 0 and custom_delta is None:
2164
2272
  return {"old_score": old_score, "delta": 0, "new_score": old_score, "event": event, "error": "unknown event"}
2165
2273
 
package/src/db.py CHANGED
@@ -241,17 +241,6 @@ def init_db():
241
241
  user_signals TEXT,
242
242
  summary TEXT NOT NULL
243
243
  );
244
- CREATE TABLE IF NOT EXISTS session_diary_draft (
245
- sid TEXT PRIMARY KEY,
246
- summary_draft TEXT DEFAULT '',
247
- tasks_seen TEXT DEFAULT '[]',
248
- change_ids TEXT DEFAULT '[]',
249
- decision_ids TEXT DEFAULT '[]',
250
- last_context_hint TEXT DEFAULT '',
251
- heartbeat_count INTEGER DEFAULT 0,
252
- created_at TEXT DEFAULT (datetime('now')),
253
- updated_at TEXT DEFAULT (datetime('now'))
254
- );
255
244
 
256
245
  CREATE TABLE IF NOT EXISTS evolution_metrics (
257
246
  id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -279,55 +268,8 @@ def init_db():
279
268
  """)
280
269
  # foreign_keys=ON is set in get_db() per-connection
281
270
 
282
- # ── Schema migrations (idempotent) ────────────────────────────
283
- _migrate_add_column(conn, "learnings", "reasoning", "TEXT")
284
- _migrate_add_column(conn, "learnings", "prevention", "TEXT DEFAULT ''")
285
- _migrate_add_column(conn, "learnings", "applies_to", "TEXT DEFAULT ''")
286
- _migrate_add_column(conn, "learnings", "status", "TEXT DEFAULT 'active'")
287
- _migrate_add_column(conn, "learnings", "review_due_at", "REAL")
288
- _migrate_add_column(conn, "learnings", "last_reviewed_at", "REAL")
289
- _migrate_add_column(conn, "followups", "reasoning", "TEXT")
290
- _migrate_add_column(conn, "task_history", "reasoning", "TEXT")
291
- _migrate_add_column(conn, "decisions", "status", "TEXT DEFAULT 'pending_review'")
292
- _migrate_add_column(conn, "decisions", "review_due_at", "TEXT")
293
- _migrate_add_column(conn, "decisions", "last_reviewed_at", "TEXT")
294
- _migrate_add_index(conn, "idx_decisions_domain", "decisions", "domain")
295
- _migrate_add_index(conn, "idx_decisions_created", "decisions", "created_at")
296
- _migrate_add_index(conn, "idx_decisions_review_due", "decisions", "review_due_at")
297
- _migrate_add_index(conn, "idx_session_diary_sid", "session_diary", "session_id")
298
- _migrate_add_column(conn, "session_diary", "mental_state", "TEXT")
299
- _migrate_add_column(conn, "session_diary", "domain", "TEXT")
300
- _migrate_add_column(conn, "session_diary", "user_signals", "TEXT")
301
- _migrate_add_column(conn, "session_diary", "self_critique", "TEXT")
302
- _migrate_add_column(conn, "session_diary", "source", "TEXT DEFAULT 'claude'")
303
- _migrate_add_index(conn, "idx_change_log_created", "change_log", "created_at")
304
- _migrate_add_index(conn, "idx_change_log_files", "change_log", "files")
305
- _migrate_add_index(conn, "idx_learnings_status", "learnings", "status")
306
- _migrate_add_index(conn, "idx_learnings_review_due", "learnings", "review_due_at")
307
-
308
- conn.execute("""
309
- CREATE TABLE IF NOT EXISTS error_repetitions (
310
- id INTEGER PRIMARY KEY AUTOINCREMENT,
311
- new_learning_id INTEGER NOT NULL,
312
- original_learning_id INTEGER NOT NULL,
313
- similarity REAL NOT NULL,
314
- area TEXT NOT NULL,
315
- created_at TEXT DEFAULT (datetime('now'))
316
- )
317
- """)
318
- conn.execute("""
319
- CREATE TABLE IF NOT EXISTS guard_checks (
320
- id INTEGER PRIMARY KEY AUTOINCREMENT,
321
- session_id TEXT,
322
- files TEXT,
323
- area TEXT,
324
- learnings_returned INTEGER DEFAULT 0,
325
- blocking_rules_returned INTEGER DEFAULT 0,
326
- created_at TEXT DEFAULT (datetime('now'))
327
- )
328
- """)
329
- _migrate_add_index(conn, "idx_error_repetitions_area", "error_repetitions", "area")
330
- _migrate_add_index(conn, "idx_guard_checks_session", "guard_checks", "session_id")
271
+ # ── Run formal migrations ────────────────────────────────────
272
+ run_migrations(conn)
331
273
 
332
274
  # ── FTS5 unified search index ────────────────────────────────
333
275
  conn.execute("""
@@ -779,6 +721,149 @@ def _migrate_add_index(conn, index_name: str, table: str, column: str):
779
721
  conn.commit()
780
722
 
781
723
 
724
+ # ── Formal Migration System ─────────────────────────────────────
725
+ #
726
+ # Each migration is (version, name, callable). Migrations run once
727
+ # and are tracked in schema_migrations. The version number MUST be
728
+ # strictly increasing. Add new migrations at the end of the list.
729
+ #
730
+ # For users upgrading via npm/git, init_db() calls run_migrations()
731
+ # automatically — no manual steps needed.
732
+
733
+ def _m1_learnings_columns(conn):
734
+ _migrate_add_column(conn, "learnings", "reasoning", "TEXT")
735
+ _migrate_add_column(conn, "learnings", "prevention", "TEXT DEFAULT ''")
736
+ _migrate_add_column(conn, "learnings", "applies_to", "TEXT DEFAULT ''")
737
+ _migrate_add_column(conn, "learnings", "status", "TEXT DEFAULT 'active'")
738
+ _migrate_add_column(conn, "learnings", "review_due_at", "REAL")
739
+ _migrate_add_column(conn, "learnings", "last_reviewed_at", "REAL")
740
+
741
+ def _m2_followups_reasoning(conn):
742
+ _migrate_add_column(conn, "followups", "reasoning", "TEXT")
743
+ _migrate_add_column(conn, "task_history", "reasoning", "TEXT")
744
+
745
+ def _m3_decisions_review(conn):
746
+ _migrate_add_column(conn, "decisions", "status", "TEXT DEFAULT 'pending_review'")
747
+ _migrate_add_column(conn, "decisions", "review_due_at", "TEXT")
748
+ _migrate_add_column(conn, "decisions", "last_reviewed_at", "TEXT")
749
+ _migrate_add_index(conn, "idx_decisions_domain", "decisions", "domain")
750
+ _migrate_add_index(conn, "idx_decisions_created", "decisions", "created_at")
751
+ _migrate_add_index(conn, "idx_decisions_review_due", "decisions", "review_due_at")
752
+
753
+ def _m4_session_diary_columns(conn):
754
+ _migrate_add_index(conn, "idx_session_diary_sid", "session_diary", "session_id")
755
+ _migrate_add_column(conn, "session_diary", "mental_state", "TEXT")
756
+ _migrate_add_column(conn, "session_diary", "domain", "TEXT")
757
+ _migrate_add_column(conn, "session_diary", "user_signals", "TEXT")
758
+ _migrate_add_column(conn, "session_diary", "self_critique", "TEXT")
759
+
760
+ def _m5_change_log_indexes(conn):
761
+ _migrate_add_index(conn, "idx_change_log_created", "change_log", "created_at")
762
+ _migrate_add_index(conn, "idx_change_log_files", "change_log", "files")
763
+ _migrate_add_index(conn, "idx_learnings_status", "learnings", "status")
764
+ _migrate_add_index(conn, "idx_learnings_review_due", "learnings", "review_due_at")
765
+
766
+ def _m6_error_guard_tables(conn):
767
+ conn.execute("""
768
+ CREATE TABLE IF NOT EXISTS error_repetitions (
769
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
770
+ new_learning_id INTEGER NOT NULL,
771
+ original_learning_id INTEGER NOT NULL,
772
+ similarity REAL NOT NULL,
773
+ area TEXT NOT NULL,
774
+ created_at TEXT DEFAULT (datetime('now'))
775
+ )
776
+ """)
777
+ conn.execute("""
778
+ CREATE TABLE IF NOT EXISTS guard_checks (
779
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
780
+ session_id TEXT,
781
+ files TEXT,
782
+ area TEXT,
783
+ learnings_returned INTEGER DEFAULT 0,
784
+ blocking_rules_returned INTEGER DEFAULT 0,
785
+ created_at TEXT DEFAULT (datetime('now'))
786
+ )
787
+ """)
788
+ _migrate_add_index(conn, "idx_error_repetitions_area", "error_repetitions", "area")
789
+ _migrate_add_index(conn, "idx_guard_checks_session", "guard_checks", "session_id")
790
+
791
+ def _m7_diary_source_and_draft(conn):
792
+ _migrate_add_column(conn, "session_diary", "source", "TEXT DEFAULT 'claude'")
793
+ conn.execute("""
794
+ CREATE TABLE IF NOT EXISTS session_diary_draft (
795
+ sid TEXT PRIMARY KEY,
796
+ summary_draft TEXT DEFAULT '',
797
+ tasks_seen TEXT DEFAULT '[]',
798
+ change_ids TEXT DEFAULT '[]',
799
+ decision_ids TEXT DEFAULT '[]',
800
+ last_context_hint TEXT DEFAULT '',
801
+ heartbeat_count INTEGER DEFAULT 0,
802
+ created_at TEXT DEFAULT (datetime('now')),
803
+ updated_at TEXT DEFAULT (datetime('now'))
804
+ )
805
+ """)
806
+
807
+
808
+ # Migration registry — APPEND ONLY, never reorder or delete
809
+ MIGRATIONS = [
810
+ (1, "learnings_columns", _m1_learnings_columns),
811
+ (2, "followups_reasoning", _m2_followups_reasoning),
812
+ (3, "decisions_review", _m3_decisions_review),
813
+ (4, "session_diary_columns", _m4_session_diary_columns),
814
+ (5, "change_log_indexes", _m5_change_log_indexes),
815
+ (6, "error_guard_tables", _m6_error_guard_tables),
816
+ (7, "diary_source_and_draft", _m7_diary_source_and_draft),
817
+ ]
818
+
819
+
820
+ def run_migrations(conn=None):
821
+ """Run pending migrations. Tracks applied versions in schema_migrations.
822
+
823
+ Safe to call multiple times — skips already-applied migrations.
824
+ Called automatically by init_db() on every server start.
825
+ """
826
+ if conn is None:
827
+ conn = get_db()
828
+
829
+ conn.execute("""
830
+ CREATE TABLE IF NOT EXISTS schema_migrations (
831
+ version INTEGER PRIMARY KEY,
832
+ name TEXT NOT NULL,
833
+ applied_at TEXT DEFAULT (datetime('now'))
834
+ )
835
+ """)
836
+ conn.commit()
837
+
838
+ applied = {r[0] for r in conn.execute("SELECT version FROM schema_migrations").fetchall()}
839
+
840
+ for version, name, fn in MIGRATIONS:
841
+ if version not in applied:
842
+ try:
843
+ fn(conn)
844
+ conn.execute(
845
+ "INSERT INTO schema_migrations (version, name) VALUES (?, ?)",
846
+ (version, name)
847
+ )
848
+ conn.commit()
849
+ except Exception as e:
850
+ # Log but don't crash — partial migration is better than no server
851
+ import sys
852
+ print(f"[MIGRATION] v{version} ({name}) failed: {e}", file=sys.stderr)
853
+
854
+ return len(MIGRATIONS) - len(applied)
855
+
856
+
857
+ def get_schema_version() -> int:
858
+ """Return the highest applied migration version, or 0 if none."""
859
+ conn = get_db()
860
+ try:
861
+ row = conn.execute("SELECT MAX(version) FROM schema_migrations").fetchone()
862
+ return row[0] or 0
863
+ except Exception:
864
+ return 0
865
+
866
+
782
867
  def _gen_id(prefix: str, length: int = 8) -> str:
783
868
  """Generate a random ID like 'msg-a1b2c3' or 'q-x9y8z7w6'."""
784
869
  chars = string.ascii_lowercase + string.digits
@@ -2165,7 +2250,7 @@ def read_session_diary(session_id: str = '', last_n: int = 3, last_day: bool = F
2165
2250
  - session_id: returns entries for that specific session
2166
2251
  - last_day: returns ALL entries from the most recent day (multi-terminal aware)
2167
2252
  - last_n: returns last N entries (default)
2168
- - domain: filter by project context (project-a, project-b, nexo, other)
2253
+ - domain: filter by project context (nexo, other)
2169
2254
  """
2170
2255
  conn = get_db()
2171
2256
  domain_clause = " AND domain = ?" if domain else ""
package/src/server.py CHANGED
@@ -52,7 +52,7 @@ mcp = FastMCP(
52
52
  name="nexo",
53
53
  instructions=(
54
54
  "NEXO operational server. Provides session coordination, "
55
- "reminders, followups, and menu for user operations.\n\n"
55
+ "reminders, followups, and menu for the user's operations.\n\n"
56
56
  "When working with tool results, write down any important information "
57
57
  "you might need later in your response, as the original tool result "
58
58
  "may be cleared later."
@@ -75,15 +75,16 @@ def nexo_startup(task: str = "Startup") -> str:
75
75
 
76
76
 
77
77
  @mcp.tool
78
- def nexo_heartbeat(sid: str, task: str) -> str:
79
- """Update session task, check inbox and pending questions.
78
+ def nexo_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
79
+ """Update session task, check inbox and pending questions. Auto-detects trust events.
80
80
 
81
81
  Call this at the START of every user interaction (before doing work).
82
82
  Args:
83
83
  sid: Your session ID from nexo_startup.
84
84
  task: Brief description of current work (5-10 words).
85
+ context_hint: Last 2-3 sentences from the user or current topic. Used for sentiment detection, trust auto-scoring, and mid-session RAG. ALWAYS provide this for best results.
85
86
  """
86
- return handle_heartbeat(sid, task)
87
+ return handle_heartbeat(sid, task, context_hint)
87
88
 
88
89
 
89
90
  @mcp.tool
@@ -312,7 +313,7 @@ def nexo_learning_add(category: str, title: str, content: str, reasoning: str =
312
313
  """Add a new learning (resolved error, pattern, gotcha).
313
314
 
314
315
  Args:
315
- category: One of: general, code, infrastructure, api, database, security, deployment, testing, performance, ux.
316
+ category: One of: nexo-ops, infrastructure, security, brain-engine, other.
316
317
  title: Short title for the learning.
317
318
  content: Full description with context and solution.
318
319
  reasoning: WHY this matters — what led to discovering this (optional).
@@ -102,6 +102,19 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
102
102
  except Exception:
103
103
  pass
104
104
 
105
+ # Auto-detect trust events from context_hint
106
+ if context_hint and len(context_hint.strip()) >= 10:
107
+ try:
108
+ import cognitive
109
+ auto_events = cognitive.auto_detect_trust_events(context_hint)
110
+ for ae in auto_events:
111
+ result = cognitive.adjust_trust(ae["event"], ae["reason"], ae["delta"])
112
+ if result.get("delta", 0) != 0:
113
+ parts.append("")
114
+ parts.append(f"TRUST AUTO: {result['old_score']:.0f} → {result['new_score']:.0f} ({result['delta']:+.0f}) [{ae['event']}] {ae['reason']}")
115
+ except Exception:
116
+ pass # Auto-trust is best-effort
117
+
105
118
  # Mid-session RAG: if context_hint provided, check for context shift
106
119
  if context_hint and len(context_hint.strip()) >= 15:
107
120
  try: