nexo-brain 0.3.2 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/auto_close_sessions.py +157 -0
- package/src/db.py +77 -5
- package/src/plugins/episodic_memory.py +65 -60
- package/src/tools_sessions.py +49 -2
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "nexo-brain",
|
|
3
|
-
"version": "0.3.
|
|
3
|
+
"version": "0.3.3",
|
|
4
4
|
"mcpName": "io.github.wazionapps/nexo",
|
|
5
5
|
"description": "NEXO — Cognitive co-operator for Claude Code. Atkinson-Shiffrin memory, semantic RAG, trust scoring, and metacognitive error prevention.",
|
|
6
6
|
"bin": {
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Auto-close orphan sessions and promote diary drafts.
|
|
3
|
+
|
|
4
|
+
Runs every 5 minutes via LaunchAgent (com.nexo.auto-close-sessions).
|
|
5
|
+
Finds sessions that exceeded TTL without a diary and promotes their
|
|
6
|
+
draft to a real diary entry marked as source=auto-close.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import os
|
|
11
|
+
import sys
|
|
12
|
+
import datetime
|
|
13
|
+
|
|
14
|
+
# Ensure we can import from nexo-mcp
|
|
15
|
+
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
16
|
+
os.environ["NEXO_SKIP_FS_INDEX"] = "1" # Skip FTS rebuild on import
|
|
17
|
+
|
|
18
|
+
from db import (
|
|
19
|
+
init_db, get_db, get_diary_draft, delete_diary_draft,
|
|
20
|
+
get_orphan_sessions, write_session_diary, now_epoch,
|
|
21
|
+
SESSION_STALE_SECONDS,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
LOG_DIR = os.path.expanduser("~/claude/operations/tool-logs")
|
|
25
|
+
AUTO_CLOSE_LOG = os.path.expanduser("~/claude/coordination/auto-close.log")
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def get_tool_log_summary(sid: str) -> str:
    """Summarize tool usage for a session from today's JSONL tool log.

    Scans ~/claude/operations/tool-logs/<today>.jsonl for entries whose
    session_id matches *sid*, ignoring read-only tools (Read, Grep, Glob).

    Args:
        sid: Session ID to filter log entries by.

    Returns:
        "Tools used: a, b, c" (last 15 distinct tools, first-seen order),
        or "" when the log is missing or holds no matching entries.
    """
    today = datetime.date.today().isoformat()
    log_path = os.path.join(LOG_DIR, f"{today}.jsonl")
    if not os.path.exists(log_path):
        return ""

    tools = []
    try:
        # Explicit encoding: the log is written as UTF-8 JSONL.
        with open(log_path, encoding="utf-8") as f:
            for line in f:
                try:
                    entry = json.loads(line)
                    if entry.get("session_id") == sid:
                        tool = entry.get("tool_name", "")
                        if tool and tool not in ("Read", "Grep", "Glob"):
                            tools.append(tool)
                except json.JSONDecodeError:
                    continue  # skip malformed lines, keep scanning
    except Exception:
        # Best-effort: an unreadable log must not break auto-close.
        pass

    if tools:
        # dict.fromkeys gives order-preserving de-duplication.
        unique = list(dict.fromkeys(tools))
        return f"Tools used: {', '.join(unique[-15:])}"
    return ""
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def promote_draft_to_diary(sid: str, draft: dict, task: str = ""):
    """Promote a diary draft to a real session diary entry.

    Builds a summary from the draft's accumulated context plus today's
    tool log, resolves referenced decision records, writes the diary with
    source="auto-close", then deletes the draft.

    Args:
        sid: Session ID being auto-closed.
        draft: Row dict from session_diary_draft (list fields JSON-encoded).
        task: Last known task for the session (currently unused; kept for
            call-site compatibility).
    """
    def _load_list(raw) -> list:
        # Draft fields default to '[]' in the schema, but guard against a
        # corrupt row so one bad draft cannot abort the whole sweep.
        try:
            val = json.loads(raw or "[]")
        except (json.JSONDecodeError, TypeError):
            return []
        return val if isinstance(val, list) else []

    tasks = _load_list(draft.get("tasks_seen", "[]"))
    change_ids = _load_list(draft.get("change_ids", "[]"))
    decision_ids = _load_list(draft.get("decision_ids", "[]"))
    context_hint = draft.get("last_context_hint", "")
    hb_count = draft.get("heartbeat_count", 0)

    summary_parts = []
    if draft.get("summary_draft"):
        summary_parts.append(draft["summary_draft"])

    tool_summary = get_tool_log_summary(sid)
    if tool_summary:
        summary_parts.append(tool_summary)

    summary = " | ".join(summary_parts) if summary_parts else f"Auto-closed session ({hb_count} heartbeats)"

    # Build decisions from actual decision records.
    decisions_text = ""
    if decision_ids:
        conn = get_db()
        # Placeholders only — IDs are bound, never interpolated.
        placeholders = ",".join("?" * len(decision_ids))
        rows = conn.execute(
            f"SELECT id, decision, domain FROM decisions WHERE id IN ({placeholders})",
            decision_ids
        ).fetchall()
        if rows:
            decisions_text = json.dumps([
                {"id": r["id"], "decision": r["decision"][:100], "domain": r["domain"]}
                for r in rows
            ])

    # Build context_next from the last topic hint and recent tasks.
    context_next = ""
    if context_hint:
        context_next = f"Last topic: {context_hint}"
    if tasks:
        context_next += f" | Tasks: {', '.join(tasks[-5:])}"

    write_session_diary(
        session_id=sid,
        decisions=decisions_text or "No decisions logged",
        summary=summary,
        discarded="",
        pending=f"Changes: {change_ids}" if change_ids else "",
        context_next=context_next,
        mental_state=f"[auto-close] Session ended without explicit diary. Draft promoted. {hb_count} heartbeats recorded.",
        domain="",
        user_signals="",
        self_critique="[auto-close] No self-critique available — session terminated without cleanup.",
        source="auto-close",
    )
    # Draft superseded by the real diary entry.
    delete_diary_draft(sid)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def main():
    """Sweep orphan sessions: promote their drafts (or write a minimal
    diary when no draft exists), purge the session rows, and append a
    summary line to the auto-close log."""
    init_db()
    conn = get_db()

    orphans = get_orphan_sessions(SESSION_STALE_SECONDS)
    if not orphans:
        return

    for orphan in orphans:
        session_id = orphan["sid"]
        draft = get_diary_draft(session_id)

        if draft is None:
            # No accumulated draft — record a minimal placeholder diary.
            write_session_diary(
                session_id=session_id,
                decisions="No decisions logged",
                summary=f"Auto-closed session. Task: {orphan.get('task', 'unknown')}",
                context_next="",
                mental_state="[auto-close] No draft available. Minimal diary.",
                self_critique="[auto-close] Session terminated without diary or draft.",
                source="auto-close",
            )
        else:
            promote_draft_to_diary(session_id, draft, task=orphan.get("task", ""))

        # Remove every trace of the closed session.
        conn.execute("DELETE FROM tracked_files WHERE sid = ?", (session_id,))
        conn.execute("DELETE FROM sessions WHERE sid = ?", (session_id,))
        conn.execute("DELETE FROM session_diary_draft WHERE sid = ?", (session_id,))

    conn.commit()

    # Record what this sweep did for later inspection.
    os.makedirs(os.path.dirname(AUTO_CLOSE_LOG), exist_ok=True)
    closed_sids = [o["sid"] for o in orphans]
    with open(AUTO_CLOSE_LOG, "a") as log_file:
        stamp = datetime.datetime.now().isoformat(timespec="seconds")
        log_file.write(f"{stamp} — auto-closed {len(orphans)} session(s): {closed_sids}\n")
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
if __name__ == "__main__":
|
|
157
|
+
main()
|
package/src/db.py
CHANGED
|
@@ -241,6 +241,18 @@ def init_db():
|
|
|
241
241
|
user_signals TEXT,
|
|
242
242
|
summary TEXT NOT NULL
|
|
243
243
|
);
|
|
244
|
+
CREATE TABLE IF NOT EXISTS session_diary_draft (
|
|
245
|
+
sid TEXT PRIMARY KEY,
|
|
246
|
+
summary_draft TEXT DEFAULT '',
|
|
247
|
+
tasks_seen TEXT DEFAULT '[]',
|
|
248
|
+
change_ids TEXT DEFAULT '[]',
|
|
249
|
+
decision_ids TEXT DEFAULT '[]',
|
|
250
|
+
last_context_hint TEXT DEFAULT '',
|
|
251
|
+
heartbeat_count INTEGER DEFAULT 0,
|
|
252
|
+
created_at TEXT DEFAULT (datetime('now')),
|
|
253
|
+
updated_at TEXT DEFAULT (datetime('now'))
|
|
254
|
+
);
|
|
255
|
+
|
|
244
256
|
CREATE TABLE IF NOT EXISTS evolution_metrics (
|
|
245
257
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
246
258
|
dimension TEXT NOT NULL,
|
|
@@ -286,6 +298,8 @@ def init_db():
|
|
|
286
298
|
_migrate_add_column(conn, "session_diary", "mental_state", "TEXT")
|
|
287
299
|
_migrate_add_column(conn, "session_diary", "domain", "TEXT")
|
|
288
300
|
_migrate_add_column(conn, "session_diary", "user_signals", "TEXT")
|
|
301
|
+
_migrate_add_column(conn, "session_diary", "self_critique", "TEXT")
|
|
302
|
+
_migrate_add_column(conn, "session_diary", "source", "TEXT DEFAULT 'claude'")
|
|
289
303
|
_migrate_add_index(conn, "idx_change_log_created", "change_log", "created_at")
|
|
290
304
|
_migrate_add_index(conn, "idx_change_log_files", "change_log", "files")
|
|
291
305
|
_migrate_add_index(conn, "idx_learnings_status", "learnings", "status")
|
|
@@ -2059,14 +2073,14 @@ def write_session_diary(session_id: str, decisions: str, summary: str,
|
|
|
2059
2073
|
discarded: str = '', pending: str = '',
|
|
2060
2074
|
context_next: str = '', mental_state: str = '',
|
|
2061
2075
|
domain: str = '', user_signals: str = '',
|
|
2062
|
-
self_critique: str = '') -> dict:
|
|
2076
|
+
self_critique: str = '', source: str = 'claude') -> dict:
|
|
2063
2077
|
"""Write a session diary entry with mental state and self-critique for continuity."""
|
|
2064
2078
|
conn = get_db()
|
|
2065
2079
|
cleanup_old_diaries()
|
|
2066
2080
|
cursor = conn.execute(
|
|
2067
|
-
"INSERT INTO session_diary (session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique) "
|
|
2068
|
-
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
|
2069
|
-
(session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique)
|
|
2081
|
+
"INSERT INTO session_diary (session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique, source) "
|
|
2082
|
+
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
|
2083
|
+
(session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique, source)
|
|
2070
2084
|
)
|
|
2071
2085
|
conn.commit()
|
|
2072
2086
|
did = cursor.lastrowid
|
|
@@ -2086,6 +2100,64 @@ def check_session_has_diary(session_id: str) -> bool:
|
|
|
2086
2100
|
return row is not None
|
|
2087
2101
|
|
|
2088
2102
|
|
|
2103
|
+
# ── Session Diary Drafts ─────────────────────────────────────────
|
|
2104
|
+
|
|
2105
|
+
|
|
2106
|
+
def upsert_diary_draft(sid: str, tasks_seen: str, change_ids: str,
                       decision_ids: str, last_context_hint: str,
                       heartbeat_count: int, summary_draft: str = '') -> dict:
    """Insert or refresh the diary draft row for a session.

    Called from the heartbeat path to accumulate context; the row is keyed
    by sid, so each call overwrites the previous snapshot and bumps
    updated_at.
    """
    row_values = (sid, summary_draft, tasks_seen, change_ids, decision_ids,
                  last_context_hint, heartbeat_count)
    conn = get_db()
    conn.execute(
        """INSERT INTO session_diary_draft
           (sid, summary_draft, tasks_seen, change_ids, decision_ids,
            last_context_hint, heartbeat_count, updated_at)
           VALUES (?, ?, ?, ?, ?, ?, ?, datetime('now'))
           ON CONFLICT(sid) DO UPDATE SET
             summary_draft = excluded.summary_draft,
             tasks_seen = excluded.tasks_seen,
             change_ids = excluded.change_ids,
             decision_ids = excluded.decision_ids,
             last_context_hint = excluded.last_context_hint,
             heartbeat_count = excluded.heartbeat_count,
             updated_at = datetime('now')""",
        row_values,
    )
    conn.commit()
    return {"sid": sid, "heartbeat_count": heartbeat_count}
|
|
2129
|
+
|
|
2130
|
+
|
|
2131
|
+
def get_diary_draft(sid: str) -> dict | None:
    """Return the diary draft row for *sid* as a dict, or None if absent."""
    cursor = get_db().execute(
        "SELECT * FROM session_diary_draft WHERE sid = ?", (sid,)
    )
    row = cursor.fetchone()
    if row is None:
        return None
    return dict(row)
|
|
2138
|
+
|
|
2139
|
+
|
|
2140
|
+
def delete_diary_draft(sid: str):
    """Drop the draft row for *sid* once a real diary entry exists."""
    database = get_db()
    database.execute("DELETE FROM session_diary_draft WHERE sid = ?", (sid,))
    database.commit()
|
|
2145
|
+
|
|
2146
|
+
|
|
2147
|
+
def get_orphan_sessions(ttl_seconds: int = 900) -> list[dict]:
    """List sessions idle for at least *ttl_seconds* that have no diary.

    A session is an orphan when its last_update_epoch is at or below the
    cutoff and no session_diary row references it (LEFT JOIN miss).
    """
    conn = get_db()
    cutoff = now_epoch() - ttl_seconds
    orphan_rows = conn.execute(
        """SELECT s.sid, s.task, s.started_epoch, s.last_update_epoch
           FROM sessions s
           LEFT JOIN session_diary sd ON sd.session_id = s.sid
           WHERE s.last_update_epoch <= ? AND sd.id IS NULL""",
        (cutoff,)
    ).fetchall()
    return [dict(orphan) for orphan in orphan_rows]
|
|
2159
|
+
|
|
2160
|
+
|
|
2089
2161
|
def read_session_diary(session_id: str = '', last_n: int = 3, last_day: bool = False,
|
|
2090
2162
|
domain: str = '') -> list[dict]:
|
|
2091
2163
|
"""Read session diary entries.
|
|
@@ -2093,7 +2165,7 @@ def read_session_diary(session_id: str = '', last_n: int = 3, last_day: bool = F
|
|
|
2093
2165
|
- session_id: returns entries for that specific session
|
|
2094
2166
|
- last_day: returns ALL entries from the most recent day (multi-terminal aware)
|
|
2095
2167
|
- last_n: returns last N entries (default)
|
|
2096
|
-
- domain: filter by project context (
|
|
2168
|
+
- domain: filter by project context (project-a, project-b, nexo, other)
|
|
2097
2169
|
"""
|
|
2098
2170
|
conn = get_db()
|
|
2099
2171
|
domain_clause = " AND domain = ?" if domain else ""
|
|
@@ -27,19 +27,19 @@ def handle_decision_log(domain: str, decision: str, alternatives: str = '',
|
|
|
27
27
|
"""Log a non-trivial decision with reasoning context.
|
|
28
28
|
|
|
29
29
|
Args:
|
|
30
|
-
domain: Area (
|
|
30
|
+
domain: Area (nexo, other)
|
|
31
31
|
decision: What was decided
|
|
32
32
|
alternatives: JSON array or text of options considered and why discarded
|
|
33
33
|
based_on: Data, metrics, or observations that informed this decision
|
|
34
34
|
confidence: high, medium, or low
|
|
35
|
-
context_ref: Related followup/reminder ID
|
|
35
|
+
context_ref: Related followup/reminder ID (e.g., NF-ADS1, R71)
|
|
36
36
|
session_id: Current session ID (auto-filled if empty)
|
|
37
37
|
"""
|
|
38
|
-
valid_domains = {'
|
|
38
|
+
valid_domains = {'nexo', 'other'}
|
|
39
39
|
if domain not in valid_domains:
|
|
40
|
-
return f"ERROR: domain
|
|
40
|
+
return f"ERROR: domain debe ser uno de: {', '.join(sorted(valid_domains))}"
|
|
41
41
|
if confidence not in ('high', 'medium', 'low'):
|
|
42
|
-
return f"ERROR: confidence
|
|
42
|
+
return f"ERROR: confidence debe ser high, medium, o low"
|
|
43
43
|
|
|
44
44
|
sid = session_id or 'unknown'
|
|
45
45
|
result = log_decision(sid, domain, decision, alternatives, based_on, confidence, context_ref)
|
|
@@ -59,7 +59,7 @@ def handle_decision_log(domain: str, decision: str, alternatives: str = '',
|
|
|
59
59
|
result = dict(conn.execute("SELECT * FROM decisions WHERE id = ?", (result["id"],)).fetchone())
|
|
60
60
|
due = result.get("review_due_at", "")
|
|
61
61
|
due_str = f" review_due={due}" if due else ""
|
|
62
|
-
return f"Decision #{result['id']}
|
|
62
|
+
return f"Decision #{result['id']} registrada [{domain}] ({confidence}): {decision[:80]}{due_str}"
|
|
63
63
|
|
|
64
64
|
|
|
65
65
|
def handle_decision_outcome(id: int, outcome: str) -> str:
|
|
@@ -78,7 +78,7 @@ def handle_decision_outcome(id: int, outcome: str) -> str:
|
|
|
78
78
|
(id,)
|
|
79
79
|
)
|
|
80
80
|
conn.commit()
|
|
81
|
-
return f"Decision #{id} outcome
|
|
81
|
+
return f"Decision #{id} outcome registrado: {outcome[:100]}"
|
|
82
82
|
|
|
83
83
|
|
|
84
84
|
def handle_decision_search(query: str = '', domain: str = '', days: int = 30) -> str:
|
|
@@ -86,18 +86,18 @@ def handle_decision_search(query: str = '', domain: str = '', days: int = 30) ->
|
|
|
86
86
|
|
|
87
87
|
Args:
|
|
88
88
|
query: Text to search in decision, alternatives, based_on, outcome
|
|
89
|
-
domain: Filter by area (
|
|
89
|
+
domain: Filter by area (nexo, other)
|
|
90
90
|
days: Look back N days (default 30)
|
|
91
91
|
"""
|
|
92
|
-
valid_domains = {'
|
|
92
|
+
valid_domains = {'nexo', 'other'}
|
|
93
93
|
if domain and domain not in valid_domains:
|
|
94
|
-
return f"ERROR: domain
|
|
94
|
+
return f"ERROR: domain debe ser uno de: {', '.join(sorted(valid_domains))}"
|
|
95
95
|
results = search_decisions(query, domain, days)
|
|
96
96
|
if not results:
|
|
97
|
-
scope = f"'{query}'" if query else domain or '
|
|
98
|
-
return f"
|
|
97
|
+
scope = f"'{query}'" if query else domain or 'todas'
|
|
98
|
+
return f"Sin decisiones encontradas para {scope} en {days} días."
|
|
99
99
|
|
|
100
|
-
lines = [f"
|
|
100
|
+
lines = [f"DECISIONES ({len(results)}):"]
|
|
101
101
|
for d in results:
|
|
102
102
|
conf = d.get('confidence', '?')
|
|
103
103
|
outcome_str = f" → {d['outcome'][:50]}" if d.get('outcome') else ""
|
|
@@ -107,9 +107,9 @@ def handle_decision_search(query: str = '', domain: str = '', days: int = 30) ->
|
|
|
107
107
|
lines.append(f" #{d['id']} ({d['created_at']}) [{d['domain']}] {conf} [{status}]{ref}{review_due}")
|
|
108
108
|
lines.append(f" {d['decision'][:120]}")
|
|
109
109
|
if d.get('based_on'):
|
|
110
|
-
lines.append(f"
|
|
110
|
+
lines.append(f" Basado en: {d['based_on'][:100]}")
|
|
111
111
|
if d.get('alternatives'):
|
|
112
|
-
lines.append(f"
|
|
112
|
+
lines.append(f" Alternativas: {d['alternatives'][:100]}")
|
|
113
113
|
if outcome_str:
|
|
114
114
|
lines.append(f" Outcome:{outcome_str}")
|
|
115
115
|
return "\n".join(lines)
|
|
@@ -161,7 +161,7 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
161
161
|
domain: str = '',
|
|
162
162
|
session_id: str = '',
|
|
163
163
|
self_critique: str = '') -> str:
|
|
164
|
-
"""Write session diary entry at end of session.
|
|
164
|
+
"""Write session diary entry at end of session. OBLIGATORIO antes de cerrar.
|
|
165
165
|
|
|
166
166
|
Args:
|
|
167
167
|
decisions: What was decided and why (JSON array or structured text)
|
|
@@ -169,13 +169,16 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
169
169
|
discarded: Options/approaches considered but rejected, and why
|
|
170
170
|
pending: Items left unresolved, with doubt level
|
|
171
171
|
context_next: What the next session should know to continue effectively
|
|
172
|
-
mental_state: Internal state to transfer — thread of thought, tone, observations not yet shared, momentum.
|
|
173
|
-
user_signals: Observable signals from the user during session — response speed, tone, corrections given.
|
|
174
|
-
domain: Project context:
|
|
172
|
+
mental_state: Internal state to transfer — thread of thought, tone, observations not yet shared, momentum. Written in first person as NEXO.
|
|
173
|
+
user_signals: Observable signals from the user during session — response speed (fast='s' vs detailed explanations), tone (direct, frustrated, exploratory, excited), corrections given, topics he initiated vs topics NEXO initiated. Factual observations only, not interpretations.
|
|
174
|
+
domain: Project context: project-a, project-b, nexo, other
|
|
175
175
|
session_id: Current session ID
|
|
176
|
-
self_critique:
|
|
176
|
+
self_critique: OBLIGATORIO. Post-mortem honesto: ¿Qué debí hacer proactivamente? ¿the user tuvo que pedirme algo que yo debería haber detectado? ¿Repetí errores conocidos? ¿Qué regla concreta evitaría la repetición? Si sesión limpia: 'Sin autocrítica — sesión limpia.'
|
|
177
177
|
"""
|
|
178
178
|
sid = session_id or 'unknown'
|
|
179
|
+
# Clean up draft — manual diary supersedes it
|
|
180
|
+
from db import delete_diary_draft
|
|
181
|
+
delete_diary_draft(sid)
|
|
179
182
|
result = write_session_diary(sid, decisions, summary, discarded, pending, context_next, mental_state, domain=domain, user_signals=user_signals, self_critique=self_critique)
|
|
180
183
|
if "error" in result:
|
|
181
184
|
return f"ERROR: {result['error']}"
|
|
@@ -185,7 +188,7 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
185
188
|
if mental_state and mental_state.strip():
|
|
186
189
|
_cognitive_ingest_safe(mental_state, "mental_state", f"diary#{result.get('id','')}", f"Session {sid} state", domain)
|
|
187
190
|
domain_str = f" [{domain}]" if domain else ""
|
|
188
|
-
msg = f"
|
|
191
|
+
msg = f"Diario sesión #{result['id']}{domain_str} guardado: {summary[:80]}"
|
|
189
192
|
|
|
190
193
|
# Trust score & sentiment summary for session diary
|
|
191
194
|
try:
|
|
@@ -206,14 +209,14 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
206
209
|
"SELECT COUNT(*) FROM change_log WHERE (commit_ref IS NULL OR commit_ref = '')"
|
|
207
210
|
).fetchone()[0]
|
|
208
211
|
if orphan_changes > 0:
|
|
209
|
-
warnings.append(f"{orphan_changes} changes
|
|
212
|
+
warnings.append(f"{orphan_changes} changes sin commit_ref")
|
|
210
213
|
orphan_decisions = conn.execute(
|
|
211
214
|
"SELECT COUNT(*) FROM decisions WHERE (outcome IS NULL OR outcome = '') AND created_at < datetime('now', '-7 days')"
|
|
212
215
|
).fetchone()[0]
|
|
213
216
|
if orphan_decisions > 0:
|
|
214
|
-
warnings.append(f"{orphan_decisions} decisions >7d
|
|
217
|
+
warnings.append(f"{orphan_decisions} decisions >7d sin outcome")
|
|
215
218
|
if warnings:
|
|
216
|
-
msg += "\n
|
|
219
|
+
msg += "\n⚠ EPISODIC GAPS: " + " | ".join(warnings) + " — resolver antes de cerrar sesión."
|
|
217
220
|
|
|
218
221
|
return msg
|
|
219
222
|
|
|
@@ -226,29 +229,29 @@ def handle_session_diary_read(session_id: str = '', last_n: int = 3, last_day: b
|
|
|
226
229
|
session_id: Specific session ID to read (optional)
|
|
227
230
|
last_n: Number of recent entries to return (default 3)
|
|
228
231
|
last_day: If true, returns ALL entries from the most recent day (multi-terminal aware). Use this at startup.
|
|
229
|
-
domain: Filter by project context:
|
|
232
|
+
domain: Filter by project context: project-a, project-b, nexo, other
|
|
230
233
|
"""
|
|
231
234
|
results = read_session_diary(session_id, last_n, last_day, domain)
|
|
232
235
|
if not results:
|
|
233
|
-
return "
|
|
236
|
+
return "Sin entradas en el diario de sesiones."
|
|
234
237
|
|
|
235
|
-
lines = [f"
|
|
238
|
+
lines = [f"DIARIO DE SESIONES ({len(results)}):"]
|
|
236
239
|
for d in results:
|
|
237
240
|
domain_label = f" [{d['domain']}]" if d.get('domain') else ""
|
|
238
|
-
lines.append(f"\n ---
|
|
239
|
-
lines.append(f"
|
|
241
|
+
lines.append(f"\n --- Sesión {d['session_id']}{domain_label} ({d['created_at']}) ---")
|
|
242
|
+
lines.append(f" Resumen: {d['summary']}")
|
|
240
243
|
if d.get('decisions'):
|
|
241
|
-
lines.append(f"
|
|
244
|
+
lines.append(f" Decisiones: {d['decisions'][:200]}")
|
|
242
245
|
if d.get('discarded'):
|
|
243
|
-
lines.append(f"
|
|
246
|
+
lines.append(f" Descartado: {d['discarded'][:150]}")
|
|
244
247
|
if d.get('pending'):
|
|
245
|
-
lines.append(f"
|
|
248
|
+
lines.append(f" Pendiente: {d['pending'][:150]}")
|
|
246
249
|
if d.get('context_next'):
|
|
247
|
-
lines.append(f"
|
|
250
|
+
lines.append(f" Para siguiente sesión: {d['context_next'][:200]}")
|
|
248
251
|
if d.get('mental_state'):
|
|
249
|
-
lines.append(f"
|
|
252
|
+
lines.append(f" Estado mental: {d['mental_state'][:300]}")
|
|
250
253
|
if d.get('user_signals'):
|
|
251
|
-
lines.append(f"
|
|
254
|
+
lines.append(f" Señales the user: {d['user_signals'][:300]}")
|
|
252
255
|
return "\n".join(lines)
|
|
253
256
|
|
|
254
257
|
|
|
@@ -256,13 +259,13 @@ def handle_change_log(files: str, what_changed: str, why: str,
|
|
|
256
259
|
triggered_by: str = '', affects: str = '',
|
|
257
260
|
risks: str = '', verify: str = '',
|
|
258
261
|
commit_ref: str = '', session_id: str = '') -> str:
|
|
259
|
-
"""Log a code/config change with full context.
|
|
262
|
+
"""Log a code/config change with full context. OBLIGATORIO after every edit to production code.
|
|
260
263
|
|
|
261
264
|
Args:
|
|
262
265
|
files: File path(s) modified (comma-separated if multiple)
|
|
263
266
|
what_changed: What was modified — functions, lines, behavior change
|
|
264
267
|
why: WHY this change was needed — the root cause, not just "fix bug"
|
|
265
|
-
triggered_by: What triggered this — bug report, metric, user's request, followup ID
|
|
268
|
+
triggered_by: What triggered this — bug report, metric, the user's request, followup ID
|
|
266
269
|
affects: What systems/users/flows this change impacts
|
|
267
270
|
risks: What could go wrong — regressions, edge cases, dependencies
|
|
268
271
|
verify: How to verify this works — what to check, followup ID if created
|
|
@@ -270,7 +273,7 @@ def handle_change_log(files: str, what_changed: str, why: str,
|
|
|
270
273
|
session_id: Current session ID
|
|
271
274
|
"""
|
|
272
275
|
if not files or not what_changed or not why:
|
|
273
|
-
return "ERROR: files, what_changed,
|
|
276
|
+
return "ERROR: files, what_changed, y why son obligatorios"
|
|
274
277
|
sid = session_id or 'unknown'
|
|
275
278
|
result = log_change(sid, files, what_changed, why, triggered_by, affects, risks, verify, commit_ref)
|
|
276
279
|
if "error" in result:
|
|
@@ -280,9 +283,9 @@ def handle_change_log(files: str, what_changed: str, why: str,
|
|
|
280
283
|
"change", f"C{result.get('id','')}", (what_changed or '')[:80], ""
|
|
281
284
|
)
|
|
282
285
|
change_id = result['id']
|
|
283
|
-
msg = f"Change #{change_id}
|
|
286
|
+
msg = f"Change #{change_id} registrado: {files[:60]} — {what_changed[:60]}"
|
|
284
287
|
if not commit_ref:
|
|
285
|
-
msg += f"\n
|
|
288
|
+
msg += f"\n⚠ SIN COMMIT. Usa nexo_change_commit({change_id}, 'hash') después del push, o 'server-direct' si fue edición directa en servidor."
|
|
286
289
|
return msg
|
|
287
290
|
|
|
288
291
|
|
|
@@ -296,22 +299,22 @@ def handle_change_search(query: str = '', files: str = '', days: int = 30) -> st
|
|
|
296
299
|
"""
|
|
297
300
|
results = search_changes(query, files, days)
|
|
298
301
|
if not results:
|
|
299
|
-
scope = f"'{query}'" if query else files or '
|
|
300
|
-
return f"
|
|
302
|
+
scope = f"'{query}'" if query else files or 'todos'
|
|
303
|
+
return f"Sin cambios encontrados para {scope} en {days} días."
|
|
301
304
|
|
|
302
|
-
lines = [f"
|
|
305
|
+
lines = [f"CAMBIOS ({len(results)}):"]
|
|
303
306
|
for c in results:
|
|
304
307
|
commit = f" [{c['commit_ref'][:8]}]" if c.get('commit_ref') else ""
|
|
305
308
|
lines.append(f" #{c['id']} ({c['created_at']}){commit}")
|
|
306
|
-
lines.append(f"
|
|
307
|
-
lines.append(f"
|
|
308
|
-
lines.append(f"
|
|
309
|
+
lines.append(f" Archivos: {c['files'][:100]}")
|
|
310
|
+
lines.append(f" Qué: {c['what_changed'][:120]}")
|
|
311
|
+
lines.append(f" Por qué: {c['why'][:120]}")
|
|
309
312
|
if c.get('triggered_by'):
|
|
310
313
|
lines.append(f" Trigger: {c['triggered_by'][:80]}")
|
|
311
314
|
if c.get('affects'):
|
|
312
|
-
lines.append(f"
|
|
315
|
+
lines.append(f" Afecta: {c['affects'][:80]}")
|
|
313
316
|
if c.get('risks'):
|
|
314
|
-
lines.append(f"
|
|
317
|
+
lines.append(f" Riesgos: {c['risks'][:80]}")
|
|
315
318
|
return "\n".join(lines)
|
|
316
319
|
|
|
317
320
|
|
|
@@ -325,7 +328,7 @@ def handle_change_commit(id: int, commit_ref: str) -> str:
|
|
|
325
328
|
result = update_change_commit(id, commit_ref)
|
|
326
329
|
if "error" in result:
|
|
327
330
|
return f"ERROR: {result['error']}"
|
|
328
|
-
return f"Change #{id}
|
|
331
|
+
return f"Change #{id} vinculado a commit {commit_ref[:8]}"
|
|
329
332
|
|
|
330
333
|
|
|
331
334
|
def handle_recall(query: str, days: int = 30) -> str:
|
|
@@ -337,9 +340,9 @@ def handle_recall(query: str, days: int = 30) -> str:
|
|
|
337
340
|
"""
|
|
338
341
|
results = recall(query, days)
|
|
339
342
|
if not results:
|
|
340
|
-
return f"
|
|
343
|
+
return f"Sin resultados para '{query}' en los últimos {days} días."
|
|
341
344
|
|
|
342
|
-
# Passive rehearsal — strengthen matching cognitive memories
|
|
345
|
+
# v1.2: Passive rehearsal — strengthen matching cognitive memories
|
|
343
346
|
try:
|
|
344
347
|
import cognitive
|
|
345
348
|
for r in results[:5]:
|
|
@@ -350,18 +353,18 @@ def handle_recall(query: str, days: int = 30) -> str:
|
|
|
350
353
|
pass
|
|
351
354
|
|
|
352
355
|
SOURCE_LABELS = {
|
|
353
|
-
'change_log': '[
|
|
354
|
-
'change': '[
|
|
355
|
-
'decision': '[
|
|
356
|
+
'change_log': '[CAMBIO]',
|
|
357
|
+
'change': '[CAMBIO]',
|
|
358
|
+
'decision': '[DECISIÓN]',
|
|
356
359
|
'learning': '[LEARNING]',
|
|
357
360
|
'followup': '[FOLLOWUP]',
|
|
358
|
-
'diary': '[
|
|
359
|
-
'entity': '[
|
|
360
|
-
'file': '[
|
|
361
|
-
'code': '[
|
|
361
|
+
'diary': '[DIARIO]',
|
|
362
|
+
'entity': '[ENTIDAD]',
|
|
363
|
+
'file': '[ARCHIVO]',
|
|
364
|
+
'code': '[CÓDIGO]',
|
|
362
365
|
}
|
|
363
366
|
|
|
364
|
-
lines = [f"RECALL '{query}' — {len(results)}
|
|
367
|
+
lines = [f"RECALL '{query}' — {len(results)} resultado(s):"]
|
|
365
368
|
for r in results:
|
|
366
369
|
source = r.get('source', '?')
|
|
367
370
|
label = SOURCE_LABELS.get(source, f"[{source.upper()}]")
|
|
@@ -374,6 +377,8 @@ def handle_recall(query: str, days: int = 30) -> str:
|
|
|
374
377
|
lines.append(f" {title}")
|
|
375
378
|
if snippet:
|
|
376
379
|
lines.append(f" {snippet}")
|
|
380
|
+
if len(results) < 5:
|
|
381
|
+
lines.append(f"\n 💡 Solo {len(results)} resultados en NEXO. Para historial más profundo, busca también en claude-mem: mcp__plugin_claude-mem_mcp-search__search")
|
|
377
382
|
return "\n".join(lines)
|
|
378
383
|
|
|
379
384
|
|
|
@@ -387,5 +392,5 @@ TOOLS = [
|
|
|
387
392
|
(handle_memory_review_queue, "nexo_memory_review_queue", "Show decisions and learnings that are due for review"),
|
|
388
393
|
(handle_session_diary_write, "nexo_session_diary_write", "Write end-of-session diary with decisions, discards, and context for next session"),
|
|
389
394
|
(handle_session_diary_read, "nexo_session_diary_read", "Read recent session diaries for context continuity"),
|
|
390
|
-
(handle_recall, "nexo_recall", "Search across ALL NEXO memory — changes, decisions, learnings, followups, diary, entities, .md files, code files."),
|
|
395
|
+
(handle_recall, "nexo_recall", "Search across ALL NEXO memory — changes, decisions, learnings, followups, diary, entities, .md files, code files. For deep historical context (older sessions, past work), also search claude-mem (mcp__plugin_claude-mem_mcp-search__search)."),
|
|
391
396
|
]
|
package/src/tools_sessions.py
CHANGED
|
@@ -65,7 +65,7 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
|
|
|
65
65
|
Args:
|
|
66
66
|
sid: Session ID
|
|
67
67
|
task: Current task description
|
|
68
|
-
context_hint: Optional — last 2-3 sentences from user or current topic. If provided AND
|
|
68
|
+
context_hint: Optional — last 2-3 sentences from the user or current topic. If provided AND
|
|
69
69
|
it diverges from startup memories, returns fresh cognitive memories for the new context.
|
|
70
70
|
"""
|
|
71
71
|
from db import get_db
|
|
@@ -88,7 +88,7 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
|
|
|
88
88
|
age = _format_age(q["created_epoch"])
|
|
89
89
|
parts.append(f" {q['qid']} de {q['from_sid']} ({age}): {q['question']}")
|
|
90
90
|
|
|
91
|
-
# Sentiment detection: analyze context_hint for user's mood
|
|
91
|
+
# Sentiment detection: analyze context_hint for the user's mood
|
|
92
92
|
if context_hint and len(context_hint.strip()) >= 10:
|
|
93
93
|
try:
|
|
94
94
|
import cognitive
|
|
@@ -137,6 +137,53 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
|
|
|
137
137
|
except Exception:
|
|
138
138
|
pass # Mid-session RAG is best-effort
|
|
139
139
|
|
|
140
|
+
# Incremental diary draft — accumulate every heartbeat, full UPSERT every 5
|
|
141
|
+
try:
|
|
142
|
+
import json as _json
|
|
143
|
+
from db import get_diary_draft, upsert_diary_draft
|
|
144
|
+
|
|
145
|
+
draft = get_diary_draft(sid)
|
|
146
|
+
hb_count = (draft["heartbeat_count"] + 1) if draft else 1
|
|
147
|
+
|
|
148
|
+
existing_tasks = _json.loads(draft["tasks_seen"]) if draft else []
|
|
149
|
+
if task and task not in existing_tasks:
|
|
150
|
+
existing_tasks.append(task)
|
|
151
|
+
|
|
152
|
+
_conn = get_db()
|
|
153
|
+
if hb_count % 5 == 0 or hb_count == 1:
|
|
154
|
+
change_rows = _conn.execute(
|
|
155
|
+
"SELECT id FROM change_log WHERE session_id = ? ORDER BY id", (sid,)
|
|
156
|
+
).fetchall()
|
|
157
|
+
change_ids = [r["id"] for r in change_rows]
|
|
158
|
+
|
|
159
|
+
decision_rows = _conn.execute(
|
|
160
|
+
"SELECT id FROM decisions WHERE session_id = ? ORDER BY id", (sid,)
|
|
161
|
+
).fetchall()
|
|
162
|
+
decision_ids = [r["id"] for r in decision_rows]
|
|
163
|
+
|
|
164
|
+
summary = f"Session tasks: {', '.join(existing_tasks[-10:])}"
|
|
165
|
+
upsert_diary_draft(
|
|
166
|
+
sid=sid,
|
|
167
|
+
tasks_seen=_json.dumps(existing_tasks),
|
|
168
|
+
change_ids=_json.dumps(change_ids),
|
|
169
|
+
decision_ids=_json.dumps(decision_ids),
|
|
170
|
+
last_context_hint=context_hint[:300] if context_hint else '',
|
|
171
|
+
heartbeat_count=hb_count,
|
|
172
|
+
summary_draft=summary,
|
|
173
|
+
)
|
|
174
|
+
else:
|
|
175
|
+
upsert_diary_draft(
|
|
176
|
+
sid=sid,
|
|
177
|
+
tasks_seen=_json.dumps(existing_tasks),
|
|
178
|
+
change_ids=draft["change_ids"] if draft else '[]',
|
|
179
|
+
decision_ids=draft["decision_ids"] if draft else '[]',
|
|
180
|
+
last_context_hint=context_hint[:300] if context_hint else (draft["last_context_hint"] if draft else ''),
|
|
181
|
+
heartbeat_count=hb_count,
|
|
182
|
+
summary_draft=draft["summary_draft"] if draft else f"Session task: {task}",
|
|
183
|
+
)
|
|
184
|
+
except Exception:
|
|
185
|
+
pass # Draft accumulation is best-effort, never block heartbeat
|
|
186
|
+
|
|
140
187
|
# Diary reminder: after 30 min active with no diary entry
|
|
141
188
|
conn = get_db()
|
|
142
189
|
row = conn.execute("SELECT started_epoch FROM sessions WHERE sid = ?", (sid,)).fetchone()
|