nexo-brain 0.3.2 → 0.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/auto_close_sessions.py +157 -0
- package/src/db.py +209 -52
- package/src/plugins/episodic_memory.py +65 -60
- package/src/tools_sessions.py +49 -2
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "nexo-brain",
|
|
3
|
-
"version": "0.3.
|
|
3
|
+
"version": "0.3.4",
|
|
4
4
|
"mcpName": "io.github.wazionapps/nexo",
|
|
5
5
|
"description": "NEXO — Cognitive co-operator for Claude Code. Atkinson-Shiffrin memory, semantic RAG, trust scoring, and metacognitive error prevention.",
|
|
6
6
|
"bin": {
|
|
package/src/auto_close_sessions.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Auto-close orphan sessions and promote diary drafts.
|
|
3
|
+
|
|
4
|
+
Runs every 5 minutes via LaunchAgent (com.nexo.auto-close-sessions).
|
|
5
|
+
Finds sessions that exceeded TTL without a diary and promotes their
|
|
6
|
+
draft to a real diary entry marked as source=auto-close.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import os
|
|
11
|
+
import sys
|
|
12
|
+
import datetime
|
|
13
|
+
|
|
14
|
+
# Ensure we can import from nexo-mcp
|
|
15
|
+
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
16
|
+
os.environ["NEXO_SKIP_FS_INDEX"] = "1" # Skip FTS rebuild on import
|
|
17
|
+
|
|
18
|
+
from db import (
|
|
19
|
+
init_db, get_db, get_diary_draft, delete_diary_draft,
|
|
20
|
+
get_orphan_sessions, write_session_diary, now_epoch,
|
|
21
|
+
SESSION_STALE_SECONDS,
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
LOG_DIR = os.path.expanduser("~/claude/operations/tool-logs")
|
|
25
|
+
AUTO_CLOSE_LOG = os.path.expanduser("~/claude/coordination/auto-close.log")
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def get_tool_log_summary(sid: str) -> str:
|
|
29
|
+
"""Extract tool names from today's tool log for this session."""
|
|
30
|
+
today = datetime.date.today().isoformat()
|
|
31
|
+
log_path = os.path.join(LOG_DIR, f"{today}.jsonl")
|
|
32
|
+
if not os.path.exists(log_path):
|
|
33
|
+
return ""
|
|
34
|
+
|
|
35
|
+
tools = []
|
|
36
|
+
try:
|
|
37
|
+
with open(log_path) as f:
|
|
38
|
+
for line in f:
|
|
39
|
+
try:
|
|
40
|
+
entry = json.loads(line)
|
|
41
|
+
if entry.get("session_id") == sid:
|
|
42
|
+
tool = entry.get("tool_name", "")
|
|
43
|
+
if tool and tool not in ("Read", "Grep", "Glob"):
|
|
44
|
+
tools.append(tool)
|
|
45
|
+
except json.JSONDecodeError:
|
|
46
|
+
continue
|
|
47
|
+
except Exception:
|
|
48
|
+
pass
|
|
49
|
+
|
|
50
|
+
if tools:
|
|
51
|
+
seen = set()
|
|
52
|
+
unique = []
|
|
53
|
+
for t in tools:
|
|
54
|
+
if t not in seen:
|
|
55
|
+
seen.add(t)
|
|
56
|
+
unique.append(t)
|
|
57
|
+
return f"Tools used: {', '.join(unique[-15:])}"
|
|
58
|
+
return ""
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def promote_draft_to_diary(sid: str, draft: dict, task: str = ""):
|
|
62
|
+
"""Promote a diary draft to a real session diary entry."""
|
|
63
|
+
tasks = json.loads(draft.get("tasks_seen", "[]"))
|
|
64
|
+
change_ids = json.loads(draft.get("change_ids", "[]"))
|
|
65
|
+
decision_ids = json.loads(draft.get("decision_ids", "[]"))
|
|
66
|
+
context_hint = draft.get("last_context_hint", "")
|
|
67
|
+
hb_count = draft.get("heartbeat_count", 0)
|
|
68
|
+
|
|
69
|
+
summary_parts = []
|
|
70
|
+
if draft.get("summary_draft"):
|
|
71
|
+
summary_parts.append(draft["summary_draft"])
|
|
72
|
+
|
|
73
|
+
tool_summary = get_tool_log_summary(sid)
|
|
74
|
+
if tool_summary:
|
|
75
|
+
summary_parts.append(tool_summary)
|
|
76
|
+
|
|
77
|
+
summary = " | ".join(summary_parts) if summary_parts else f"Auto-closed session ({hb_count} heartbeats)"
|
|
78
|
+
|
|
79
|
+
# Build decisions from actual decision records
|
|
80
|
+
decisions_text = ""
|
|
81
|
+
if decision_ids:
|
|
82
|
+
conn = get_db()
|
|
83
|
+
placeholders = ",".join("?" * len(decision_ids))
|
|
84
|
+
rows = conn.execute(
|
|
85
|
+
f"SELECT id, decision, domain FROM decisions WHERE id IN ({placeholders})",
|
|
86
|
+
decision_ids
|
|
87
|
+
).fetchall()
|
|
88
|
+
if rows:
|
|
89
|
+
decisions_text = json.dumps([
|
|
90
|
+
{"id": r["id"], "decision": r["decision"][:100], "domain": r["domain"]}
|
|
91
|
+
for r in rows
|
|
92
|
+
])
|
|
93
|
+
|
|
94
|
+
# Build context_next
|
|
95
|
+
context_next = ""
|
|
96
|
+
if context_hint:
|
|
97
|
+
context_next = f"Last topic: {context_hint}"
|
|
98
|
+
if tasks:
|
|
99
|
+
context_next += f" | Tasks: {', '.join(tasks[-5:])}"
|
|
100
|
+
|
|
101
|
+
write_session_diary(
|
|
102
|
+
session_id=sid,
|
|
103
|
+
decisions=decisions_text or "No decisions logged",
|
|
104
|
+
summary=summary,
|
|
105
|
+
discarded="",
|
|
106
|
+
pending=f"Changes: {change_ids}" if change_ids else "",
|
|
107
|
+
context_next=context_next,
|
|
108
|
+
mental_state=f"[auto-close] Session ended without explicit diary. Draft promoted. {hb_count} heartbeats recorded.",
|
|
109
|
+
domain="",
|
|
110
|
+
user_signals="",
|
|
111
|
+
self_critique="[auto-close] No self-critique available — session terminated without cleanup.",
|
|
112
|
+
source="auto-close",
|
|
113
|
+
)
|
|
114
|
+
delete_diary_draft(sid)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def main():
|
|
118
|
+
init_db()
|
|
119
|
+
conn = get_db()
|
|
120
|
+
|
|
121
|
+
orphans = get_orphan_sessions(SESSION_STALE_SECONDS)
|
|
122
|
+
if not orphans:
|
|
123
|
+
return
|
|
124
|
+
|
|
125
|
+
for session in orphans:
|
|
126
|
+
sid = session["sid"]
|
|
127
|
+
draft = get_diary_draft(sid)
|
|
128
|
+
|
|
129
|
+
if draft:
|
|
130
|
+
promote_draft_to_diary(sid, draft, task=session.get("task", ""))
|
|
131
|
+
else:
|
|
132
|
+
write_session_diary(
|
|
133
|
+
session_id=sid,
|
|
134
|
+
decisions="No decisions logged",
|
|
135
|
+
summary=f"Auto-closed session. Task: {session.get('task', 'unknown')}",
|
|
136
|
+
context_next="",
|
|
137
|
+
mental_state="[auto-close] No draft available. Minimal diary.",
|
|
138
|
+
self_critique="[auto-close] Session terminated without diary or draft.",
|
|
139
|
+
source="auto-close",
|
|
140
|
+
)
|
|
141
|
+
|
|
142
|
+
# Clean up the session
|
|
143
|
+
conn.execute("DELETE FROM tracked_files WHERE sid = ?", (sid,))
|
|
144
|
+
conn.execute("DELETE FROM sessions WHERE sid = ?", (sid,))
|
|
145
|
+
conn.execute("DELETE FROM session_diary_draft WHERE sid = ?", (sid,))
|
|
146
|
+
|
|
147
|
+
conn.commit()
|
|
148
|
+
|
|
149
|
+
# Log what we did
|
|
150
|
+
os.makedirs(os.path.dirname(AUTO_CLOSE_LOG), exist_ok=True)
|
|
151
|
+
with open(AUTO_CLOSE_LOG, "a") as f:
|
|
152
|
+
ts = datetime.datetime.now().isoformat(timespec="seconds")
|
|
153
|
+
f.write(f"{ts} — auto-closed {len(orphans)} session(s): {[s['sid'] for s in orphans]}\n")
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
if __name__ == "__main__":
|
|
157
|
+
main()
|
package/src/db.py
CHANGED
|
@@ -241,6 +241,7 @@ def init_db():
|
|
|
241
241
|
user_signals TEXT,
|
|
242
242
|
summary TEXT NOT NULL
|
|
243
243
|
);
|
|
244
|
+
|
|
244
245
|
CREATE TABLE IF NOT EXISTS evolution_metrics (
|
|
245
246
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
246
247
|
dimension TEXT NOT NULL,
|
|
@@ -267,53 +268,8 @@ def init_db():
|
|
|
267
268
|
""")
|
|
268
269
|
# foreign_keys=ON is set in get_db() per-connection
|
|
269
270
|
|
|
270
|
-
# ──
|
|
271
|
-
|
|
272
|
-
_migrate_add_column(conn, "learnings", "prevention", "TEXT DEFAULT ''")
|
|
273
|
-
_migrate_add_column(conn, "learnings", "applies_to", "TEXT DEFAULT ''")
|
|
274
|
-
_migrate_add_column(conn, "learnings", "status", "TEXT DEFAULT 'active'")
|
|
275
|
-
_migrate_add_column(conn, "learnings", "review_due_at", "REAL")
|
|
276
|
-
_migrate_add_column(conn, "learnings", "last_reviewed_at", "REAL")
|
|
277
|
-
_migrate_add_column(conn, "followups", "reasoning", "TEXT")
|
|
278
|
-
_migrate_add_column(conn, "task_history", "reasoning", "TEXT")
|
|
279
|
-
_migrate_add_column(conn, "decisions", "status", "TEXT DEFAULT 'pending_review'")
|
|
280
|
-
_migrate_add_column(conn, "decisions", "review_due_at", "TEXT")
|
|
281
|
-
_migrate_add_column(conn, "decisions", "last_reviewed_at", "TEXT")
|
|
282
|
-
_migrate_add_index(conn, "idx_decisions_domain", "decisions", "domain")
|
|
283
|
-
_migrate_add_index(conn, "idx_decisions_created", "decisions", "created_at")
|
|
284
|
-
_migrate_add_index(conn, "idx_decisions_review_due", "decisions", "review_due_at")
|
|
285
|
-
_migrate_add_index(conn, "idx_session_diary_sid", "session_diary", "session_id")
|
|
286
|
-
_migrate_add_column(conn, "session_diary", "mental_state", "TEXT")
|
|
287
|
-
_migrate_add_column(conn, "session_diary", "domain", "TEXT")
|
|
288
|
-
_migrate_add_column(conn, "session_diary", "user_signals", "TEXT")
|
|
289
|
-
_migrate_add_index(conn, "idx_change_log_created", "change_log", "created_at")
|
|
290
|
-
_migrate_add_index(conn, "idx_change_log_files", "change_log", "files")
|
|
291
|
-
_migrate_add_index(conn, "idx_learnings_status", "learnings", "status")
|
|
292
|
-
_migrate_add_index(conn, "idx_learnings_review_due", "learnings", "review_due_at")
|
|
293
|
-
|
|
294
|
-
conn.execute("""
|
|
295
|
-
CREATE TABLE IF NOT EXISTS error_repetitions (
|
|
296
|
-
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
297
|
-
new_learning_id INTEGER NOT NULL,
|
|
298
|
-
original_learning_id INTEGER NOT NULL,
|
|
299
|
-
similarity REAL NOT NULL,
|
|
300
|
-
area TEXT NOT NULL,
|
|
301
|
-
created_at TEXT DEFAULT (datetime('now'))
|
|
302
|
-
)
|
|
303
|
-
""")
|
|
304
|
-
conn.execute("""
|
|
305
|
-
CREATE TABLE IF NOT EXISTS guard_checks (
|
|
306
|
-
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
307
|
-
session_id TEXT,
|
|
308
|
-
files TEXT,
|
|
309
|
-
area TEXT,
|
|
310
|
-
learnings_returned INTEGER DEFAULT 0,
|
|
311
|
-
blocking_rules_returned INTEGER DEFAULT 0,
|
|
312
|
-
created_at TEXT DEFAULT (datetime('now'))
|
|
313
|
-
)
|
|
314
|
-
""")
|
|
315
|
-
_migrate_add_index(conn, "idx_error_repetitions_area", "error_repetitions", "area")
|
|
316
|
-
_migrate_add_index(conn, "idx_guard_checks_session", "guard_checks", "session_id")
|
|
271
|
+
# ── Run formal migrations ────────────────────────────────────
|
|
272
|
+
run_migrations(conn)
|
|
317
273
|
|
|
318
274
|
# ── FTS5 unified search index ────────────────────────────────
|
|
319
275
|
conn.execute("""
|
|
@@ -765,6 +721,149 @@ def _migrate_add_index(conn, index_name: str, table: str, column: str):
|
|
|
765
721
|
conn.commit()
|
|
766
722
|
|
|
767
723
|
|
|
724
|
+
# ── Formal Migration System ─────────────────────────────────────
|
|
725
|
+
#
|
|
726
|
+
# Each migration is (version, name, callable). Migrations run once
|
|
727
|
+
# and are tracked in schema_migrations. The version number MUST be
|
|
728
|
+
# strictly increasing. Add new migrations at the end of the list.
|
|
729
|
+
#
|
|
730
|
+
# For users upgrading via npm/git, init_db() calls run_migrations()
|
|
731
|
+
# automatically — no manual steps needed.
|
|
732
|
+
|
|
733
|
+
def _m1_learnings_columns(conn):
|
|
734
|
+
_migrate_add_column(conn, "learnings", "reasoning", "TEXT")
|
|
735
|
+
_migrate_add_column(conn, "learnings", "prevention", "TEXT DEFAULT ''")
|
|
736
|
+
_migrate_add_column(conn, "learnings", "applies_to", "TEXT DEFAULT ''")
|
|
737
|
+
_migrate_add_column(conn, "learnings", "status", "TEXT DEFAULT 'active'")
|
|
738
|
+
_migrate_add_column(conn, "learnings", "review_due_at", "REAL")
|
|
739
|
+
_migrate_add_column(conn, "learnings", "last_reviewed_at", "REAL")
|
|
740
|
+
|
|
741
|
+
def _m2_followups_reasoning(conn):
|
|
742
|
+
_migrate_add_column(conn, "followups", "reasoning", "TEXT")
|
|
743
|
+
_migrate_add_column(conn, "task_history", "reasoning", "TEXT")
|
|
744
|
+
|
|
745
|
+
def _m3_decisions_review(conn):
|
|
746
|
+
_migrate_add_column(conn, "decisions", "status", "TEXT DEFAULT 'pending_review'")
|
|
747
|
+
_migrate_add_column(conn, "decisions", "review_due_at", "TEXT")
|
|
748
|
+
_migrate_add_column(conn, "decisions", "last_reviewed_at", "TEXT")
|
|
749
|
+
_migrate_add_index(conn, "idx_decisions_domain", "decisions", "domain")
|
|
750
|
+
_migrate_add_index(conn, "idx_decisions_created", "decisions", "created_at")
|
|
751
|
+
_migrate_add_index(conn, "idx_decisions_review_due", "decisions", "review_due_at")
|
|
752
|
+
|
|
753
|
+
def _m4_session_diary_columns(conn):
|
|
754
|
+
_migrate_add_index(conn, "idx_session_diary_sid", "session_diary", "session_id")
|
|
755
|
+
_migrate_add_column(conn, "session_diary", "mental_state", "TEXT")
|
|
756
|
+
_migrate_add_column(conn, "session_diary", "domain", "TEXT")
|
|
757
|
+
_migrate_add_column(conn, "session_diary", "user_signals", "TEXT")
|
|
758
|
+
_migrate_add_column(conn, "session_diary", "self_critique", "TEXT")
|
|
759
|
+
|
|
760
|
+
def _m5_change_log_indexes(conn):
|
|
761
|
+
_migrate_add_index(conn, "idx_change_log_created", "change_log", "created_at")
|
|
762
|
+
_migrate_add_index(conn, "idx_change_log_files", "change_log", "files")
|
|
763
|
+
_migrate_add_index(conn, "idx_learnings_status", "learnings", "status")
|
|
764
|
+
_migrate_add_index(conn, "idx_learnings_review_due", "learnings", "review_due_at")
|
|
765
|
+
|
|
766
|
+
def _m6_error_guard_tables(conn):
|
|
767
|
+
conn.execute("""
|
|
768
|
+
CREATE TABLE IF NOT EXISTS error_repetitions (
|
|
769
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
770
|
+
new_learning_id INTEGER NOT NULL,
|
|
771
|
+
original_learning_id INTEGER NOT NULL,
|
|
772
|
+
similarity REAL NOT NULL,
|
|
773
|
+
area TEXT NOT NULL,
|
|
774
|
+
created_at TEXT DEFAULT (datetime('now'))
|
|
775
|
+
)
|
|
776
|
+
""")
|
|
777
|
+
conn.execute("""
|
|
778
|
+
CREATE TABLE IF NOT EXISTS guard_checks (
|
|
779
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
780
|
+
session_id TEXT,
|
|
781
|
+
files TEXT,
|
|
782
|
+
area TEXT,
|
|
783
|
+
learnings_returned INTEGER DEFAULT 0,
|
|
784
|
+
blocking_rules_returned INTEGER DEFAULT 0,
|
|
785
|
+
created_at TEXT DEFAULT (datetime('now'))
|
|
786
|
+
)
|
|
787
|
+
""")
|
|
788
|
+
_migrate_add_index(conn, "idx_error_repetitions_area", "error_repetitions", "area")
|
|
789
|
+
_migrate_add_index(conn, "idx_guard_checks_session", "guard_checks", "session_id")
|
|
790
|
+
|
|
791
|
+
def _m7_diary_source_and_draft(conn):
|
|
792
|
+
_migrate_add_column(conn, "session_diary", "source", "TEXT DEFAULT 'claude'")
|
|
793
|
+
conn.execute("""
|
|
794
|
+
CREATE TABLE IF NOT EXISTS session_diary_draft (
|
|
795
|
+
sid TEXT PRIMARY KEY,
|
|
796
|
+
summary_draft TEXT DEFAULT '',
|
|
797
|
+
tasks_seen TEXT DEFAULT '[]',
|
|
798
|
+
change_ids TEXT DEFAULT '[]',
|
|
799
|
+
decision_ids TEXT DEFAULT '[]',
|
|
800
|
+
last_context_hint TEXT DEFAULT '',
|
|
801
|
+
heartbeat_count INTEGER DEFAULT 0,
|
|
802
|
+
created_at TEXT DEFAULT (datetime('now')),
|
|
803
|
+
updated_at TEXT DEFAULT (datetime('now'))
|
|
804
|
+
)
|
|
805
|
+
""")
|
|
806
|
+
|
|
807
|
+
|
|
808
|
+
# Migration registry — APPEND ONLY, never reorder or delete
|
|
809
|
+
MIGRATIONS = [
|
|
810
|
+
(1, "learnings_columns", _m1_learnings_columns),
|
|
811
|
+
(2, "followups_reasoning", _m2_followups_reasoning),
|
|
812
|
+
(3, "decisions_review", _m3_decisions_review),
|
|
813
|
+
(4, "session_diary_columns", _m4_session_diary_columns),
|
|
814
|
+
(5, "change_log_indexes", _m5_change_log_indexes),
|
|
815
|
+
(6, "error_guard_tables", _m6_error_guard_tables),
|
|
816
|
+
(7, "diary_source_and_draft", _m7_diary_source_and_draft),
|
|
817
|
+
]
|
|
818
|
+
|
|
819
|
+
|
|
820
|
+
def run_migrations(conn=None):
|
|
821
|
+
"""Run pending migrations. Tracks applied versions in schema_migrations.
|
|
822
|
+
|
|
823
|
+
Safe to call multiple times — skips already-applied migrations.
|
|
824
|
+
Called automatically by init_db() on every server start.
|
|
825
|
+
"""
|
|
826
|
+
if conn is None:
|
|
827
|
+
conn = get_db()
|
|
828
|
+
|
|
829
|
+
conn.execute("""
|
|
830
|
+
CREATE TABLE IF NOT EXISTS schema_migrations (
|
|
831
|
+
version INTEGER PRIMARY KEY,
|
|
832
|
+
name TEXT NOT NULL,
|
|
833
|
+
applied_at TEXT DEFAULT (datetime('now'))
|
|
834
|
+
)
|
|
835
|
+
""")
|
|
836
|
+
conn.commit()
|
|
837
|
+
|
|
838
|
+
applied = {r[0] for r in conn.execute("SELECT version FROM schema_migrations").fetchall()}
|
|
839
|
+
|
|
840
|
+
for version, name, fn in MIGRATIONS:
|
|
841
|
+
if version not in applied:
|
|
842
|
+
try:
|
|
843
|
+
fn(conn)
|
|
844
|
+
conn.execute(
|
|
845
|
+
"INSERT INTO schema_migrations (version, name) VALUES (?, ?)",
|
|
846
|
+
(version, name)
|
|
847
|
+
)
|
|
848
|
+
conn.commit()
|
|
849
|
+
except Exception as e:
|
|
850
|
+
# Log but don't crash — partial migration is better than no server
|
|
851
|
+
import sys
|
|
852
|
+
print(f"[MIGRATION] v{version} ({name}) failed: {e}", file=sys.stderr)
|
|
853
|
+
|
|
854
|
+
return len(MIGRATIONS) - len(applied)
|
|
855
|
+
|
|
856
|
+
|
|
857
|
+
def get_schema_version() -> int:
|
|
858
|
+
"""Return the highest applied migration version, or 0 if none."""
|
|
859
|
+
conn = get_db()
|
|
860
|
+
try:
|
|
861
|
+
row = conn.execute("SELECT MAX(version) FROM schema_migrations").fetchone()
|
|
862
|
+
return row[0] or 0
|
|
863
|
+
except Exception:
|
|
864
|
+
return 0
|
|
865
|
+
|
|
866
|
+
|
|
768
867
|
def _gen_id(prefix: str, length: int = 8) -> str:
|
|
769
868
|
"""Generate a random ID like 'msg-a1b2c3' or 'q-x9y8z7w6'."""
|
|
770
869
|
chars = string.ascii_lowercase + string.digits
|
|
@@ -2059,14 +2158,14 @@ def write_session_diary(session_id: str, decisions: str, summary: str,
|
|
|
2059
2158
|
discarded: str = '', pending: str = '',
|
|
2060
2159
|
context_next: str = '', mental_state: str = '',
|
|
2061
2160
|
domain: str = '', user_signals: str = '',
|
|
2062
|
-
self_critique: str = '') -> dict:
|
|
2161
|
+
self_critique: str = '', source: str = 'claude') -> dict:
|
|
2063
2162
|
"""Write a session diary entry with mental state and self-critique for continuity."""
|
|
2064
2163
|
conn = get_db()
|
|
2065
2164
|
cleanup_old_diaries()
|
|
2066
2165
|
cursor = conn.execute(
|
|
2067
|
-
"INSERT INTO session_diary (session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique) "
|
|
2068
|
-
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
|
2069
|
-
(session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique)
|
|
2166
|
+
"INSERT INTO session_diary (session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique, source) "
|
|
2167
|
+
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
|
2168
|
+
(session_id, decisions, discarded, pending, context_next, mental_state, summary, domain, user_signals, self_critique, source)
|
|
2070
2169
|
)
|
|
2071
2170
|
conn.commit()
|
|
2072
2171
|
did = cursor.lastrowid
|
|
@@ -2086,6 +2185,64 @@ def check_session_has_diary(session_id: str) -> bool:
|
|
|
2086
2185
|
return row is not None
|
|
2087
2186
|
|
|
2088
2187
|
|
|
2188
|
+
# ── Session Diary Drafts ─────────────────────────────────────────
|
|
2189
|
+
|
|
2190
|
+
|
|
2191
|
+
def upsert_diary_draft(sid: str, tasks_seen: str, change_ids: str,
|
|
2192
|
+
decision_ids: str, last_context_hint: str,
|
|
2193
|
+
heartbeat_count: int, summary_draft: str = '') -> dict:
|
|
2194
|
+
"""UPSERT diary draft for a session. Called by heartbeat to accumulate context."""
|
|
2195
|
+
conn = get_db()
|
|
2196
|
+
conn.execute(
|
|
2197
|
+
"""INSERT INTO session_diary_draft
|
|
2198
|
+
(sid, summary_draft, tasks_seen, change_ids, decision_ids,
|
|
2199
|
+
last_context_hint, heartbeat_count, updated_at)
|
|
2200
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, datetime('now'))
|
|
2201
|
+
ON CONFLICT(sid) DO UPDATE SET
|
|
2202
|
+
summary_draft = excluded.summary_draft,
|
|
2203
|
+
tasks_seen = excluded.tasks_seen,
|
|
2204
|
+
change_ids = excluded.change_ids,
|
|
2205
|
+
decision_ids = excluded.decision_ids,
|
|
2206
|
+
last_context_hint = excluded.last_context_hint,
|
|
2207
|
+
heartbeat_count = excluded.heartbeat_count,
|
|
2208
|
+
updated_at = datetime('now')""",
|
|
2209
|
+
(sid, summary_draft, tasks_seen, change_ids, decision_ids,
|
|
2210
|
+
last_context_hint, heartbeat_count)
|
|
2211
|
+
)
|
|
2212
|
+
conn.commit()
|
|
2213
|
+
return {"sid": sid, "heartbeat_count": heartbeat_count}
|
|
2214
|
+
|
|
2215
|
+
|
|
2216
|
+
def get_diary_draft(sid: str) -> dict | None:
|
|
2217
|
+
"""Get diary draft for a session, or None."""
|
|
2218
|
+
conn = get_db()
|
|
2219
|
+
row = conn.execute(
|
|
2220
|
+
"SELECT * FROM session_diary_draft WHERE sid = ?", (sid,)
|
|
2221
|
+
).fetchone()
|
|
2222
|
+
return dict(row) if row else None
|
|
2223
|
+
|
|
2224
|
+
|
|
2225
|
+
def delete_diary_draft(sid: str):
|
|
2226
|
+
"""Delete diary draft after real diary is written."""
|
|
2227
|
+
conn = get_db()
|
|
2228
|
+
conn.execute("DELETE FROM session_diary_draft WHERE sid = ?", (sid,))
|
|
2229
|
+
conn.commit()
|
|
2230
|
+
|
|
2231
|
+
|
|
2232
|
+
def get_orphan_sessions(ttl_seconds: int = 900) -> list[dict]:
|
|
2233
|
+
"""Get sessions that exceeded TTL and have no diary."""
|
|
2234
|
+
conn = get_db()
|
|
2235
|
+
cutoff = now_epoch() - ttl_seconds
|
|
2236
|
+
rows = conn.execute(
|
|
2237
|
+
"""SELECT s.sid, s.task, s.started_epoch, s.last_update_epoch
|
|
2238
|
+
FROM sessions s
|
|
2239
|
+
LEFT JOIN session_diary sd ON sd.session_id = s.sid
|
|
2240
|
+
WHERE s.last_update_epoch <= ? AND sd.id IS NULL""",
|
|
2241
|
+
(cutoff,)
|
|
2242
|
+
).fetchall()
|
|
2243
|
+
return [dict(r) for r in rows]
|
|
2244
|
+
|
|
2245
|
+
|
|
2089
2246
|
def read_session_diary(session_id: str = '', last_n: int = 3, last_day: bool = False,
|
|
2090
2247
|
domain: str = '') -> list[dict]:
|
|
2091
2248
|
"""Read session diary entries.
|
|
@@ -2093,7 +2250,7 @@ def read_session_diary(session_id: str = '', last_n: int = 3, last_day: bool = F
|
|
|
2093
2250
|
- session_id: returns entries for that specific session
|
|
2094
2251
|
- last_day: returns ALL entries from the most recent day (multi-terminal aware)
|
|
2095
2252
|
- last_n: returns last N entries (default)
|
|
2096
|
-
- domain: filter by project context (
|
|
2253
|
+
- domain: filter by project context (nexo, other)
|
|
2097
2254
|
"""
|
|
2098
2255
|
conn = get_db()
|
|
2099
2256
|
domain_clause = " AND domain = ?" if domain else ""
|
|
package/src/plugins/episodic_memory.py
CHANGED
|
@@ -27,19 +27,19 @@ def handle_decision_log(domain: str, decision: str, alternatives: str = '',
|
|
|
27
27
|
"""Log a non-trivial decision with reasoning context.
|
|
28
28
|
|
|
29
29
|
Args:
|
|
30
|
-
domain: Area (
|
|
30
|
+
domain: Area (nexo, other)
|
|
31
31
|
decision: What was decided
|
|
32
32
|
alternatives: JSON array or text of options considered and why discarded
|
|
33
33
|
based_on: Data, metrics, or observations that informed this decision
|
|
34
34
|
confidence: high, medium, or low
|
|
35
|
-
context_ref: Related followup/reminder ID
|
|
35
|
+
context_ref: Related followup/reminder ID (e.g., NF-ADS1, R71)
|
|
36
36
|
session_id: Current session ID (auto-filled if empty)
|
|
37
37
|
"""
|
|
38
|
-
valid_domains = {'
|
|
38
|
+
valid_domains = {'nexo', 'other'}
|
|
39
39
|
if domain not in valid_domains:
|
|
40
|
-
return f"ERROR: domain
|
|
40
|
+
return f"ERROR: domain debe ser uno de: {', '.join(sorted(valid_domains))}"
|
|
41
41
|
if confidence not in ('high', 'medium', 'low'):
|
|
42
|
-
return f"ERROR: confidence
|
|
42
|
+
return f"ERROR: confidence debe ser high, medium, o low"
|
|
43
43
|
|
|
44
44
|
sid = session_id or 'unknown'
|
|
45
45
|
result = log_decision(sid, domain, decision, alternatives, based_on, confidence, context_ref)
|
|
@@ -59,7 +59,7 @@ def handle_decision_log(domain: str, decision: str, alternatives: str = '',
|
|
|
59
59
|
result = dict(conn.execute("SELECT * FROM decisions WHERE id = ?", (result["id"],)).fetchone())
|
|
60
60
|
due = result.get("review_due_at", "")
|
|
61
61
|
due_str = f" review_due={due}" if due else ""
|
|
62
|
-
return f"Decision #{result['id']}
|
|
62
|
+
return f"Decision #{result['id']} registrada [{domain}] ({confidence}): {decision[:80]}{due_str}"
|
|
63
63
|
|
|
64
64
|
|
|
65
65
|
def handle_decision_outcome(id: int, outcome: str) -> str:
|
|
@@ -78,7 +78,7 @@ def handle_decision_outcome(id: int, outcome: str) -> str:
|
|
|
78
78
|
(id,)
|
|
79
79
|
)
|
|
80
80
|
conn.commit()
|
|
81
|
-
return f"Decision #{id} outcome
|
|
81
|
+
return f"Decision #{id} outcome registrado: {outcome[:100]}"
|
|
82
82
|
|
|
83
83
|
|
|
84
84
|
def handle_decision_search(query: str = '', domain: str = '', days: int = 30) -> str:
|
|
@@ -86,18 +86,18 @@ def handle_decision_search(query: str = '', domain: str = '', days: int = 30) ->
|
|
|
86
86
|
|
|
87
87
|
Args:
|
|
88
88
|
query: Text to search in decision, alternatives, based_on, outcome
|
|
89
|
-
domain: Filter by area (
|
|
89
|
+
domain: Filter by area (nexo, other)
|
|
90
90
|
days: Look back N days (default 30)
|
|
91
91
|
"""
|
|
92
|
-
valid_domains = {'
|
|
92
|
+
valid_domains = {'nexo', 'other'}
|
|
93
93
|
if domain and domain not in valid_domains:
|
|
94
|
-
return f"ERROR: domain
|
|
94
|
+
return f"ERROR: domain debe ser uno de: {', '.join(sorted(valid_domains))}"
|
|
95
95
|
results = search_decisions(query, domain, days)
|
|
96
96
|
if not results:
|
|
97
|
-
scope = f"'{query}'" if query else domain or '
|
|
98
|
-
return f"
|
|
97
|
+
scope = f"'{query}'" if query else domain or 'todas'
|
|
98
|
+
return f"Sin decisiones encontradas para {scope} en {days} días."
|
|
99
99
|
|
|
100
|
-
lines = [f"
|
|
100
|
+
lines = [f"DECISIONES ({len(results)}):"]
|
|
101
101
|
for d in results:
|
|
102
102
|
conf = d.get('confidence', '?')
|
|
103
103
|
outcome_str = f" → {d['outcome'][:50]}" if d.get('outcome') else ""
|
|
@@ -107,9 +107,9 @@ def handle_decision_search(query: str = '', domain: str = '', days: int = 30) ->
|
|
|
107
107
|
lines.append(f" #{d['id']} ({d['created_at']}) [{d['domain']}] {conf} [{status}]{ref}{review_due}")
|
|
108
108
|
lines.append(f" {d['decision'][:120]}")
|
|
109
109
|
if d.get('based_on'):
|
|
110
|
-
lines.append(f"
|
|
110
|
+
lines.append(f" Basado en: {d['based_on'][:100]}")
|
|
111
111
|
if d.get('alternatives'):
|
|
112
|
-
lines.append(f"
|
|
112
|
+
lines.append(f" Alternativas: {d['alternatives'][:100]}")
|
|
113
113
|
if outcome_str:
|
|
114
114
|
lines.append(f" Outcome:{outcome_str}")
|
|
115
115
|
return "\n".join(lines)
|
|
@@ -161,7 +161,7 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
161
161
|
domain: str = '',
|
|
162
162
|
session_id: str = '',
|
|
163
163
|
self_critique: str = '') -> str:
|
|
164
|
-
"""Write session diary entry at end of session.
|
|
164
|
+
"""Write session diary entry at end of session. OBLIGATORIO antes de cerrar.
|
|
165
165
|
|
|
166
166
|
Args:
|
|
167
167
|
decisions: What was decided and why (JSON array or structured text)
|
|
@@ -169,13 +169,16 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
169
169
|
discarded: Options/approaches considered but rejected, and why
|
|
170
170
|
pending: Items left unresolved, with doubt level
|
|
171
171
|
context_next: What the next session should know to continue effectively
|
|
172
|
-
mental_state: Internal state to transfer — thread of thought, tone, observations not yet shared, momentum.
|
|
173
|
-
user_signals: Observable signals from the user during session — response speed, tone, corrections given.
|
|
174
|
-
domain: Project context:
|
|
172
|
+
mental_state: Internal state to transfer — thread of thought, tone, observations not yet shared, momentum. Written in first person as NEXO.
|
|
173
|
+
user_signals: Observable signals from the user during session — response speed (fast='s' vs detailed explanations), tone (direct, frustrated, exploratory, excited), corrections given, topics he initiated vs topics NEXO initiated. Factual observations only, not interpretations.
|
|
174
|
+
domain: Project context: project-a, project-b, nexo, other
|
|
175
175
|
session_id: Current session ID
|
|
176
|
-
self_critique:
|
|
176
|
+
self_critique: OBLIGATORIO. Post-mortem honesto: ¿Qué debí hacer proactivamente? ¿the user tuvo que pedirme algo que yo debería haber detectado? ¿Repetí errores conocidos? ¿Qué regla concreta evitaría la repetición? Si sesión limpia: 'Sin autocrítica — sesión limpia.'
|
|
177
177
|
"""
|
|
178
178
|
sid = session_id or 'unknown'
|
|
179
|
+
# Clean up draft — manual diary supersedes it
|
|
180
|
+
from db import delete_diary_draft
|
|
181
|
+
delete_diary_draft(sid)
|
|
179
182
|
result = write_session_diary(sid, decisions, summary, discarded, pending, context_next, mental_state, domain=domain, user_signals=user_signals, self_critique=self_critique)
|
|
180
183
|
if "error" in result:
|
|
181
184
|
return f"ERROR: {result['error']}"
|
|
@@ -185,7 +188,7 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
185
188
|
if mental_state and mental_state.strip():
|
|
186
189
|
_cognitive_ingest_safe(mental_state, "mental_state", f"diary#{result.get('id','')}", f"Session {sid} state", domain)
|
|
187
190
|
domain_str = f" [{domain}]" if domain else ""
|
|
188
|
-
msg = f"
|
|
191
|
+
msg = f"Diario sesión #{result['id']}{domain_str} guardado: {summary[:80]}"
|
|
189
192
|
|
|
190
193
|
# Trust score & sentiment summary for session diary
|
|
191
194
|
try:
|
|
@@ -206,14 +209,14 @@ def handle_session_diary_write(decisions: str, summary: str,
|
|
|
206
209
|
"SELECT COUNT(*) FROM change_log WHERE (commit_ref IS NULL OR commit_ref = '')"
|
|
207
210
|
).fetchone()[0]
|
|
208
211
|
if orphan_changes > 0:
|
|
209
|
-
warnings.append(f"{orphan_changes} changes
|
|
212
|
+
warnings.append(f"{orphan_changes} changes sin commit_ref")
|
|
210
213
|
orphan_decisions = conn.execute(
|
|
211
214
|
"SELECT COUNT(*) FROM decisions WHERE (outcome IS NULL OR outcome = '') AND created_at < datetime('now', '-7 days')"
|
|
212
215
|
).fetchone()[0]
|
|
213
216
|
if orphan_decisions > 0:
|
|
214
|
-
warnings.append(f"{orphan_decisions} decisions >7d
|
|
217
|
+
warnings.append(f"{orphan_decisions} decisions >7d sin outcome")
|
|
215
218
|
if warnings:
|
|
216
|
-
msg += "\n
|
|
219
|
+
msg += "\n⚠ EPISODIC GAPS: " + " | ".join(warnings) + " — resolver antes de cerrar sesión."
|
|
217
220
|
|
|
218
221
|
return msg
|
|
219
222
|
|
|
@@ -226,29 +229,29 @@ def handle_session_diary_read(session_id: str = '', last_n: int = 3, last_day: b
|
|
|
226
229
|
session_id: Specific session ID to read (optional)
|
|
227
230
|
last_n: Number of recent entries to return (default 3)
|
|
228
231
|
last_day: If true, returns ALL entries from the most recent day (multi-terminal aware). Use this at startup.
|
|
229
|
-
domain: Filter by project context:
|
|
232
|
+
domain: Filter by project context: project-a, project-b, nexo, other
|
|
230
233
|
"""
|
|
231
234
|
results = read_session_diary(session_id, last_n, last_day, domain)
|
|
232
235
|
if not results:
|
|
233
|
-
return "
|
|
236
|
+
return "Sin entradas en el diario de sesiones."
|
|
234
237
|
|
|
235
|
-
lines = [f"
|
|
238
|
+
lines = [f"DIARIO DE SESIONES ({len(results)}):"]
|
|
236
239
|
for d in results:
|
|
237
240
|
domain_label = f" [{d['domain']}]" if d.get('domain') else ""
|
|
238
|
-
lines.append(f"\n ---
|
|
239
|
-
lines.append(f"
|
|
241
|
+
lines.append(f"\n --- Sesión {d['session_id']}{domain_label} ({d['created_at']}) ---")
|
|
242
|
+
lines.append(f" Resumen: {d['summary']}")
|
|
240
243
|
if d.get('decisions'):
|
|
241
|
-
lines.append(f"
|
|
244
|
+
lines.append(f" Decisiones: {d['decisions'][:200]}")
|
|
242
245
|
if d.get('discarded'):
|
|
243
|
-
lines.append(f"
|
|
246
|
+
lines.append(f" Descartado: {d['discarded'][:150]}")
|
|
244
247
|
if d.get('pending'):
|
|
245
|
-
lines.append(f"
|
|
248
|
+
lines.append(f" Pendiente: {d['pending'][:150]}")
|
|
246
249
|
if d.get('context_next'):
|
|
247
|
-
lines.append(f"
|
|
250
|
+
lines.append(f" Para siguiente sesión: {d['context_next'][:200]}")
|
|
248
251
|
if d.get('mental_state'):
|
|
249
|
-
lines.append(f"
|
|
252
|
+
lines.append(f" Estado mental: {d['mental_state'][:300]}")
|
|
250
253
|
if d.get('user_signals'):
|
|
251
|
-
lines.append(f"
|
|
254
|
+
lines.append(f" Señales the user: {d['user_signals'][:300]}")
|
|
252
255
|
return "\n".join(lines)
|
|
253
256
|
|
|
254
257
|
|
|
@@ -256,13 +259,13 @@ def handle_change_log(files: str, what_changed: str, why: str,
|
|
|
256
259
|
triggered_by: str = '', affects: str = '',
|
|
257
260
|
risks: str = '', verify: str = '',
|
|
258
261
|
commit_ref: str = '', session_id: str = '') -> str:
|
|
259
|
-
"""Log a code/config change with full context.
|
|
262
|
+
"""Log a code/config change with full context. OBLIGATORIO after every edit to production code.
|
|
260
263
|
|
|
261
264
|
Args:
|
|
262
265
|
files: File path(s) modified (comma-separated if multiple)
|
|
263
266
|
what_changed: What was modified — functions, lines, behavior change
|
|
264
267
|
why: WHY this change was needed — the root cause, not just "fix bug"
|
|
265
|
-
triggered_by: What triggered this — bug report, metric, user's request, followup ID
|
|
268
|
+
triggered_by: What triggered this — bug report, metric, the user's request, followup ID
|
|
266
269
|
affects: What systems/users/flows this change impacts
|
|
267
270
|
risks: What could go wrong — regressions, edge cases, dependencies
|
|
268
271
|
verify: How to verify this works — what to check, followup ID if created
|
|
@@ -270,7 +273,7 @@ def handle_change_log(files: str, what_changed: str, why: str,
|
|
|
270
273
|
session_id: Current session ID
|
|
271
274
|
"""
|
|
272
275
|
if not files or not what_changed or not why:
|
|
273
|
-
return "ERROR: files, what_changed,
|
|
276
|
+
return "ERROR: files, what_changed, y why son obligatorios"
|
|
274
277
|
sid = session_id or 'unknown'
|
|
275
278
|
result = log_change(sid, files, what_changed, why, triggered_by, affects, risks, verify, commit_ref)
|
|
276
279
|
if "error" in result:
|
|
@@ -280,9 +283,9 @@ def handle_change_log(files: str, what_changed: str, why: str,
|
|
|
280
283
|
"change", f"C{result.get('id','')}", (what_changed or '')[:80], ""
|
|
281
284
|
)
|
|
282
285
|
change_id = result['id']
|
|
283
|
-
msg = f"Change #{change_id}
|
|
286
|
+
msg = f"Change #{change_id} registrado: {files[:60]} — {what_changed[:60]}"
|
|
284
287
|
if not commit_ref:
|
|
285
|
-
msg += f"\n
|
|
288
|
+
msg += f"\n⚠ SIN COMMIT. Usa nexo_change_commit({change_id}, 'hash') después del push, o 'server-direct' si fue edición directa en servidor."
|
|
286
289
|
return msg
|
|
287
290
|
|
|
288
291
|
|
|
@@ -296,22 +299,22 @@ def handle_change_search(query: str = '', files: str = '', days: int = 30) -> st
|
|
|
296
299
|
"""
|
|
297
300
|
results = search_changes(query, files, days)
|
|
298
301
|
if not results:
|
|
299
|
-
scope = f"'{query}'" if query else files or '
|
|
300
|
-
return f"
|
|
302
|
+
scope = f"'{query}'" if query else files or 'todos'
|
|
303
|
+
return f"Sin cambios encontrados para {scope} en {days} días."
|
|
301
304
|
|
|
302
|
-
lines = [f"
|
|
305
|
+
lines = [f"CAMBIOS ({len(results)}):"]
|
|
303
306
|
for c in results:
|
|
304
307
|
commit = f" [{c['commit_ref'][:8]}]" if c.get('commit_ref') else ""
|
|
305
308
|
lines.append(f" #{c['id']} ({c['created_at']}){commit}")
|
|
306
|
-
lines.append(f"
|
|
307
|
-
lines.append(f"
|
|
308
|
-
lines.append(f"
|
|
309
|
+
lines.append(f" Archivos: {c['files'][:100]}")
|
|
310
|
+
lines.append(f" Qué: {c['what_changed'][:120]}")
|
|
311
|
+
lines.append(f" Por qué: {c['why'][:120]}")
|
|
309
312
|
if c.get('triggered_by'):
|
|
310
313
|
lines.append(f" Trigger: {c['triggered_by'][:80]}")
|
|
311
314
|
if c.get('affects'):
|
|
312
|
-
lines.append(f"
|
|
315
|
+
lines.append(f" Afecta: {c['affects'][:80]}")
|
|
313
316
|
if c.get('risks'):
|
|
314
|
-
lines.append(f"
|
|
317
|
+
lines.append(f" Riesgos: {c['risks'][:80]}")
|
|
315
318
|
return "\n".join(lines)
|
|
316
319
|
|
|
317
320
|
|
|
@@ -325,7 +328,7 @@ def handle_change_commit(id: int, commit_ref: str) -> str:
|
|
|
325
328
|
result = update_change_commit(id, commit_ref)
|
|
326
329
|
if "error" in result:
|
|
327
330
|
return f"ERROR: {result['error']}"
|
|
328
|
-
return f"Change #{id}
|
|
331
|
+
return f"Change #{id} vinculado a commit {commit_ref[:8]}"
|
|
329
332
|
|
|
330
333
|
|
|
331
334
|
def handle_recall(query: str, days: int = 30) -> str:
|
|
@@ -337,9 +340,9 @@ def handle_recall(query: str, days: int = 30) -> str:
|
|
|
337
340
|
"""
|
|
338
341
|
results = recall(query, days)
|
|
339
342
|
if not results:
|
|
340
|
-
return f"
|
|
343
|
+
return f"Sin resultados para '{query}' en los últimos {days} días."
|
|
341
344
|
|
|
342
|
-
# Passive rehearsal — strengthen matching cognitive memories
|
|
345
|
+
# v1.2: Passive rehearsal — strengthen matching cognitive memories
|
|
343
346
|
try:
|
|
344
347
|
import cognitive
|
|
345
348
|
for r in results[:5]:
|
|
@@ -350,18 +353,18 @@ def handle_recall(query: str, days: int = 30) -> str:
|
|
|
350
353
|
pass
|
|
351
354
|
|
|
352
355
|
SOURCE_LABELS = {
|
|
353
|
-
'change_log': '[
|
|
354
|
-
'change': '[
|
|
355
|
-
'decision': '[
|
|
356
|
+
'change_log': '[CAMBIO]',
|
|
357
|
+
'change': '[CAMBIO]',
|
|
358
|
+
'decision': '[DECISIÓN]',
|
|
356
359
|
'learning': '[LEARNING]',
|
|
357
360
|
'followup': '[FOLLOWUP]',
|
|
358
|
-
'diary': '[
|
|
359
|
-
'entity': '[
|
|
360
|
-
'file': '[
|
|
361
|
-
'code': '[
|
|
361
|
+
'diary': '[DIARIO]',
|
|
362
|
+
'entity': '[ENTIDAD]',
|
|
363
|
+
'file': '[ARCHIVO]',
|
|
364
|
+
'code': '[CÓDIGO]',
|
|
362
365
|
}
|
|
363
366
|
|
|
364
|
-
lines = [f"RECALL '{query}' — {len(results)}
|
|
367
|
+
lines = [f"RECALL '{query}' — {len(results)} resultado(s):"]
|
|
365
368
|
for r in results:
|
|
366
369
|
source = r.get('source', '?')
|
|
367
370
|
label = SOURCE_LABELS.get(source, f"[{source.upper()}]")
|
|
@@ -374,6 +377,8 @@ def handle_recall(query: str, days: int = 30) -> str:
|
|
|
374
377
|
lines.append(f" {title}")
|
|
375
378
|
if snippet:
|
|
376
379
|
lines.append(f" {snippet}")
|
|
380
|
+
if len(results) < 5:
|
|
381
|
+
lines.append(f"\n 💡 Solo {len(results)} resultados en NEXO. Para historial más profundo, busca también en claude-mem: mcp__plugin_claude-mem_mcp-search__search")
|
|
377
382
|
return "\n".join(lines)
|
|
378
383
|
|
|
379
384
|
|
|
@@ -387,5 +392,5 @@ TOOLS = [
|
|
|
387
392
|
(handle_memory_review_queue, "nexo_memory_review_queue", "Show decisions and learnings that are due for review"),
|
|
388
393
|
(handle_session_diary_write, "nexo_session_diary_write", "Write end-of-session diary with decisions, discards, and context for next session"),
|
|
389
394
|
(handle_session_diary_read, "nexo_session_diary_read", "Read recent session diaries for context continuity"),
|
|
390
|
-
(handle_recall, "nexo_recall", "Search across ALL NEXO memory — changes, decisions, learnings, followups, diary, entities, .md files, code files."),
|
|
395
|
+
(handle_recall, "nexo_recall", "Search across ALL NEXO memory — changes, decisions, learnings, followups, diary, entities, .md files, code files. For deep historical context (older sessions, past work), also search claude-mem (mcp__plugin_claude-mem_mcp-search__search)."),
|
|
391
396
|
]
|
package/src/tools_sessions.py
CHANGED
|
@@ -65,7 +65,7 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
|
|
|
65
65
|
Args:
|
|
66
66
|
sid: Session ID
|
|
67
67
|
task: Current task description
|
|
68
|
-
context_hint: Optional — last 2-3 sentences from user or current topic. If provided AND
|
|
68
|
+
context_hint: Optional — last 2-3 sentences from the user or current topic. If provided AND
|
|
69
69
|
it diverges from startup memories, returns fresh cognitive memories for the new context.
|
|
70
70
|
"""
|
|
71
71
|
from db import get_db
|
|
@@ -88,7 +88,7 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
|
|
|
88
88
|
age = _format_age(q["created_epoch"])
|
|
89
89
|
parts.append(f" {q['qid']} de {q['from_sid']} ({age}): {q['question']}")
|
|
90
90
|
|
|
91
|
-
# Sentiment detection: analyze context_hint for user's mood
|
|
91
|
+
# Sentiment detection: analyze context_hint for the user's mood
|
|
92
92
|
if context_hint and len(context_hint.strip()) >= 10:
|
|
93
93
|
try:
|
|
94
94
|
import cognitive
|
|
@@ -137,6 +137,53 @@ def handle_heartbeat(sid: str, task: str, context_hint: str = '') -> str:
|
|
|
137
137
|
except Exception:
|
|
138
138
|
pass # Mid-session RAG is best-effort
|
|
139
139
|
|
|
140
|
+
# Incremental diary draft — accumulate every heartbeat, full UPSERT every 5
|
|
141
|
+
try:
|
|
142
|
+
import json as _json
|
|
143
|
+
from db import get_diary_draft, upsert_diary_draft
|
|
144
|
+
|
|
145
|
+
draft = get_diary_draft(sid)
|
|
146
|
+
hb_count = (draft["heartbeat_count"] + 1) if draft else 1
|
|
147
|
+
|
|
148
|
+
existing_tasks = _json.loads(draft["tasks_seen"]) if draft else []
|
|
149
|
+
if task and task not in existing_tasks:
|
|
150
|
+
existing_tasks.append(task)
|
|
151
|
+
|
|
152
|
+
_conn = get_db()
|
|
153
|
+
if hb_count % 5 == 0 or hb_count == 1:
|
|
154
|
+
change_rows = _conn.execute(
|
|
155
|
+
"SELECT id FROM change_log WHERE session_id = ? ORDER BY id", (sid,)
|
|
156
|
+
).fetchall()
|
|
157
|
+
change_ids = [r["id"] for r in change_rows]
|
|
158
|
+
|
|
159
|
+
decision_rows = _conn.execute(
|
|
160
|
+
"SELECT id FROM decisions WHERE session_id = ? ORDER BY id", (sid,)
|
|
161
|
+
).fetchall()
|
|
162
|
+
decision_ids = [r["id"] for r in decision_rows]
|
|
163
|
+
|
|
164
|
+
summary = f"Session tasks: {', '.join(existing_tasks[-10:])}"
|
|
165
|
+
upsert_diary_draft(
|
|
166
|
+
sid=sid,
|
|
167
|
+
tasks_seen=_json.dumps(existing_tasks),
|
|
168
|
+
change_ids=_json.dumps(change_ids),
|
|
169
|
+
decision_ids=_json.dumps(decision_ids),
|
|
170
|
+
last_context_hint=context_hint[:300] if context_hint else '',
|
|
171
|
+
heartbeat_count=hb_count,
|
|
172
|
+
summary_draft=summary,
|
|
173
|
+
)
|
|
174
|
+
else:
|
|
175
|
+
upsert_diary_draft(
|
|
176
|
+
sid=sid,
|
|
177
|
+
tasks_seen=_json.dumps(existing_tasks),
|
|
178
|
+
change_ids=draft["change_ids"] if draft else '[]',
|
|
179
|
+
decision_ids=draft["decision_ids"] if draft else '[]',
|
|
180
|
+
last_context_hint=context_hint[:300] if context_hint else (draft["last_context_hint"] if draft else ''),
|
|
181
|
+
heartbeat_count=hb_count,
|
|
182
|
+
summary_draft=draft["summary_draft"] if draft else f"Session task: {task}",
|
|
183
|
+
)
|
|
184
|
+
except Exception:
|
|
185
|
+
pass # Draft accumulation is best-effort, never block heartbeat
|
|
186
|
+
|
|
140
187
|
# Diary reminder: after 30 min active with no diary entry
|
|
141
188
|
conn = get_db()
|
|
142
189
|
row = conn.execute("SELECT started_epoch FROM sessions WHERE sid = ?", (sid,)).fetchone()
|