claude-memory-agent 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +107 -0
- package/README.md +200 -0
- package/agent_card.py +512 -0
- package/bin/cli.js +181 -0
- package/bin/postinstall.js +216 -0
- package/config.py +104 -0
- package/dashboard.html +2689 -0
- package/hooks/README.md +196 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/hooks/auto-detect-response.py +348 -0
- package/hooks/auto_capture.py +255 -0
- package/hooks/detect-correction.py +173 -0
- package/hooks/grounding-hook.py +348 -0
- package/hooks/log-tool-use.py +234 -0
- package/hooks/log-user-request.py +208 -0
- package/hooks/pre-tool-decision.py +218 -0
- package/hooks/problem-detector.py +343 -0
- package/hooks/session_end.py +192 -0
- package/hooks/session_start.py +227 -0
- package/install.py +887 -0
- package/main.py +2859 -0
- package/manager.py +997 -0
- package/package.json +55 -0
- package/requirements.txt +8 -0
- package/run_server.py +136 -0
- package/services/__init__.py +50 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/services/agent_registry.py +753 -0
- package/services/auth.py +331 -0
- package/services/auto_inject.py +250 -0
- package/services/claude_md_sync.py +275 -0
- package/services/cleanup.py +667 -0
- package/services/compaction_flush.py +447 -0
- package/services/confidence.py +301 -0
- package/services/daily_log.py +333 -0
- package/services/database.py +2485 -0
- package/services/embeddings.py +358 -0
- package/services/insights.py +632 -0
- package/services/llm_analyzer.py +595 -0
- package/services/memory_md_sync.py +409 -0
- package/services/retry_queue.py +453 -0
- package/services/timeline.py +579 -0
- package/services/vector_index.py +398 -0
- package/services/websocket.py +257 -0
- package/skills/__init__.py +6 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/skills/admin.py +469 -0
- package/skills/checkpoint.py +198 -0
- package/skills/claude_md.py +363 -0
- package/skills/cleanup.py +241 -0
- package/skills/grounding.py +801 -0
- package/skills/insights.py +231 -0
- package/skills/natural_language.py +277 -0
- package/skills/retrieve.py +67 -0
- package/skills/search.py +213 -0
- package/skills/state.py +182 -0
- package/skills/store.py +179 -0
- package/skills/summarize.py +588 -0
- package/skills/timeline.py +387 -0
- package/skills/verification.py +391 -0
- package/start_daemon.py +155 -0
- package/test_automation.py +221 -0
- package/test_complete.py +338 -0
- package/test_full.py +322 -0
- package/update_system.py +817 -0
- package/verify_db.py +134 -0
|
@@ -0,0 +1,588 @@
|
|
|
1
|
+
"""Session summarization skill with auto-summarization and session handoff.
|
|
2
|
+
|
|
3
|
+
Provides:
|
|
4
|
+
- Manual session summarization
|
|
5
|
+
- Automatic end-of-session summarization
|
|
6
|
+
- Session handoff for continuity
|
|
7
|
+
- Diary-style detailed session entries
|
|
8
|
+
"""
|
|
9
|
+
from typing import Dict, Any, Optional, List
|
|
10
|
+
from datetime import datetime, timedelta
|
|
11
|
+
from services.database import DatabaseService
|
|
12
|
+
from services.embeddings import EmbeddingService
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
async def summarize_session(
    db: DatabaseService,
    embeddings: EmbeddingService,
    session_id: str,
    summary: str,
    key_decisions: Optional[List[str]] = None,
    code_patterns: Optional[List[str]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    project_path: Optional[str] = None
) -> Dict[str, Any]:
    """
    Store a session summary with optional key decisions and code patterns.

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        session_id: The session identifier
        summary: Summary of the session
        key_decisions: List of key decisions made during session
        code_patterns: List of important code patterns discovered
        metadata: Additional metadata
        project_path: Project this session worked on

    Returns:
        Dict with stored summary information
    """
    stored_ids: List[Dict[str, Any]] = []

    # Store the main session summary first so related items can link back to it.
    summary_embedding = await embeddings.generate_embedding(summary)
    summary_meta = {
        **(metadata or {}),
        "summarized_at": datetime.now().isoformat()
    }
    summary_id = await db.store_memory(
        memory_type="session",
        content=summary,
        embedding=summary_embedding,
        metadata=summary_meta,
        session_id=session_id,
        project_path=project_path,
        importance=8  # Session summaries are high importance
    )
    stored_ids.append({"type": "session", "id": summary_id})

    async def _store_related(
        items: Optional[List[str]], memory_type: str, importance: int
    ) -> None:
        """Embed and store each item, linked back to the session summary."""
        for content in items or []:
            item_embedding = await embeddings.generate_embedding(content)
            item_id = await db.store_memory(
                memory_type=memory_type,
                content=content,
                embedding=item_embedding,
                metadata={"session_summary_id": summary_id},
                session_id=session_id,
                project_path=project_path,
                importance=importance
            )
            stored_ids.append({"type": memory_type, "id": item_id})

    # Decisions are important (7); code patterns are useful (6).
    await _store_related(key_decisions, "decision", 7)
    await _store_related(code_patterns, "code", 6)

    return {
        "success": True,
        "session_id": session_id,
        "project_path": project_path,
        "stored_items": stored_ids,
        "total_items": len(stored_ids),
        "message": f"Session {session_id} summarized with {len(stored_ids)} items"
    }
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
async def auto_summarize_session(
    db: DatabaseService,
    embeddings: EmbeddingService,
    session_id: str,
    project_path: Optional[str] = None
) -> Dict[str, Any]:
    """Automatically summarize a session based on its timeline events.

    Analyzes the session's timeline to extract:
    - Goals and outcomes
    - Key decisions made
    - Patterns observed
    - Unresolved issues

    Args:
        db: Database service
        embeddings: Embeddings service
        session_id: Session to summarize
        project_path: Project context

    Returns:
        Generated summary with extracted components
    """
    import json

    # Get all timeline events for this session, in execution order.
    events = await db.execute_query(
        """
        SELECT event_type, summary, details, outcome, status, is_anchor, created_at
        FROM timeline_events
        WHERE session_id = ?
        ORDER BY sequence_num ASC
        """,
        (session_id,)
    )

    if not events:
        return {
            "success": False,
            "error": "No timeline events found for session",
            "session_id": session_id
        }

    # Get session state for goal / pending-question context.
    state = await db.execute_query(
        """
        SELECT current_goal, decisions_summary, pending_questions
        FROM session_state
        WHERE session_id = ?
        """,
        (session_id,)
    )
    session_state = state[0] if state else {}

    # Bucket events by type; anchors and unresolved items are tracked on the side.
    goals: List[str] = []
    decisions: List[str] = []
    observations: List[str] = []
    errors: List[str] = []
    unresolved: List[str] = []
    anchors: List[str] = []

    for event in events:
        event_type = event.get("event_type", "")
        summary = event.get("summary", "")
        status = event.get("status", "")

        if event_type == "goal":
            goals.append(summary)
        elif event_type == "decision":
            decisions.append(summary)
        elif event_type == "observation":
            observations.append(summary)
        elif event_type == "error":
            errors.append(summary)
            if status != "resolved":
                unresolved.append(f"Error: {summary}")

        if event.get("is_anchor"):
            anchors.append(summary)

        # Any pending/incomplete status counts as unresolved work.
        if status in ("pending", "blocked", "failed"):
            unresolved.append(f"{event_type}: {summary}")

    # Pending questions are stored as a JSON-encoded list in session state.
    if session_state.get("pending_questions"):
        try:
            for q in json.loads(session_state["pending_questions"]):
                unresolved.append(f"Unanswered: {q}")
        except (json.JSONDecodeError, TypeError):
            # Malformed or non-iterable payload: skip rather than fail the summary.
            pass

    # Compose the natural-language summary.
    summary_parts = []

    # Goals and outcomes
    if goals:
        summary_parts.append(f"Goals: {'; '.join(goals[:3])}")
    elif session_state.get("current_goal"):
        summary_parts.append(f"Goal: {session_state['current_goal']}")

    # Key accomplishments
    completed_count = sum(1 for e in events if e.get("status") == "completed")
    summary_parts.append(f"Completed {completed_count} actions across {len(events)} events.")

    # Decisions
    if decisions:
        summary_parts.append(f"Key decisions: {'; '.join(decisions[:3])}")

    # Issues
    if errors:
        summary_parts.append(f"Encountered {len(errors)} error(s).")

    # Anchors (verified facts)
    if anchors:
        summary_parts.append(f"Verified facts: {len(anchors)}")

    summary_text = " ".join(summary_parts)

    # Persist via the manual summarization path so storage stays consistent.
    result = await summarize_session(
        db=db,
        embeddings=embeddings,
        session_id=session_id,
        summary=summary_text,
        key_decisions=decisions[:5] if decisions else None,
        metadata={
            "auto_generated": True,
            "event_count": len(events),
            "unresolved_count": len(unresolved)
        },
        project_path=project_path
    )

    result["auto_summary"] = {
        "goals": goals[:5],
        "decisions": decisions[:5],
        "observations": observations[:5],
        "errors": errors[:5],
        "unresolved": unresolved[:5],
        "anchors": anchors[:5],
        "event_count": len(events)
    }

    return result
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
async def get_session_handoff(
    db: DatabaseService,
    embeddings: EmbeddingService,
    project_path: Optional[str] = None,
    include_last_n_sessions: int = 3
) -> Dict[str, Any]:
    """Get context handoff from previous sessions for continuity.

    Pulls recent session summaries, still-open timeline items, and
    high-importance decisions so a fresh session can pick up where the
    previous ones left off.

    Args:
        db: Database service
        embeddings: Embeddings service
        project_path: Filter to specific project
        include_last_n_sessions: Number of recent sessions to include

    Returns:
        Handoff context with previous session summaries
    """
    # Most recent stored session summaries, optionally scoped to one project.
    sql = """
        SELECT id, content, session_id, project_path, metadata, importance, created_at
        FROM memories
        WHERE type = 'session'
    """
    args: List[Any] = []
    if project_path:
        sql += " AND project_path = ?"
        args.append(project_path)
    sql += " ORDER BY created_at DESC LIMIT ?"
    args.append(include_last_n_sessions)

    summaries = await db.execute_query(sql, tuple(args))

    if not summaries:
        return {
            "success": True,
            "has_previous_sessions": False,
            "message": "No previous session summaries found",
            "handoff": None
        }

    # Collect still-open timeline items from those sessions.
    session_ids = [row["session_id"] for row in summaries if row.get("session_id")]

    unresolved_items: List[Dict[str, Any]] = []
    if session_ids:
        placeholders = ",".join("?" * len(session_ids))
        open_rows = await db.execute_query(
            f"""
            SELECT summary, event_type, session_id
            FROM timeline_events
            WHERE session_id IN ({placeholders})
            AND status IN ('pending', 'blocked', 'failed')
            ORDER BY created_at DESC
            LIMIT 10
            """,
            tuple(session_ids)
        )
        for row in (open_rows or []):
            unresolved_items.append(
                {"type": row["event_type"], "summary": row["summary"]}
            )

    # High-importance decisions give extra context regardless of session.
    decisions = await db.execute_query(
        """
        SELECT content, project_path, created_at
        FROM memories
        WHERE type = 'decision'
        AND importance >= 7
        ORDER BY created_at DESC
        LIMIT 5
        """
    )

    previous_sessions = []
    for row in summaries:
        previous_sessions.append({
            "session_id": row["session_id"],
            "summary": row["content"][:500],
            "project_path": row.get("project_path"),
            "created_at": row["created_at"]
        })

    recent_decisions = [
        {"content": row["content"][:200], "created_at": row["created_at"]}
        for row in (decisions or [])
    ]

    handoff = {
        "previous_sessions": previous_sessions,
        "unresolved_items": unresolved_items,
        "recent_decisions": recent_decisions,
        "context_message": _generate_handoff_message(summaries, unresolved_items)
    }

    return {
        "success": True,
        "has_previous_sessions": True,
        "session_count": len(summaries),
        "unresolved_count": len(unresolved_items),
        "handoff": handoff
    }
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
def _generate_handoff_message(summaries: List[Dict], unresolved: List[Dict]) -> str:
|
|
357
|
+
"""Generate a human-readable handoff message."""
|
|
358
|
+
parts = []
|
|
359
|
+
|
|
360
|
+
if summaries:
|
|
361
|
+
last = summaries[0]
|
|
362
|
+
parts.append(f"Last session: {last.get('content', '')[:200]}")
|
|
363
|
+
|
|
364
|
+
if unresolved:
|
|
365
|
+
items = [u["summary"][:50] for u in unresolved[:3]]
|
|
366
|
+
parts.append(f"Pending items: {'; '.join(items)}")
|
|
367
|
+
|
|
368
|
+
if not parts:
|
|
369
|
+
return "No previous context available."
|
|
370
|
+
|
|
371
|
+
return " | ".join(parts)
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
async def create_diary_entry(
    db: DatabaseService,
    embeddings: EmbeddingService,
    session_id: str,
    project_path: Optional[str] = None,
    user_notes: Optional[str] = None
) -> Dict[str, Any]:
    """Create a detailed diary-style entry for a session.

    Generates a structured narrative of the session including:
    - Timeline of events
    - Key milestones
    - Learnings and insights
    - Recommendations for future sessions

    Args:
        db: Database service
        embeddings: Embeddings service
        session_id: Session to create diary for
        project_path: Project context
        user_notes: Optional user-provided notes to include

    Returns:
        Formatted diary entry
    """
    # Get session timeline in execution order.
    events = await db.execute_query(
        """
        SELECT event_type, summary, details, outcome, status, is_anchor,
               created_at, confidence
        FROM timeline_events
        WHERE session_id = ?
        ORDER BY sequence_num ASC
        """,
        (session_id,)
    )

    if not events:
        return {
            "success": False,
            "error": "No timeline events found for session"
        }

    # Get session state (goal, etc.) for the header sections.
    state = await db.execute_query(
        """
        SELECT current_goal, decisions_summary, entity_registry,
               created_at, updated_at
        FROM session_state
        WHERE session_id = ?
        """,
        (session_id,)
    )
    session_state = state[0] if state else {}

    # Header: session id, date, duration and project.
    start_time = events[0]["created_at"] if events else "Unknown"
    end_time = events[-1]["created_at"] if events else "Unknown"

    diary_parts = [
        f"# Session Diary: {session_id[:8]}...",
        f"**Date:** {start_time[:10] if start_time else 'Unknown'}",
        f"**Duration:** {start_time} to {end_time}",
        f"**Project:** {project_path or 'Not specified'}",
        "",
        "## Goals",
        session_state.get("current_goal", "No explicit goal recorded."),
        ""
    ]

    # Timeline section. Icon map is built once, outside the loop.
    status_icons = {
        "completed": "[OK]",
        "failed": "[FAIL]",
        "pending": "[...]",
        "blocked": "[BLOCK]"
    }
    diary_parts.append("## Session Timeline")
    for i, event in enumerate(events[:20], 1):  # Limit to 20 events
        status_icon = status_icons.get(event.get("status", ""), "[-]")
        anchor_mark = " (ANCHOR)" if event.get("is_anchor") else ""
        diary_parts.append(
            f"{i}. {status_icon} **{event['event_type']}**: {event['summary'][:100]}{anchor_mark}"
        )

    # Decisions section
    decisions = [e for e in events if e["event_type"] == "decision"]
    if decisions:
        diary_parts.extend(["", "## Key Decisions"])
        for d in decisions[:5]:
            diary_parts.append(f"- {d['summary']}")

    # Learnings section
    observations = [e for e in events if e["event_type"] == "observation"]
    if observations:
        diary_parts.extend(["", "## Observations & Learnings"])
        for o in observations[:5]:
            diary_parts.append(f"- {o['summary']}")

    # Errors and issues
    errors = [e for e in events if e["event_type"] == "error"]
    if errors:
        diary_parts.extend(["", "## Issues Encountered"])
        for e in errors[:5]:
            resolved = "Resolved" if e.get("status") == "completed" else "Unresolved"
            diary_parts.append(f"- [{resolved}] {e['summary']}")

    # Anchored facts
    anchors = [e for e in events if e.get("is_anchor")]
    if anchors:
        diary_parts.extend(["", "## Verified Facts (Anchors)"])
        for a in anchors[:5]:
            diary_parts.append(f"- {a['summary']}")

    # User notes
    if user_notes:
        diary_parts.extend(["", "## User Notes", user_notes])

    # Statistics
    diary_parts.extend([
        "",
        "## Statistics",
        f"- Total events: {len(events)}",
        f"- Decisions made: {len(decisions)}",
        f"- Errors encountered: {len(errors)}",
        f"- Anchored facts: {len(anchors)}"
    ])

    diary_content = "\n".join(diary_parts)

    # Store diary as a high-importance session memory. Only the first 2000
    # chars are embedded to keep the embedding call bounded.
    diary_embedding = await embeddings.generate_embedding(diary_content[:2000])
    diary_id = await db.store_memory(
        memory_type="session",
        content=diary_content,
        embedding=diary_embedding,
        metadata={
            "diary_entry": True,
            "event_count": len(events),
            "decision_count": len(decisions),
            "has_user_notes": user_notes is not None
        },
        session_id=session_id,
        project_path=project_path,
        importance=9  # Diary entries are very important
    )

    return {
        "success": True,
        "diary_id": diary_id,
        "session_id": session_id,
        "content": diary_content,
        "stats": {
            "event_count": len(events),
            "decision_count": len(decisions),
            "error_count": len(errors),
            "anchor_count": len(anchors)
        }
    }
|
|
536
|
+
|
|
537
|
+
|
|
538
|
+
async def check_session_inactivity(
    db: DatabaseService,
    session_id: str,
    inactivity_threshold_hours: float = 4.0
) -> Dict[str, Any]:
    """Check if a session has been inactive and should be auto-summarized.

    Args:
        db: Database service
        session_id: Session to check
        inactivity_threshold_hours: Hours of inactivity before triggering

    Returns:
        Whether session should be summarized
    """
    # Get the timestamp of the session's most recent timeline event.
    result = await db.execute_query(
        """
        SELECT MAX(created_at) as last_event
        FROM timeline_events
        WHERE session_id = ?
        """,
        (session_id,)
    )

    if not result or not result[0].get("last_event"):
        return {"should_summarize": False, "reason": "No events found"}

    last_event = result[0]["last_event"]

    try:
        # Normalize a trailing 'Z' (Zulu/UTC) so fromisoformat accepts it.
        last_dt = datetime.fromisoformat(last_event.replace('Z', '+00:00'))
        # Take "now" in the same awareness/zone as the stored timestamp:
        # datetime.now(None) is naive local time, datetime.now(tz) is aware.
        # The original code stripped tzinfo and compared against naive local
        # time, which skewed the result when timestamps carried a UTC offset
        # on a non-UTC host.
        now = datetime.now(last_dt.tzinfo)
        hours_inactive = (now - last_dt).total_seconds() / 3600

        if hours_inactive >= inactivity_threshold_hours:
            return {
                "should_summarize": True,
                "reason": f"Inactive for {hours_inactive:.1f} hours",
                "last_activity": last_event,
                "hours_inactive": hours_inactive
            }

        return {
            "should_summarize": False,
            "reason": f"Active within threshold ({hours_inactive:.1f}h < {inactivity_threshold_hours}h)",
            "last_activity": last_event,
            "hours_inactive": hours_inactive
        }
    except Exception as e:
        # Timestamp parsing is best-effort; never let a bad value crash the caller.
        return {"should_summarize": False, "reason": f"Error: {str(e)}"}
|