claude-memory-agent 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +107 -0
- package/README.md +200 -0
- package/agent_card.py +512 -0
- package/bin/cli.js +181 -0
- package/bin/postinstall.js +216 -0
- package/config.py +104 -0
- package/dashboard.html +2689 -0
- package/hooks/README.md +196 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/hooks/auto-detect-response.py +348 -0
- package/hooks/auto_capture.py +255 -0
- package/hooks/detect-correction.py +173 -0
- package/hooks/grounding-hook.py +348 -0
- package/hooks/log-tool-use.py +234 -0
- package/hooks/log-user-request.py +208 -0
- package/hooks/pre-tool-decision.py +218 -0
- package/hooks/problem-detector.py +343 -0
- package/hooks/session_end.py +192 -0
- package/hooks/session_start.py +227 -0
- package/install.py +887 -0
- package/main.py +2859 -0
- package/manager.py +997 -0
- package/package.json +55 -0
- package/requirements.txt +8 -0
- package/run_server.py +136 -0
- package/services/__init__.py +50 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/services/agent_registry.py +753 -0
- package/services/auth.py +331 -0
- package/services/auto_inject.py +250 -0
- package/services/claude_md_sync.py +275 -0
- package/services/cleanup.py +667 -0
- package/services/compaction_flush.py +447 -0
- package/services/confidence.py +301 -0
- package/services/daily_log.py +333 -0
- package/services/database.py +2485 -0
- package/services/embeddings.py +358 -0
- package/services/insights.py +632 -0
- package/services/llm_analyzer.py +595 -0
- package/services/memory_md_sync.py +409 -0
- package/services/retry_queue.py +453 -0
- package/services/timeline.py +579 -0
- package/services/vector_index.py +398 -0
- package/services/websocket.py +257 -0
- package/skills/__init__.py +6 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/skills/admin.py +469 -0
- package/skills/checkpoint.py +198 -0
- package/skills/claude_md.py +363 -0
- package/skills/cleanup.py +241 -0
- package/skills/grounding.py +801 -0
- package/skills/insights.py +231 -0
- package/skills/natural_language.py +277 -0
- package/skills/retrieve.py +67 -0
- package/skills/search.py +213 -0
- package/skills/state.py +182 -0
- package/skills/store.py +179 -0
- package/skills/summarize.py +588 -0
- package/skills/timeline.py +387 -0
- package/skills/verification.py +391 -0
- package/start_daemon.py +155 -0
- package/test_automation.py +221 -0
- package/test_complete.py +338 -0
- package/test_full.py +322 -0
- package/update_system.py +817 -0
- package/verify_db.py +134 -0
package/skills/admin.py
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
1
|
+
"""Admin skills for memory system management.
|
|
2
|
+
|
|
3
|
+
Provides:
|
|
4
|
+
- Embedding model switching
|
|
5
|
+
- Memory reindexing
|
|
6
|
+
- System statistics
|
|
7
|
+
- Background reindexing with progress tracking
|
|
8
|
+
"""
|
|
9
|
+
import asyncio
|
|
10
|
+
import time
|
|
11
|
+
from typing import Dict, Any, Optional, List
|
|
12
|
+
from services.embeddings import get_embedding_service, MODEL_CONFIGS
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Global reindex state for progress tracking.
# Module-level singleton: written by _background_reindex / cancel_reindex,
# read by get_reindex_progress and get_system_stats. Not thread-safe by
# itself; safe here because all mutation happens on the event loop.
_reindex_state: Dict[str, Any] = {
    "running": False,       # True while a background reindex task is active
    "progress": 0,          # memories processed so far in the current run
    "total": 0,             # total memories targeted by the current run
    "current_model": None,  # embedding model being applied
    "started_at": None,     # epoch seconds when the run began (time.time())
    "errors": [],           # per-memory error records (list of dicts)
    "completed_at": None    # epoch seconds when the run finished or was cancelled
}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
async def get_embedding_status(
    db,
    embeddings
) -> Dict[str, Any]:
    """Report the embedding service's current state.

    Combines the service's own status and health reports with a per-model
    tally of stored memories (rows whose model column is NULL are counted
    under the default 'nomic-embed-text').

    Returns:
        Status including model, health, and available models
    """
    service_status = embeddings.get_status()
    service_health = await embeddings.check_health()

    # Tally memories per embedding model; NULL model -> default model name.
    cur = db.conn.cursor()
    cur.execute("""
        SELECT embedding_model, COUNT(*) as count
        FROM memories
        GROUP BY embedding_model
    """)
    per_model: Dict[str, int] = {}
    for model_name, count in cur.fetchall():
        per_model[model_name or 'nomic-embed-text'] = count

    return {
        "success": True,
        "status": service_status,
        "health": service_health,
        "available_models": embeddings.get_available_models(),
        "memories_by_model": per_model
    }
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
async def switch_embedding_model(
    db,
    embeddings,
    model: str,
    reindex_existing: bool = False
) -> Dict[str, Any]:
    """Switch the default embedding model.

    Args:
        db: Database service
        embeddings: Embedding service
        model: New model name to use ("default" is always accepted)
        reindex_existing: If True, queue background reindex of existing memories

    Returns:
        Switch result with optional reindex status
    """
    # Reject unknown models up front, listing only concrete (non-alias) names.
    if model != "default" and model not in MODEL_CONFIGS:
        available = [
            name for name in MODEL_CONFIGS.keys()
            if "alias_for" not in MODEL_CONFIGS.get(name, {})
        ]
        return {
            "success": False,
            "error": f"Unknown model '{model}'. Available: {available}"
        }

    previous = embeddings.get_current_model()
    embeddings.set_model(model)
    current = embeddings.get_current_model()

    # Force a fresh health probe so availability reflects the new model.
    health = await embeddings.check_health(force=True)
    loaded = health.get("model_loaded", False)

    result = {
        "success": True,
        "old_model": previous,
        "new_model": current,
        "new_dimension": embeddings.get_dimension(),
        "model_available": loaded,
        "message": f"Switched from {previous} to {current}"
    }

    if not loaded:
        result["warning"] = f"Model '{current}' not found in Ollama. Run: ollama pull {current}"

    if reindex_existing:
        # Fire-and-forget: progress is observable via get_reindex_progress().
        asyncio.create_task(_background_reindex(db, embeddings, current))
        result["reindex_started"] = True
        result["message"] += ". Background reindexing started."

    return result
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
async def reindex_memories(
    db,
    embeddings,
    model: Optional[str] = None,
    project_path: Optional[str] = None,
    batch_size: int = 10,
    dry_run: bool = False
) -> Dict[str, Any]:
    """Reindex memories with current or specified embedding model.

    Args:
        db: Database service
        embeddings: Embedding service
        model: Model to use (None = current model)
        project_path: Filter to specific project
        batch_size: Number of memories per batch
        dry_run: If True, only count what would be reindexed

    Returns:
        Reindex results
    """
    global _reindex_state

    # Only one reindex run is allowed at a time.
    if _reindex_state["running"]:
        return {
            "success": False,
            "error": "Reindex already in progress",
            "progress": _reindex_state
        }

    target_model = model or embeddings.get_current_model()

    # Count memories whose embedding is missing or built with another model.
    sql = "SELECT COUNT(*) FROM memories WHERE embedding_model IS NULL OR embedding_model != ?"
    args: List[Any] = [target_model]
    if project_path:
        sql += " AND project_path = ?"
        args.append(project_path)

    cur = db.conn.cursor()
    cur.execute(sql, args)
    pending = cur.fetchone()[0]

    if dry_run:
        return {
            "success": True,
            "dry_run": True,
            "would_reindex": pending,
            "target_model": target_model,
            "project_filter": project_path
        }

    if pending == 0:
        return {
            "success": True,
            "message": "No memories need reindexing",
            "total": 0
        }

    # Kick off the background worker; progress is tracked in _reindex_state.
    asyncio.create_task(_background_reindex(db, embeddings, target_model, project_path, batch_size))

    return {
        "success": True,
        "message": f"Background reindexing started for {pending} memories",
        "total": pending,
        "target_model": target_model,
        "project_filter": project_path
    }
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
async def get_reindex_progress(
    db,
    embeddings
) -> Dict[str, Any]:
    """Get current reindex progress.

    Returns:
        Progress status including completion percentage
    """
    global _reindex_state

    # Neither running nor ever completed -> nothing to report.
    if not _reindex_state["running"] and _reindex_state["completed_at"] is None:
        return {
            "success": True,
            "status": "idle",
            "message": "No reindex in progress"
        }

    done = _reindex_state["progress"]
    total = _reindex_state["total"]
    pct = round(done / total * 100, 1) if total > 0 else 0

    report = {
        "success": True,
        "status": "running" if _reindex_state["running"] else "completed",
        "progress": done,
        "total": total,
        "progress_percent": pct,
        "model": _reindex_state["current_model"],
        "started_at": _reindex_state["started_at"],
        "errors_count": len(_reindex_state["errors"])
    }

    finished_at = _reindex_state["completed_at"]
    if finished_at:
        report["completed_at"] = finished_at
        report["duration_seconds"] = finished_at - _reindex_state["started_at"]

    if _reindex_state["errors"]:
        # Surface only the tail of the error log.
        report["recent_errors"] = _reindex_state["errors"][-5:]  # Last 5 errors

    return report
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
async def cancel_reindex(
    db,
    embeddings
) -> Dict[str, Any]:
    """Cancel a running reindex operation.

    Returns:
        Cancellation result
    """
    global _reindex_state

    if not _reindex_state["running"]:
        return {
            "success": False,
            "error": "No reindex in progress"
        }

    # Flipping the flag makes the background worker stop at its next check.
    _reindex_state["running"] = False
    _reindex_state["completed_at"] = time.time()

    done = _reindex_state["progress"]
    total = _reindex_state["total"]
    return {
        "success": True,
        "message": f"Reindex cancelled at {done}/{total}",
        "progress": done,
        "total": total
    }
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
async def get_model_info(
    db,
    embeddings,
    model: Optional[str] = None
) -> Dict[str, Any]:
    """Get detailed information about an embedding model.

    Args:
        db: Database service
        embeddings: Embedding service
        model: Model name (None = current model)

    Returns:
        Model details including dimension and availability
    """
    name = model or embeddings.get_current_model()

    # Resolve one level of aliasing in MODEL_CONFIGS.
    cfg = MODEL_CONFIGS.get(name, {})
    if "alias_for" in cfg:
        name = cfg["alias_for"]
        cfg = MODEL_CONFIGS.get(name, {})

    # The model counts as available if any installed Ollama model name contains it.
    installed = await embeddings.get_ollama_models()
    present = any(name in entry for entry in installed)

    # Count memories embedded with this model.
    cur = db.conn.cursor()
    cur.execute(
        "SELECT COUNT(*) FROM memories WHERE embedding_model = ?",
        [name]
    )
    used_by = cur.fetchone()[0]

    return {
        "success": True,
        "model": name,
        "dimension": cfg.get("dimension", 768),
        "description": cfg.get("description", "Unknown model"),
        "is_current": name == embeddings.get_current_model(),
        "available_in_ollama": present,
        "memory_count": used_by,
        "pull_command": None if present else f"ollama pull {name}"
    }
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
async def _background_reindex(
    db,
    embeddings,
    model: str,
    project_path: Optional[str] = None,
    batch_size: int = 10
):
    """Background task for reindexing memories.

    Regenerates embeddings for every memory whose stored model differs from
    *model* (optionally restricted to one project), writes them back to the
    `memories` table, and updates the module-level _reindex_state so callers
    can poll progress (get_reindex_progress) or cancel (cancel_reindex).

    Args:
        db: Database service (exposes a sqlite-style `conn`)
        embeddings: Embedding service
        model: Target embedding model name
        project_path: Optional project filter
        batch_size: Memories processed between cooperative sleeps
    """
    global _reindex_state

    import json  # hoisted: was previously re-imported inside the per-row loop

    _reindex_state = {
        "running": True,
        "progress": 0,
        "total": 0,
        "current_model": model,
        "started_at": time.time(),
        "errors": [],
        "completed_at": None
    }

    try:
        cursor = db.conn.cursor()

        # Select memories whose embedding is missing or built with another model.
        query = """
            SELECT id, content FROM memories
            WHERE embedding_model IS NULL OR embedding_model != ?
        """
        params = [model]

        if project_path:
            query += " AND project_path = ?"
            params.append(project_path)

        cursor.execute(query, params)
        memories = cursor.fetchall()
        _reindex_state["total"] = len(memories)

        # Process in batches with a short pause between them so Ollama is not
        # flooded with back-to-back embedding requests.
        for i in range(0, len(memories), batch_size):
            if not _reindex_state["running"]:
                break  # Cancelled via cancel_reindex()

            batch = memories[i:i + batch_size]

            for memory_id, content in batch:
                if not _reindex_state["running"]:
                    break

                try:
                    # Generate the replacement embedding with the target model.
                    embedding = await embeddings.generate_embedding(content, model=model)

                    if embedding:
                        # Persist the new vector and tag the row with its model.
                        cursor.execute("""
                            UPDATE memories
                            SET embedding = ?, embedding_model = ?
                            WHERE id = ?
                        """, [json.dumps(embedding), model, memory_id])
                        db.conn.commit()
                    else:
                        _reindex_state["errors"].append({
                            "memory_id": memory_id,
                            "error": "Failed to generate embedding"
                        })

                except Exception as e:
                    # Record and continue; one bad row must not abort the run.
                    _reindex_state["errors"].append({
                        "memory_id": memory_id,
                        "error": str(e)
                    })

                _reindex_state["progress"] += 1

            # Small delay between batches to avoid overwhelming Ollama.
            await asyncio.sleep(0.1)

        _reindex_state["running"] = False
        _reindex_state["completed_at"] = time.time()

    except Exception as e:
        # Fatal failure (e.g. bad DB handle): mark the run finished with an error.
        _reindex_state["running"] = False
        _reindex_state["completed_at"] = time.time()
        _reindex_state["errors"].append({
            "error": f"Reindex failed: {str(e)}"
        })
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
async def get_system_stats(
    db,
    embeddings
) -> Dict[str, Any]:
    """Get comprehensive system statistics.

    Returns:
        System-wide statistics including memory counts, models, and health
    """
    cur = db.conn.cursor()

    def _scalar(sql: str) -> int:
        # Run a single-value aggregate query and return the scalar.
        cur.execute(sql)
        return cur.fetchone()[0]

    # Totals across core tables.
    total_memories = _scalar("SELECT COUNT(*) FROM memories")
    total_patterns = _scalar("SELECT COUNT(*) FROM patterns")
    total_projects = _scalar("SELECT COUNT(*) FROM projects")

    # Memories grouped by type.
    cur.execute("""
        SELECT type, COUNT(*) FROM memories
        GROUP BY type
    """)
    by_type = {mem_type: n for mem_type, n in cur.fetchall()}

    # Memories grouped by embedding model; NULL -> default model name.
    cur.execute("""
        SELECT embedding_model, COUNT(*) FROM memories
        GROUP BY embedding_model
    """)
    by_model = {model or 'nomic-embed-text': n for model, n in cur.fetchall()}

    # Recent activity windows.
    memories_24h = _scalar("""
        SELECT COUNT(*) FROM memories
        WHERE created_at > datetime('now', '-24 hours')
    """)
    memories_7d = _scalar("""
        SELECT COUNT(*) FROM memories
        WHERE created_at > datetime('now', '-7 days')
    """)

    health = await embeddings.check_health()

    return {
        "success": True,
        "totals": {
            "memories": total_memories,
            "patterns": total_patterns,
            "projects": total_projects
        },
        "memories_by_type": by_type,
        "memories_by_model": by_model,
        "recent_activity": {
            "last_24h": memories_24h,
            "last_7d": memories_7d
        },
        "embedding_service": {
            "model": embeddings.get_current_model(),
            "dimension": embeddings.get_dimension(),
            "healthy": health.get("healthy", False),
            "degraded": embeddings.is_degraded()
        },
        "reindex_status": {
            "running": _reindex_state["running"],
            "progress": _reindex_state["progress"],
            "total": _reindex_state["total"]
        }
    }
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
"""Checkpoint skills for session snapshots and resumption."""
|
|
2
|
+
from typing import Dict, Any, Optional, List
|
|
3
|
+
from services.database import DatabaseService
|
|
4
|
+
from services.embeddings import EmbeddingService
|
|
5
|
+
from services.timeline import TimelineService
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
async def checkpoint_create(
    db: DatabaseService,
    embeddings: EmbeddingService,
    session_id: str,
    summary: Optional[str] = None,
    key_facts: Optional[List[str]] = None,
    include_state: bool = True
) -> Dict[str, Any]:
    """
    Create a checkpoint snapshot of the current session.

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        session_id: The session ID
        summary: Optional custom summary (auto-generated if not provided)
        key_facts: Optional list of key facts to highlight
        include_state: Include current state in checkpoint

    Returns:
        Dict with checkpoint info
    """
    timeline = TimelineService(db, embeddings)

    # Session state tells us how many events arrived since the last checkpoint.
    state = await db.get_or_create_session_state(session_id)
    since_last = state.get("events_since_checkpoint", 0)
    recent_events = await db.get_timeline_events(
        session_id=session_id,
        limit=max(since_last, 25)
    )

    # Auto-generate a summary when the caller did not supply one.
    if not summary:
        pieces = []
        if state.get("current_goal"):
            pieces.append(f"Goal: {state['current_goal']}")
        pieces.append(f"{len(recent_events)} events since last checkpoint")
        summary = ". ".join(pieces) if pieces else "Manual checkpoint"

    # Default key facts: anchor events plus high-confidence decisions.
    if not key_facts:
        key_facts = [
            ev["summary"] for ev in recent_events
            if ev.get("is_anchor") or (ev.get("event_type") == "decision" and ev.get("confidence", 0) >= 0.8)
        ][:10]

    # All decisions from the captured window.
    decisions = [
        ev["summary"] for ev in recent_events
        if ev.get("event_type") == "decision"
    ][:10]

    # Most recent event anchors the checkpoint (events are newest-first).
    event_id = recent_events[0]["id"] if recent_events else None

    # Embed the summary (plus a few key facts) for later semantic retrieval.
    embedding = None
    if embeddings:
        embed_text = summary
        if key_facts:
            embed_text += "\n" + "\n".join(key_facts[:5])
        embedding = await embeddings.generate_embedding(embed_text)

    checkpoint_id = await db.store_checkpoint(
        session_id=session_id,
        summary=summary,
        event_id=event_id,
        key_facts=key_facts,
        decisions=decisions,
        entities=state.get("entity_registry") if include_state else None,
        current_goal=state.get("current_goal") if include_state else None,
        pending_items=state.get("pending_questions") if include_state else None,
        embedding=embedding,
        event_count=len(recent_events)
    )

    return {
        "success": True,
        "checkpoint_id": checkpoint_id,
        "session_id": session_id,
        "summary": summary,
        "key_facts_count": len(key_facts),
        "decisions_count": len(decisions),
        "events_captured": len(recent_events),
        "message": f"Checkpoint created with ID {checkpoint_id}"
    }
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
async def checkpoint_load(
    db: DatabaseService,
    session_id: Optional[str] = None,
    checkpoint_id: Optional[int] = None,
    project_path: Optional[str] = None
) -> Dict[str, Any]:
    """
    Load context from a checkpoint for session resumption.

    Args:
        db: Database service instance
        session_id: Load latest checkpoint for this session
        checkpoint_id: Load specific checkpoint by ID
        project_path: Load latest checkpoint for this project

    Returns:
        Dict with checkpoint context
    """
    checkpoint = None

    if checkpoint_id:
        # Explicit ID wins: fetch the row directly and deserialize JSON columns.
        cursor = db.conn.cursor()
        cursor.execute("SELECT * FROM checkpoints WHERE id = ?", (checkpoint_id,))
        row = cursor.fetchone()
        if row:
            import json

            def _loads(column, fallback):
                # Empty/NULL columns fall back to an empty container.
                return json.loads(row[column]) if row[column] else fallback

            checkpoint = {
                "id": row["id"],
                "session_id": row["session_id"],
                "summary": row["summary"],
                "key_facts": _loads("key_facts", []),
                "decisions": _loads("decisions", []),
                "entities": _loads("entities", {}),
                "current_goal": row["current_goal"],
                "pending_items": _loads("pending_items", []),
                "event_count": row["event_count"],
                "created_at": row["created_at"]
            }
    elif session_id:
        checkpoint = await db.get_latest_checkpoint(session_id)
    elif project_path:
        # Resolve the project's most recent session, then its latest checkpoint.
        latest_state = await db.get_latest_session_for_project(project_path)
        if latest_state:
            checkpoint = await db.get_latest_checkpoint(latest_state["session_id"])

    if not checkpoint:
        return {
            "success": True,
            "checkpoint": None,
            "message": "No checkpoint found"
        }

    # Condense the checkpoint into a one-line grounding summary.
    grounding = []
    if checkpoint.get("current_goal"):
        grounding.append(f"Goal: {checkpoint['current_goal']}")
    if checkpoint.get("key_facts"):
        grounding.append(f"Key facts: {', '.join(checkpoint['key_facts'][:3])}")
    if checkpoint.get("decisions"):
        grounding.append(f"Decisions: {', '.join(checkpoint['decisions'][:3])}")
    if checkpoint.get("pending_items"):
        grounding.append(f"Pending: {', '.join(checkpoint['pending_items'][:3])}")

    return {
        "success": True,
        "checkpoint": checkpoint,
        "grounding_summary": " | ".join(grounding) if grounding else checkpoint.get("summary"),
        "session_id": checkpoint.get("session_id"),
        "message": f"Loaded checkpoint from {checkpoint.get('created_at')}"
    }
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
async def checkpoint_list(
    db: DatabaseService,
    session_id: str,
    limit: int = 10
) -> Dict[str, Any]:
    """
    List checkpoints for a session.

    Args:
        db: Database service instance
        session_id: The session ID
        limit: Max checkpoints to return

    Returns:
        Dict with checkpoint list
    """
    found = await db.get_checkpoints_for_session(session_id, limit)
    count = len(found)

    return {
        "success": True,
        "session_id": session_id,
        "checkpoints": found,
        "count": count,
        "message": f"Found {count} checkpoints"
    }
|