claude-memory-agent 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +107 -0
- package/README.md +200 -0
- package/agent_card.py +512 -0
- package/bin/cli.js +181 -0
- package/bin/postinstall.js +216 -0
- package/config.py +104 -0
- package/dashboard.html +2689 -0
- package/hooks/README.md +196 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/hooks/auto-detect-response.py +348 -0
- package/hooks/auto_capture.py +255 -0
- package/hooks/detect-correction.py +173 -0
- package/hooks/grounding-hook.py +348 -0
- package/hooks/log-tool-use.py +234 -0
- package/hooks/log-user-request.py +208 -0
- package/hooks/pre-tool-decision.py +218 -0
- package/hooks/problem-detector.py +343 -0
- package/hooks/session_end.py +192 -0
- package/hooks/session_start.py +227 -0
- package/install.py +887 -0
- package/main.py +2859 -0
- package/manager.py +997 -0
- package/package.json +55 -0
- package/requirements.txt +8 -0
- package/run_server.py +136 -0
- package/services/__init__.py +50 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/services/agent_registry.py +753 -0
- package/services/auth.py +331 -0
- package/services/auto_inject.py +250 -0
- package/services/claude_md_sync.py +275 -0
- package/services/cleanup.py +667 -0
- package/services/compaction_flush.py +447 -0
- package/services/confidence.py +301 -0
- package/services/daily_log.py +333 -0
- package/services/database.py +2485 -0
- package/services/embeddings.py +358 -0
- package/services/insights.py +632 -0
- package/services/llm_analyzer.py +595 -0
- package/services/memory_md_sync.py +409 -0
- package/services/retry_queue.py +453 -0
- package/services/timeline.py +579 -0
- package/services/vector_index.py +398 -0
- package/services/websocket.py +257 -0
- package/skills/__init__.py +6 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/skills/admin.py +469 -0
- package/skills/checkpoint.py +198 -0
- package/skills/claude_md.py +363 -0
- package/skills/cleanup.py +241 -0
- package/skills/grounding.py +801 -0
- package/skills/insights.py +231 -0
- package/skills/natural_language.py +277 -0
- package/skills/retrieve.py +67 -0
- package/skills/search.py +213 -0
- package/skills/state.py +182 -0
- package/skills/store.py +179 -0
- package/skills/summarize.py +588 -0
- package/skills/timeline.py +387 -0
- package/skills/verification.py +391 -0
- package/start_daemon.py +155 -0
- package/test_automation.py +221 -0
- package/test_complete.py +338 -0
- package/test_full.py +322 -0
- package/update_system.py +817 -0
- package/verify_db.py +134 -0
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
"""Confidence scoring and verification service for memories.
|
|
2
|
+
|
|
3
|
+
Calculates confidence scores based on:
|
|
4
|
+
- Age (newer = higher confidence)
|
|
5
|
+
- Access count (frequently accessed = higher)
|
|
6
|
+
- Verification status
|
|
7
|
+
- Source reliability
|
|
8
|
+
- Contradiction checks
|
|
9
|
+
"""
|
|
10
|
+
import time
|
|
11
|
+
from typing import Dict, Any, List, Optional
|
|
12
|
+
from datetime import datetime, timedelta
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ConfidenceService:
    """Service for memory confidence scoring and verification.

    Confidence is a value 0-1 representing how reliable a memory is.
    It is a weighted blend of age, access frequency, importance,
    verification status and consistency (absence of unresolved anchor
    conflicts), scaled by the memory's decay_factor.
    """

    def __init__(self, db, embeddings):
        """Store dependencies and scoring parameters.

        Args:
            db: Database wrapper exposing a sqlite-style ``conn``.
            embeddings: Embedding service (not used by this class's
                visible code; kept for interface parity — TODO confirm).
        """
        self.db = db
        self.embeddings = embeddings

        # Weights for confidence calculation (sum to 1.0)
        self.weights = {
            "age": 0.20,           # How recent
            "access": 0.15,        # How often accessed
            "importance": 0.15,    # User-assigned importance
            "verification": 0.25,  # Verified status
            "consistency": 0.25,   # No contradictions
        }

        # Age decay parameters
        self.age_half_life_days = 90  # 50% confidence after 90 days

    def _calculate_age_score(self, created_at: str) -> float:
        """Calculate age-based confidence (exponential decay).

        Args:
            created_at: ISO-8601 timestamp string (a trailing 'Z' is
                normalized to '+00:00').

        Returns:
            Score in [0.1, 1.0]; 0.5 when the date cannot be parsed.
        """
        try:
            created = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
            # Match naive/aware-ness of `created` so subtraction is valid.
            age_days = (datetime.now(created.tzinfo or None) - created).days
            if age_days < 0:
                age_days = 0

            # Exponential decay: halves every `age_half_life_days`.
            half_life = self.age_half_life_days
            score = 0.5 ** (age_days / half_life)
            return max(0.1, min(1.0, score))  # Clamp to [0.1, 1.0]
        except (AttributeError, TypeError, ValueError):
            # Narrowed from a bare `except:`; only parse/arithmetic
            # failures on bad date values should fall through here.
            return 0.5  # Default for unparseable dates

    def _calculate_access_score(self, access_count: int) -> float:
        """Calculate access-based confidence (logarithmic growth)."""
        if access_count <= 0:
            return 0.3  # Baseline for never accessed
        # Logarithmic scale - more accesses = higher confidence
        import math
        score = 0.3 + 0.7 * (1 - 1 / (1 + math.log(access_count + 1)))
        return min(1.0, score)

    def _calculate_importance_score(self, importance: int) -> float:
        """Normalize importance (1-10) to a 0-1 confidence contribution."""
        return importance / 10.0

    async def calculate_confidence(
        self,
        memory_id: int
    ) -> Dict[str, Any]:
        """Calculate confidence score for a memory.

        Args:
            memory_id: Row id in the ``memories`` table.

        Returns:
            Dict with ``confidence`` (0-1), a component ``breakdown``,
            the ``verified`` flag and a human-readable
            ``interpretation``; ``success`` is False for unknown ids.
        """
        import json

        cursor = self.db.conn.cursor()
        cursor.execute("""
            SELECT id, content, type, importance, created_at,
                   access_count, decay_factor, metadata
            FROM memories WHERE id = ?
        """, [memory_id])

        row = cursor.fetchone()
        if not row:
            return {"success": False, "error": "Memory not found"}

        memory = {
            "id": row[0],
            "content": row[1],
            "type": row[2],
            "importance": row[3] or 5,       # default to mid importance
            "access_count": row[5] or 0,
            "created_at": row[4],
            "decay_factor": row[6] or 1.0,   # no decay recorded -> full weight
            "metadata": row[7],
        }

        # Calculate component scores
        age_score = self._calculate_age_score(memory["created_at"])
        access_score = self._calculate_access_score(memory["access_count"])
        importance_score = self._calculate_importance_score(memory["importance"])

        # Verification status is stored as JSON metadata on the row.
        verified = False
        if memory.get("metadata"):
            try:
                meta = json.loads(memory["metadata"])
                verified = meta.get("verified", False)
            except (json.JSONDecodeError, TypeError, AttributeError):
                # Narrowed from a bare `except:`; malformed or non-dict
                # metadata simply means "not verified".
                pass
        verification_score = 1.0 if verified else 0.5

        # Check for contradictions (anchors)
        consistency_score = await self._check_consistency(memory_id, memory["content"])

        # Apply decay factor
        decay = memory["decay_factor"]

        # Weighted sum of components, scaled by decay.
        raw_confidence = (
            self.weights["age"] * age_score +
            self.weights["access"] * access_score +
            self.weights["importance"] * importance_score +
            self.weights["verification"] * verification_score +
            self.weights["consistency"] * consistency_score
        ) * decay

        confidence = min(1.0, max(0.0, raw_confidence))

        return {
            "success": True,
            "memory_id": memory_id,
            "confidence": round(confidence, 3),
            "breakdown": {
                "age": round(age_score, 3),
                "access": round(access_score, 3),
                "importance": round(importance_score, 3),
                "verification": round(verification_score, 3),
                "consistency": round(consistency_score, 3),
                "decay_factor": round(decay, 3)
            },
            "verified": verified,
            "interpretation": self._interpret_confidence(confidence)
        }

    async def _check_consistency(self, memory_id: int, content: str) -> float:
        """Check if memory is consistent with anchors.

        Args:
            memory_id: Memory to check.
            content: Memory text (currently unused; kept for interface
                stability — a future semantic check may need it).

        Returns:
            1.0 when no unresolved anchor conflicts reference the
            memory, 0.3 otherwise.
        """
        # Look for any conflicts involving this memory
        cursor = self.db.conn.cursor()
        cursor.execute("""
            SELECT COUNT(*) FROM anchor_conflicts
            WHERE (anchor1_id = ? OR anchor2_id = ?)
            AND status = 'unresolved'
        """, [memory_id, memory_id])

        conflicts = cursor.fetchone()[0]

        if conflicts > 0:
            return 0.3  # Low consistency if conflicts exist
        return 1.0  # Full consistency if no conflicts

    def _interpret_confidence(self, confidence: float) -> str:
        """Human-readable interpretation of a confidence score."""
        if confidence is None:
            return "Unknown - no confidence score"
        if confidence >= 0.9:
            return "Very high - this memory is reliable"
        elif confidence >= 0.7:
            return "High - likely accurate"
        elif confidence >= 0.5:
            return "Moderate - use with caution"
        elif confidence >= 0.3:
            return "Low - may be outdated or unverified"
        else:
            return "Very low - consider verification"

    async def verify_memory(
        self,
        memory_id: int,
        verified: bool = True,
        verified_by: str = "user"
    ) -> Dict[str, Any]:
        """Mark a memory as verified or unverified.

        Stores the flag (plus who/when) in the row's JSON metadata and
        returns the recalculated confidence.
        """
        import json

        cursor = self.db.conn.cursor()

        # Get current metadata
        cursor.execute("SELECT metadata FROM memories WHERE id = ?", [memory_id])
        row = cursor.fetchone()
        if not row:
            return {"success": False, "error": "Memory not found"}

        try:
            metadata = json.loads(row[0]) if row[0] else {}
        except (json.JSONDecodeError, TypeError):
            # Narrowed from a bare `except:`; unreadable metadata is
            # replaced rather than crashing the verification.
            metadata = {}

        metadata["verified"] = verified
        metadata["verified_by"] = verified_by
        metadata["verified_at"] = datetime.now().isoformat()

        cursor.execute(
            "UPDATE memories SET metadata = ? WHERE id = ?",
            [json.dumps(metadata), memory_id]
        )
        self.db.conn.commit()

        # Recalculate confidence
        new_confidence = await self.calculate_confidence(memory_id)

        return {
            "success": True,
            "memory_id": memory_id,
            "verified": verified,
            "new_confidence": new_confidence.get("confidence")
        }

    async def mark_outdated(
        self,
        memory_id: int,
        reason: str = "manually marked"
    ) -> Dict[str, Any]:
        """Mark a memory as outdated (reduces confidence significantly).

        Sets decay_factor to 0.3, which scales the whole weighted score
        in calculate_confidence.
        """
        cursor = self.db.conn.cursor()

        # Set decay factor to low value
        cursor.execute(
            "UPDATE memories SET decay_factor = 0.3 WHERE id = ?",
            [memory_id]
        )
        self.db.conn.commit()

        return {
            "success": True,
            "memory_id": memory_id,
            "marked_outdated": True,
            "reason": reason
        }

    async def get_low_confidence_memories(
        self,
        project_path: Optional[str] = None,
        threshold: float = 0.5,
        limit: int = 20
    ) -> Dict[str, Any]:
        """Get memories with low confidence that may need verification.

        Pre-filters candidates with cheap SQL conditions, then computes
        the full confidence score and keeps those at or below
        ``threshold``.

        Args:
            project_path: Optional project filter.
            threshold: Maximum confidence to include (default 0.5).
            limit: Maximum number of SQL candidates examined.
        """
        cursor = self.db.conn.cursor()

        # FIX: parenthesize the OR — SQL AND binds tighter than OR, so
        # the appended "AND project_path = ?" previously applied only
        # to the access_count branch of the filter.
        query = """
            SELECT id, content, type, importance, created_at, access_count, decay_factor
            FROM memories
            WHERE (decay_factor < 0.8 OR access_count = 0)
        """
        params = []

        if project_path:
            query += " AND project_path = ?"
            params.append(project_path)

        query += " ORDER BY decay_factor ASC, access_count ASC LIMIT ?"
        params.append(limit)

        cursor.execute(query, params)
        rows = cursor.fetchall()

        results = []
        for row in rows:
            memory = {
                "id": row[0],
                "content": row[1][:200],  # preview only, not full text
                "type": row[2],
                "importance": row[3],
                "created_at": row[4],
                "access_count": row[5],
                "decay_factor": row[6]
            }

            # Calculate full confidence
            conf = await self.calculate_confidence(row[0])
            if conf.get("confidence", 1.0) <= threshold:
                memory["confidence"] = conf.get("confidence")
                memory["interpretation"] = conf.get("interpretation")
                results.append(memory)

        return {
            "success": True,
            "low_confidence_memories": results,
            "count": len(results),
            "threshold": threshold
        }
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
# Module-level singleton holder.
_confidence_service: Optional[ConfidenceService] = None


def get_confidence_service(db, embeddings) -> ConfidenceService:
    """Return the process-wide confidence service, creating it lazily.

    NOTE(review): ``db`` and ``embeddings`` are only consulted on the
    very first call; subsequent calls return the cached instance
    regardless of the arguments passed.
    """
    global _confidence_service
    if _confidence_service is not None:
        return _confidence_service
    _confidence_service = ConfidenceService(db, embeddings)
    return _confidence_service
|
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
"""Daily Log Service - Moltbot-inspired human-readable session logs.
|
|
2
|
+
|
|
3
|
+
Creates and manages YYYY-MM-DD.md append-only files for session activity.
|
|
4
|
+
Provides transparent, human-readable logs that persist beyond context window.
|
|
5
|
+
|
|
6
|
+
Storage: <project>/.claude/memory/YYYY-MM-DD.md
|
|
7
|
+
"""
|
|
8
|
+
import os
|
|
9
|
+
import logging
|
|
10
|
+
from datetime import date, datetime, timedelta
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Optional, List, Dict, Any
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def get_log_path(project_path: str, log_date: Optional[date] = None) -> Path:
    """Return the path of the daily log file for *log_date*.

    Side effect: ensures ``<project>/.claude/memory/`` exists.

    Args:
        project_path: Root path of the project
        log_date: Date for the log file (defaults to today)

    Returns:
        Path to the daily log markdown file (``YYYY-MM-DD.md``)
    """
    effective_date = date.today() if log_date is None else log_date

    # Normalize separators and drop any trailing slash so the Path
    # join below behaves the same on every platform.
    normalized = project_path.replace("\\", "/").rstrip("/")

    # Make sure the memory directory structure is in place.
    memory_dir = Path(normalized) / ".claude" / "memory"
    memory_dir.mkdir(parents=True, exist_ok=True)

    return memory_dir / effective_date.strftime("%Y-%m-%d.md")
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _get_log_header(log_date: date) -> str:
|
|
43
|
+
"""Generate header for a new daily log file."""
|
|
44
|
+
return f"# Daily Log - {log_date.strftime('%Y-%m-%d')}\n\n"
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
async def append_entry(
    project_path: str,
    content: str,
    entry_type: str = "note",
    session_id: Optional[str] = None,
    timestamp: Optional[datetime] = None
) -> Dict[str, Any]:
    """Append one entry to today's daily log file.

    Creates the file (with its date header) on first write of the day.

    Args:
        project_path: Root path of the project
        content: Content to append
        entry_type: One of decision / accomplishment / note / error /
            session_summary
        session_id: Optional session ID for context
        timestamp: Optional timestamp (defaults to now)

    Returns:
        Dict with success status and file path, or success=False with
        the error text.
    """
    timestamp = timestamp or datetime.now()
    log_path = get_log_path(project_path)
    needs_header = not log_path.exists()

    try:
        with open(log_path, "a", encoding="utf-8") as f:
            if needs_header:
                f.write(_get_log_header(timestamp.date()))

            time_str = timestamp.strftime("%H:%M:%S")

            if entry_type == "session_summary":
                # Session summaries get their own "## Session" section.
                session_label = f" ({session_id[:8]})" if session_id else ""
                f.write(f"\n## Session{session_label} - {time_str}\n")
                f.write(content)
                f.write("\n\n---\n")
            else:
                # Bullet entries: known types get a bold label, anything
                # else is written as a plain note.
                labels = {
                    "decision": "Decision",
                    "accomplishment": "Done",
                    "error": "Error",
                }
                label = labels.get(entry_type)
                if label is not None:
                    f.write(f"- **[{time_str}] {label}**: {content}\n")
                else:
                    f.write(f"- [{time_str}] {content}\n")

        return {
            "success": True,
            "file_path": str(log_path),
            "entry_type": entry_type,
            "timestamp": timestamp.isoformat()
        }

    except Exception as e:
        logger.error(f"Failed to append to daily log: {e}")
        return {
            "success": False,
            "error": str(e)
        }
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
async def append_session_summary(
    project_path: str,
    session_id: str,
    decisions: Optional[List[str]] = None,
    accomplishments: Optional[List[str]] = None,
    notes: Optional[List[str]] = None,
    errors_solved: Optional[List[str]] = None,
    start_time: Optional[datetime] = None,
    end_time: Optional[datetime] = None
) -> Dict[str, Any]:
    """Append a full session summary to the daily log.

    Args:
        project_path: Root path of the project
        session_id: Session identifier
        decisions: List of decisions made
        accomplishments: List of things accomplished
        notes: List of notes/observations
        errors_solved: List of errors that were solved
        start_time: Session start time
        end_time: Session end time (defaults to now)

    Returns:
        Dict with success status (result of append_entry)
    """
    if end_time is None:
        end_time = datetime.now()

    # Build summary content
    lines = []

    if start_time:
        duration = end_time - start_time
        # FIX: timedelta.seconds only holds the within-day remainder,
        # so sessions spanning midnight-plus-a-day under-reported their
        # duration; total_seconds() includes whole days.
        total_minutes = int(duration.total_seconds()) // 60
        hours = total_minutes // 60
        minutes = total_minutes % 60
        lines.append(f"**Duration**: {hours}h {minutes}m\n")

    # Each populated list becomes its own markdown subsection, in a
    # fixed order matching the original layout.
    for heading, items in (
        ("Decisions", decisions),
        ("Accomplishments", accomplishments),
        ("Errors Solved", errors_solved),
        ("Notes", notes),
    ):
        if items:
            lines.append(f"\n### {heading}")
            lines.extend(f"- {item}" for item in items)

    return await append_entry(
        project_path=project_path,
        content="\n".join(lines),
        entry_type="session_summary",
        session_id=session_id,
        timestamp=end_time
    )
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
async def load_recent_logs(
    project_path: str,
    days: int = 2,
    max_chars: int = 8000
) -> Dict[str, Any]:
    """Load recent daily logs, newest first, within a character budget.

    Args:
        project_path: Root path of the project
        days: Number of days to look back (default 2)
        max_chars: Maximum characters to return (default 8000)

    Returns:
        Dict with success status, list of per-day logs and total size.
    """
    collected: List[Dict[str, Any]] = []
    used_chars = 0
    today = date.today()

    for offset in range(days):
        log_date = today - timedelta(days=offset)
        log_path = get_log_path(project_path, log_date)

        if not log_path.exists():
            continue

        try:
            text = log_path.read_text(encoding="utf-8")
        except Exception as e:
            logger.warning(f"Failed to read log {log_path}: {e}")
            continue

        # Would this day's log blow the budget? Truncate (if a
        # meaningful amount fits) and stop loading older days.
        if used_chars + len(text) > max_chars:
            budget = max_chars - used_chars
            if budget > 200:  # only include if meaningful content fits
                collected.append({
                    "date": log_date.isoformat(),
                    "content": text[:budget] + "\n\n... (truncated)",
                    "truncated": True
                })
            break

        collected.append({
            "date": log_date.isoformat(),
            "content": text,
            "truncated": False
        })
        used_chars += len(text)

    return {
        "success": True,
        "logs": collected,
        "days_loaded": len(collected),
        "total_chars": used_chars
    }
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
async def get_today_highlights(
    project_path: str,
    max_entries: int = 10
) -> Dict[str, Any]:
    """Get today's log highlights for context injection.

    Scans today's log and extracts decision/accomplishment bullet
    lines, with the leading "- " bullet marker removed.

    Args:
        project_path: Root path of the project
        max_entries: Maximum number of entries to return

    Returns:
        Dict with success status and highlights
    """
    log_path = get_log_path(project_path)

    if not log_path.exists():
        return {
            "success": True,
            "highlights": [],
            "has_log": False
        }

    try:
        content = log_path.read_text(encoding="utf-8")
        lines = content.split("\n")

        highlights = []
        for line in lines:
            # Only decisions and accomplishments count as highlights.
            if "**Decision**" in line or "**Done**" in line:
                # FIX: was line.strip("- ").strip(), which strips '-'
                # and ' ' characters from BOTH ends and so ate trailing
                # dashes from the entry text; removeprefix drops only
                # the leading bullet marker.
                highlights.append(line.strip().removeprefix("- "))
                if len(highlights) >= max_entries:
                    break

        return {
            "success": True,
            "highlights": highlights,
            "has_log": True,
            "entry_count": len(highlights)
        }

    except Exception as e:
        logger.error(f"Failed to get highlights: {e}")
        return {
            "success": False,
            "error": str(e)
        }
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
async def list_logs(
    project_path: str,
    limit: int = 30
) -> Dict[str, Any]:
    """List available daily log files, newest first.

    Args:
        project_path: Root path of the project
        limit: Maximum number of logs to list

    Returns:
        Dict with list of log files (date, path, size, mtime).
    """
    memory_dir = Path(project_path) / ".claude" / "memory"

    if not memory_dir.exists():
        return {
            "success": True,
            "logs": [],
            "total_count": 0
        }

    found: List[Dict[str, Any]] = []
    # Filenames are YYYY-MM-DD.md, so lexical descending == newest first.
    for log_file in sorted(memory_dir.glob("????-??-??.md"), reverse=True):
        if len(found) >= limit:
            break

        try:
            info = log_file.stat()
        except Exception as e:
            # Unreadable entries are skipped and do not count toward
            # the limit (matches the original behavior).
            logger.warning(f"Failed to stat {log_file}: {e}")
            continue

        found.append({
            "date": log_file.stem,
            "path": str(log_file),
            "size_bytes": info.st_size,
            "modified": datetime.fromtimestamp(info.st_mtime).isoformat()
        })

    return {
        "success": True,
        "logs": found,
        "total_count": len(found)
    }
|