claude-memory-agent 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +107 -0
- package/README.md +200 -0
- package/agent_card.py +512 -0
- package/bin/cli.js +181 -0
- package/bin/postinstall.js +216 -0
- package/config.py +104 -0
- package/dashboard.html +2689 -0
- package/hooks/README.md +196 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/hooks/auto-detect-response.py +348 -0
- package/hooks/auto_capture.py +255 -0
- package/hooks/detect-correction.py +173 -0
- package/hooks/grounding-hook.py +348 -0
- package/hooks/log-tool-use.py +234 -0
- package/hooks/log-user-request.py +208 -0
- package/hooks/pre-tool-decision.py +218 -0
- package/hooks/problem-detector.py +343 -0
- package/hooks/session_end.py +192 -0
- package/hooks/session_start.py +227 -0
- package/install.py +887 -0
- package/main.py +2859 -0
- package/manager.py +997 -0
- package/package.json +55 -0
- package/requirements.txt +8 -0
- package/run_server.py +136 -0
- package/services/__init__.py +50 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/services/agent_registry.py +753 -0
- package/services/auth.py +331 -0
- package/services/auto_inject.py +250 -0
- package/services/claude_md_sync.py +275 -0
- package/services/cleanup.py +667 -0
- package/services/compaction_flush.py +447 -0
- package/services/confidence.py +301 -0
- package/services/daily_log.py +333 -0
- package/services/database.py +2485 -0
- package/services/embeddings.py +358 -0
- package/services/insights.py +632 -0
- package/services/llm_analyzer.py +595 -0
- package/services/memory_md_sync.py +409 -0
- package/services/retry_queue.py +453 -0
- package/services/timeline.py +579 -0
- package/services/vector_index.py +398 -0
- package/services/websocket.py +257 -0
- package/skills/__init__.py +6 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/skills/admin.py +469 -0
- package/skills/checkpoint.py +198 -0
- package/skills/claude_md.py +363 -0
- package/skills/cleanup.py +241 -0
- package/skills/grounding.py +801 -0
- package/skills/insights.py +231 -0
- package/skills/natural_language.py +277 -0
- package/skills/retrieve.py +67 -0
- package/skills/search.py +213 -0
- package/skills/state.py +182 -0
- package/skills/store.py +179 -0
- package/skills/summarize.py +588 -0
- package/skills/timeline.py +387 -0
- package/skills/verification.py +391 -0
- package/start_daemon.py +155 -0
- package/test_automation.py +221 -0
- package/test_complete.py +338 -0
- package/test_full.py +322 -0
- package/update_system.py +817 -0
- package/verify_db.py +134 -0
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
"""Skills for cross-session learning and insight management.
|
|
2
|
+
|
|
3
|
+
These skills allow Claude to:
|
|
4
|
+
- Run aggregation to detect patterns
|
|
5
|
+
- Retrieve insights for current context
|
|
6
|
+
- Get CLAUDE.md improvement suggestions
|
|
7
|
+
- Record feedback on insight usefulness
|
|
8
|
+
"""
|
|
9
|
+
from typing import Dict, Any, Optional, List
|
|
10
|
+
from services.insights import get_insights_service
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
async def run_aggregation(
    db,
    embeddings,
    days_back: int = 30
) -> Dict[str, Any]:
    """Kick off cross-session learning aggregation over recent memories.

    The insights service scans memories from the last ``days_back`` days
    and derives: recurring error patterns, repeated decision patterns,
    user-correction patterns (Claude blind spots), and high-value
    frequently accessed memories.

    Args:
        db: Database service.
        embeddings: Embeddings service.
        days_back: How many days of history to analyze.

    Returns:
        Dict with ``success``, a human-readable ``message``, and the raw
        aggregation ``results``.
    """
    service = get_insights_service(db, embeddings)
    aggregation = await service.run_aggregation(days_back)

    summary = {
        "success": True,
        "message": f"Generated {aggregation['total_insights']} insights",
        "results": aggregation,
    }
    return summary
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def get_insights(
    db,
    embeddings,
    insight_type: Optional[str] = None,
    project_path: Optional[str] = None,
    min_confidence: float = 0.5,
    limit: int = 10
) -> Dict[str, Any]:
    """Fetch cross-session learning insights matching the given filters.

    Args:
        db: Database service.
        embeddings: Embeddings service.
        insight_type: Restrict to one type (recurring_error,
            decision_pattern, correction_pattern, high_value_memory).
        project_path: Restrict to one project.
        min_confidence: Drop insights below this confidence (0-1).
        limit: Maximum number of results.

    Returns:
        Dict with the matched ``insights``, their ``count``, and the
        ``filters`` that were applied.
    """
    # Record the applied filters up front so the response always echoes them.
    applied_filters = {
        "insight_type": insight_type,
        "project_path": project_path,
        "min_confidence": min_confidence,
    }

    service = get_insights_service(db, embeddings)
    matched = await service.get_insights(
        insight_type=insight_type,
        project_path=project_path,
        min_confidence=min_confidence,
        limit=limit,
    )

    return {
        "success": True,
        "insights": matched,
        "count": len(matched),
        "filters": applied_filters,
    }
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
async def suggest_improvements(
    db,
    embeddings,
    min_confidence: float = 0.7
) -> Dict[str, Any]:
    """Suggest CLAUDE.md additions derived from high-confidence insights.

    Only insights that have not yet been applied are considered; each
    suggestion is an actionable instruction suitable for CLAUDE.md.

    Args:
        db: Database service.
        embeddings: Embeddings service.
        min_confidence: Minimum insight confidence to include.

    Returns:
        Dict with the ``suggestions`` list, its ``count``, and a summary
        ``message``.
    """
    service = get_insights_service(db, embeddings)
    proposed = await service.suggest_claude_md_updates(min_confidence)
    total = len(proposed)

    return {
        "success": True,
        "suggestions": proposed,
        "count": total,
        "message": f"Found {total} potential CLAUDE.md improvements",
    }
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
async def record_insight_feedback(
    db,
    embeddings,
    insight_id: int,
    helpful: bool,
    session_id: Optional[str] = None,
    comment: Optional[str] = None
) -> Dict[str, Any]:
    """Store user feedback about whether an insight was useful.

    Feedback feeds back into the insight pipeline so future pattern
    detection can weight genuinely helpful insights higher.

    Args:
        db: Database service.
        embeddings: Embeddings service.
        insight_id: The insight being rated.
        helpful: True if the insight helped.
        session_id: Session the feedback came from, if known.
        comment: Optional free-text note.

    Returns:
        Dict confirming whether the feedback was persisted.
    """
    service = get_insights_service(db, embeddings)
    recorded = await service.record_feedback(
        insight_id=insight_id,
        helpful=helpful,
        session_id=session_id,
        comment=comment,
    )

    if recorded:
        outcome = "Feedback recorded"
    else:
        outcome = "Failed to record feedback"

    return {
        "success": recorded,
        "message": outcome,
        "insight_id": insight_id,
        "helpful": helpful,
    }
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
async def mark_insight_applied(
    db,
    embeddings,
    insight_id: int
) -> Dict[str, Any]:
    """Flag an insight as already merged into CLAUDE.md.

    Call after an insight's suggestion has been added to CLAUDE.md so the
    same suggestion is not surfaced again.

    Args:
        db: Database service.
        embeddings: Embeddings service.
        insight_id: The insight to mark.

    Returns:
        Dict confirming the update (``success`` is False when the ID does
        not exist).
    """
    service = get_insights_service(db, embeddings)
    applied = await service.mark_applied_to_claude_md(insight_id)

    if applied:
        outcome = "Insight marked as applied"
    else:
        outcome = "Insight not found"

    return {
        "success": applied,
        "message": outcome,
        "insight_id": insight_id,
    }
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
async def get_project_insights(
    db,
    embeddings,
    project_path: str,
    include_global: bool = True,
    limit: int = 10
) -> Dict[str, Any]:
    """Get insights relevant to a specific project.

    Retrieves project-specific insights and, optionally, global insights
    (those stored without a project_path) that may still apply to this
    project's tech stack.

    Args:
        db: Database service
        embeddings: Embeddings service
        project_path: Project path to get insights for
        include_global: Include insights without a specific project
        limit: Maximum results for the project query; the global query
            requests roughly half as many (at least one)

    Returns:
        Dict with project insights and counts, plus ``global_insights`` /
        ``global_count`` keys when include_global is True.
    """
    insights_service = get_insights_service(db, embeddings)

    # Project-specific insights first.
    project_insights = await insights_service.get_insights(
        project_path=project_path,
        limit=limit
    )

    result = {
        "success": True,
        "project_path": project_path,
        "project_insights": project_insights,
        "project_count": len(project_insights)
    }

    if include_global:
        # Bug fix: `limit // 2` evaluated to 0 when limit == 1, which
        # silently disabled the global lookup. Always request at least one.
        global_insights = await insights_service.get_insights(
            project_path=None,
            limit=max(1, limit // 2)
        )
        # get_insights(project_path=None) may return insights from any
        # project, so keep only entries with no project_path at all
        # (the truly global ones).
        global_only = [i for i in global_insights if not i.get("project_path")]
        result["global_insights"] = global_only
        result["global_count"] = len(global_only)

    return result
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
"""Natural language interface for memory system.
|
|
2
|
+
|
|
3
|
+
Allows users to interact with memory using natural commands like:
|
|
4
|
+
- "remember this: ..."
|
|
5
|
+
- "what did I learn about X?"
|
|
6
|
+
- "forget about Y"
|
|
7
|
+
- "show me past errors"
|
|
8
|
+
"""
|
|
9
|
+
import re
|
|
10
|
+
from typing import Dict, Any, Optional, Tuple
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Intent patterns - ordered from most specific to least specific.
# More specific patterns (list_errors, list_decisions, etc.) must come
# before the generic "search" intent, since dicts iterate in insertion
# order and the first matching pattern wins.
INTENT_PATTERNS = {
    "list_errors": [
        r"(?:show|list|get)\s+(?:me\s+)?(?:past\s+)?errors?",
        r"what\s+errors?\s+(?:have\s+I\s+)?(?:had|seen|encountered)",
        r"(?:past|recent)\s+errors?",
    ],
    "list_decisions": [
        r"(?:show|list|get)\s+(?:me\s+)?(?:past\s+)?decisions?",
        r"what\s+(?:have\s+I\s+)?decided",
        r"(?:past|recent)\s+decisions?",
    ],
    "list_patterns": [
        r"(?:show|list|get)\s+(?:me\s+)?patterns?",
        r"what\s+patterns?\s+(?:do\s+I\s+)?(?:have|know)",
        r"useful\s+patterns?",
    ],
    "stats": [
        r"(?:memory\s+)?stats?",
        r"how\s+(?:many|much)\s+(?:do\s+I\s+)?remember",
        r"memory\s+(?:status|info|summary)",
    ],
    "project_context": [
        r"(?:what|tell\s+me)\s+about\s+(?:this\s+)?project",
        r"project\s+(?:info|context|summary)",
        r"current\s+project",
    ],
    "store": [
        r"remember\s+(?:this|that)?[:\s]*(.+)",
        r"save\s+(?:this|that)?[:\s]*(.+)",
        r"store\s+(?:this|that)?[:\s]*(.+)",
        r"note\s+(?:this|that)?[:\s]*(.+)",
        r"keep\s+(?:in\s+mind)?[:\s]*(.+)",
    ],
    "forget": [
        r"forget\s+(?:about\s+)?(.+)",
        r"delete\s+(?:memory\s+about\s+)?(.+)",
        r"remove\s+(?:memory\s+about\s+)?(.+)",
        r"clear\s+(?:memory\s+about\s+)?(.+)",
    ],
    "search": [
        r"what\s+(?:did\s+I|do\s+I|have\s+I)\s+(?:learn|know|remember)\s+about\s+(.+)",
        r"show\s+me\s+(?:memories?\s+about\s+)?(.+)",
        r"find\s+(?:memories?\s+about\s+)?(.+)",
        r"search\s+(?:for\s+)?(.+)",
        r"recall\s+(.+)",
        r"what\s+about\s+(.+)",
    ],
}


def parse_intent(text: str) -> Tuple[Optional[str], Optional[str]]:
    """Parse natural language to determine intent and extract content.

    Matching is case-insensitive (re.IGNORECASE), but the extracted
    content keeps the user's original casing.

    Bug fix: the command was previously lowercased before matching, which
    destroyed the case of content captured for "store"/"search"/"forget"
    (e.g. "remember this: Use Redis" was stored as "use redis"). The
    lowering was redundant anyway since every search already passed
    re.IGNORECASE.

    Args:
        text: The raw natural language command.

    Returns:
        Tuple of (intent, extracted_content). Both are None when no
        pattern matches; content is None for intents with no capture group.
    """
    text = text.strip()

    for intent, patterns in INTENT_PATTERNS.items():
        for pattern in patterns:
            match = re.search(pattern, text, re.IGNORECASE)
            if match:
                # Extract captured group if the pattern has one.
                content = match.group(1).strip() if match.groups() else None
                return intent, content

    return None, None
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
async def process_natural_command(
    db,
    embeddings,
    command: str,
    project_path: Optional[str] = None,
    session_id: Optional[str] = None
) -> Dict[str, Any]:
    """Process a natural language memory command.

    Dispatches on the intent detected by parse_intent() and delegates to
    the matching skill (store / search / forget / list_* / stats /
    project_context). Skill modules are imported lazily inside each branch
    to avoid import cycles between skills.

    Args:
        db: Database service
        embeddings: Embeddings service
        command: Natural language command
        project_path: Current project path
        session_id: Current session ID

    Returns:
        Result dict. On success it contains "intent" plus intent-specific
        keys; an unrecognized command returns success=False with a help
        message. NOTE(review): the per-intent "missing content" early
        returns below omit the "understood" key that this top-level error
        includes — callers should not rely on "understood" being present.
    """
    intent, content = parse_intent(command)

    if not intent:
        # No pattern matched — explain the supported command shapes.
        return {
            "success": False,
            "understood": False,
            "message": "I didn't understand that memory command. Try:\n"
                       "- 'remember this: [content]'\n"
                       "- 'what did I learn about [topic]?'\n"
                       "- 'show me past errors'\n"
                       "- 'memory stats'"
        }

    # Base response; each branch adds its own keys and "message".
    result = {"success": True, "understood": True, "intent": intent}

    if intent == "store":
        if not content:
            return {"success": False, "message": "What should I remember?"}

        from skills.store import store_memory
        # importance=6 marks explicit user-stored notes above the default.
        store_result = await store_memory(
            db=db,
            embeddings=embeddings,
            content=content,
            memory_type="chunk",
            project_path=project_path,
            session_id=session_id,
            importance=6,
            tags=["natural-language", "user-stored"]
        )
        result["memory_id"] = store_result.get("memory_id")
        result["message"] = f"Got it! I'll remember that. (Memory #{result['memory_id']})"

    elif intent == "search":
        if not content:
            return {"success": False, "message": "What should I search for?"}

        from skills.search import semantic_search
        search_result = await semantic_search(
            db=db,
            embeddings=embeddings,
            query=content,
            project_path=project_path,
            limit=5
        )

        results = search_result.get("results", [])
        if results:
            result["results"] = results
            # Show at most 3 previews, truncated to 150 chars each.
            result["message"] = f"Found {len(results)} related memories:\n"
            for i, r in enumerate(results[:3], 1):
                result["message"] += f"\n{i}. {r['content'][:150]}..."
        else:
            result["message"] = f"No memories found about '{content}'"

    elif intent == "forget":
        if not content:
            return {"success": False, "message": "What should I forget?"}

        # Search and mark for deletion (soft delete via archive)
        from skills.search import semantic_search
        search_result = await semantic_search(
            db=db,
            embeddings=embeddings,
            query=content,
            project_path=project_path,
            limit=3,
            threshold=0.7  # High threshold to be sure
        )

        results = search_result.get("results", [])
        if results:
            # Archive the most relevant match
            from services.cleanup import get_cleanup_service
            cleanup = get_cleanup_service(db, embeddings)
            top_result = results[0]
            # NOTE(review): this reaches into a private method of the
            # cleanup service (_archive_memory); consider exposing a
            # public archive API instead.
            await cleanup._archive_memory(
                top_result["id"],
                "user_requested",
                f"User asked to forget: {content}"
            )
            result["archived_id"] = top_result["id"]
            result["message"] = f"Archived memory about: {top_result['content'][:100]}..."
        else:
            result["message"] = f"No memories found matching '{content}'"

    elif intent == "list_errors":
        from skills.search import semantic_search
        # Fixed query string is used only as a semantic anchor; the real
        # filter is memory_type="error".
        errors = await semantic_search(
            db=db,
            embeddings=embeddings,
            query="error bug problem exception",
            project_path=project_path,
            memory_type="error",
            limit=5
        )

        results = errors.get("results", [])
        if results:
            result["results"] = results
            result["message"] = f"Found {len(results)} past errors:\n"
            for i, r in enumerate(results[:5], 1):
                # "success" on an error memory means it was resolved.
                status = "Fixed" if r.get("success") else "Unresolved"
                result["message"] += f"\n{i}. [{status}] {r['content'][:100]}..."
        else:
            result["message"] = "No past errors recorded"

    elif intent == "list_decisions":
        from skills.search import semantic_search
        decisions = await semantic_search(
            db=db,
            embeddings=embeddings,
            query="decided chose selected approach",
            project_path=project_path,
            memory_type="decision",
            limit=5
        )

        results = decisions.get("results", [])
        if results:
            result["results"] = results
            result["message"] = f"Found {len(results)} past decisions:\n"
            for i, r in enumerate(results[:5], 1):
                result["message"] += f"\n{i}. {r['content'][:100]}..."
        else:
            result["message"] = "No past decisions recorded"

    elif intent == "list_patterns":
        from skills.search import search_patterns
        patterns = await search_patterns(
            db=db,
            embeddings=embeddings,
            query="pattern solution approach",
            limit=5
        )

        # Patterns come back under "patterns" (not "results") and carry
        # "name"/"solution" fields rather than raw "content".
        results = patterns.get("patterns", [])
        if results:
            result["results"] = results
            result["message"] = f"Found {len(results)} patterns:\n"
            for i, p in enumerate(results[:5], 1):
                result["message"] += f"\n{i}. **{p['name']}**: {p['solution'][:80]}..."
        else:
            result["message"] = "No patterns recorded"

    elif intent == "stats":
        stats = await db.get_stats()
        result["stats"] = stats
        result["message"] = (
            f"Memory Stats:\n"
            f"- Total memories: {stats.get('total_memories', 0)}\n"
            f"- Patterns: {stats.get('total_patterns', 0)}\n"
            f"- Projects: {stats.get('total_projects', 0)}"
        )

    elif intent == "project_context":
        from skills.search import get_project_context
        context = await get_project_context(
            db=db,
            embeddings=embeddings,
            project_path=project_path
        )

        if context.get("project"):
            proj = context["project"]
            result["project"] = proj
            result["message"] = (
                f"Project: {proj.get('name', 'Unknown')}\n"
                f"Type: {proj.get('project_type', 'Unknown')}\n"
                f"Tech Stack: {', '.join(proj.get('tech_stack', []))}"
            )
        else:
            result["message"] = "No project info stored for this path"

    return result
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
"""Retrieve memory skill with project filtering."""
|
|
2
|
+
from typing import Dict, Any, Optional, List
|
|
3
|
+
from services.database import DatabaseService
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
async def retrieve_memory(
    db: DatabaseService,
    memory_id: Optional[int] = None,
    memory_type: Optional[str] = None,
    session_id: Optional[str] = None,
    project_path: Optional[str] = None,
    limit: int = 10
) -> Dict[str, Any]:
    """
    Retrieve memories by ID or filter criteria.

    Lookup priority: an explicit memory_id wins; otherwise memory_type
    (optionally narrowed by session_id / project_path); with neither,
    overall stats are returned instead of memories.

    Args:
        db: Database service instance
        memory_id: Specific memory ID to retrieve
        memory_type: Filter by memory type
        session_id: Filter by session ID
        project_path: Filter by project path
        limit: Maximum number of memories to return

    Returns:
        Dict with retrieved memories
    """
    # Bug fix: `if memory_id:` treated a valid ID of 0 as "not provided"
    # and fell through to the stats path; compare against None so every
    # integer ID takes the lookup path.
    if memory_id is not None:
        memory = await db.get_memory(memory_id)
        if memory:
            return {
                "success": True,
                "memories": [memory],
                "count": 1
            }
        return {
            "success": False,
            "message": f"Memory with ID {memory_id} not found",
            "memories": [],
            "count": 0
        }

    if memory_type:
        memories = await db.get_memories_by_type(
            memory_type=memory_type,
            limit=limit,
            session_id=session_id,
            project_path=project_path
        )
        return {
            "success": True,
            "memories": memories,
            "count": len(memories),
            "filters": {
                "type": memory_type,
                "project": project_path,
                "session": session_id
            }
        }

    # Return stats if no specific criteria
    stats = await db.get_stats()
    return {
        "success": True,
        "stats": stats,
        "message": "Provide memory_id or memory_type to retrieve specific memories"
    }
|