claude-memory-agent 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +206 -200
  2. package/agent_card.py +186 -0
  3. package/bin/cli.js +317 -181
  4. package/bin/postinstall.js +270 -216
  5. package/dashboard.html +4232 -2689
  6. package/hooks/__pycache__/grounding-hook.cpython-312.pyc +0 -0
  7. package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
  8. package/hooks/grounding-hook.py +422 -348
  9. package/hooks/session_end.py +293 -192
  10. package/hooks/session_start.py +227 -227
  11. package/install.py +919 -887
  12. package/main.py +4496 -2859
  13. package/package.json +47 -55
  14. package/services/__init__.py +50 -50
  15. package/services/__pycache__/__init__.cpython-312.pyc +0 -0
  16. package/services/__pycache__/curator.cpython-312.pyc +0 -0
  17. package/services/__pycache__/database.cpython-312.pyc +0 -0
  18. package/services/curator.py +1606 -0
  19. package/services/database.py +3637 -2485
  20. package/skills/__init__.py +21 -1
  21. package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
  22. package/skills/__pycache__/confidence_tracker.cpython-312.pyc +0 -0
  23. package/skills/__pycache__/context.cpython-312.pyc +0 -0
  24. package/skills/__pycache__/curator.cpython-312.pyc +0 -0
  25. package/skills/__pycache__/search.cpython-312.pyc +0 -0
  26. package/skills/__pycache__/session_review.cpython-312.pyc +0 -0
  27. package/skills/__pycache__/store.cpython-312.pyc +0 -0
  28. package/skills/confidence_tracker.py +441 -0
  29. package/skills/context.py +675 -0
  30. package/skills/curator.py +348 -0
  31. package/skills/search.py +369 -213
  32. package/skills/session_review.py +418 -0
  33. package/skills/store.py +377 -179
  34. package/update_system.py +829 -817
package/skills/search.py CHANGED
@@ -1,213 +1,369 @@
1
- """Semantic search skill with context filtering and fallback support."""
2
- from typing import Dict, Any, Optional, List
3
- from services.database import DatabaseService
4
- from services.embeddings import EmbeddingService
5
-
6
-
7
- async def semantic_search(
8
- db: DatabaseService,
9
- embeddings: EmbeddingService,
10
- query: str,
11
- limit: int = 10,
12
- memory_type: Optional[str] = None,
13
- session_id: Optional[str] = None,
14
- project_path: Optional[str] = None,
15
- agent_type: Optional[str] = None,
16
- success_only: bool = False,
17
- threshold: float = 0.5
18
- ) -> Dict[str, Any]:
19
- """
20
- Search memories using semantic similarity with context filters.
21
-
22
- Includes automatic fallback to keyword search when Ollama is unavailable.
23
-
24
- Args:
25
- db: Database service instance
26
- embeddings: Embedding service instance
27
- query: Search query text
28
- limit: Maximum number of results
29
- memory_type: Filter by type (session, decision, code, chunk, error)
30
- session_id: Filter by session ID
31
- project_path: Filter by project
32
- agent_type: Filter by agent that created the memory
33
- success_only: Only return memories marked as successful
34
- threshold: Minimum similarity threshold (0-1)
35
-
36
- Returns:
37
- Dict with search results ranked by similarity * importance
38
- """
39
- # Generate embedding for the query (may return None if Ollama unavailable)
40
- query_embedding = await embeddings.generate_embedding(query)
41
-
42
- # Determine search method based on embedding availability
43
- search_method = "semantic"
44
- results = []
45
-
46
- if query_embedding is not None:
47
- # Use semantic search with embeddings
48
- results = await db.search_similar(
49
- embedding=query_embedding,
50
- limit=limit,
51
- memory_type=memory_type,
52
- session_id=session_id,
53
- project_path=project_path,
54
- agent_type=agent_type,
55
- success_only=success_only,
56
- threshold=threshold
57
- )
58
- else:
59
- # Fallback to keyword search
60
- search_method = "keyword"
61
- results = await db.keyword_search(
62
- query=query,
63
- limit=limit,
64
- memory_type=memory_type,
65
- session_id=session_id,
66
- project_path=project_path,
67
- agent_type=agent_type,
68
- success_only=success_only
69
- )
70
-
71
- return {
72
- "success": True,
73
- "query": query,
74
- "results": results,
75
- "count": len(results),
76
- "search_method": search_method,
77
- "degraded_mode": embeddings.is_degraded(),
78
- "filters": {
79
- "type": memory_type,
80
- "project": project_path,
81
- "agent": agent_type,
82
- "success_only": success_only
83
- },
84
- "threshold": threshold if search_method == "semantic" else None
85
- }
86
-
87
-
88
- async def search_patterns(
89
- db: DatabaseService,
90
- embeddings: EmbeddingService,
91
- query: str,
92
- limit: int = 5,
93
- problem_type: Optional[str] = None,
94
- threshold: float = 0.5
95
- ) -> Dict[str, Any]:
96
- """
97
- Search for reusable solution patterns.
98
-
99
- Includes fallback to keyword search when Ollama is unavailable.
100
-
101
- Args:
102
- db: Database service instance
103
- embeddings: Embedding service instance
104
- query: Problem description or search query
105
- limit: Maximum number of results
106
- problem_type: Filter by problem type
107
- threshold: Minimum similarity threshold
108
-
109
- Returns:
110
- Dict with patterns ranked by similarity * success_rate
111
- """
112
- query_embedding = await embeddings.generate_embedding(query)
113
-
114
- search_method = "semantic"
115
- results = []
116
-
117
- if query_embedding is not None:
118
- results = await db.search_patterns(
119
- embedding=query_embedding,
120
- limit=limit,
121
- problem_type=problem_type,
122
- threshold=threshold
123
- )
124
- else:
125
- # Fallback: keyword search on patterns table
126
- search_method = "keyword"
127
- results = await db.keyword_search_patterns(
128
- query=query,
129
- limit=limit,
130
- problem_type=problem_type
131
- )
132
-
133
- return {
134
- "success": True,
135
- "query": query,
136
- "patterns": results,
137
- "count": len(results),
138
- "search_method": search_method,
139
- "degraded_mode": embeddings.is_degraded(),
140
- "problem_type": problem_type
141
- }
142
-
143
-
144
- async def get_project_context(
145
- db: DatabaseService,
146
- embeddings: EmbeddingService,
147
- project_path: str,
148
- query: Optional[str] = None,
149
- limit: int = 10
150
- ) -> Dict[str, Any]:
151
- """
152
- Get all relevant context for a project.
153
-
154
- Includes fallback to keyword search when Ollama is unavailable.
155
-
156
- Args:
157
- db: Database service instance
158
- embeddings: Embedding service instance
159
- project_path: Path to the project
160
- query: Optional query to filter relevant memories
161
- limit: Max memories to return
162
-
163
- Returns:
164
- Dict with project info and relevant memories
165
- """
166
- # Get project info
167
- project = await db.get_project(project_path)
168
-
169
- # Get recent decisions for this project
170
- decisions = await db.get_memories_by_type(
171
- memory_type="decision",
172
- project_path=project_path,
173
- limit=limit
174
- )
175
-
176
- # Get patterns used in this project
177
- patterns = await db.get_memories_by_type(
178
- memory_type="code",
179
- project_path=project_path,
180
- limit=limit
181
- )
182
-
183
- # If query provided, search for relevant memories
184
- relevant = []
185
- search_method = None
186
- if query:
187
- query_embedding = await embeddings.generate_embedding(query)
188
- if query_embedding is not None:
189
- search_method = "semantic"
190
- relevant = await db.search_similar(
191
- embedding=query_embedding,
192
- project_path=project_path,
193
- limit=limit,
194
- threshold=0.4
195
- )
196
- else:
197
- # Fallback to keyword search
198
- search_method = "keyword"
199
- relevant = await db.keyword_search(
200
- query=query,
201
- project_path=project_path,
202
- limit=limit
203
- )
204
-
205
- return {
206
- "success": True,
207
- "project": project,
208
- "decisions": decisions,
209
- "code_patterns": patterns,
210
- "relevant_to_query": relevant if query else None,
211
- "search_method": search_method,
212
- "degraded_mode": embeddings.is_degraded()
213
- }
1
+ """Semantic search skill with context filtering and fallback support."""
2
+ import logging
3
+ from typing import Dict, Any, Optional, List
4
+ from services.database import DatabaseService
5
+ from services.embeddings import EmbeddingService
6
+
7
+ logger = logging.getLogger(__name__)
8
+
9
+
10
+ async def _enrich_with_graph_context(db: DatabaseService, results: list) -> list:
11
+ """
12
+ Enrich search results with relationship context.
13
+ This helps Claude understand the causal chains and related knowledge.
14
+
15
+ Args:
16
+ db: Database service instance
17
+ results: List of search results to enrich
18
+
19
+ Returns:
20
+ List of enriched results with graph context added
21
+ """
22
+ enriched = []
23
+ for result in results:
24
+ memory_id = result.get('id')
25
+ if not memory_id:
26
+ enriched.append(result)
27
+ continue
28
+
29
+ # Create enriched copy
30
+ enriched_result = dict(result)
31
+
32
+ memory_type = result.get('type', '')
33
+
34
+ # For errors: find what fixes them
35
+ if memory_type == 'error':
36
+ try:
37
+ fixes = await db.get_related_memories(memory_id, 'fixes', direction='incoming', depth=1)
38
+ if fixes:
39
+ enriched_result['known_fixes'] = [
40
+ {'id': f['id'], 'content': f['content'][:200], 'outcome': f.get('outcome')}
41
+ for f in fixes
42
+ ]
43
+ except Exception as e:
44
+ logger.debug(f"Failed to get fixes for memory {memory_id}: {e}")
45
+
46
+ # For decisions: find rationale and consequences
47
+ if memory_type == 'decision':
48
+ try:
49
+ # What supports this decision
50
+ supports = await db.get_related_memories(memory_id, 'supports', direction='incoming', depth=1)
51
+ if supports:
52
+ enriched_result['rationale'] = [
53
+ {'id': s['id'], 'content': s['content'][:200]}
54
+ for s in supports
55
+ ]
56
+ except Exception as e:
57
+ logger.debug(f"Failed to get supports for memory {memory_id}: {e}")
58
+
59
+ try:
60
+ # What this decision caused
61
+ caused = await db.get_related_memories(memory_id, 'caused_by', direction='outgoing', depth=1)
62
+ if caused:
63
+ enriched_result['consequences'] = [
64
+ {'id': c['id'], 'content': c['content'][:200]}
65
+ for c in caused
66
+ ]
67
+ except Exception as e:
68
+ logger.debug(f"Failed to get consequences for memory {memory_id}: {e}")
69
+
70
+ # For all types: find contradictions (critical for anti-hallucination)
71
+ try:
72
+ contradictions = await db.find_contradictions(memory_id)
73
+ if contradictions:
74
+ enriched_result['contradictions'] = [
75
+ {'id': c['id'], 'content': c['content'][:200]}
76
+ for c in contradictions
77
+ ]
78
+ except Exception as e:
79
+ logger.debug(f"Failed to get contradictions for memory {memory_id}: {e}")
80
+
81
+ # For errors and decisions: include causal chain
82
+ if memory_type in ['error', 'decision', 'code']:
83
+ try:
84
+ chain = await db.get_causal_chain(memory_id, max_depth=3)
85
+ if chain and (chain.get('causes') or chain.get('fixes') or chain.get('root_causes')):
86
+ enriched_result['causal_chain'] = chain
87
+ except Exception as e:
88
+ logger.debug(f"Failed to get causal chain for memory {memory_id}: {e}")
89
+
90
+ enriched.append(enriched_result)
91
+
92
+ return enriched
93
+
94
+
95
async def semantic_search(
    db: DatabaseService,
    embeddings: EmbeddingService,
    query: str,
    limit: int = 10,
    memory_type: Optional[str] = None,
    session_id: Optional[str] = None,
    project_path: Optional[str] = None,
    agent_type: Optional[str] = None,
    success_only: bool = False,
    threshold: float = 0.5,
    # Outcome spectrum filters
    include_failed: bool = False,
    include_superseded: bool = False,
    include_unreliable: bool = False,
    outcome_status: Optional[str] = None,
    # Context-aware search
    current_context: Optional[Dict[str, Any]] = None,
    auto_detect_context: bool = True,
    # Graph enrichment
    include_graph: bool = True
) -> Dict[str, Any]:
    """
    Search memories using semantic similarity with context filters.

    Includes automatic fallback to keyword search when Ollama is unavailable.

    Outcome-aware search behavior:
    - 'success' memories rank highest (1.5x boost)
    - 'partial' memories shown with warning (1.0x - no penalty)
    - 'failed' memories excluded by default (use include_failed=True to show)
    - 'superseded' memories excluded and replaced with their superseding memory
    - 'pending' memories shown normally (1.0x)
    - Unreliable memories (failure_count >= 3) excluded by default (use include_unreliable=True)

    Context-aware search:
    - If current_context provided, memories that worked in similar contexts get +0.2 boost
    - Memories that failed in similar contexts get -0.2 penalty
    - If auto_detect_context=True and project_path provided, context is auto-detected

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        query: Search query text
        limit: Maximum number of results
        memory_type: Filter by type (session, decision, code, chunk, error)
        session_id: Filter by session ID
        project_path: Filter by project
        agent_type: Filter by agent that created the memory
        success_only: Only return memories marked as successful (legacy)
        threshold: Minimum similarity threshold (0-1)
        include_failed: Include memories with outcome_status='failed' (default False)
        include_superseded: Include memories with outcome_status='superseded' (default False)
        include_unreliable: Include memories with failure_count >= 3 (default False)
        outcome_status: Filter by specific outcome status
        current_context: Context dict with project_type, tech_stack, file_patterns
        auto_detect_context: If True and project_path provided, auto-detect context
        include_graph: Enrich results with graph context (fixes, rationale, contradictions)

    Returns:
        Dict with search results ranked by: (similarity * 0.7) + (confidence * 0.3) + context_adjustment
        Each result includes outcome_status, outcome_warning, outcome_boost, context_adjustment,
        and context_recommendation fields.
    """
    # Auto-detect context from project_path if enabled. This is strictly
    # best-effort: detection failure must never break search, but it is
    # logged at debug level instead of being silently swallowed.
    detected_context = None
    if auto_detect_context and project_path and not current_context:
        try:
            from skills.context import detect_project_context
            detected_context = detect_project_context(project_path)
        except Exception as e:
            logger.debug(f"Context auto-detection failed for {project_path}: {e}")

    # An explicitly provided context always wins over the detected one.
    search_context = current_context or detected_context

    # Generate embedding for the query (may return None if Ollama unavailable)
    query_embedding = await embeddings.generate_embedding(query)

    # Determine search method based on embedding availability
    if query_embedding is not None:
        # Use semantic search with embeddings
        search_method = "semantic"
        results = await db.search_similar(
            embedding=query_embedding,
            limit=limit,
            memory_type=memory_type,
            session_id=session_id,
            project_path=project_path,
            agent_type=agent_type,
            success_only=success_only,
            threshold=threshold,
            include_failed=include_failed,
            include_superseded=include_superseded,
            include_unreliable=include_unreliable,
            outcome_status=outcome_status,
            current_context=search_context
        )
    else:
        # Fallback to keyword search (no context-aware ranking here: keyword
        # search has no similarity score to adjust)
        search_method = "keyword"
        results = await db.keyword_search(
            query=query,
            limit=limit,
            memory_type=memory_type,
            session_id=session_id,
            project_path=project_path,
            agent_type=agent_type,
            success_only=success_only,
            include_failed=include_failed,
            include_superseded=include_superseded,
            include_unreliable=include_unreliable,
            outcome_status=outcome_status
        )

    # Enrich with graph context if requested
    if include_graph:
        try:
            results = await _enrich_with_graph_context(db, results)
        except Exception as e:
            logger.warning(f"Failed to enrich with graph context: {e}")
            # Continue with unenriched results

    return {
        "success": True,
        "query": query,
        "results": results,
        "count": len(results),
        "search_method": search_method,
        "degraded_mode": embeddings.is_degraded(),
        "filters": {
            "type": memory_type,
            "project": project_path,
            "agent": agent_type,
            "success_only": success_only,
            "include_failed": include_failed,
            "include_superseded": include_superseded,
            "include_unreliable": include_unreliable,
            "outcome_status": outcome_status,
            "include_graph": include_graph
        },
        "context_aware": search_context is not None,
        "detected_context": detected_context,
        # Threshold only applies to semantic ranking.
        "threshold": threshold if search_method == "semantic" else None
    }
242
+
243
+
244
async def search_patterns(
    db: DatabaseService,
    embeddings: EmbeddingService,
    query: str,
    limit: int = 5,
    problem_type: Optional[str] = None,
    threshold: float = 0.5
) -> Dict[str, Any]:
    """
    Search for reusable solution patterns.

    Falls back to keyword matching on the patterns table when Ollama is
    unavailable (i.e. no embedding could be generated for the query).

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        query: Problem description or search query
        limit: Maximum number of results
        problem_type: Filter by problem type
        threshold: Minimum similarity threshold

    Returns:
        Dict with patterns ranked by similarity * success_rate
    """
    vector = await embeddings.generate_embedding(query)

    if vector is None:
        # Degraded mode: no embedding available, match patterns by keywords.
        method = "keyword"
        found = await db.keyword_search_patterns(
            query=query,
            limit=limit,
            problem_type=problem_type
        )
    else:
        method = "semantic"
        found = await db.search_patterns(
            embedding=vector,
            limit=limit,
            problem_type=problem_type,
            threshold=threshold
        )

    return {
        "success": True,
        "query": query,
        "patterns": found,
        "count": len(found),
        "search_method": method,
        "degraded_mode": embeddings.is_degraded(),
        "problem_type": problem_type
    }
298
+
299
+
300
async def get_project_context(
    db: DatabaseService,
    embeddings: EmbeddingService,
    project_path: str,
    query: Optional[str] = None,
    limit: int = 10
) -> Dict[str, Any]:
    """
    Get all relevant context for a project.

    Gathers the project record, its recent decisions, and its code patterns;
    when a query is given, also searches for memories relevant to it, falling
    back to keyword search when Ollama is unavailable.

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        project_path: Path to the project
        query: Optional query to filter relevant memories
        limit: Max memories to return

    Returns:
        Dict with project info and relevant memories
    """
    # Project record itself.
    project_info = await db.get_project(project_path)

    # Recent decisions recorded for this project.
    recent_decisions = await db.get_memories_by_type(
        memory_type="decision",
        project_path=project_path,
        limit=limit
    )

    # Code patterns used in this project.
    code_memories = await db.get_memories_by_type(
        memory_type="code",
        project_path=project_path,
        limit=limit
    )

    # Optional query-driven lookup of relevant memories.
    matches: List[Any] = []
    method = None
    if query:
        vector = await embeddings.generate_embedding(query)
        if vector is None:
            # Degraded mode: fall back to keyword search.
            method = "keyword"
            matches = await db.keyword_search(
                query=query,
                project_path=project_path,
                limit=limit
            )
        else:
            method = "semantic"
            matches = await db.search_similar(
                embedding=vector,
                project_path=project_path,
                limit=limit,
                threshold=0.4
            )

    return {
        "success": True,
        "project": project_info,
        "decisions": recent_decisions,
        "code_patterns": code_memories,
        "relevant_to_query": matches if query else None,
        "search_method": method,
        "degraded_mode": embeddings.is_degraded()
    }