claude-memory-agent 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/README.md +206 -200
  2. package/agent_card.py +186 -0
  3. package/bin/cli.js +317 -181
  4. package/bin/postinstall.js +270 -216
  5. package/dashboard.html +4232 -2689
  6. package/hooks/__pycache__/grounding-hook.cpython-312.pyc +0 -0
  7. package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
  8. package/hooks/grounding-hook.py +422 -348
  9. package/hooks/session_end.py +293 -192
  10. package/hooks/session_start.py +227 -227
  11. package/install.py +919 -887
  12. package/main.py +4496 -2859
  13. package/package.json +47 -55
  14. package/services/__init__.py +50 -50
  15. package/services/__pycache__/__init__.cpython-312.pyc +0 -0
  16. package/services/__pycache__/curator.cpython-312.pyc +0 -0
  17. package/services/__pycache__/database.cpython-312.pyc +0 -0
  18. package/services/curator.py +1606 -0
  19. package/services/database.py +3637 -2485
  20. package/skills/__init__.py +21 -1
  21. package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
  22. package/skills/__pycache__/confidence_tracker.cpython-312.pyc +0 -0
  23. package/skills/__pycache__/context.cpython-312.pyc +0 -0
  24. package/skills/__pycache__/curator.cpython-312.pyc +0 -0
  25. package/skills/__pycache__/search.cpython-312.pyc +0 -0
  26. package/skills/__pycache__/session_review.cpython-312.pyc +0 -0
  27. package/skills/__pycache__/store.cpython-312.pyc +0 -0
  28. package/skills/confidence_tracker.py +441 -0
  29. package/skills/context.py +675 -0
  30. package/skills/curator.py +348 -0
  31. package/skills/search.py +369 -213
  32. package/skills/session_review.py +418 -0
  33. package/skills/store.py +377 -179
  34. package/update_system.py +829 -817
@@ -0,0 +1,675 @@
1
+ """Context tagging skill for context-aware memory ranking.
2
+
3
+ Context-aware memory system that tracks where solutions worked or failed.
4
+ Same solution may work in React but fail in Vue - context tags capture this.
5
+
6
+ Context structure:
7
+ {
8
+ "project_type": "react" | "python" | "wordpress" | "vue" | etc.,
9
+ "tech_stack": ["typescript", "fastapi", "docker"],
10
+ "environment": "dev" | "prod" | "test",
11
+ "file_patterns": ["*.tsx", "*.py", "*.php"]
12
+ }
13
+ """
14
+ import json
15
+ import os
16
+ import re
17
+ from typing import Dict, Any, Optional, List
18
+ from pathlib import Path
19
+
20
+
21
def detect_project_context(project_path: Optional[str]) -> Dict[str, Any]:
    """Auto-detect context from a project directory.

    Inspects well-known config files (package.json, requirements.txt, ...)
    and falls back to directory-structure heuristics when none of them
    identifies the project type.

    Args:
        project_path: Path to the project directory (may be None).

    Returns:
        Context dict with project_type, tech_stack, environment and
        file_patterns; empty dict when the path is missing or invalid.
    """
    if not project_path or not os.path.exists(project_path):
        return {}

    root = Path(project_path)
    context: Dict[str, Any] = {
        "project_type": None,
        "tech_stack": [],
        "environment": "dev",  # default until something says otherwise
        "file_patterns": [],
    }

    # Config file -> detector mapping; every present file contributes.
    detectors = (
        ("package.json", _detect_js_context),
        ("requirements.txt", _detect_python_context),
        ("pyproject.toml", _detect_python_context),
        ("Pipfile", _detect_python_context),
        ("composer.json", _detect_php_context),
        ("wp-config.php", _detect_wordpress_context),
        ("Cargo.toml", _detect_rust_context),
        ("go.mod", _detect_go_context),
        ("pom.xml", _detect_java_context),
        ("build.gradle", _detect_java_context),
    )

    for filename, detect in detectors:
        candidate = root / filename
        if not candidate.exists():
            continue
        try:
            _merge_context(context, detect(candidate))
        except Exception:
            pass  # best-effort: a malformed config must not abort detection

    # Fallback: infer the type from tell-tale directory layout.
    if not context["project_type"]:
        context["project_type"] = _detect_from_structure(root)

    context["file_patterns"] = _detect_file_patterns(root)

    return context
73
+
74
+
75
def _detect_js_context(package_json_path: Path) -> Dict[str, Any]:
    """Classify a JavaScript/TypeScript project from package.json."""
    ctx: Dict[str, Any] = {"tech_stack": []}
    stack = ctx["tech_stack"]

    try:
        pkg = json.loads(package_json_path.read_text(encoding='utf-8'))
    except (json.JSONDecodeError, IOError):
        # Unreadable manifest: all we know is that it is a Node project.
        return {"project_type": "nodejs"}

    deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})}

    # Primary framework; meta-frameworks (Next/Nuxt) override their base.
    if "react" in deps:
        ctx["project_type"] = "react"
        stack.append("react")
        if "next" in deps:
            ctx["project_type"] = "nextjs"
            stack.append("nextjs")
    elif "vue" in deps:
        ctx["project_type"] = "vue"
        stack.append("vue")
        if "nuxt" in deps:
            ctx["project_type"] = "nuxt"
            stack.append("nuxt")
    elif "angular" in deps or "@angular/core" in deps:
        ctx["project_type"] = "angular"
        stack.append("angular")
    elif "svelte" in deps:
        ctx["project_type"] = "svelte"
        stack.append("svelte")
    elif "express" in deps:
        ctx["project_type"] = "express"
        stack.append("express")
    else:
        ctx["project_type"] = "nodejs"

    # Secondary tools: language, test runners, styling (order matters for
    # stable tech_stack ordering).
    secondary = (
        ("typescript", "typescript"),
        ("jest", "jest"),
        ("vitest", "vitest"),
        ("cypress", "cypress"),
        ("tailwindcss", "tailwind"),
        ("styled-components", "styled-components"),
    )
    for dep_name, tag in secondary:
        if dep_name in deps:
            stack.append(tag)

    return ctx
131
+
132
+
133
def _detect_python_context(config_path: Path) -> Dict[str, Any]:
    """Classify a Python project from requirements.txt/pyproject.toml/Pipfile."""
    ctx: Dict[str, Any] = {"project_type": "python", "tech_stack": ["python"]}

    try:
        text = config_path.read_text(encoding='utf-8').lower()
    except IOError:
        return ctx

    # First matching web framework also becomes the project type.
    for framework in ("django", "flask", "fastapi"):
        if framework in text:
            ctx["project_type"] = framework
            ctx["tech_stack"].append(framework)
            break

    # Common supporting tools (simple substring match on the config text).
    for tool in ("pytest", "celery", "sqlalchemy", "pandas"):
        if tool in text:
            ctx["tech_stack"].append(tool)

    return ctx
166
+
167
+
168
def _detect_php_context(composer_path: Path) -> Dict[str, Any]:
    """Classify a PHP project from composer.json."""
    ctx: Dict[str, Any] = {"project_type": "php", "tech_stack": ["php"]}

    try:
        composer = json.loads(composer_path.read_text(encoding='utf-8'))
    except (json.JSONDecodeError, IOError):
        return ctx

    packages = {**composer.get("require", {}), **composer.get("require-dev", {})}

    if "laravel/framework" in packages:
        ctx["project_type"] = "laravel"
        ctx["tech_stack"].append("laravel")
    elif "symfony/framework-bundle" in packages:
        ctx["project_type"] = "symfony"
        ctx["tech_stack"].append("symfony")

    return ctx
188
+
189
+
190
def _detect_wordpress_context(wp_config_path: Path) -> Dict[str, Any]:
    """Detect WordPress context.

    Args:
        wp_config_path: Path to the discovered wp-config.php file; its
            presence alone identifies the install, so it is not read.

    Returns:
        Fixed WordPress context (PHP + WordPress + MySQL stack).
    """
    # NOTE(review): a previous revision probed wp-content/plugins and
    # wp-content/themes but never used the result; those dead filesystem
    # checks were removed. Reintroduce them only together with actual
    # plugin/theme handling.
    return {
        "project_type": "wordpress",
        "tech_stack": ["php", "wordpress", "mysql"]
    }
207
+
208
+
209
def _detect_rust_context(cargo_path: Path) -> Dict[str, Any]:
    """Classify a Rust project (Cargo.toml present); the path is unused."""
    context: Dict[str, Any] = {"project_type": "rust", "tech_stack": ["rust"]}
    return context
212
+
213
+
214
def _detect_go_context(go_mod_path: Path) -> Dict[str, Any]:
    """Classify a Go project (go.mod present); the path is unused."""
    context: Dict[str, Any] = {"project_type": "go", "tech_stack": ["go"]}
    return context
217
+
218
+
219
def _detect_java_context(config_path: Path) -> Dict[str, Any]:
    """Classify a Java project from pom.xml or build.gradle."""
    ctx: Dict[str, Any] = {"project_type": "java", "tech_stack": ["java"]}

    try:
        build_file = config_path.read_text(encoding='utf-8')
    except IOError:
        return ctx

    # Any mention of Spring in the build file marks a Spring project.
    if "spring" in build_file.lower():
        ctx["project_type"] = "spring"
        ctx["tech_stack"].append("spring")

    return ctx
232
+
233
+
234
def _detect_from_structure(path: Path) -> Optional[str]:
    """Fallback: guess the project type from tell-tale directory layouts."""
    # Ordered markers: first existing path wins.
    markers = (
        (("src", "main", "java"), "java"),
        (("src", "components"), "react"),
        (("app", "Http", "Controllers"), "laravel"),
        (("manage.py",), "django"),
    )

    for parts, project_type in markers:
        if path.joinpath(*parts).exists():
            return project_type

    return None
247
+
248
+
249
def _detect_file_patterns(path: Path) -> List[str]:
    """Detect common source-file glob patterns present in the project.

    Walks the tree collecting file extensions, skipping dependency/VCS
    directories, then maps known extensions to glob patterns.

    Args:
        path: Project root to scan.

    Returns:
        Up to 10 glob patterns such as "*.py" or "*.tsx" (unordered).
    """
    ignored_parts = ('node_modules', '.git', 'venv', '__pycache__')
    extension_map = {
        ".tsx": "*.tsx",
        ".ts": "*.ts",
        ".jsx": "*.jsx",
        ".js": "*.js",
        ".py": "*.py",
        ".php": "*.php",
        ".vue": "*.vue",
        ".svelte": "*.svelte",
        ".rs": "*.rs",
        ".go": "*.go",
        ".java": "*.java",
        ".rb": "*.rb",
    }

    extensions_found = set()
    scanned = 0
    try:
        for item in path.rglob("*"):
            if not item.is_file():
                continue
            if any(p in str(item) for p in ignored_parts):
                continue
            # Hard cap on files examined: the old code only stopped after
            # 20 *distinct* extensions, so a huge tree with few extension
            # types was walked in full.
            scanned += 1
            if scanned > 5000:
                break
            if item.suffix:
                extensions_found.add(item.suffix)
                if len(extensions_found) > 20:
                    break
    except (PermissionError, OSError):
        pass  # unreadable subtree: keep whatever was collected so far

    patterns = {extension_map[ext] for ext in extensions_found if ext in extension_map}
    return list(patterns)[:10]  # keep the context payload small
287
+
288
+
289
def _merge_context(target: Dict, source: Dict):
    """Fold detector output *source* into the accumulated *target* context.

    project_type and environment are overwritten when present in source;
    tech_stack entries are unioned (order is not preserved).
    """
    if source.get("project_type"):
        target["project_type"] = source["project_type"]

    incoming_stack = source.get("tech_stack")
    if incoming_stack:
        merged = set(target.get("tech_stack", [])) | set(incoming_stack)
        target["tech_stack"] = list(merged)

    if source.get("environment"):
        target["environment"] = source["environment"]
301
+
302
+
303
def calculate_context_similarity(context1: Dict[str, Any], context2: Dict[str, Any]) -> float:
    """Calculate similarity between two contexts (0.0 to 1.0).

    Scoring:
    - project_type match: 0.4 weight (0.2 partial credit for related types)
    - tech_stack overlap: 0.4 weight (Jaccard similarity)
    - file_patterns overlap: 0.2 weight (Jaccard similarity)

    Args:
        context1: First context dict.
        context2: Second context dict.

    Returns:
        Similarity score rounded to 4 decimals; 0.0 when either is empty.
    """
    if not context1 or not context2:
        return 0.0

    score = 0.0

    # Project type match (0.4 weight). detect_project_context() may leave
    # project_type as None, so coerce before lowering (the old code raised
    # AttributeError on None here).
    type1 = (context1.get("project_type") or "").lower()
    type2 = (context2.get("project_type") or "").lower()
    if type1 and type2:
        if type1 == type2:
            score += 0.4
        # Partial credit for related ecosystems (e.g. react/nextjs)
        elif _are_related_types(type1, type2):
            score += 0.2

    # Tech stack overlap (0.4 weight) - Jaccard similarity, case-insensitive
    stack1 = {s.lower() for s in context1.get("tech_stack", [])}
    stack2 = {s.lower() for s in context2.get("tech_stack", [])}
    stack_union = stack1 | stack2
    if stack_union:
        score += 0.4 * (len(stack1 & stack2) / len(stack_union))

    # File patterns overlap (0.2 weight) - Jaccard similarity
    patterns1 = set(context1.get("file_patterns", []))
    patterns2 = set(context2.get("file_patterns", []))
    pattern_union = patterns1 | patterns2
    if pattern_union:
        score += 0.2 * (len(patterns1 & patterns2) / len(pattern_union))

    return round(score, 4)
345
+
346
+
347
def _are_related_types(type1: str, type2: str) -> bool:
    """Return True when both project types belong to the same ecosystem."""
    ecosystems = (
        frozenset({"react", "nextjs", "gatsby"}),
        frozenset({"vue", "nuxt"}),
        frozenset({"python", "django", "flask", "fastapi"}),
        frozenset({"php", "laravel", "symfony", "wordpress"}),
        frozenset({"nodejs", "express"}),
        frozenset({"java", "spring"}),
    )
    return any(type1 in group and type2 in group for group in ecosystems)
363
+
364
+
365
async def add_context_success(
    db,
    memory_id: int,
    context: Dict[str, Any]
) -> Dict[str, Any]:
    """Record that a memory's solution worked in a specific context.

    Appends *context* to the memory's worked_in list (deduplicated by
    project_type + tech_stack key) and recomputes context_confidence as a
    smoothed success ratio.

    Args:
        db: Database service instance (exposes .conn, sqlite3-style).
        memory_id: ID of the memory.
        context: Context where the solution worked.

    Returns:
        Dict with success status and updated context info.
    """
    cursor = db.conn.cursor()

    cursor.execute(
        "SELECT worked_in, failed_in, context_confidence FROM memories WHERE id = ?",
        [memory_id]
    )
    row = cursor.fetchone()

    if not row:
        return {
            "success": False,
            "error": f"Memory with ID {memory_id} not found"
        }

    # JSON columns may be NULL/empty -> treat as empty lists.
    worked_in = json.loads(row["worked_in"]) if row["worked_in"] else []
    failed_in = json.loads(row["failed_in"]) if row["failed_in"] else []
    old_confidence = 0.5 if row["context_confidence"] is None else row["context_confidence"]

    # Skip contexts already recorded (keyed on project_type + tech_stack).
    if _context_to_key(context) not in {_context_to_key(c) for c in worked_in}:
        worked_in.append(context)

    # Smoothed success ratio: more successes raise confidence, failures
    # lower it, and the +0.5/(n+1) smoothing keeps tiny samples near 0.5.
    successes = len(worked_in)
    failures = len(failed_in)
    observations = successes + failures

    if observations > 0:
        new_confidence = successes / observations
        new_confidence = (new_confidence * observations + 0.5) / (observations + 1)
    else:
        new_confidence = 0.5

    cursor.execute(
        """
        UPDATE memories
        SET worked_in = ?,
            context_confidence = ?,
            updated_at = datetime('now')
        WHERE id = ?
        """,
        [json.dumps(worked_in), new_confidence, memory_id]
    )
    db.conn.commit()

    return {
        "success": True,
        "memory_id": memory_id,
        "context_added": context,
        "worked_in_count": len(worked_in),
        "failed_in_count": len(failed_in),
        "old_confidence": old_confidence,
        "new_confidence": new_confidence,
        "message": f"Context success recorded. Confidence: {old_confidence:.3f} -> {new_confidence:.3f}"
    }
444
+
445
+
446
async def add_context_failure(
    db,
    memory_id: int,
    context: Dict[str, Any],
    failure_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Record that a memory's solution failed in a specific context.

    Appends *context* (annotated with the optional failure_reason) to the
    memory's failed_in list, deduplicated by project_type + tech_stack,
    and recomputes the smoothed context confidence.

    Args:
        db: Database service instance (exposes .conn, sqlite3-style).
        memory_id: ID of the memory.
        context: Context where the solution failed.
        failure_reason: Optional explanation of why it failed.

    Returns:
        Dict with success status and updated context info.
    """
    cursor = db.conn.cursor()

    cursor.execute(
        "SELECT worked_in, failed_in, context_confidence FROM memories WHERE id = ?",
        [memory_id]
    )
    row = cursor.fetchone()

    if not row:
        return {
            "success": False,
            "error": f"Memory with ID {memory_id} not found"
        }

    # JSON columns may be NULL/empty -> treat as empty lists.
    worked_in = json.loads(row["worked_in"]) if row["worked_in"] else []
    failed_in = json.loads(row["failed_in"]) if row["failed_in"] else []
    current_confidence = row["context_confidence"] if row["context_confidence"] is not None else 0.5

    # Annotate a *copy* so the caller's dict is not mutated (the old code
    # wrote failure_reason into the argument in place).
    if failure_reason:
        context = {**context, "failure_reason": failure_reason}

    # Add new context to failed_in (deduplicated by project_type + tech_stack;
    # failure_reason is not part of the key).
    context_key = _context_to_key(context)
    existing_keys = [_context_to_key(c) for c in failed_in]

    if context_key not in existing_keys:
        failed_in.append(context)

    # Recalculate context confidence: success ratio with +0.5/(n+1)
    # smoothing so small samples stay near 0.5 (same scheme as
    # add_context_success).
    success_count = len(worked_in)
    failure_count = len(failed_in)
    total = success_count + failure_count

    if total > 0:
        new_confidence = success_count / total
        new_confidence = (new_confidence * total + 0.5) / (total + 1)
    else:
        new_confidence = 0.5

    cursor.execute(
        """
        UPDATE memories
        SET failed_in = ?,
            context_confidence = ?,
            updated_at = datetime('now')
        WHERE id = ?
        """,
        [json.dumps(failed_in), new_confidence, memory_id]
    )
    db.conn.commit()

    return {
        "success": True,
        "memory_id": memory_id,
        "context_added": context,
        "worked_in_count": len(worked_in),
        "failed_in_count": len(failed_in),
        "old_confidence": current_confidence,
        "new_confidence": new_confidence,
        "message": f"Context failure recorded. Confidence: {current_confidence:.3f} -> {new_confidence:.3f}"
    }
528
+
529
+
530
async def get_context_score(
    db,
    memory_id: int,
    current_context: Dict[str, Any]
) -> Dict[str, Any]:
    """Calculate how relevant a memory is for the current context.

    Compares *current_context* against the memory's recorded worked_in and
    failed_in contexts: similarity to a success boosts the score,
    similarity to a failure reduces it, and no recorded data is neutral.

    Args:
        db: Database service instance (exposes .conn, sqlite3-style).
        memory_id: ID of the memory.
        current_context: Current project context.

    Returns:
        Dict with context score and a ranking adjustment in [-0.2, +0.2].
    """
    cursor = db.conn.cursor()

    cursor.execute(
        "SELECT worked_in, failed_in, context_confidence FROM memories WHERE id = ?",
        [memory_id]
    )
    row = cursor.fetchone()

    if not row:
        return {
            "success": False,
            "error": f"Memory with ID {memory_id} not found"
        }

    worked_in = json.loads(row["worked_in"]) if row["worked_in"] else []
    failed_in = json.loads(row["failed_in"]) if row["failed_in"] else []
    context_confidence = row["context_confidence"]  # may be NULL -> None

    # No context data recorded yet -> neutral adjustment.
    if not worked_in and not failed_in:
        return {
            "success": True,
            "memory_id": memory_id,
            "context_score": 0.0,
            "context_adjustment": 0.0,
            "has_context_data": False,
            "context_confidence": context_confidence
        }

    # Best-match similarity against each outcome bucket.
    best_success = max(
        (calculate_context_similarity(current_context, ctx) for ctx in worked_in),
        default=0.0,
    )
    best_failure = max(
        (calculate_context_similarity(current_context, ctx) for ctx in failed_in),
        default=0.0,
    )

    # Scale each into 0.2 and combine: boost for success, penalty for failure.
    adjustment = best_success * 0.2 - best_failure * 0.2

    if adjustment > 0.1:
        recommendation = "recommended"
    elif adjustment < -0.1:
        recommendation = "caution"
    else:
        recommendation = "neutral"

    return {
        "success": True,
        "memory_id": memory_id,
        "context_score": round(best_success - best_failure, 4),
        "context_adjustment": round(adjustment, 4),
        "has_context_data": True,
        "worked_in_similarity": round(best_success, 4),
        "failed_in_similarity": round(best_failure, 4),
        "worked_in_count": len(worked_in),
        "failed_in_count": len(failed_in),
        "context_confidence": context_confidence,
        "recommendation": recommendation
    }
617
+
618
+
619
async def get_memory_contexts(
    db,
    memory_id: int
) -> Dict[str, Any]:
    """Get all context data for a memory.

    Args:
        db: Database service instance (exposes .conn, sqlite3-style).
        memory_id: ID of the memory.

    Returns:
        Dict with the original context, worked_in/failed_in lists and the
        stored context confidence.
    """
    cursor = db.conn.cursor()

    cursor.execute(
        """
        SELECT id, content, worked_in, failed_in, context_confidence,
               project_type, tech_stack
        FROM memories WHERE id = ?
        """,
        [memory_id]
    )
    row = cursor.fetchone()

    if not row:
        return {
            "success": False,
            "error": f"Memory with ID {memory_id} not found"
        }

    def _json_list(column: str) -> list:
        # JSON columns may be NULL/empty -> treat as an empty list.
        raw = row[column]
        return json.loads(raw) if raw else []

    worked_in = _json_list("worked_in")
    failed_in = _json_list("failed_in")
    tech_stack = _json_list("tech_stack")

    content = row["content"]
    preview = content[:200] + "..." if len(content) > 200 else content

    return {
        "success": True,
        "memory_id": memory_id,
        "content_preview": preview,
        "original_context": {
            "project_type": row["project_type"],
            "tech_stack": tech_stack
        },
        "worked_in": worked_in,
        "failed_in": failed_in,
        "context_confidence": row["context_confidence"],
        "worked_in_count": len(worked_in),
        "failed_in_count": len(failed_in)
    }
668
+
669
+
670
def _context_to_key(context: Dict[str, Any]) -> str:
    """Build a dedup key from project_type plus the sorted tech stack."""
    stack_part = ",".join(sorted(context.get("tech_stack", [])))
    type_part = context.get("project_type", "")
    return f"{type_part}:{stack_part}"