claude-mpm 4.0.19__py3-none-any.whl → 4.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/__main__.py +4 -0
  3. claude_mpm/agents/BASE_AGENT_TEMPLATE.md +38 -2
  4. claude_mpm/agents/OUTPUT_STYLE.md +84 -0
  5. claude_mpm/agents/templates/qa.json +1 -1
  6. claude_mpm/cli/__init__.py +23 -1
  7. claude_mpm/cli/__main__.py +4 -0
  8. claude_mpm/cli/commands/memory.py +32 -5
  9. claude_mpm/cli/commands/run.py +33 -6
  10. claude_mpm/cli/parsers/base_parser.py +5 -0
  11. claude_mpm/cli/parsers/run_parser.py +5 -0
  12. claude_mpm/cli/utils.py +17 -4
  13. claude_mpm/core/base_service.py +1 -1
  14. claude_mpm/core/config.py +70 -5
  15. claude_mpm/core/framework_loader.py +342 -31
  16. claude_mpm/core/interactive_session.py +55 -1
  17. claude_mpm/core/oneshot_session.py +7 -1
  18. claude_mpm/core/output_style_manager.py +468 -0
  19. claude_mpm/core/unified_paths.py +190 -21
  20. claude_mpm/hooks/claude_hooks/hook_handler.py +91 -16
  21. claude_mpm/hooks/claude_hooks/hook_wrapper.sh +3 -0
  22. claude_mpm/init.py +1 -0
  23. claude_mpm/services/agents/deployment/agent_deployment.py +151 -7
  24. claude_mpm/services/agents/deployment/agent_template_builder.py +37 -1
  25. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +441 -0
  26. claude_mpm/services/agents/memory/__init__.py +0 -2
  27. claude_mpm/services/agents/memory/agent_memory_manager.py +737 -43
  28. claude_mpm/services/agents/memory/content_manager.py +144 -14
  29. claude_mpm/services/agents/memory/template_generator.py +7 -354
  30. claude_mpm/services/mcp_gateway/server/stdio_server.py +61 -169
  31. claude_mpm/services/subprocess_launcher_service.py +5 -0
  32. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/METADATA +1 -1
  33. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/RECORD +37 -35
  34. claude_mpm/services/agents/memory/analyzer.py +0 -430
  35. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/WHEEL +0 -0
  36. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/entry_points.txt +0 -0
  37. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/licenses/LICENSE +0 -0
  38. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
- from pathlib import Path
-
  #!/usr/bin/env python3
+
+ from pathlib import Path
  """
  Agent Memory Manager Service
  ===========================
@@ -17,7 +17,7 @@ This service provides:
  - Directory initialization with README

  Memory files are stored in .claude-mpm/memories/ directory
- following the naming convention: {agent_id}_agent.md
+ following the naming convention: {agent_id}_memories.md
  """

  import logging
@@ -28,9 +28,6 @@ from typing import Any, Dict, List, Optional, Tuple
  from claude_mpm.core.config import Config
  from claude_mpm.core.interfaces import MemoryServiceInterface
  from claude_mpm.core.unified_paths import get_path_manager
- from claude_mpm.services.project.analyzer import ProjectAnalyzer
-
- from .analyzer import MemoryAnalyzer
  from .content_manager import MemoryContentManager
  from .template_generator import MemoryTemplateGenerator

@@ -85,28 +82,26 @@ class AgentMemoryManager(MemoryServiceInterface):
  self.project_root = get_path_manager().project_root
  # Use current working directory by default, not project root
  self.working_directory = working_directory or Path(os.getcwd())
- self.memories_dir = self.working_directory / ".claude-mpm" / "memories"
+
+ # Set up both user and project memory directories
+ self.user_memories_dir = Path.home() / ".claude-mpm" / "memories"
+ self.project_memories_dir = self.working_directory / ".claude-mpm" / "memories"
+
+ # Primary memories_dir points to project for backward compatibility
+ self.memories_dir = self.project_memories_dir
+
+ # Ensure both directories exist
  self._ensure_memories_directory()
+ self._ensure_user_memories_directory()

  # Initialize memory limits from configuration
  self._init_memory_limits()

- # Initialize project analyzer for context-aware memory creation
- self.project_analyzer = ProjectAnalyzer(self.config, self.working_directory)
-
  # Initialize component services
  self.template_generator = MemoryTemplateGenerator(
- self.config, self.working_directory, self.project_analyzer
+ self.config, self.working_directory
  )
  self.content_manager = MemoryContentManager(self.memory_limits)
- self.analyzer = MemoryAnalyzer(
- self.memories_dir,
- self.memory_limits,
- self.agent_overrides,
- self._get_agent_limits,
- self._get_agent_auto_learning,
- self.content_manager,
- )

  @property
  def logger(self):
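For reference, a minimal sketch of what the new constructor logic resolves to at runtime (paths are illustrative; the manager is normally built by the framework rather than by hand):

    from pathlib import Path
    import os

    working_directory = Path(os.getcwd())                                    # project checkout
    user_memories_dir = Path.home() / ".claude-mpm" / "memories"             # global defaults
    project_memories_dir = working_directory / ".claude-mpm" / "memories"    # per-project overrides
    memories_dir = project_memories_dir                                      # kept for backward compatibility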
@@ -202,32 +197,109 @@ class AgentMemoryManager(MemoryServiceInterface):
  # Fall back to global setting
  return self.auto_learning

+ def _get_memory_file_with_migration(self, directory: Path, agent_id: str) -> Path:
+ """Get memory file path, migrating from old naming if needed.
+
+ WHY: Supports backward compatibility by automatically migrating from
+ the old {agent_id}_agent.md and {agent_id}.md formats to the new {agent_id}_memories.md format.
+
+ Args:
+ directory: Directory containing memory files
+ agent_id: The agent identifier
+
+ Returns:
+ Path: Path to the memory file (may not exist)
+ """
+ new_file = directory / f"{agent_id}_memories.md"
+ # Support migration from both old formats
+ old_file_agent = directory / f"{agent_id}_agent.md"
+ old_file_simple = directory / f"{agent_id}.md"
+
+ # Migrate from old formats if needed
+ if not new_file.exists():
+ # Try migrating from {agent_id}_agent.md first
+ if old_file_agent.exists():
+ try:
+ content = old_file_agent.read_text(encoding="utf-8")
+ new_file.write_text(content, encoding="utf-8")
+ old_file_agent.unlink()
+ self.logger.info(f"Migrated memory file from {old_file_agent.name} to {new_file.name}")
+ except Exception as e:
+ self.logger.error(f"Failed to migrate memory file for {agent_id}: {e}")
+ return old_file_agent
+ # Try migrating from {agent_id}.md
+ elif old_file_simple.exists():
+ try:
+ content = old_file_simple.read_text(encoding="utf-8")
+ new_file.write_text(content, encoding="utf-8")
+ old_file_simple.unlink()
+ self.logger.info(f"Migrated memory file from {old_file_simple.name} to {new_file.name}")
+ except Exception as e:
+ self.logger.error(f"Failed to migrate memory file for {agent_id}: {e}")
+ return old_file_simple
+
+ return new_file
+
  def load_agent_memory(self, agent_id: str) -> str:
- """Load agent memory file content.
+ """Load agent memory file content from both user and project directories.

  WHY: Agents need to read their accumulated knowledge before starting tasks
- to apply learned patterns and avoid repeated mistakes.
+ to apply learned patterns and avoid repeated mistakes. This method now
+ aggregates memories from both user-level (global) and project-level sources.
+
+ Loading order:
+ 1. User-level memory (~/.claude-mpm/memories/{agent_id}_memories.md)
+ 2. Project-level memory (./.claude-mpm/memories/{agent_id}_memories.md)
+ 3. Project memory overrides/extends user memory

  Args:
  agent_id: The agent identifier (e.g., 'research', 'engineer')

  Returns:
- str: The memory file content, creating default if doesn't exist
+ str: The aggregated memory file content, creating default if doesn't exist
  """
- memory_file = self.memories_dir / f"{agent_id}_agent.md"
-
- if not memory_file.exists():
+ # Support both old and new naming conventions
+ user_memory_file = self._get_memory_file_with_migration(self.user_memories_dir, agent_id)
+ project_memory_file = self._get_memory_file_with_migration(self.project_memories_dir, agent_id)
+
+ user_memory = None
+ project_memory = None
+
+ # Load user-level memory if exists
+ if user_memory_file.exists():
+ try:
+ user_memory = user_memory_file.read_text(encoding="utf-8")
+ user_memory = self.content_manager.validate_and_repair(user_memory, agent_id)
+ self.logger.debug(f"Loaded user-level memory for {agent_id}")
+ except Exception as e:
+ self.logger.error(f"Error reading user memory file for {agent_id}: {e}")
+
+ # Load project-level memory if exists
+ if project_memory_file.exists():
+ try:
+ project_memory = project_memory_file.read_text(encoding="utf-8")
+ project_memory = self.content_manager.validate_and_repair(project_memory, agent_id)
+ self.logger.debug(f"Loaded project-level memory for {agent_id}")
+ except Exception as e:
+ self.logger.error(f"Error reading project memory file for {agent_id}: {e}")
+
+ # Aggregate memories
+ if user_memory and project_memory:
+ # Both exist - aggregate them
+ aggregated = self._aggregate_agent_memories(user_memory, project_memory, agent_id)
+ self.logger.info(f"Aggregated user and project memories for {agent_id}")
+ return aggregated
+ elif project_memory:
+ # Only project memory exists
+ return project_memory
+ elif user_memory:
+ # Only user memory exists
+ return user_memory
+ else:
+ # Neither exists - create default in project directory
  self.logger.info(f"Creating default memory for agent: {agent_id}")
  return self._create_default_memory(agent_id)

- try:
- content = memory_file.read_text(encoding="utf-8")
- return self.content_manager.validate_and_repair(content, agent_id)
- except Exception as e:
- self.logger.error(f"Error reading memory file for {agent_id}: {e}")
- # Return default memory on error - never fail
- return self._create_default_memory(agent_id)
-
  def update_agent_memory(self, agent_id: str, section: str, new_item: str) -> bool:
  """Add new learning item to specified section.

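A rough usage sketch of the migration and aggregation path above (`manager` stands for an already-constructed AgentMemoryManager; the agent id is arbitrary):

    # Before 4.0.20, an engineer agent's memory lived at
    #   .claude-mpm/memories/engineer_agent.md  (or engineer.md).
    # On first access, _get_memory_file_with_migration() renames it to
    #   .claude-mpm/memories/engineer_memories.md.
    #
    # load_agent_memory() then consults, in order:
    #   1. ~/.claude-mpm/memories/engineer_memories.md   (user level)
    #   2. ./.claude-mpm/memories/engineer_memories.md   (project level)
    # and merges the two via _aggregate_agent_memories() when both exist.
    content = manager.load_agent_memory("engineer")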
@@ -319,7 +391,7 @@ class AgentMemoryManager(MemoryServiceInterface):

  # Save default file
  try:
- memory_file = self.memories_dir / f"{agent_id}_agent.md"
+ memory_file = self.memories_dir / f"{agent_id}_memories.md"
  memory_file.write_text(template, encoding="utf-8")
  self.logger.info(f"Created project-specific memory file for {agent_id}")

@@ -328,23 +400,32 @@ class AgentMemoryManager(MemoryServiceInterface):
  return template


- def _save_memory_file(self, agent_id: str, content: str) -> bool:
+ def _save_memory_file(self, agent_id: str, content: str, save_to_user: bool = False) -> bool:
  """Save memory content to file.

  WHY: Memory updates need to be persisted atomically to prevent corruption
- and ensure learnings are preserved across agent invocations.
+ and ensure learnings are preserved across agent invocations. By default,
+ saves to project directory, but can optionally save to user directory.

  Args:
  agent_id: Agent identifier
  content: Content to save
+ save_to_user: If True, saves to user directory instead of project

  Returns:
  bool: True if save succeeded
  """
  try:
- memory_file = self.memories_dir / f"{agent_id}_agent.md"
+ # Choose target directory
+ target_dir = self.user_memories_dir if save_to_user else self.project_memories_dir
+ memory_file = target_dir / f"{agent_id}_memories.md"
+
+ # Ensure directory exists
+ target_dir.mkdir(parents=True, exist_ok=True)
+
  memory_file.write_text(content, encoding="utf-8")
- self.logger.debug(f"Saved memory for {agent_id}")
+ location = "user" if save_to_user else "project"
+ self.logger.debug(f"Saved memory for {agent_id} to {location} directory")
  return True
  except Exception as e:
  self.logger.error(f"Error saving memory for {agent_id}: {e}")
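The save path stays project-local unless the new flag is passed. A small sketch of the internal call (a private method, shown here only to illustrate the flag; `manager` is a placeholder instance):

    # Default: write to ./.claude-mpm/memories/research_memories.md
    manager._save_memory_file("research", content)

    # Opt in to the user-level store: ~/.claude-mpm/memories/research_memories.md
    manager._save_memory_file("research", content, save_to_user=True)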
@@ -436,6 +517,364 @@ class AgentMemoryManager(MemoryServiceInterface):
  self.logger.error(f"Error routing memory command: {e}")
  return {"success": False, "error": str(e)}

+ def extract_and_update_memory(self, agent_id: str, response: str) -> bool:
+ """Extract memory updates from agent response and update memory file.
+
+ WHY: Agents provide memory updates in their responses that need to be
+ extracted and persisted. This method looks for "remember" field in JSON
+ responses and merges new learnings with existing memory.
+
+ Args:
+ agent_id: The agent identifier
+ response: The agent's response text (may contain JSON)
+
+ Returns:
+ bool: True if memory was updated, False otherwise
+ """
+ try:
+ import json
+ import re
+
+ # Look for JSON block in the response
+ # Pattern matches ```json ... ``` blocks
+ json_pattern = r'```json\s*(.*?)\s*```'
+ json_matches = re.findall(json_pattern, response, re.DOTALL)
+
+ if not json_matches:
+ # Also try to find inline JSON objects
+ json_pattern2 = r'\{[^{}]*"(?:remember|Remember)"[^{}]*\}'
+ json_matches = re.findall(json_pattern2, response, re.DOTALL)
+
+ for json_str in json_matches:
+ try:
+ data = json.loads(json_str)
+
+ # Check for memory updates in "remember" field
+ memory_items = None
+
+ # Check both "remember" and "Remember" fields
+ if "remember" in data:
+ memory_items = data["remember"]
+ elif "Remember" in data:
+ memory_items = data["Remember"]
+
+ # Process memory items if found and not null
+ if memory_items is not None and memory_items != "null":
+ # Skip if explicitly null or empty list
+ if isinstance(memory_items, list) and len(memory_items) > 0:
+ # Filter out empty strings and None values
+ valid_items = []
+ for item in memory_items:
+ if item and isinstance(item, str) and item.strip():
+ valid_items.append(item.strip())
+
+ # Only proceed if we have valid items
+ if valid_items:
+ success = self._add_learnings_to_memory(agent_id, valid_items)
+ if success:
+ self.logger.info(f"Added {len(valid_items)} new memories for {agent_id}")
+ return True
+
+ except json.JSONDecodeError:
+ # Not valid JSON, continue to next match
+ continue
+
+ return False
+
+ except Exception as e:
+ self.logger.error(f"Error extracting memory from response for {agent_id}: {e}")
+ return False
+
+ def _add_learnings_to_memory(self, agent_id: str, learnings: List[str]) -> bool:
+ """Add new learnings to existing agent memory.
+
+ WHY: Instead of replacing all memory, we want to intelligently merge new
+ learnings with existing knowledge, avoiding duplicates and maintaining
+ the most relevant information.
+
+ Args:
+ agent_id: The agent identifier
+ learnings: List of new learning strings to add
+
+ Returns:
+ bool: True if memory was successfully updated
+ """
+ try:
+ # Load existing memory
+ current_memory = self.load_agent_memory(agent_id)
+
+ # Parse existing memory into sections
+ sections = self._parse_memory_sections(current_memory)
+
+ # Clean sections - remove template placeholder text
+ sections = self._clean_template_placeholders(sections)
+
+ # Determine which section to add learnings to based on content
+ for learning in learnings:
+ if not learning or not isinstance(learning, str):
+ continue
+
+ learning = learning.strip()
+ if not learning:
+ continue
+
+ # Categorize the learning based on keywords
+ section = self._categorize_learning(learning)
+
+ # Add to appropriate section if not duplicate
+ if section not in sections:
+ sections[section] = []
+
+ # Check for duplicates (case-insensitive) - FIXED LOGIC
+ normalized_learning = learning.lower()
+ # Strip bullet points from existing items for comparison
+ existing_normalized = [item.lstrip('- ').strip().lower() for item in sections[section]]
+
+ if normalized_learning not in existing_normalized:
+ # Add bullet point if not present
+ if not learning.startswith("-"):
+ learning = f"- {learning}"
+ sections[section].append(learning)
+ else:
+ self.logger.debug(f"Skipping duplicate memory: {learning}")
+
+ # Rebuild memory content
+ new_content = self._build_memory_content(agent_id, sections)
+
+ # Validate and save
+ agent_limits = self._get_agent_limits(agent_id)
+ if self.content_manager.exceeds_limits(new_content, agent_limits):
+ self.logger.debug(f"Memory for {agent_id} exceeds limits, truncating")
+ new_content = self.content_manager.truncate_to_limits(new_content, agent_limits)
+
+ return self._save_memory_file(agent_id, new_content)
+
+ except Exception as e:
+ self.logger.error(f"Error adding learnings to memory for {agent_id}: {e}")
+ return False
+
+ def _clean_template_placeholders(self, sections: Dict[str, List[str]]) -> Dict[str, List[str]]:
+ """Remove template placeholder text from sections.
+
+ Args:
+ sections: Dict mapping section names to lists of items
+
+ Returns:
+ Dict with placeholder text removed
+ """
+ # Template placeholder patterns to remove
+ placeholders = [
+ "Analyze project structure to understand architecture patterns",
+ "Observe codebase patterns and conventions during tasks",
+ "Extract implementation guidelines from project documentation",
+ "Learn from errors encountered during project work",
+ "Project analysis pending - gather context during tasks",
+ "claude-mpm: Software project requiring analysis"
+ ]
+
+ cleaned = {}
+ for section_name, items in sections.items():
+ cleaned_items = []
+ for item in items:
+ # Remove bullet point for comparison
+ item_text = item.lstrip("- ").strip()
+ # Keep item if it's not a placeholder
+ if item_text and item_text not in placeholders:
+ cleaned_items.append(item)
+
+ # Only include section if it has real content
+ if cleaned_items:
+ cleaned[section_name] = cleaned_items
+
+ return cleaned
+
+ def _categorize_learning(self, learning: str) -> str:
+ """Categorize a learning item into appropriate section.
+
+ Args:
+ learning: The learning string to categorize
+
+ Returns:
+ str: The section name for this learning
+ """
+ learning_lower = learning.lower()
+
+ # Check for keywords to categorize with improved patterns
+ # Order matters - more specific patterns should come first
+
+ # Architecture keywords
+ if any(word in learning_lower for word in ["architecture", "structure", "design", "module", "component", "microservices", "service-oriented"]):
+ return "Project Architecture"
+
+ # Integration keywords (check before patterns to avoid "use" conflict)
+ elif any(word in learning_lower for word in ["integration", "interface", "api", "connection", "database", "pooling", "via"]):
+ return "Integration Points"
+
+ # Mistake keywords (check before patterns to avoid conflicts)
+ elif any(word in learning_lower for word in ["mistake", "error", "avoid", "don't", "never", "not"]):
+ return "Common Mistakes to Avoid"
+
+ # Context keywords (check before patterns to avoid "working", "version" conflicts)
+ elif any(word in learning_lower for word in ["context", "current", "currently", "working", "version", "release", "candidate"]):
+ return "Current Technical Context"
+
+ # Guideline keywords (check before patterns to avoid "must", "should" conflicts)
+ elif any(word in learning_lower for word in ["guideline", "rule", "standard", "practice", "docstring", "documentation", "must", "should", "include", "comprehensive"]):
+ return "Implementation Guidelines"
+
+ # Pattern keywords (including dependency injection, conventions)
+ elif any(word in learning_lower for word in ["pattern", "convention", "style", "format", "dependency injection", "instantiation", "use", "implement"]):
+ return "Coding Patterns Learned"
+
+ # Strategy keywords
+ elif any(word in learning_lower for word in ["strategy", "approach", "method", "technique", "effective"]):
+ return "Effective Strategies"
+
+ # Performance keywords
+ elif any(word in learning_lower for word in ["performance", "optimization", "speed", "efficiency"]):
+ return "Performance Considerations"
+
+ # Domain keywords
+ elif any(word in learning_lower for word in ["domain", "business", "specific"]):
+ return "Domain-Specific Knowledge"
+
+ else:
+ return "Recent Learnings"
+
+ def _build_memory_content(self, agent_id: str, sections: Dict[str, List[str]]) -> str:
+ """Build memory content from sections.
+
+ Args:
+ agent_id: The agent identifier
+ sections: Dict mapping section names to lists of items
+
+ Returns:
+ str: The formatted memory content
+ """
+ lines = []
+
+ # Add header
+ lines.append(f"# {agent_id.capitalize()} Agent Memory")
+ lines.append("")
+ lines.append(f"<!-- Last Updated: {datetime.now().isoformat()} -->")
+ lines.append("")
+
+ # Add sections in consistent order
+ section_order = [
+ "Project Architecture",
+ "Implementation Guidelines",
+ "Common Mistakes to Avoid",
+ "Current Technical Context",
+ "Coding Patterns Learned",
+ "Effective Strategies",
+ "Integration Points",
+ "Performance Considerations",
+ "Domain-Specific Knowledge",
+ "Recent Learnings"
+ ]
+
+ for section_name in section_order:
+ if section_name in sections and sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in sections[section_name]:
+ if item.strip():
+ lines.append(item)
+ lines.append("")
+
+ # Add any remaining sections
+ remaining = set(sections.keys()) - set(section_order)
+ for section_name in sorted(remaining):
+ if sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in sections[section_name]:
+ if item.strip():
+ lines.append(item)
+ lines.append("")
+
+ return '\n'.join(lines)
+
+ def replace_agent_memory(self, agent_id: str, memory_sections: Dict[str, List[str]]) -> bool:
+ """Replace agent's memory with new content organized by sections.
+
+ WHY: When agents provide memory updates, they replace the existing memory
+ rather than appending to it. This ensures memories stay current and relevant.
+
+ Args:
+ agent_id: The agent identifier
+ memory_sections: Dict mapping section names to lists of memory items
+
+ Returns:
+ bool: True if memory was successfully replaced
+ """
+ try:
+ # Build new memory content
+ lines = []
+
+ # Add header
+ lines.append(f"# {agent_id.capitalize()} Agent Memory")
+ lines.append("")
+ lines.append(f"<!-- Last Updated: {datetime.now().isoformat()} -->")
+ lines.append("")
+
+ # Add sections in a consistent order
+ section_order = [
+ "Project Architecture",
+ "Implementation Guidelines",
+ "Common Mistakes to Avoid",
+ "Current Technical Context",
+ "Coding Patterns Learned",
+ "Effective Strategies",
+ "Integration Points",
+ "Performance Considerations",
+ "Domain-Specific Knowledge",
+ "Recent Learnings"
+ ]
+
+ # First add ordered sections that exist in memory_sections
+ for section_name in section_order:
+ if section_name in memory_sections and memory_sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in memory_sections[section_name]:
+ if item.strip(): # Skip empty items
+ # Add bullet point if not already present
+ if not item.strip().startswith("-"):
+ lines.append(f"- {item.strip()}")
+ else:
+ lines.append(item.strip())
+ lines.append("")
+
+ # Then add any remaining sections not in the order list
+ remaining_sections = set(memory_sections.keys()) - set(section_order)
+ for section_name in sorted(remaining_sections):
+ if memory_sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in memory_sections[section_name]:
+ if item.strip():
+ if not item.strip().startswith("-"):
+ lines.append(f"- {item.strip()}")
+ else:
+ lines.append(item.strip())
+ lines.append("")
+
+ new_content = '\n'.join(lines)
+
+ # Validate and save
+ agent_limits = self._get_agent_limits(agent_id)
+ if self.content_manager.exceeds_limits(new_content, agent_limits):
+ self.logger.debug(f"Memory for {agent_id} exceeds limits, truncating")
+ new_content = self.content_manager.truncate_to_limits(new_content, agent_limits)
+
+ # Save the new memory
+ return self._save_memory_file(agent_id, new_content)
+
+ except Exception as e:
+ self.logger.error(f"Error replacing memory for {agent_id}: {e}")
+ return False
+
  def get_memory_status(self) -> Dict[str, Any]:
  """Get comprehensive memory system status.

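extract_and_update_memory() looks for a fenced ```json block (or an inline object) carrying a "remember" list, as the regexes above show. A hedged example of the kind of agent response it is meant to pick up; the learning strings are invented and `manager` is a placeholder instance:

    response = '''
    Refactor complete.

    ```json
    {"remember": ["Use dependency injection for service wiring",
                  "Never hardcode paths under .claude-mpm"]}
    ```
    '''
    # Parses the JSON block, drops empty items, categorizes each learning via
    # _categorize_learning(), and persists the merge with _save_memory_file().
    updated = manager.extract_and_update_memory("engineer", response)  # True if anything was stored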
@@ -446,7 +885,32 @@ class AgentMemoryManager(MemoryServiceInterface):
  Returns:
  Dict containing comprehensive memory system status
  """
- return self.analyzer.get_memory_status()
+ # Simplified status implementation without analyzer
+ status = {
+ "system_enabled": self.memory_enabled,
+ "auto_learning": self.auto_learning,
+ "memory_directory": str(self.memories_dir),
+ "total_agents": 0,
+ "total_size_kb": 0,
+ "agents": {},
+ "system_health": "healthy"
+ }
+
+ if self.memories_dir.exists():
+ memory_files = list(self.memories_dir.glob("*_memories.md"))
+ status["total_agents"] = len(memory_files)
+
+ for file_path in memory_files:
+ if file_path.name != "README.md":
+ size_kb = file_path.stat().st_size / 1024
+ status["total_size_kb"] += size_kb
+ agent_id = file_path.stem.replace("_memories", "")
+ status["agents"][agent_id] = {
+ "file": file_path.name,
+ "size_kb": round(size_kb, 2)
+ }
+
+ return status

  def cross_reference_memories(self, query: Optional[str] = None) -> Dict[str, Any]:
  """Find common patterns and cross-references across agent memories.
@@ -461,7 +925,12 @@ class AgentMemoryManager(MemoryServiceInterface):
  Returns:
  Dict containing cross-reference analysis results
  """
- return self.analyzer.cross_reference_memories(query)
+ # Deprecated - return informative message
+ return {
+ "status": "deprecated",
+ "message": "Cross-reference analysis has been deprecated in favor of simplified memory management",
+ "suggestion": "Use get_memory_status() for memory overview"
+ }

  def get_all_memories_raw(self) -> Dict[str, Any]:
  """Get all agent memories in structured JSON format.
@@ -473,7 +942,12 @@ class AgentMemoryManager(MemoryServiceInterface):
  Returns:
  Dict containing structured memory data for all agents
  """
- return self.analyzer.get_all_memories_raw()
+ # Deprecated - return informative message
+ return {
+ "status": "deprecated",
+ "message": "Raw memory access has been deprecated in favor of simplified memory management",
+ "suggestion": "Use load_agent_memory() for specific agent memories"
+ }

  def _ensure_memories_directory(self):
  """Ensure memories directory exists with README.
@@ -531,6 +1005,188 @@ Standard markdown with structured sections. Agents expect:
  self.logger.error(f"Error ensuring memories directory: {e}")
  # Continue anyway - memory system should not block operations

+ def _ensure_user_memories_directory(self):
+ """Ensure user-level memories directory exists with README.
+
+ WHY: User-level memories provide global defaults that apply across all projects,
+ allowing users to maintain common patterns and guidelines.
+ """
+ try:
+ self.user_memories_dir.mkdir(parents=True, exist_ok=True)
+ self.logger.debug(f"Ensured user memories directory exists: {self.user_memories_dir}")
+
+ readme_path = self.user_memories_dir / "README.md"
+ if not readme_path.exists():
+ readme_content = """# User-Level Agent Memory System
+
+ ## Purpose
+ User-level memories provide global defaults that apply to all projects. These memories are
+ loaded first, then project-specific memories can override or extend them.
+
+ ## Directory Hierarchy
+ 1. **User-level memories** (~/.claude-mpm/memories/): Global defaults for all projects
+ 2. **Project-level memories** (./.claude-mpm/memories/): Project-specific overrides
+
+ ## How Memories Are Aggregated
+ - User memories are loaded first as the base
+ - Project memories override or extend user memories
+ - Duplicate sections are merged with project taking precedence
+ - Unique sections from both sources are preserved
+
+ ## Manual Editing
+ Feel free to edit these files to add:
+ - Common coding patterns you always use
+ - Personal style guidelines
+ - Frequently used architectural patterns
+ - Global best practices
+
+ ## File Format
+ Same as project memories - standard markdown with structured sections:
+ - Project Architecture
+ - Implementation Guidelines
+ - Common Mistakes to Avoid
+ - Current Technical Context
+
+ ## Examples of Good User-Level Memories
+ - "Always use type hints in Python code"
+ - "Prefer composition over inheritance"
+ - "Write comprehensive docstrings for public APIs"
+ - "Use dependency injection for testability"
+ """
+ readme_path.write_text(readme_content, encoding="utf-8")
+ self.logger.info("Created README.md in user memories directory")
+
+ except Exception as e:
+ self.logger.error(f"Error ensuring user memories directory: {e}")
+ # Continue anyway - memory system should not block operations
+
+ def _aggregate_agent_memories(self, user_memory: str, project_memory: str, agent_id: str) -> str:
+ """Aggregate user and project memories for an agent.
+
+ WHY: When both user-level and project-level memories exist, they need to be
+ intelligently merged to provide comprehensive context while avoiding duplication
+ and respecting project-specific overrides.
+
+ Strategy:
+ - Parse both memories into sections
+ - Merge sections with project taking precedence
+ - Remove exact duplicates within sections
+ - Preserve unique items from both sources
+
+ Args:
+ user_memory: User-level memory content
+ project_memory: Project-level memory content
+ agent_id: Agent identifier for context
+
+ Returns:
+ str: Aggregated memory content
+ """
+ # Parse memories into sections
+ user_sections = self._parse_memory_sections(user_memory)
+ project_sections = self._parse_memory_sections(project_memory)
+
+ # Start with user sections as base
+ merged_sections = {}
+
+ # Add all user sections first
+ for section_name, items in user_sections.items():
+ merged_sections[section_name] = set(items)
+
+ # Merge project sections (overrides/extends user)
+ for section_name, items in project_sections.items():
+ if section_name in merged_sections:
+ # Merge items - project items take precedence
+ merged_sections[section_name].update(items)
+ else:
+ # New section from project
+ merged_sections[section_name] = set(items)
+
+ # Build aggregated memory content
+ lines = []
+
+ # Add header
+ lines.append(f"# {agent_id.capitalize()} Agent Memory")
+ lines.append("")
+ lines.append("*Aggregated from user-level and project-level memories*")
+ lines.append("")
+ lines.append(f"<!-- Last Updated: {datetime.now().isoformat()} -->")
+ lines.append("")
+
+ # Add sections in a consistent order
+ section_order = [
+ "Project Architecture",
+ "Implementation Guidelines",
+ "Common Mistakes to Avoid",
+ "Current Technical Context",
+ "Coding Patterns Learned",
+ "Effective Strategies",
+ "Integration Points",
+ "Performance Considerations",
+ "Domain-Specific Knowledge",
+ "Recent Learnings"
+ ]
+
+ # First add ordered sections that exist
+ for section_name in section_order:
+ if section_name in merged_sections and merged_sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ # Sort items for consistent output
+ for item in sorted(merged_sections[section_name]):
+ if item.strip(): # Skip empty items
+ lines.append(item)
+ lines.append("")
+
+ # Then add any remaining sections not in the order list
+ remaining_sections = set(merged_sections.keys()) - set(section_order)
+ for section_name in sorted(remaining_sections):
+ if merged_sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in sorted(merged_sections[section_name]):
+ if item.strip():
+ lines.append(item)
+ lines.append("")
+
+ return '\n'.join(lines)
+
+ def _parse_memory_sections(self, memory_content: str) -> Dict[str, List[str]]:
+ """Parse memory content into sections and items.
+
+ Args:
+ memory_content: Raw memory file content
+
+ Returns:
+ Dict mapping section names to lists of items
+ """
+ sections = {}
+ current_section = None
+ current_items = []
+
+ for line in memory_content.split('\n'):
+ # Skip metadata lines
+ if line.startswith('<!-- ') and line.endswith(' -->'):
+ continue
+ # Check for section headers (## Level 2 headers)
+ elif line.startswith('## '):
+ # Save previous section if exists
+ if current_section and current_items:
+ sections[current_section] = current_items
+
+ # Start new section
+ current_section = line[3:].strip() # Remove "## " prefix
+ current_items = []
+ # Collect non-empty lines as items (but not HTML comments)
+ elif line.strip() and current_section and not line.strip().startswith('<!--'):
+ # Keep the full line with its formatting
+ current_items.append(line.strip())
+
+ # Save last section
+ if current_section and current_items:
+ sections[current_section] = current_items
+
+ return sections
+
  # ================================================================================
  # Interface Adapter Methods
  # ================================================================================
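A small worked example of the parse-and-merge behavior implemented above (memory text invented; the helpers are private and called here only for illustration):

    user_md = "# Engineer Agent Memory\n\n## Implementation Guidelines\n- Always use type hints\n"
    project_md = "# Engineer Agent Memory\n\n## Implementation Guidelines\n- Prefer pathlib over os.path\n"

    merged = manager._aggregate_agent_memories(user_md, project_md, "engineer")
    # _parse_memory_sections() reduces each input to
    #   {"Implementation Guidelines": ["- Always use type hints"]}  (and the project equivalent);
    # the set union then yields one "## Implementation Guidelines" section in `merged`
    # containing both bullets, sorted alphabetically.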
@@ -570,7 +1226,7 @@ Standard markdown with structured sections. Agents expect:
  True if save successful
  """
  try:
- memory_path = self.memories_dir / f"{agent_id}_agent.md"
+ memory_path = self.memories_dir / f"{agent_id}_memories.md"

  # Validate size before saving
  is_valid, error_msg = self.validate_memory_size(content)
@@ -613,7 +1269,45 @@ Standard markdown with structured sections. Agents expect:
  Returns:
  Dictionary with memory metrics
  """
- return self.analyzer.get_memory_metrics(agent_id)
+ # Minimal implementation for interface compliance
+ metrics = {
+ "total_memory_kb": 0,
+ "agent_count": 0,
+ "agents": {}
+ }
+
+ if self.memories_dir.exists():
+ if agent_id:
+ # Metrics for specific agent
+ memory_file = self.memories_dir / f"{agent_id}_memories.md"
+ if memory_file.exists():
+ size_kb = memory_file.stat().st_size / 1024
+ metrics["agents"][agent_id] = {
+ "size_kb": round(size_kb, 2),
+ "limit_kb": self._get_agent_limits(agent_id)["max_file_size_kb"],
+ "usage_percent": round((size_kb / self._get_agent_limits(agent_id)["max_file_size_kb"]) * 100, 1)
+ }
+ metrics["total_memory_kb"] = round(size_kb, 2)
+ metrics["agent_count"] = 1
+ else:
+ # Metrics for all agents
+ memory_files = list(self.memories_dir.glob("*_memories.md"))
+ for file_path in memory_files:
+ if file_path.name != "README.md":
+ agent_name = file_path.stem.replace("_memories", "")
+ size_kb = file_path.stat().st_size / 1024
+ limit_kb = self._get_agent_limits(agent_name)["max_file_size_kb"]
+ metrics["agents"][agent_name] = {
+ "size_kb": round(size_kb, 2),
+ "limit_kb": limit_kb,
+ "usage_percent": round((size_kb / limit_kb) * 100, 1)
+ }
+ metrics["total_memory_kb"] += size_kb
+
+ metrics["total_memory_kb"] = round(metrics["total_memory_kb"], 2)
+ metrics["agent_count"] = len(metrics["agents"])
+
+ return metrics


  # Convenience functions for external use
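Finally, both reporting methods now read sizes straight from the *_memories.md files on disk. An illustrative call and the rough result shapes (all numbers invented, including the configured limit; `manager` is a placeholder instance):

    status = manager.get_memory_status()
    # {"system_enabled": True, "auto_learning": False,
    #  "memory_directory": "/work/app/.claude-mpm/memories",
    #  "total_agents": 2, "total_size_kb": 7.4,
    #  "agents": {"engineer": {"file": "engineer_memories.md", "size_kb": 4.1},
    #             "qa": {"file": "qa_memories.md", "size_kb": 3.3}},
    #  "system_health": "healthy"}

    metrics = manager.get_memory_metrics("qa")
    # {"total_memory_kb": 3.3, "agent_count": 1,
    #  "agents": {"qa": {"size_kb": 3.3, "limit_kb": 80, "usage_percent": 4.1}}}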