claude-mpm 4.0.19__py3-none-any.whl → 4.0.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. claude_mpm/BUILD_NUMBER +1 -1
  2. claude_mpm/VERSION +1 -1
  3. claude_mpm/__main__.py +4 -0
  4. claude_mpm/agents/BASE_AGENT_TEMPLATE.md +38 -2
  5. claude_mpm/agents/INSTRUCTIONS.md +74 -0
  6. claude_mpm/agents/OUTPUT_STYLE.md +84 -0
  7. claude_mpm/agents/WORKFLOW.md +308 -4
  8. claude_mpm/agents/agents_metadata.py +52 -0
  9. claude_mpm/agents/base_agent_loader.py +75 -19
  10. claude_mpm/agents/templates/__init__.py +4 -0
  11. claude_mpm/agents/templates/api_qa.json +206 -0
  12. claude_mpm/agents/templates/qa.json +1 -1
  13. claude_mpm/agents/templates/research.json +24 -16
  14. claude_mpm/agents/templates/ticketing.json +18 -5
  15. claude_mpm/agents/templates/vercel_ops_agent.json +281 -0
  16. claude_mpm/agents/templates/vercel_ops_instructions.md +582 -0
  17. claude_mpm/cli/__init__.py +23 -1
  18. claude_mpm/cli/__main__.py +4 -0
  19. claude_mpm/cli/commands/mcp_command_router.py +87 -1
  20. claude_mpm/cli/commands/mcp_install_commands.py +207 -26
  21. claude_mpm/cli/commands/memory.py +32 -5
  22. claude_mpm/cli/commands/run.py +33 -6
  23. claude_mpm/cli/parsers/base_parser.py +5 -0
  24. claude_mpm/cli/parsers/mcp_parser.py +23 -0
  25. claude_mpm/cli/parsers/run_parser.py +5 -0
  26. claude_mpm/cli/utils.py +17 -4
  27. claude_mpm/constants.py +1 -0
  28. claude_mpm/core/base_service.py +8 -2
  29. claude_mpm/core/config.py +122 -32
  30. claude_mpm/core/framework_loader.py +385 -34
  31. claude_mpm/core/interactive_session.py +77 -12
  32. claude_mpm/core/oneshot_session.py +7 -1
  33. claude_mpm/core/output_style_manager.py +468 -0
  34. claude_mpm/core/unified_paths.py +190 -21
  35. claude_mpm/hooks/claude_hooks/hook_handler.py +91 -16
  36. claude_mpm/hooks/claude_hooks/hook_wrapper.sh +3 -0
  37. claude_mpm/init.py +1 -0
  38. claude_mpm/scripts/socketio_daemon.py +67 -7
  39. claude_mpm/scripts/socketio_daemon_hardened.py +897 -0
  40. claude_mpm/services/agents/deployment/agent_deployment.py +216 -10
  41. claude_mpm/services/agents/deployment/agent_template_builder.py +37 -1
  42. claude_mpm/services/agents/deployment/async_agent_deployment.py +65 -1
  43. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +441 -0
  44. claude_mpm/services/agents/memory/__init__.py +0 -2
  45. claude_mpm/services/agents/memory/agent_memory_manager.py +577 -44
  46. claude_mpm/services/agents/memory/content_manager.py +144 -14
  47. claude_mpm/services/agents/memory/template_generator.py +7 -354
  48. claude_mpm/services/mcp_gateway/server/stdio_server.py +61 -169
  49. claude_mpm/services/memory_hook_service.py +62 -4
  50. claude_mpm/services/runner_configuration_service.py +5 -9
  51. claude_mpm/services/socketio/server/broadcaster.py +32 -1
  52. claude_mpm/services/socketio/server/core.py +4 -0
  53. claude_mpm/services/socketio/server/main.py +23 -4
  54. claude_mpm/services/subprocess_launcher_service.py +5 -0
  55. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/METADATA +1 -1
  56. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/RECORD +60 -54
  57. claude_mpm/services/agents/memory/analyzer.py +0 -430
  58. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/WHEEL +0 -0
  59. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/entry_points.txt +0 -0
  60. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/licenses/LICENSE +0 -0
  61. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.22.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
- from pathlib import Path
-
  #!/usr/bin/env python3
+
+ from pathlib import Path
  """
  Agent Memory Manager Service
  ===========================
@@ -17,7 +17,7 @@ This service provides:
  - Directory initialization with README

  Memory files are stored in .claude-mpm/memories/ directory
- following the naming convention: {agent_id}_agent.md
+ following the naming convention: {agent_id}_memories.md
  """

  import logging
@@ -28,9 +28,6 @@ from typing import Any, Dict, List, Optional, Tuple
  from claude_mpm.core.config import Config
  from claude_mpm.core.interfaces import MemoryServiceInterface
  from claude_mpm.core.unified_paths import get_path_manager
- from claude_mpm.services.project.analyzer import ProjectAnalyzer
-
- from .analyzer import MemoryAnalyzer
  from .content_manager import MemoryContentManager
  from .template_generator import MemoryTemplateGenerator

@@ -85,28 +82,24 @@ class AgentMemoryManager(MemoryServiceInterface):
  self.project_root = get_path_manager().project_root
  # Use current working directory by default, not project root
  self.working_directory = working_directory or Path(os.getcwd())
- self.memories_dir = self.working_directory / ".claude-mpm" / "memories"
+
+ # Use only project memory directory
+ self.project_memories_dir = self.working_directory / ".claude-mpm" / "memories"
+
+ # Primary memories_dir points to project
+ self.memories_dir = self.project_memories_dir
+
+ # Ensure project directory exists
  self._ensure_memories_directory()

  # Initialize memory limits from configuration
  self._init_memory_limits()

- # Initialize project analyzer for context-aware memory creation
- self.project_analyzer = ProjectAnalyzer(self.config, self.working_directory)
-
  # Initialize component services
  self.template_generator = MemoryTemplateGenerator(
- self.config, self.working_directory, self.project_analyzer
+ self.config, self.working_directory
  )
  self.content_manager = MemoryContentManager(self.memory_limits)
- self.analyzer = MemoryAnalyzer(
- self.memories_dir,
- self.memory_limits,
- self.agent_overrides,
- self._get_agent_limits,
- self._get_agent_auto_learning,
- self.content_manager,
- )

  @property
  def logger(self):
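Net effect of the constructor changes: every memory path now resolves beneath the working directory's .claude-mpm/memories/ folder, and the ProjectAnalyzer/MemoryAnalyzer collaborators are dropped. A minimal sketch of the resulting layout (the 'engineer' agent id is only an example):

    from pathlib import Path

    working_directory = Path.cwd()  # AgentMemoryManager's default when no directory is supplied
    project_memories_dir = working_directory / ".claude-mpm" / "memories"

    # 4.0.22 naming convention; 4.0.19 wrote engineer_agent.md instead
    memory_file = project_memories_dir / "engineer_memories.md"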
@@ -202,31 +195,82 @@ class AgentMemoryManager(MemoryServiceInterface):
  # Fall back to global setting
  return self.auto_learning

+ def _get_memory_file_with_migration(self, directory: Path, agent_id: str) -> Path:
+ """Get memory file path, migrating from old naming if needed.
+
+ WHY: Supports backward compatibility by automatically migrating from
+ the old {agent_id}_agent.md and {agent_id}.md formats to the new {agent_id}_memories.md format.
+
+ Args:
+ directory: Directory containing memory files
+ agent_id: The agent identifier
+
+ Returns:
+ Path: Path to the memory file (may not exist)
+ """
+ new_file = directory / f"{agent_id}_memories.md"
+ # Support migration from both old formats
+ old_file_agent = directory / f"{agent_id}_agent.md"
+ old_file_simple = directory / f"{agent_id}.md"
+
+ # Migrate from old formats if needed
+ if not new_file.exists():
+ # Try migrating from {agent_id}_agent.md first
+ if old_file_agent.exists():
+ try:
+ content = old_file_agent.read_text(encoding="utf-8")
+ new_file.write_text(content, encoding="utf-8")
+
+ # Delete old file for all agents
+ old_file_agent.unlink()
+ self.logger.info(f"Migrated memory file from {old_file_agent.name} to {new_file.name}")
+ except Exception as e:
+ self.logger.error(f"Failed to migrate memory file for {agent_id}: {e}")
+ return old_file_agent
+ # Try migrating from {agent_id}.md
+ elif old_file_simple.exists():
+ try:
+ content = old_file_simple.read_text(encoding="utf-8")
+ new_file.write_text(content, encoding="utf-8")
+
+ # Delete old file for all agents
+ old_file_simple.unlink()
+ self.logger.info(f"Migrated memory file from {old_file_simple.name} to {new_file.name}")
+ except Exception as e:
+ self.logger.error(f"Failed to migrate memory file for {agent_id}: {e}")
+ return old_file_simple
+
+ return new_file
+
  def load_agent_memory(self, agent_id: str) -> str:
- """Load agent memory file content.
+ """Load agent memory file content from project directory.

  WHY: Agents need to read their accumulated knowledge before starting tasks
- to apply learned patterns and avoid repeated mistakes.
+ to apply learned patterns and avoid repeated mistakes. All memories are
+ now stored at the project level for consistency.

  Args:
- agent_id: The agent identifier (e.g., 'research', 'engineer')
+ agent_id: The agent identifier (e.g., 'PM', 'research', 'engineer')

  Returns:
  str: The memory file content, creating default if doesn't exist
  """
- memory_file = self.memories_dir / f"{agent_id}_agent.md"
-
- if not memory_file.exists():
- self.logger.info(f"Creating default memory for agent: {agent_id}")
- return self._create_default_memory(agent_id)
-
- try:
- content = memory_file.read_text(encoding="utf-8")
- return self.content_manager.validate_and_repair(content, agent_id)
- except Exception as e:
- self.logger.error(f"Error reading memory file for {agent_id}: {e}")
- # Return default memory on error - never fail
- return self._create_default_memory(agent_id)
+ # All agents use project directory
+ project_memory_file = self._get_memory_file_with_migration(self.project_memories_dir, agent_id)
+
+ # Load project-level memory if exists
+ if project_memory_file.exists():
+ try:
+ project_memory = project_memory_file.read_text(encoding="utf-8")
+ project_memory = self.content_manager.validate_and_repair(project_memory, agent_id)
+ self.logger.debug(f"Loaded project-level memory for {agent_id}")
+ return project_memory
+ except Exception as e:
+ self.logger.error(f"Error reading project memory file for {agent_id}: {e}")
+
+ # Memory doesn't exist - create default in project directory
+ self.logger.info(f"Creating default memory for agent: {agent_id}")
+ return self._create_default_memory(agent_id)

  def update_agent_memory(self, agent_id: str, section: str, new_item: str) -> bool:
  """Add new learning item to specified section.
@@ -317,9 +361,10 @@ class AgentMemoryManager(MemoryServiceInterface):
  # Delegate to template generator
  template = self.template_generator.create_default_memory(agent_id, limits)

- # Save default file
+ # Save default file to project directory
  try:
- memory_file = self.memories_dir / f"{agent_id}_agent.md"
+ target_dir = self.memories_dir
+ memory_file = target_dir / f"{agent_id}_memories.md"
  memory_file.write_text(template, encoding="utf-8")
  self.logger.info(f"Created project-specific memory file for {agent_id}")

@@ -342,9 +387,16 @@ class AgentMemoryManager(MemoryServiceInterface):
  bool: True if save succeeded
  """
  try:
- memory_file = self.memories_dir / f"{agent_id}_agent.md"
+ # All agents save to project directory
+ target_dir = self.project_memories_dir
+
+ # Ensure directory exists
+ target_dir.mkdir(parents=True, exist_ok=True)
+
+ memory_file = target_dir / f"{agent_id}_memories.md"
  memory_file.write_text(content, encoding="utf-8")
- self.logger.debug(f"Saved memory for {agent_id}")
+
+ self.logger.info(f"Saved {agent_id} memory to project directory: {memory_file}")
  return True
  except Exception as e:
  self.logger.error(f"Error saving memory for {agent_id}: {e}")
@@ -436,6 +488,375 @@ class AgentMemoryManager(MemoryServiceInterface):
  self.logger.error(f"Error routing memory command: {e}")
  return {"success": False, "error": str(e)}

+ def extract_and_update_memory(self, agent_id: str, response: str) -> bool:
+ """Extract memory updates from agent response and update memory file.
+
+ WHY: Agents provide memory updates in their responses that need to be
+ extracted and persisted. This method looks for "remember" field in JSON
+ responses and merges new learnings with existing memory.
+
+ Args:
+ agent_id: The agent identifier
+ response: The agent's response text (may contain JSON)
+
+ Returns:
+ bool: True if memory was updated, False otherwise
+ """
+ try:
+ import json
+ import re
+
+ # Log that we're processing memory for this agent
+ is_pm = agent_id.upper() == "PM"
+ self.logger.debug(f"Extracting memory for {agent_id} (is_pm={is_pm})")
+
+ # Look for JSON block in the response
+ # Pattern matches ```json ... ``` blocks
+ json_pattern = r'```json\s*(.*?)\s*```'
+ json_matches = re.findall(json_pattern, response, re.DOTALL)
+
+ if not json_matches:
+ # Also try to find inline JSON objects
+ json_pattern2 = r'\{[^{}]*"(?:remember|Remember)"[^{}]*\}'
+ json_matches = re.findall(json_pattern2, response, re.DOTALL)
+
+ for json_str in json_matches:
+ try:
+ data = json.loads(json_str)
+
+ # Check for memory updates in "remember" field
+ memory_items = None
+
+ # Check both "remember" and "Remember" fields
+ if "remember" in data:
+ memory_items = data["remember"]
+ elif "Remember" in data:
+ memory_items = data["Remember"]
+
+ # Process memory items if found and not null
+ if memory_items is not None and memory_items != "null":
+ # Skip if explicitly null or empty list
+ if isinstance(memory_items, list) and len(memory_items) > 0:
+ # Filter out empty strings and None values
+ valid_items = []
+ for item in memory_items:
+ if item and isinstance(item, str) and item.strip():
+ valid_items.append(item.strip())
+
+ # Only proceed if we have valid items
+ if valid_items:
+ self.logger.info(f"Found {len(valid_items)} memory items for {agent_id}: {valid_items[:2]}...")
+ success = self._add_learnings_to_memory(agent_id, valid_items)
+ if success:
+ self.logger.info(f"Successfully saved {len(valid_items)} memories for {agent_id} to project directory")
+ return True
+ else:
+ self.logger.error(f"Failed to save memories for {agent_id}")
+
+ except json.JSONDecodeError as je:
+ # Not valid JSON, continue to next match
+ self.logger.debug(f"JSON decode error for {agent_id}: {je}")
+ continue
+
+ self.logger.debug(f"No memory items found in response for {agent_id}")
+ return False
+
+ except Exception as e:
+ self.logger.error(f"Error extracting memory from response for {agent_id}: {e}")
+ return False
+
+ def _add_learnings_to_memory(self, agent_id: str, learnings: List[str]) -> bool:
+ """Add new learnings to existing agent memory.
+
+ WHY: Instead of replacing all memory, we want to intelligently merge new
+ learnings with existing knowledge, avoiding duplicates and maintaining
+ the most relevant information. PM memories are always saved to user dir.
+
+ Args:
+ agent_id: The agent identifier
+ learnings: List of new learning strings to add
+
+ Returns:
+ bool: True if memory was successfully updated
+ """
+ try:
+ # Load existing memory
+ current_memory = self.load_agent_memory(agent_id)
+
+ # Parse existing memory into sections
+ sections = self._parse_memory_sections(current_memory)
+
+ # Clean sections - remove template placeholder text
+ sections = self._clean_template_placeholders(sections)
+
+ # Determine which section to add learnings to based on content
+ for learning in learnings:
+ if not learning or not isinstance(learning, str):
+ continue
+
+ learning = learning.strip()
+ if not learning:
+ continue
+
+ # Categorize the learning based on keywords
+ section = self._categorize_learning(learning)
+
+ # Add to appropriate section if not duplicate
+ if section not in sections:
+ sections[section] = []
+
+ # Check for duplicates (case-insensitive) - FIXED LOGIC
+ normalized_learning = learning.lower()
+ # Strip bullet points from existing items for comparison
+ existing_normalized = [item.lstrip('- ').strip().lower() for item in sections[section]]
+
+ if normalized_learning not in existing_normalized:
+ # Add bullet point if not present
+ if not learning.startswith("-"):
+ learning = f"- {learning}"
+ sections[section].append(learning)
+ self.logger.info(f"Added new memory for {agent_id}: {learning[:50]}...")
+ else:
+ self.logger.debug(f"Skipping duplicate memory for {agent_id}: {learning}")
+
+ # Rebuild memory content
+ new_content = self._build_memory_content(agent_id, sections)
+
+ # Validate and save
+ agent_limits = self._get_agent_limits(agent_id)
+ if self.content_manager.exceeds_limits(new_content, agent_limits):
+ self.logger.debug(f"Memory for {agent_id} exceeds limits, truncating")
+ new_content = self.content_manager.truncate_to_limits(new_content, agent_limits)
+
+ # All memories go to project directory
+ return self._save_memory_file(agent_id, new_content)
+
+ except Exception as e:
+ self.logger.error(f"Error adding learnings to memory for {agent_id}: {e}")
+ return False
+
+ def _clean_template_placeholders(self, sections: Dict[str, List[str]]) -> Dict[str, List[str]]:
+ """Remove template placeholder text from sections.
+
+ Args:
+ sections: Dict mapping section names to lists of items
+
+ Returns:
+ Dict with placeholder text removed
+ """
+ # Template placeholder patterns to remove
+ placeholders = [
+ "Analyze project structure to understand architecture patterns",
+ "Observe codebase patterns and conventions during tasks",
+ "Extract implementation guidelines from project documentation",
+ "Learn from errors encountered during project work",
+ "Project analysis pending - gather context during tasks",
+ "claude-mpm: Software project requiring analysis"
+ ]
+
+ cleaned = {}
+ for section_name, items in sections.items():
+ cleaned_items = []
+ for item in items:
+ # Remove bullet point for comparison
+ item_text = item.lstrip("- ").strip()
+ # Keep item if it's not a placeholder
+ if item_text and item_text not in placeholders:
+ cleaned_items.append(item)
+
+ # Only include section if it has real content
+ if cleaned_items:
+ cleaned[section_name] = cleaned_items
+
+ return cleaned
+
+ def _categorize_learning(self, learning: str) -> str:
+ """Categorize a learning item into appropriate section.
+
+ Args:
+ learning: The learning string to categorize
+
+ Returns:
+ str: The section name for this learning
+ """
+ learning_lower = learning.lower()
+
+ # Check for keywords to categorize with improved patterns
+ # Order matters - more specific patterns should come first
+
+ # Architecture keywords
+ if any(word in learning_lower for word in ["architecture", "structure", "design", "module", "component", "microservices", "service-oriented"]):
+ return "Project Architecture"
+
+ # Integration keywords (check before patterns to avoid "use" conflict)
+ elif any(word in learning_lower for word in ["integration", "interface", "api", "connection", "database", "pooling", "via"]):
+ return "Integration Points"
+
+ # Mistake keywords (check before patterns to avoid conflicts)
+ elif any(word in learning_lower for word in ["mistake", "error", "avoid", "don't", "never", "not"]):
+ return "Common Mistakes to Avoid"
+
+ # Context keywords (check before patterns to avoid "working", "version" conflicts)
+ elif any(word in learning_lower for word in ["context", "current", "currently", "working", "version", "release", "candidate"]):
+ return "Current Technical Context"
+
+ # Guideline keywords (check before patterns to avoid "must", "should" conflicts)
+ elif any(word in learning_lower for word in ["guideline", "rule", "standard", "practice", "docstring", "documentation", "must", "should", "include", "comprehensive"]):
+ return "Implementation Guidelines"
+
+ # Pattern keywords (including dependency injection, conventions)
+ elif any(word in learning_lower for word in ["pattern", "convention", "style", "format", "dependency injection", "instantiation", "use", "implement"]):
+ return "Coding Patterns Learned"
+
+ # Strategy keywords
+ elif any(word in learning_lower for word in ["strategy", "approach", "method", "technique", "effective"]):
+ return "Effective Strategies"
+
+ # Performance keywords
+ elif any(word in learning_lower for word in ["performance", "optimization", "speed", "efficiency"]):
+ return "Performance Considerations"
+
+ # Domain keywords
+ elif any(word in learning_lower for word in ["domain", "business", "specific"]):
+ return "Domain-Specific Knowledge"
+
+ else:
+ return "Recent Learnings"
+
+ def _build_memory_content(self, agent_id: str, sections: Dict[str, List[str]]) -> str:
+ """Build memory content from sections.
+
+ Args:
+ agent_id: The agent identifier
+ sections: Dict mapping section names to lists of items
+
+ Returns:
+ str: The formatted memory content
+ """
+ lines = []
+
+ # Add header
+ lines.append(f"# {agent_id.capitalize()} Agent Memory")
+ lines.append("")
+ lines.append(f"<!-- Last Updated: {datetime.now().isoformat()} -->")
+ lines.append("")
+
+ # Add sections in consistent order
+ section_order = [
+ "Project Architecture",
+ "Implementation Guidelines",
+ "Common Mistakes to Avoid",
+ "Current Technical Context",
+ "Coding Patterns Learned",
+ "Effective Strategies",
+ "Integration Points",
+ "Performance Considerations",
+ "Domain-Specific Knowledge",
+ "Recent Learnings"
+ ]
+
+ for section_name in section_order:
+ if section_name in sections and sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in sections[section_name]:
+ if item.strip():
+ lines.append(item)
+ lines.append("")
+
+ # Add any remaining sections
+ remaining = set(sections.keys()) - set(section_order)
+ for section_name in sorted(remaining):
+ if sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in sections[section_name]:
+ if item.strip():
+ lines.append(item)
+ lines.append("")
+
+ return '\n'.join(lines)
+
+ def replace_agent_memory(self, agent_id: str, memory_sections: Dict[str, List[str]]) -> bool:
+ """Replace agent's memory with new content organized by sections.
+
+ WHY: When agents provide memory updates, they replace the existing memory
+ rather than appending to it. This ensures memories stay current and relevant.
+
+ Args:
+ agent_id: The agent identifier
+ memory_sections: Dict mapping section names to lists of memory items
+
+ Returns:
+ bool: True if memory was successfully replaced
+ """
+ try:
+ # Build new memory content
+ lines = []
+
+ # Add header
+ lines.append(f"# {agent_id.capitalize()} Agent Memory")
+ lines.append("")
+ lines.append(f"<!-- Last Updated: {datetime.now().isoformat()} -->")
+ lines.append("")
+
+ # Add sections in a consistent order
+ section_order = [
+ "Project Architecture",
+ "Implementation Guidelines",
+ "Common Mistakes to Avoid",
+ "Current Technical Context",
+ "Coding Patterns Learned",
+ "Effective Strategies",
+ "Integration Points",
+ "Performance Considerations",
+ "Domain-Specific Knowledge",
+ "Recent Learnings"
+ ]
+
+ # First add ordered sections that exist in memory_sections
+ for section_name in section_order:
+ if section_name in memory_sections and memory_sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in memory_sections[section_name]:
+ if item.strip(): # Skip empty items
+ # Add bullet point if not already present
+ if not item.strip().startswith("-"):
+ lines.append(f"- {item.strip()}")
+ else:
+ lines.append(item.strip())
+ lines.append("")
+
+ # Then add any remaining sections not in the order list
+ remaining_sections = set(memory_sections.keys()) - set(section_order)
+ for section_name in sorted(remaining_sections):
+ if memory_sections[section_name]:
+ lines.append(f"## {section_name}")
+ lines.append("")
+ for item in memory_sections[section_name]:
+ if item.strip():
+ if not item.strip().startswith("-"):
+ lines.append(f"- {item.strip()}")
+ else:
+ lines.append(item.strip())
+ lines.append("")
+
+ new_content = '\n'.join(lines)
+
+ # Validate and save
+ agent_limits = self._get_agent_limits(agent_id)
+ if self.content_manager.exceeds_limits(new_content, agent_limits):
+ self.logger.debug(f"Memory for {agent_id} exceeds limits, truncating")
+ new_content = self.content_manager.truncate_to_limits(new_content, agent_limits)
+
+ # Save the new memory
+ return self._save_memory_file(agent_id, new_content)
+
+ except Exception as e:
+ self.logger.error(f"Error replacing memory for {agent_id}: {e}")
+ return False
+
  def get_memory_status(self) -> Dict[str, Any]:
  """Get comprehensive memory system status.

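To see what the new extraction path consumes, here is a self-contained sketch using the same regex as extract_and_update_memory(); the response text and learnings are invented, but the fenced json block and the "remember" key match the implementation above:

    import json
    import re

    agent_response = '''
    Task complete. Key findings are summarized above.

    ```json
    {
      "remember": [
        "Use dependency injection when instantiating services",
        "Memory files live in .claude-mpm/memories/ as {agent_id}_memories.md"
      ]
    }
    ```
    '''

    # Same extraction the new method performs before merging into {agent_id}_memories.md
    block = re.findall(r'```json\s*(.*?)\s*```', agent_response, re.DOTALL)[0]
    items = json.loads(block)["remember"]
    # -> two learning strings, which extract_and_update_memory("engineer", agent_response)
    #    would categorize and append to engineer_memories.md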
@@ -446,7 +867,32 @@ class AgentMemoryManager(MemoryServiceInterface):
  Returns:
  Dict containing comprehensive memory system status
  """
- return self.analyzer.get_memory_status()
+ # Simplified status implementation without analyzer
+ status = {
+ "system_enabled": self.memory_enabled,
+ "auto_learning": self.auto_learning,
+ "memory_directory": str(self.memories_dir),
+ "total_agents": 0,
+ "total_size_kb": 0,
+ "agents": {},
+ "system_health": "healthy"
+ }
+
+ if self.memories_dir.exists():
+ memory_files = list(self.memories_dir.glob("*_memories.md"))
+ status["total_agents"] = len(memory_files)
+
+ for file_path in memory_files:
+ if file_path.name != "README.md":
+ size_kb = file_path.stat().st_size / 1024
+ status["total_size_kb"] += size_kb
+ agent_id = file_path.stem.replace("_memories", "")
+ status["agents"][agent_id] = {
+ "file": file_path.name,
+ "size_kb": round(size_kb, 2)
+ }
+
+ return status

  def cross_reference_memories(self, query: Optional[str] = None) -> Dict[str, Any]:
  """Find common patterns and cross-references across agent memories.
@@ -461,7 +907,12 @@ class AgentMemoryManager(MemoryServiceInterface):
  Returns:
  Dict containing cross-reference analysis results
  """
- return self.analyzer.cross_reference_memories(query)
+ # Deprecated - return informative message
+ return {
+ "status": "deprecated",
+ "message": "Cross-reference analysis has been deprecated in favor of simplified memory management",
+ "suggestion": "Use get_memory_status() for memory overview"
+ }

  def get_all_memories_raw(self) -> Dict[str, Any]:
  """Get all agent memories in structured JSON format.
@@ -473,7 +924,12 @@ class AgentMemoryManager(MemoryServiceInterface):
  Returns:
  Dict containing structured memory data for all agents
  """
- return self.analyzer.get_all_memories_raw()
+ # Deprecated - return informative message
+ return {
+ "status": "deprecated",
+ "message": "Raw memory access has been deprecated in favor of simplified memory management",
+ "suggestion": "Use load_agent_memory() for specific agent memories"
+ }

  def _ensure_memories_directory(self):
  """Ensure memories directory exists with README.
@@ -531,6 +987,45 @@ Standard markdown with structured sections. Agents expect:
  self.logger.error(f"Error ensuring memories directory: {e}")
  # Continue anyway - memory system should not block operations

+
+
+ def _parse_memory_sections(self, memory_content: str) -> Dict[str, List[str]]:
+ """Parse memory content into sections and items.
+
+ Args:
+ memory_content: Raw memory file content
+
+ Returns:
+ Dict mapping section names to lists of items
+ """
+ sections = {}
+ current_section = None
+ current_items = []
+
+ for line in memory_content.split('\n'):
+ # Skip metadata lines
+ if line.startswith('<!-- ') and line.endswith(' -->'):
+ continue
+ # Check for section headers (## Level 2 headers)
+ elif line.startswith('## '):
+ # Save previous section if exists
+ if current_section and current_items:
+ sections[current_section] = current_items
+
+ # Start new section
+ current_section = line[3:].strip() # Remove "## " prefix
+ current_items = []
+ # Collect non-empty lines as items (but not HTML comments)
+ elif line.strip() and current_section and not line.strip().startswith('<!--'):
+ # Keep the full line with its formatting
+ current_items.append(line.strip())
+
+ # Save last section
+ if current_section and current_items:
+ sections[current_section] = current_items
+
+ return sections
+
  # ================================================================================
  # Interface Adapter Methods
  # ================================================================================
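_parse_memory_sections() is the inverse of _build_memory_content(): HTML-comment metadata is skipped, '## ' headings become section names, and non-empty lines under a heading become items. A small sketch with invented content:

    from textwrap import dedent

    memory_content = dedent("""\
        # Engineer Agent Memory

        <!-- Last Updated: 2025-01-01T00:00:00 -->

        ## Implementation Guidelines

        - Include comprehensive docstrings for public services

        ## Integration Points

        - Database access goes via the connection pooling service
        """)

    # _parse_memory_sections(memory_content) yields approximately:
    # {
    #     "Implementation Guidelines": ["- Include comprehensive docstrings for public services"],
    #     "Integration Points": ["- Database access goes via the connection pooling service"],
    # }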
@@ -570,7 +1065,7 @@ Standard markdown with structured sections. Agents expect:
  True if save successful
  """
  try:
- memory_path = self.memories_dir / f"{agent_id}_agent.md"
+ memory_path = self.memories_dir / f"{agent_id}_memories.md"

  # Validate size before saving
  is_valid, error_msg = self.validate_memory_size(content)
@@ -613,7 +1108,45 @@ Standard markdown with structured sections. Agents expect:
  Returns:
  Dictionary with memory metrics
  """
- return self.analyzer.get_memory_metrics(agent_id)
+ # Minimal implementation for interface compliance
+ metrics = {
+ "total_memory_kb": 0,
+ "agent_count": 0,
+ "agents": {}
+ }
+
+ if self.memories_dir.exists():
+ if agent_id:
+ # Metrics for specific agent
+ memory_file = self.memories_dir / f"{agent_id}_memories.md"
+ if memory_file.exists():
+ size_kb = memory_file.stat().st_size / 1024
+ metrics["agents"][agent_id] = {
+ "size_kb": round(size_kb, 2),
+ "limit_kb": self._get_agent_limits(agent_id)["max_file_size_kb"],
+ "usage_percent": round((size_kb / self._get_agent_limits(agent_id)["max_file_size_kb"]) * 100, 1)
+ }
+ metrics["total_memory_kb"] = round(size_kb, 2)
+ metrics["agent_count"] = 1
+ else:
+ # Metrics for all agents
+ memory_files = list(self.memories_dir.glob("*_memories.md"))
+ for file_path in memory_files:
+ if file_path.name != "README.md":
+ agent_name = file_path.stem.replace("_memories", "")
+ size_kb = file_path.stat().st_size / 1024
+ limit_kb = self._get_agent_limits(agent_name)["max_file_size_kb"]
+ metrics["agents"][agent_name] = {
+ "size_kb": round(size_kb, 2),
+ "limit_kb": limit_kb,
+ "usage_percent": round((size_kb / limit_kb) * 100, 1)
+ }
+ metrics["total_memory_kb"] += size_kb
+
+ metrics["total_memory_kb"] = round(metrics["total_memory_kb"], 2)
+ metrics["agent_count"] = len(metrics["agents"])
+
+ return metrics


  # Convenience functions for external use
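The metrics adapter likewise reads the *_memories.md files directly; for a single agent the returned dict looks roughly like this (sizes and the 8 KB limit are illustrative, the real limit comes from _get_agent_limits):

    metrics = {
        "total_memory_kb": 4.12,
        "agent_count": 1,
        "agents": {
            "engineer": {"size_kb": 4.12, "limit_kb": 8, "usage_percent": 51.5},
        },
    }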