claude-mpm 3.3.0__py3-none-any.whl → 3.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/agents/templates/data_engineer.json +1 -1
- claude_mpm/agents/templates/documentation.json +1 -1
- claude_mpm/agents/templates/engineer.json +1 -1
- claude_mpm/agents/templates/ops.json +1 -1
- claude_mpm/agents/templates/pm.json +1 -1
- claude_mpm/agents/templates/qa.json +1 -1
- claude_mpm/agents/templates/research.json +1 -1
- claude_mpm/agents/templates/security.json +1 -1
- claude_mpm/agents/templates/test_integration.json +112 -0
- claude_mpm/agents/templates/version_control.json +1 -1
- claude_mpm/cli/commands/memory.py +575 -25
- claude_mpm/cli/commands/run.py +115 -14
- claude_mpm/cli/parser.py +76 -0
- claude_mpm/constants.py +5 -0
- claude_mpm/core/claude_runner.py +13 -11
- claude_mpm/core/session_manager.py +46 -0
- claude_mpm/core/simple_runner.py +13 -11
- claude_mpm/hooks/claude_hooks/hook_handler.py +2 -26
- claude_mpm/services/agent_memory_manager.py +264 -23
- claude_mpm/services/memory_builder.py +491 -0
- claude_mpm/services/memory_optimizer.py +619 -0
- claude_mpm/services/memory_router.py +445 -0
- claude_mpm/services/socketio_server.py +184 -20
- claude_mpm-3.3.2.dist-info/METADATA +159 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.3.2.dist-info}/RECORD +29 -28
- claude_mpm/agents/templates/test-integration-agent.md +0 -34
- claude_mpm/core/websocket_handler.py +0 -233
- claude_mpm/services/websocket_server.py +0 -376
- claude_mpm-3.3.0.dist-info/METADATA +0 -432
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.3.2.dist-info}/WHEEL +0 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.3.2.dist-info}/entry_points.txt +0 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.3.2.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-3.3.0.dist-info → claude_mpm-3.3.2.dist-info}/top_level.txt +0 -0
|
@@ -27,7 +27,7 @@ import logging
|
|
|
27
27
|
from claude_mpm.core import LoggerMixin
|
|
28
28
|
from claude_mpm.core.config import Config
|
|
29
29
|
from claude_mpm.utils.paths import PathResolver
|
|
30
|
-
|
|
30
|
+
# Socket.IO notifications are optional - we'll skip them if server is not available
|
|
31
31
|
|
|
32
32
|
|
|
33
33
|
class AgentMemoryManager(LoggerMixin):
|
|
@@ -164,15 +164,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
164
164
|
try:
|
|
165
165
|
content = memory_file.read_text(encoding='utf-8')
|
|
166
166
|
|
|
167
|
-
#
|
|
168
|
-
try:
|
|
169
|
-
ws_server = get_websocket_server()
|
|
170
|
-
file_size = len(content.encode('utf-8'))
|
|
171
|
-
# Count sections by looking for lines starting with ##
|
|
172
|
-
sections_count = sum(1 for line in content.split('\n') if line.startswith('## '))
|
|
173
|
-
ws_server.memory_loaded(agent_id, file_size, sections_count)
|
|
174
|
-
except Exception as ws_error:
|
|
175
|
-
self.logger.debug(f"WebSocket notification failed: {ws_error}")
|
|
167
|
+
# Socket.IO notifications removed - memory manager works independently
|
|
176
168
|
|
|
177
169
|
return self._validate_and_repair(content, agent_id)
|
|
178
170
|
except Exception as e:
|
|
@@ -241,13 +233,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
241
233
|
section = section_mapping.get(learning_type, 'Recent Learnings')
|
|
242
234
|
success = self.update_agent_memory(agent_id, section, content)
|
|
243
235
|
|
|
244
|
-
#
|
|
245
|
-
if success:
|
|
246
|
-
try:
|
|
247
|
-
ws_server = get_websocket_server()
|
|
248
|
-
ws_server.memory_updated(agent_id, learning_type, content, section)
|
|
249
|
-
except Exception as ws_error:
|
|
250
|
-
self.logger.debug(f"WebSocket notification failed: {ws_error}")
|
|
236
|
+
# Socket.IO notifications removed - memory manager works independently
|
|
251
237
|
|
|
252
238
|
return success
|
|
253
239
|
|
|
@@ -323,12 +309,7 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
323
309
|
memory_file.write_text(template, encoding='utf-8')
|
|
324
310
|
self.logger.info(f"Created default memory file for {agent_id}")
|
|
325
311
|
|
|
326
|
-
#
|
|
327
|
-
try:
|
|
328
|
-
ws_server = get_websocket_server()
|
|
329
|
-
ws_server.memory_created(agent_id, "default")
|
|
330
|
-
except Exception as ws_error:
|
|
331
|
-
self.logger.debug(f"WebSocket notification failed: {ws_error}")
|
|
312
|
+
# Socket.IO notifications removed - memory manager works independently
|
|
332
313
|
except Exception as e:
|
|
333
314
|
self.logger.error(f"Error saving default memory for {agent_id}: {e}")
|
|
334
315
|
|
|
@@ -609,6 +590,266 @@ class AgentMemoryManager(LoggerMixin):
|
|
|
609
590
|
self.logger.error(f"Error saving memory for {agent_id}: {e}")
|
|
610
591
|
return False
|
|
611
592
|
|
|
593
|
+
def optimize_memory(self, agent_id: Optional[str] = None) -> Dict[str, Any]:
    """Optimize agent memory by consolidating/cleaning memories.

    WHY: Over time, memory files accumulate redundant or outdated information.
    Delegates to the MemoryOptimizer service, which cleans up and consolidates
    memories while preserving important information.

    Args:
        agent_id: Optional specific agent ID. If falsy, optimizes all agents.

    Returns:
        Dict with optimization results and statistics, or
        {"success": False, "error": ...} on failure.
    """
    try:
        # Imported lazily so the memory manager has no hard dependency
        # on the optimizer service at module load time.
        from claude_mpm.services.memory_optimizer import MemoryOptimizer

        optimizer = MemoryOptimizer(self.config)

        if not agent_id:
            outcome = optimizer.optimize_all_memories()
            self.logger.info("Optimized all agent memories")
        else:
            outcome = optimizer.optimize_agent_memory(agent_id)
            self.logger.info(f"Optimized memory for agent: {agent_id}")

        return outcome
    except Exception as e:
        self.logger.error(f"Error optimizing memory: {e}")
        return {"success": False, "error": str(e)}
|
|
621
|
+
|
|
622
|
+
def build_memories_from_docs(self, force_rebuild: bool = False) -> Dict[str, Any]:
    """Build agent memories from project documentation.

    WHY: Project documentation contains valuable knowledge that should be
    extracted and assigned to appropriate agents for better context awareness.

    Args:
        force_rebuild: If True, rebuilds even if docs haven't changed

    Returns:
        Dict with build results and statistics, or
        {"success": False, "error": ...} on failure.
    """
    try:
        # Lazy import keeps the builder service optional until actually used.
        from claude_mpm.services.memory_builder import MemoryBuilder

        build_result = MemoryBuilder(self.config).build_from_documentation(force_rebuild)
        self.logger.info("Built memories from documentation")
        return build_result
    except Exception as e:
        self.logger.error(f"Error building memories from docs: {e}")
        return {"success": False, "error": str(e)}
|
|
645
|
+
|
|
646
|
+
def route_memory_command(self, content: str, context: Optional[Dict] = None) -> Dict[str, Any]:
    """Route memory command to appropriate agent via PM delegation.

    WHY: Memory commands like "remember this for next time" need to be analyzed
    to determine which agent should store the information. This method provides
    routing logic for PM agent delegation.

    Args:
        content: The content to be remembered
        context: Optional context for routing decisions

    Returns:
        Dict containing routing decision and reasoning, or
        {"success": False, "error": ...} on failure.
    """
    try:
        # Lazy import: routing is delegated to the router service.
        from claude_mpm.services.memory_router import MemoryRouter

        decision = MemoryRouter(self.config).analyze_and_route(content, context)
        self.logger.debug(f"Routed memory command: {decision['target_agent']}")
        return decision
    except Exception as e:
        self.logger.error(f"Error routing memory command: {e}")
        return {"success": False, "error": str(e)}
|
|
671
|
+
|
|
672
|
+
def get_memory_status(self) -> Dict[str, Any]:
    """Get comprehensive memory system status.

    WHY: Provides detailed overview of memory system health, file sizes,
    optimization opportunities, and agent-specific statistics for monitoring
    and maintenance purposes.

    Returns:
        Dict containing comprehensive memory system status; on unexpected
        failure returns {"success": False, "error": ...}.
    """
    try:
        status = {
            "system_enabled": self.memory_enabled,
            "auto_learning": self.auto_learning,
            "memory_directory": str(self.memories_dir),
            "total_agents": 0,
            "total_size_kb": 0,
            "agents": {},
            "optimization_opportunities": [],
            "system_health": "healthy"
        }

        if not self.memories_dir.exists():
            status["system_health"] = "no_memory_dir"
            return status

        memory_files = list(self.memories_dir.glob("*_agent.md"))
        status["total_agents"] = len(memory_files)

        total_size = 0
        for file_path in memory_files:
            stat = file_path.stat()
            size_kb = stat.st_size / 1024
            total_size += stat.st_size

            agent_id = file_path.stem.replace('_agent', '')
            limits = self._get_agent_limits(agent_id)

            # Analyze file content
            try:
                # FIX: read with explicit utf-8 encoding, consistent with the
                # other memory-file reads in this module; avoids
                # locale-dependent decode errors on non-ASCII memories.
                content = file_path.read_text(encoding='utf-8')
                section_count = len([line for line in content.splitlines() if line.startswith('## ')])
                learning_count = len([line for line in content.splitlines() if line.strip().startswith('- ')])

                agent_status = {
                    "size_kb": round(size_kb, 2),
                    "size_limit_kb": limits['max_file_size_kb'],
                    # Utilization is capped at 100% for display purposes.
                    "size_utilization": min(100, round((size_kb / limits['max_file_size_kb']) * 100, 1)),
                    "sections": section_count,
                    "items": learning_count,
                    "last_modified": datetime.fromtimestamp(stat.st_mtime).isoformat(),
                    "auto_learning": self._get_agent_auto_learning(agent_id)
                }

                # Flag optimization opportunities at 80% of the configured limits.
                if size_kb > limits['max_file_size_kb'] * 0.8:
                    status["optimization_opportunities"].append(f"{agent_id}: High memory usage ({size_kb:.1f}KB)")

                if section_count > limits['max_sections'] * 0.8:
                    status["optimization_opportunities"].append(f"{agent_id}: Many sections ({section_count})")

                status["agents"][agent_id] = agent_status

            except Exception as e:
                # Per-agent failures are recorded, not fatal to the report.
                status["agents"][agent_id] = {"error": str(e)}

        status["total_size_kb"] = round(total_size / 1024, 2)

        # Determine overall system health
        if len(status["optimization_opportunities"]) > 3:
            status["system_health"] = "needs_optimization"
        elif status["total_size_kb"] > 100:  # More than 100KB total
            status["system_health"] = "high_usage"

        return status

    except Exception as e:
        self.logger.error(f"Error getting memory status: {e}")
        return {"success": False, "error": str(e)}
|
|
751
|
+
|
|
752
|
+
def cross_reference_memories(self, query: Optional[str] = None) -> Dict[str, Any]:
    """Find common patterns and cross-references across agent memories.

    WHY: Different agents may have learned similar or related information.
    Cross-referencing helps identify knowledge gaps, redundancies, and
    opportunities for knowledge sharing between agents.

    Args:
        query: Optional query to filter cross-references

    Returns:
        Dict containing cross-reference analysis results; on unexpected
        failure returns {"success": False, "error": ...}.
    """
    try:
        cross_refs = {
            "common_patterns": [],
            "knowledge_gaps": [],
            "redundancies": [],
            "agent_correlations": {},
            # query_matches is only populated (and only present as a list)
            # when a query was supplied.
            "query_matches": [] if query else None
        }

        if not self.memories_dir.exists():
            return cross_refs

        memory_files = list(self.memories_dir.glob("*_agent.md"))
        agent_memories = {}

        # Load all agent memories
        for file_path in memory_files:
            agent_id = file_path.stem.replace('_agent', '')
            try:
                # FIX: explicit utf-8 encoding, matching the other
                # memory-file reads in this module.
                content = file_path.read_text(encoding='utf-8')
                agent_memories[agent_id] = content
            except Exception as e:
                self.logger.warning(f"Error reading memory for {agent_id}: {e}")
                continue

        # Collect bullet-point learning lines per agent.
        all_lines = []
        agent_lines = {}

        for agent_id, content in agent_memories.items():
            lines = [line.strip() for line in content.splitlines()
                     if line.strip().startswith('- ')]
            agent_lines[agent_id] = lines
            all_lines.extend([(line, agent_id) for line in lines])

        # Look for similar content (basic similarity check):
        # map normalized line text -> list of agent ids that contain it.
        line_counts = {}
        for line, agent_id in all_lines:
            # Normalize line for comparison
            normalized = line.lower().replace('- ', '').strip()
            if len(normalized) > 20:  # Only check substantial lines
                if normalized not in line_counts:
                    line_counts[normalized] = []
                line_counts[normalized].append(agent_id)

        # Find patterns appearing in multiple agents
        for line, agents in line_counts.items():
            if len(set(agents)) > 1:  # Appears in multiple agents
                cross_refs["common_patterns"].append({
                    "pattern": line[:100] + "..." if len(line) > 100 else line,
                    "agents": list(set(agents)),
                    "count": len(agents)
                })

        # Query-specific matches
        if query:
            query_lower = query.lower()
            for agent_id, content in agent_memories.items():
                matches = []
                for line in content.splitlines():
                    if query_lower in line.lower():
                        matches.append(line.strip())

                if matches:
                    cross_refs["query_matches"].append({
                        "agent": agent_id,
                        "matches": matches[:5]  # Limit to first 5 matches
                    })

        # Calculate agent correlations (agents with similar knowledge domains).
        # FIX: the loop variable iterates line_counts.values(), which are
        # lists of agent ids — renamed from the misleading 'line'.
        for agent_a in agent_memories:
            for agent_b in agent_memories:
                if agent_a < agent_b:  # Avoid duplicates
                    common_count = len([
                        agents for agents in line_counts.values()
                        if agent_a in agents and agent_b in agents
                    ])

                    if common_count > 0:
                        correlation_key = f"{agent_a}+{agent_b}"
                        cross_refs["agent_correlations"][correlation_key] = common_count

        return cross_refs

    except Exception as e:
        self.logger.error(f"Error cross-referencing memories: {e}")
        return {"success": False, "error": str(e)}
|
|
852
|
+
|
|
612
853
|
def _ensure_memories_directory(self):
|
|
613
854
|
"""Ensure memories directory exists with README.
|
|
614
855
|
|