claude-mpm 4.0.19__py3-none-any.whl → 4.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/__main__.py +4 -0
  3. claude_mpm/agents/BASE_AGENT_TEMPLATE.md +38 -2
  4. claude_mpm/agents/OUTPUT_STYLE.md +84 -0
  5. claude_mpm/agents/templates/qa.json +1 -1
  6. claude_mpm/cli/__init__.py +23 -1
  7. claude_mpm/cli/__main__.py +4 -0
  8. claude_mpm/cli/commands/memory.py +32 -5
  9. claude_mpm/cli/commands/run.py +33 -6
  10. claude_mpm/cli/parsers/base_parser.py +5 -0
  11. claude_mpm/cli/parsers/run_parser.py +5 -0
  12. claude_mpm/cli/utils.py +17 -4
  13. claude_mpm/core/base_service.py +1 -1
  14. claude_mpm/core/config.py +70 -5
  15. claude_mpm/core/framework_loader.py +342 -31
  16. claude_mpm/core/interactive_session.py +55 -1
  17. claude_mpm/core/oneshot_session.py +7 -1
  18. claude_mpm/core/output_style_manager.py +468 -0
  19. claude_mpm/core/unified_paths.py +190 -21
  20. claude_mpm/hooks/claude_hooks/hook_handler.py +91 -16
  21. claude_mpm/hooks/claude_hooks/hook_wrapper.sh +3 -0
  22. claude_mpm/init.py +1 -0
  23. claude_mpm/services/agents/deployment/agent_deployment.py +151 -7
  24. claude_mpm/services/agents/deployment/agent_template_builder.py +37 -1
  25. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +441 -0
  26. claude_mpm/services/agents/memory/__init__.py +0 -2
  27. claude_mpm/services/agents/memory/agent_memory_manager.py +737 -43
  28. claude_mpm/services/agents/memory/content_manager.py +144 -14
  29. claude_mpm/services/agents/memory/template_generator.py +7 -354
  30. claude_mpm/services/mcp_gateway/server/stdio_server.py +61 -169
  31. claude_mpm/services/subprocess_launcher_service.py +5 -0
  32. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/METADATA +1 -1
  33. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/RECORD +37 -35
  34. claude_mpm/services/agents/memory/analyzer.py +0 -430
  35. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/WHEEL +0 -0
  36. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/entry_points.txt +0 -0
  37. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/licenses/LICENSE +0 -0
  38. {claude_mpm-4.0.19.dist-info → claude_mpm-4.0.20.dist-info}/top_level.txt +0 -0
@@ -1,430 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- Memory Analyzer
4
- ==============
5
-
6
- Provides memory analysis, status reporting, and cross-reference capabilities.
7
-
8
- This module provides:
9
- - Memory system status and health monitoring
10
- - Cross-reference analysis between agent memories
11
- - Memory metrics and usage statistics
12
- - Raw memory data access for external tools
13
- """
14
-
15
- import logging
16
- from datetime import datetime
17
- from pathlib import Path
18
- from typing import Any, Dict, List, Optional
19
-
20
-
21
class MemoryAnalyzer:
    """Analyzes memory system status and provides reporting capabilities.

    WHY: The memory system needs monitoring, analysis, and reporting so it
    can be maintained, optimized, and debugged over time.
    """

    def __init__(
        self,
        memories_dir: Path,
        memory_limits: Dict[str, Any],
        agent_overrides: Dict[str, Any],
        get_agent_limits_func,
        get_agent_auto_learning_func,
        content_manager,
    ):
        """Initialize the memory analyzer.

        Args:
            memories_dir: Path to the directory holding ``*_agent.md`` files.
            memory_limits: Default memory limits applied to every agent.
            agent_overrides: Per-agent overrides of the default limits.
            get_agent_limits_func: Callable returning limits for an agent id.
            get_agent_auto_learning_func: Callable returning the agent's
                auto-learning setting.
            content_manager: MemoryContentManager used to parse memory files.
        """
        # Plain attribute assignments; no I/O happens at construction time.
        self.memories_dir = memories_dir
        self.memory_limits = memory_limits
        self.agent_overrides = agent_overrides
        self.content_manager = content_manager
        # Injected callables keep this class decoupled from config lookup.
        self._get_agent_limits = get_agent_limits_func
        self._get_agent_auto_learning = get_agent_auto_learning_func
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
55
def get_memory_status(self) -> Dict[str, Any]:
    """Report memory-system health plus per-agent file statistics.

    WHY: Gives a detailed overview of memory health, file sizes, and
    optimization opportunities for monitoring and maintenance.

    Returns:
        Dict with system health, totals, and a per-agent breakdown; on
        unexpected failure, ``{"success": False, "error": ...}``.
    """
    try:
        status: Dict[str, Any] = {
            "system_enabled": True,  # analyzer exists => system assumed on
            "auto_learning": True,  # default value
            "memory_directory": str(self.memories_dir),
            "total_agents": 0,
            "total_size_kb": 0,
            "agents": {},
            "optimization_opportunities": [],
            "system_health": "healthy",
        }

        if not self.memories_dir.exists():
            status["system_health"] = "no_memory_dir"
            return status

        memory_files = list(self.memories_dir.glob("*_agent.md"))
        status["total_agents"] = len(memory_files)

        total_bytes = 0
        for memory_file in memory_files:
            file_stat = memory_file.stat()
            kb = file_stat.st_size / 1024
            total_bytes += file_stat.st_size

            agent_id = memory_file.stem.replace("_agent", "")
            limits = self._get_agent_limits(agent_id)

            try:
                lines = memory_file.read_text().splitlines()
                # "## " headers mark sections; "- " bullets mark learned items.
                sections = sum(1 for ln in lines if ln.startswith("## "))
                items = sum(1 for ln in lines if ln.strip().startswith("- "))

                status["agents"][agent_id] = {
                    "size_kb": round(kb, 2),
                    "size_limit_kb": limits["max_file_size_kb"],
                    "size_utilization": min(
                        100, round((kb / limits["max_file_size_kb"]) * 100, 1)
                    ),
                    "sections": sections,
                    "items": items,
                    "last_modified": datetime.fromtimestamp(
                        file_stat.st_mtime
                    ).isoformat(),
                    "auto_learning": self._get_agent_auto_learning(agent_id),
                }

                # Flag files approaching (80% of) their configured limits.
                if kb > limits["max_file_size_kb"] * 0.8:
                    status["optimization_opportunities"].append(
                        f"{agent_id}: High memory usage ({kb:.1f}KB)"
                    )
                if sections > limits["max_sections"] * 0.8:
                    status["optimization_opportunities"].append(
                        f"{agent_id}: Many sections ({sections})"
                    )
            except Exception as exc:  # keep scanning the remaining agents
                status["agents"][agent_id] = {"error": str(exc)}

        status["total_size_kb"] = round(total_bytes / 1024, 2)

        # Roll individual findings up into an overall health verdict.
        if len(status["optimization_opportunities"]) > 3:
            status["system_health"] = "needs_optimization"
        elif status["total_size_kb"] > 100:  # more than 100KB total
            status["system_health"] = "high_usage"

        return status

    except Exception as exc:
        self.logger.error(f"Error getting memory status: {exc}")
        return {"success": False, "error": str(exc)}
155
def cross_reference_memories(self, query: Optional[str] = None) -> Dict[str, Any]:
    """Find common patterns and cross-references across agent memories.

    WHY: Different agents may have learned similar information;
    cross-referencing surfaces shared knowledge and correlations
    between agents.

    Args:
        query: Optional substring used to collect per-agent matches.

    Returns:
        Dict with common patterns, agent correlations, and (when a query
        is given) per-agent query matches; on unexpected failure,
        ``{"success": False, "error": ...}``.
    """
    try:
        analysis: Dict[str, Any] = {
            "common_patterns": [],
            "knowledge_gaps": [],
            "redundancies": [],
            "agent_correlations": {},
            "query_matches": [] if query else None,
        }

        if not self.memories_dir.exists():
            return analysis

        # Load every readable agent memory; unreadable files are skipped
        # with a warning rather than aborting the analysis.
        contents: Dict[str, str] = {}
        for path in self.memories_dir.glob("*_agent.md"):
            name = path.stem.replace("_agent", "")
            try:
                contents[name] = path.read_text()
            except Exception as exc:
                self.logger.warning(f"Error reading memory for {name}: {exc}")

        # Bucket substantial bullet lines by normalized text; each bucket
        # records every agent that contributed that text.
        buckets: Dict[str, List[str]] = {}
        for name, text in contents.items():
            for raw in text.splitlines():
                bullet = raw.strip()
                if not bullet.startswith("- "):
                    continue
                normalized = bullet.lower().replace("- ", "").strip()
                if len(normalized) > 20:  # only check substantial lines
                    buckets.setdefault(normalized, []).append(name)

        # A pattern is "common" when more than one distinct agent has it.
        for normalized, owners in buckets.items():
            distinct = set(owners)
            if len(distinct) > 1:
                shown = (
                    normalized[:100] + "..." if len(normalized) > 100 else normalized
                )
                analysis["common_patterns"].append(
                    {
                        "pattern": shown,
                        "agents": list(distinct),
                        "count": len(owners),
                    }
                )

        # Query-specific matches, capped at the first 5 lines per agent.
        if query:
            needle = query.lower()
            for name, text in contents.items():
                hits = [
                    ln.strip() for ln in text.splitlines() if needle in ln.lower()
                ]
                if hits:
                    analysis["query_matches"].append(
                        {"agent": name, "matches": hits[:5]}
                    )

        # Correlate agent pairs by how many normalized lines they share.
        for first in contents:
            for second in contents:
                if first < second:  # each unordered pair only once
                    shared = sum(
                        1
                        for owners in buckets.values()
                        if first in owners and second in owners
                    )
                    if shared > 0:
                        analysis["agent_correlations"][f"{first}+{second}"] = shared

        return analysis

    except Exception as exc:
        self.logger.error(f"Error cross-referencing memories: {exc}")
        return {"success": False, "error": str(exc)}
268
def get_all_memories_raw(self) -> Dict[str, Any]:
    """Return every agent memory, parsed and raw, in one structure.

    WHY: Gives external tools, scripts, or APIs programmatic access to the
    complete memory state of the system.

    Returns:
        Dict with totals and a per-agent entry (metadata, parsed sections,
        and raw markdown); on unexpected failure,
        ``{"success": False, "error": ..., "timestamp": ...}``.
    """
    try:
        snapshot: Dict[str, Any] = {
            "success": True,
            "timestamp": datetime.now().isoformat(),
            "total_agents": 0,
            "total_size_bytes": 0,
            "agents": {},
        }

        if not self.memories_dir.exists():
            snapshot["message"] = "No memory directory found"
            return snapshot

        files = list(self.memories_dir.glob("*_agent.md"))
        snapshot["total_agents"] = len(files)

        # Deterministic ordering so repeated calls produce stable output.
        for path in sorted(files):
            agent_id = path.stem.replace("_agent", "")
            try:
                info = path.stat()
                nbytes = info.st_size
                snapshot["total_size_bytes"] += nbytes

                text = path.read_text(encoding="utf-8")

                # Metadata common to both the loaded and empty-content cases.
                entry: Dict[str, Any] = {
                    "agent_id": agent_id,
                    "file_path": str(path),
                    "file_size_bytes": nbytes,
                    "file_size_kb": round(nbytes / 1024, 2),
                    "last_modified": datetime.fromtimestamp(
                        info.st_mtime
                    ).isoformat(),
                }

                if text:
                    sections = self.content_manager.parse_memory_content_to_dict(
                        text
                    )
                    entry.update(
                        {
                            "sections_count": len(sections),
                            "total_items": sum(
                                len(items) for items in sections.values()
                            ),
                            "auto_learning": self._get_agent_auto_learning(agent_id),
                            "size_limits": self._get_agent_limits(agent_id),
                            "sections": sections,
                            "raw_content": text,
                        }
                    )
                else:
                    entry.update(
                        {
                            "error": "Could not load memory content",
                            "sections": {},
                            "raw_content": "",
                        }
                    )

                snapshot["agents"][agent_id] = entry

            except Exception as exc:
                self.logger.error(
                    f"Error processing memory for agent {agent_id}: {exc}"
                )
                snapshot["agents"][agent_id] = {
                    "agent_id": agent_id,
                    "file_path": str(path),
                    "error": str(exc),
                    "sections": {},
                    "raw_content": "",
                }

        snapshot["total_size_kb"] = round(snapshot["total_size_bytes"] / 1024, 2)
        return snapshot

    except Exception as exc:
        self.logger.error(f"Error getting all memories raw: {exc}")
        return {
            "success": False,
            "error": str(exc),
            "timestamp": datetime.now().isoformat(),
        }
375
def get_memory_metrics(self, agent_id: Optional[str] = None) -> Dict[str, Any]:
    """Get memory usage metrics.

    Args:
        agent_id: Specific agent ID, or None to aggregate all agents.

    Returns:
        Dict with ``total_memories``, ``total_size_kb``, per-agent
        ``agent_metrics``, and a copy of the configured ``limits``.
        On failure, returns whatever was collected before the error.
    """
    import re

    # Hoisted: compile the section-header pattern once instead of
    # re-running re.findall's pattern lookup per file.
    section_pattern = re.compile(r"^##\s+(.+)$", re.MULTILINE)

    def measure(path: Path):
        # Shared per-file computation for both the single-agent and
        # all-agents paths (previously duplicated verbatim).
        content = path.read_text(encoding="utf-8")
        size_kb = len(content.encode("utf-8")) / 1024
        return size_kb, len(section_pattern.findall(content))

    metrics: Dict[str, Any] = {
        "total_memories": 0,
        "total_size_kb": 0,
        "agent_metrics": {},
        "limits": self.memory_limits.copy(),
    }

    try:
        if agent_id:
            # Metrics for one specific agent; absent file leaves zeros.
            memory_path = self.memories_dir / f"{agent_id}_agent.md"
            if memory_path.exists():
                size_kb, sections = measure(memory_path)
                metrics["agent_metrics"][agent_id] = {
                    "size_kb": round(size_kb, 2),
                    "sections": sections,
                    "exists": True,
                }
                metrics["total_memories"] = 1
                metrics["total_size_kb"] = round(size_kb, 2)
        else:
            # Aggregate metrics across every agent memory file.
            for memory_file in self.memories_dir.glob("*_agent.md"):
                agent_name = memory_file.stem.replace("_agent", "")
                size_kb, sections = measure(memory_file)
                metrics["agent_metrics"][agent_name] = {
                    "size_kb": round(size_kb, 2),
                    "sections": sections,
                    "exists": True,
                }
                metrics["total_memories"] += 1
                metrics["total_size_kb"] += size_kb

            metrics["total_size_kb"] = round(metrics["total_size_kb"], 2)

    except Exception as e:
        self.logger.error(f"Failed to get memory metrics: {e}")

    return metrics