claude-mpm 3.3.2__py3-none-any.whl → 3.4.0__py3-none-any.whl

This diff shows the content of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (40)
  1. claude_mpm/cli/commands/memory.py +186 -13
  2. claude_mpm/cli/parser.py +13 -1
  3. claude_mpm/constants.py +1 -0
  4. claude_mpm/core/claude_runner.py +61 -0
  5. claude_mpm/core/config.py +1 -1
  6. claude_mpm/core/simple_runner.py +61 -0
  7. claude_mpm/hooks/builtin/mpm_command_hook.py +5 -5
  8. claude_mpm/hooks/claude_hooks/hook_handler.py +211 -4
  9. claude_mpm/hooks/claude_hooks/hook_wrapper.sh +9 -2
  10. claude_mpm/hooks/memory_integration_hook.py +51 -5
  11. claude_mpm/services/__init__.py +23 -5
  12. claude_mpm/services/agent_memory_manager.py +536 -48
  13. claude_mpm/services/memory_builder.py +338 -6
  14. claude_mpm/services/project_analyzer.py +771 -0
  15. claude_mpm/services/socketio_server.py +473 -33
  16. claude_mpm/services/version_control/git_operations.py +26 -0
  17. {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/METADATA +34 -10
  18. {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/RECORD +22 -39
  19. claude_mpm/agents/agent-template.yaml +0 -83
  20. claude_mpm/agents/test_fix_deployment/.claude-pm/config/project.json +0 -6
  21. claude_mpm/cli/README.md +0 -109
  22. claude_mpm/cli_module/refactoring_guide.md +0 -253
  23. claude_mpm/core/agent_registry.py.bak +0 -312
  24. claude_mpm/core/base_service.py.bak +0 -406
  25. claude_mpm/hooks/README.md +0 -97
  26. claude_mpm/orchestration/SUBPROCESS_DESIGN.md +0 -66
  27. claude_mpm/schemas/README_SECURITY.md +0 -92
  28. claude_mpm/schemas/agent_schema.json +0 -395
  29. claude_mpm/schemas/agent_schema_documentation.md +0 -181
  30. claude_mpm/schemas/agent_schema_security_notes.md +0 -165
  31. claude_mpm/schemas/examples/standard_workflow.json +0 -505
  32. claude_mpm/schemas/ticket_workflow_documentation.md +0 -482
  33. claude_mpm/schemas/ticket_workflow_schema.json +0 -590
  34. claude_mpm/services/framework_claude_md_generator/README.md +0 -92
  35. claude_mpm/services/parent_directory_manager/README.md +0 -83
  36. claude_mpm/services/version_control/VERSION +0 -1
  37. {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/WHEEL +0 -0
  38. {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/entry_points.txt +0 -0
  39. {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/licenses/LICENSE +0 -0
  40. {claude_mpm-3.3.2.dist-info → claude_mpm-3.4.0.dist-info}/top_level.txt +0 -0
The hunks below cover claude_mpm/services/memory_builder.py (item 13 above, +338 -6); the other changed files are not reproduced in this excerpt.

@@ -31,6 +31,7 @@ from claude_mpm.core import LoggerMixin
 from claude_mpm.core.config import Config
 from claude_mpm.utils.paths import PathResolver
 from claude_mpm.services.memory_router import MemoryRouter
+from claude_mpm.services.project_analyzer import ProjectAnalyzer


 class MemoryBuilder(LoggerMixin):
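The only import added here is ProjectAnalyzer, backing the new claude_mpm/services/project_analyzer.py (+771 lines) listed above. MemoryBuilder touches only a narrow slice of it; the following is a minimal sketch of that surface inferred from the call sites in this diff. The class name ProjectCharacteristics and the field types are assumptions, not the package's actual definitions.

# Sketch of the ProjectAnalyzer surface MemoryBuilder relies on, inferred
# from call sites in this diff; the real module is far larger (+771 lines).
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional

@dataclass
class ProjectCharacteristics:  # hypothetical name for analyze_project()'s result
    important_configs: List[str] = field(default_factory=list)  # e.g. ["pyproject.toml"]
    entry_points: List[str] = field(default_factory=list)       # e.g. ["src/cli.py"]

class ProjectAnalyzer:
    def __init__(self, config, working_directory: Optional[Path] = None) -> None:
        self.config = config
        self.working_directory = working_directory

    def analyze_project(self) -> ProjectCharacteristics:
        """Scan the working directory and summarize its structure."""
        raise NotImplementedError

    def get_important_files_for_context(self) -> List[str]:
        """Return project-relative paths of files worth feeding to agents."""
        raise NotImplementedError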
@@ -98,17 +99,109 @@ class MemoryBuilder(LoggerMixin):
         ]
     }

-    def __init__(self, config: Optional[Config] = None):
+    def __init__(self, config: Optional[Config] = None, working_directory: Optional[Path] = None):
         """Initialize the memory builder.

         Args:
             config: Optional Config object
+            working_directory: Optional working directory for project-specific analysis
         """
         super().__init__()
         self.config = config or Config()
         self.project_root = PathResolver.get_project_root()
+        self.working_directory = working_directory or self.project_root
         self.memories_dir = self.project_root / ".claude-mpm" / "memories"
         self.router = MemoryRouter(config)
+        self.project_analyzer = ProjectAnalyzer(config, self.working_directory)
+
+    def _get_dynamic_doc_files(self) -> Dict[str, Dict[str, Any]]:
+        """Get documentation files to process based on project analysis.
+
+        WHY: Instead of hardcoded file list, dynamically discover important files
+        based on actual project structure and characteristics.
+
+        Returns:
+            Dict mapping file paths to processing configuration
+        """
+        dynamic_files = {}
+
+        # Start with static important files
+        static_files = self.DOC_FILES.copy()
+
+        # Get project-specific important files
+        try:
+            important_files = self.project_analyzer.get_important_files_for_context()
+            project_characteristics = self.project_analyzer.analyze_project()
+
+            # Add configuration files
+            for config_file in project_characteristics.important_configs:
+                if config_file not in static_files:
+                    file_ext = Path(config_file).suffix.lower()
+
+                    if file_ext in ['.json', '.toml', '.yaml', '.yml']:
+                        dynamic_files[config_file] = {
+                            'priority': 'medium',
+                            'sections': ['configuration', 'setup', 'dependencies'],
+                            'agents': ['engineer', 'pm'],
+                            'file_type': 'config'
+                        }
+
+            # Add project-specific documentation
+            for doc_file in important_files:
+                if doc_file not in static_files and doc_file not in dynamic_files:
+                    file_path = Path(doc_file)
+
+                    # Determine processing config based on file name/path
+                    if 'api' in doc_file.lower() or 'endpoint' in doc_file.lower():
+                        dynamic_files[doc_file] = {
+                            'priority': 'high',
+                            'sections': ['api', 'endpoints', 'integration'],
+                            'agents': ['engineer', 'integration'],
+                            'file_type': 'api_doc'
+                        }
+                    elif 'architecture' in doc_file.lower() or 'design' in doc_file.lower():
+                        dynamic_files[doc_file] = {
+                            'priority': 'high',
+                            'sections': ['architecture', 'design', 'patterns'],
+                            'agents': ['engineer', 'architect'],
+                            'file_type': 'architecture'
+                        }
+                    elif 'test' in doc_file.lower():
+                        dynamic_files[doc_file] = {
+                            'priority': 'medium',
+                            'sections': ['testing', 'quality'],
+                            'agents': ['qa', 'engineer'],
+                            'file_type': 'test_doc'
+                        }
+                    elif file_path.suffix.lower() == '.md':
+                        # Generic markdown file
+                        dynamic_files[doc_file] = {
+                            'priority': 'low',
+                            'sections': ['documentation', 'guidelines'],
+                            'agents': ['pm', 'engineer'],
+                            'file_type': 'markdown'
+                        }
+
+            # Add key source files for pattern analysis (limited selection)
+            if project_characteristics.entry_points:
+                for entry_point in project_characteristics.entry_points[:2]:  # Only first 2
+                    if entry_point not in dynamic_files:
+                        dynamic_files[entry_point] = {
+                            'priority': 'low',
+                            'sections': ['patterns', 'implementation'],
+                            'agents': ['engineer'],
+                            'file_type': 'source',
+                            'extract_patterns_only': True  # Only extract patterns, not full content
+                        }
+
+        except Exception as e:
+            self.logger.warning(f"Error getting dynamic doc files: {e}")
+
+        # Merge static and dynamic files
+        all_files = {**static_files, **dynamic_files}
+
+        self.logger.debug(f"Processing {len(all_files)} documentation files ({len(static_files)} static, {len(dynamic_files)} dynamic)")
+        return all_files

     def build_from_documentation(self, force_rebuild: bool = False) -> Dict[str, Any]:
         """Build agent memories from project documentation.
@@ -134,8 +227,11 @@ class MemoryBuilder(LoggerMixin):
             "errors": []
         }

+        # Get dynamic list of files to process
+        doc_files = self._get_dynamic_doc_files()
+
         # Process each documentation file
-        for doc_path, doc_config in self.DOC_FILES.items():
+        for doc_path, doc_config in doc_files.items():
             file_path = self.project_root / doc_path

             if not file_path.exists():
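One subtlety in the merge that feeds this loop: with all_files = {**static_files, **dynamic_files} in _get_dynamic_doc_files above, a dynamic entry silently overrides a static one under the same key, though the "not in static_files" guards make collisions unlikely in practice. In isolation:

static_files = {"README.md": {"priority": "high"}}
dynamic_files = {
    "README.md": {"priority": "low"},    # would shadow the static entry
    "docs/API.md": {"priority": "high"},
}

merged = {**static_files, **dynamic_files}
assert merged["README.md"]["priority"] == "low"  # right-hand dict wins
assert len(merged) == 2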
@@ -292,8 +388,236 @@ class MemoryBuilder(LoggerMixin):
                 "error": str(e)
             }

+    def _extract_from_config_file(self, content: str, file_path: Path, doc_config: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """Extract memory-worthy information from configuration files.
+
+        WHY: Configuration files contain important setup patterns, dependencies,
+        and architectural decisions that agents should understand.
+
+        Args:
+            content: File content
+            file_path: Path to the file
+            doc_config: Processing configuration
+
+        Returns:
+            List of extracted memory items
+        """
+        extracted_items = []
+        source = str(file_path.relative_to(self.project_root))
+
+        try:
+            file_ext = file_path.suffix.lower()
+
+            if file_ext == '.json':
+                # Parse JSON configuration
+                import json
+                config_data = json.loads(content)
+                items = self._extract_from_json_config(config_data, source)
+                extracted_items.extend(items)
+
+            elif file_ext in ['.toml']:
+                # Parse TOML configuration
+                try:
+                    try:
+                        import tomllib
+                    except ImportError:
+                        import tomli as tomllib
+                    with open(file_path, 'rb') as f:
+                        config_data = tomllib.load(f)
+                    items = self._extract_from_toml_config(config_data, source)
+                    extracted_items.extend(items)
+                except ImportError:
+                    self.logger.warning(f"TOML parsing not available for {source}")
+
+            elif file_ext in ['.yaml', '.yml']:
+                # For YAML, fall back to text-based extraction for now
+                items = self.extract_from_text(content, source)
+                extracted_items.extend(items)
+
+            # Also extract text patterns for comments and documentation
+            text_items = self.extract_from_text(content, source)
+            extracted_items.extend(text_items)
+
+        except Exception as e:
+            self.logger.warning(f"Error parsing config file {source}: {e}")
+            # Fall back to text extraction
+            extracted_items = self.extract_from_text(content, source)
+
+        return extracted_items
+
+    def _extract_from_json_config(self, config_data: dict, source: str) -> List[Dict[str, Any]]:
+        """Extract patterns from JSON configuration."""
+        items = []
+
+        # Extract dependencies information
+        if 'dependencies' in config_data:
+            deps = config_data['dependencies']
+            if isinstance(deps, dict) and deps:
+                dep_names = list(deps.keys())[:5]  # Limit to prevent overwhelming
+                deps_str = ", ".join(dep_names)
+                items.append({
+                    "content": f"Key dependencies: {deps_str}",
+                    "type": "dependency_info",
+                    "source": source,
+                    "target_agent": "engineer",
+                    "section": "Current Technical Context",
+                    "confidence": 0.8
+                })
+
+        # Extract scripts (for package.json)
+        if 'scripts' in config_data:
+            scripts = config_data['scripts']
+            if isinstance(scripts, dict):
+                for script_name, script_cmd in list(scripts.items())[:3]:  # Limit to first 3
+                    items.append({
+                        "content": f"Build script '{script_name}': {script_cmd[:50]}{'...' if len(script_cmd) > 50 else ''}",
+                        "type": "build_pattern",
+                        "source": source,
+                        "target_agent": "engineer",
+                        "section": "Implementation Guidelines",
+                        "confidence": 0.7
+                    })
+
+        return items
+
+    def _extract_from_toml_config(self, config_data: dict, source: str) -> List[Dict[str, Any]]:
+        """Extract patterns from TOML configuration."""
+        items = []
+
+        # Extract project metadata (for pyproject.toml)
+        if 'project' in config_data:
+            project_info = config_data['project']
+            if 'dependencies' in project_info:
+                deps = project_info['dependencies']
+                if deps:
+                    items.append({
+                        "content": f"Python dependencies: {', '.join(deps[:5])}",
+                        "type": "dependency_info",
+                        "source": source,
+                        "target_agent": "engineer",
+                        "section": "Current Technical Context",
+                        "confidence": 0.8
+                    })
+
+        # Extract Rust dependencies (for Cargo.toml)
+        if 'dependencies' in config_data:
+            deps = config_data['dependencies']
+            if isinstance(deps, dict) and deps:
+                dep_names = list(deps.keys())[:5]
+                items.append({
+                    "content": f"Rust dependencies: {', '.join(dep_names)}",
+                    "type": "dependency_info",
+                    "source": source,
+                    "target_agent": "engineer",
+                    "section": "Current Technical Context",
+                    "confidence": 0.8
+                })
+
+        return items
+
+    def _extract_from_source_file(self, content: str, file_path: Path, doc_config: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """Extract patterns from source code files.
+
+        WHY: Source files contain implementation patterns and architectural
+        decisions that agents should be aware of, but we only extract high-level
+        patterns rather than detailed code analysis.
+
+        Args:
+            content: File content
+            file_path: Path to the file
+            doc_config: Processing configuration
+
+        Returns:
+            List of extracted memory items
+        """
+        extracted_items = []
+        source = str(file_path.relative_to(self.project_root))
+
+        # Only extract patterns if specified
+        if not doc_config.get('extract_patterns_only', False):
+            return []
+
+        file_ext = file_path.suffix.lower()
+
+        # Language-specific pattern extraction
+        if file_ext == '.py':
+            items = self._extract_python_patterns(content, source)
+            extracted_items.extend(items)
+        elif file_ext in ['.js', '.ts']:
+            items = self._extract_javascript_patterns(content, source)
+            extracted_items.extend(items)
+
+        return extracted_items[:3]  # Limit to prevent overwhelming
+
+    def _extract_python_patterns(self, content: str, source: str) -> List[Dict[str, Any]]:
+        """Extract high-level patterns from Python source."""
+        items = []
+
+        # Check for common patterns
+        if 'if __name__ == "__main__"' in content:
+            items.append({
+                "content": "Uses if __name__ == '__main__' pattern for script execution",
+                "type": "pattern",
+                "source": source,
+                "target_agent": "engineer",
+                "section": "Coding Patterns Learned",
+                "confidence": 0.8
+            })
+
+        if 'from pathlib import Path' in content:
+            items.append({
+                "content": "Uses pathlib.Path for file operations (recommended pattern)",
+                "type": "pattern",
+                "source": source,
+                "target_agent": "engineer",
+                "section": "Coding Patterns Learned",
+                "confidence": 0.7
+            })
+
+        # Check for class definitions
+        class_matches = re.findall(r'class\s+(\w+)', content)
+        if class_matches:
+            items.append({
+                "content": f"Defines classes: {', '.join(class_matches[:3])}",
+                "type": "architecture",
+                "source": source,
+                "target_agent": "engineer",
+                "section": "Project Architecture",
+                "confidence": 0.6
+            })
+
+        return items
+
+    def _extract_javascript_patterns(self, content: str, source: str) -> List[Dict[str, Any]]:
+        """Extract high-level patterns from JavaScript/TypeScript source."""
+        items = []
+
+        # Check for async patterns
+        if 'async function' in content or 'async ' in content:
+            items.append({
+                "content": "Uses async/await patterns for asynchronous operations",
+                "type": "pattern",
+                "source": source,
+                "target_agent": "engineer",
+                "section": "Coding Patterns Learned",
+                "confidence": 0.8
+            })
+
+        # Check for module patterns
+        if 'export ' in content:
+            items.append({
+                "content": "Uses ES6 module export patterns",
+                "type": "pattern",
+                "source": source,
+                "target_agent": "engineer",
+                "section": "Coding Patterns Learned",
+                "confidence": 0.7
+            })
+
+        return items
+
     def _process_documentation_file(self, file_path: Path, doc_config: Dict[str, Any]) -> Dict[str, Any]:
-        """Process a single documentation file.
+        """Process a single documentation file with enhanced file type support.

         Args:
             file_path: Path to documentation file
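As a concrete illustration of _extract_from_json_config above, a package.json with the following hypothetical contents would yield one dependency item and two build-script items; the snippet mirrors the method's truncation rules (first five dependencies, first three scripts, commands cut at 50 characters). The TOML branch applies the same idea via the stdlib tomllib on Python 3.11+, falling back to the third-party tomli backport on older interpreters.

import json

sample = json.loads('''
{
  "dependencies": {"express": "^4.18.0", "socket.io": "^4.7.0"},
  "scripts": {"build": "tsc -p .", "test": "jest"}
}
''')

deps_str = ", ".join(list(sample["dependencies"].keys())[:5])
print(f"Key dependencies: {deps_str}")  # Key dependencies: express, socket.io

for name, cmd in list(sample["scripts"].items())[:3]:
    print(f"Build script '{name}': {cmd[:50]}{'...' if len(cmd) > 50 else ''}")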
@@ -304,10 +628,18 @@ class MemoryBuilder(LoggerMixin):
        """
        try:
            # Read file content
-            content = file_path.read_text(encoding='utf-8')
+            content = file_path.read_text(encoding='utf-8', errors='ignore')

-            # Extract memory items
-            extracted_items = self.extract_from_text(content, str(file_path.relative_to(self.project_root)))
+            # Handle different file types
+            file_type = doc_config.get('file_type', 'markdown')
+
+            if file_type == 'config':
+                extracted_items = self._extract_from_config_file(content, file_path, doc_config)
+            elif file_type == 'source':
+                extracted_items = self._extract_from_source_file(content, file_path, doc_config)
+            else:
+                # Default markdown/text processing
+                extracted_items = self.extract_from_text(content, str(file_path.relative_to(self.project_root)))

            result = {
                "success": True,