claude-mpm 4.1.3__py3-none-any.whl → 4.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/BASE_AGENT_TEMPLATE.md +16 -19
  3. claude_mpm/agents/MEMORY.md +21 -49
  4. claude_mpm/agents/templates/OPTIMIZATION_REPORT.md +156 -0
  5. claude_mpm/agents/templates/api_qa.json +36 -116
  6. claude_mpm/agents/templates/backup/data_engineer_agent_20250726_234551.json +42 -9
  7. claude_mpm/agents/templates/backup/documentation_agent_20250726_234551.json +29 -6
  8. claude_mpm/agents/templates/backup/engineer_agent_20250726_234551.json +34 -6
  9. claude_mpm/agents/templates/backup/ops_agent_20250726_234551.json +41 -9
  10. claude_mpm/agents/templates/backup/qa_agent_20250726_234551.json +30 -8
  11. claude_mpm/agents/templates/backup/research_agent_2025011_234551.json +2 -2
  12. claude_mpm/agents/templates/backup/research_agent_20250726_234551.json +29 -6
  13. claude_mpm/agents/templates/backup/research_memory_efficient.json +2 -2
  14. claude_mpm/agents/templates/backup/security_agent_20250726_234551.json +41 -9
  15. claude_mpm/agents/templates/backup/version_control_agent_20250726_234551.json +23 -7
  16. claude_mpm/agents/templates/code_analyzer.json +18 -36
  17. claude_mpm/agents/templates/data_engineer.json +43 -14
  18. claude_mpm/agents/templates/documentation.json +55 -74
  19. claude_mpm/agents/templates/engineer.json +56 -61
  20. claude_mpm/agents/templates/imagemagick.json +7 -2
  21. claude_mpm/agents/templates/memory_manager.json +1 -1
  22. claude_mpm/agents/templates/ops.json +36 -4
  23. claude_mpm/agents/templates/project_organizer.json +23 -71
  24. claude_mpm/agents/templates/qa.json +34 -2
  25. claude_mpm/agents/templates/refactoring_engineer.json +9 -5
  26. claude_mpm/agents/templates/research.json +36 -4
  27. claude_mpm/agents/templates/security.json +29 -2
  28. claude_mpm/agents/templates/ticketing.json +3 -3
  29. claude_mpm/agents/templates/vercel_ops_agent.json +2 -2
  30. claude_mpm/agents/templates/version_control.json +28 -2
  31. claude_mpm/agents/templates/web_qa.json +38 -151
  32. claude_mpm/agents/templates/web_ui.json +2 -2
  33. claude_mpm/cli/commands/agent_manager.py +221 -1
  34. claude_mpm/cli/commands/tickets.py +365 -784
  35. claude_mpm/cli/parsers/agent_manager_parser.py +34 -0
  36. claude_mpm/core/framework_loader.py +91 -0
  37. claude_mpm/core/log_manager.py +49 -1
  38. claude_mpm/core/output_style_manager.py +24 -0
  39. claude_mpm/core/unified_agent_registry.py +46 -15
  40. claude_mpm/services/agents/deployment/agent_discovery_service.py +12 -3
  41. claude_mpm/services/agents/deployment/agent_lifecycle_manager.py +172 -233
  42. claude_mpm/services/agents/deployment/agent_lifecycle_manager_refactored.py +575 -0
  43. claude_mpm/services/agents/deployment/agent_operation_service.py +573 -0
  44. claude_mpm/services/agents/deployment/agent_record_service.py +419 -0
  45. claude_mpm/services/agents/deployment/agent_state_service.py +381 -0
  46. claude_mpm/services/agents/deployment/multi_source_deployment_service.py +4 -2
  47. claude_mpm/services/infrastructure/__init__.py +31 -5
  48. claude_mpm/services/infrastructure/monitoring/__init__.py +43 -0
  49. claude_mpm/services/infrastructure/monitoring/aggregator.py +437 -0
  50. claude_mpm/services/infrastructure/monitoring/base.py +130 -0
  51. claude_mpm/services/infrastructure/monitoring/legacy.py +203 -0
  52. claude_mpm/services/infrastructure/monitoring/network.py +218 -0
  53. claude_mpm/services/infrastructure/monitoring/process.py +342 -0
  54. claude_mpm/services/infrastructure/monitoring/resources.py +243 -0
  55. claude_mpm/services/infrastructure/monitoring/service.py +367 -0
  56. claude_mpm/services/infrastructure/monitoring.py +67 -1030
  57. claude_mpm/services/memory/router.py +116 -10
  58. claude_mpm/services/project/analyzer.py +13 -4
  59. claude_mpm/services/project/analyzer_refactored.py +450 -0
  60. claude_mpm/services/project/analyzer_v2.py +566 -0
  61. claude_mpm/services/project/architecture_analyzer.py +461 -0
  62. claude_mpm/services/project/dependency_analyzer.py +462 -0
  63. claude_mpm/services/project/language_analyzer.py +265 -0
  64. claude_mpm/services/project/metrics_collector.py +410 -0
  65. claude_mpm/services/ticket_manager.py +5 -1
  66. claude_mpm/services/ticket_services/__init__.py +26 -0
  67. claude_mpm/services/ticket_services/crud_service.py +328 -0
  68. claude_mpm/services/ticket_services/formatter_service.py +290 -0
  69. claude_mpm/services/ticket_services/search_service.py +324 -0
  70. claude_mpm/services/ticket_services/validation_service.py +303 -0
  71. claude_mpm/services/ticket_services/workflow_service.py +244 -0
  72. {claude_mpm-4.1.3.dist-info → claude_mpm-4.1.5.dist-info}/METADATA +1 -1
  73. {claude_mpm-4.1.3.dist-info → claude_mpm-4.1.5.dist-info}/RECORD +77 -52
  74. {claude_mpm-4.1.3.dist-info → claude_mpm-4.1.5.dist-info}/WHEEL +0 -0
  75. {claude_mpm-4.1.3.dist-info → claude_mpm-4.1.5.dist-info}/entry_points.txt +0 -0
  76. {claude_mpm-4.1.3.dist-info → claude_mpm-4.1.5.dist-info}/licenses/LICENSE +0 -0
  77. {claude_mpm-4.1.3.dist-info → claude_mpm-4.1.5.dist-info}/top_level.txt +0 -0
@@ -25,6 +25,7 @@ from datetime import datetime
25
25
  from typing import Any, Dict, List, Optional, Tuple
26
26
 
27
27
  from claude_mpm.core.config import Config
28
+ from claude_mpm.core.framework_loader import FrameworkLoader
28
29
  from claude_mpm.core.mixins import LoggerMixin
29
30
 
30
31
 
@@ -467,6 +468,84 @@ class MemoryRouter(LoggerMixin):
467
468
  """
468
469
  super().__init__()
469
470
  self.config = config or Config()
471
+ self._dynamic_patterns_loaded = False
472
+ self._dynamic_patterns = {}
473
+
474
+ def _load_dynamic_patterns(self) -> None:
475
+ """Load memory routing patterns dynamically from agent templates.
476
+
477
+ WHY: Allows agents to define their own memory routing patterns
478
+ in their template files, making the system more flexible and
479
+ maintainable.
480
+ """
481
+ if self._dynamic_patterns_loaded:
482
+ return
483
+
484
+ try:
485
+ # Initialize framework loader to access agent templates
486
+ framework_loader = FrameworkLoader()
487
+
488
+ # Try to load patterns from deployed agents
489
+ from pathlib import Path
490
+
491
+ # Check both project and user agent directories
492
+ agent_dirs = [
493
+ Path(".claude/agents"), # Project agents
494
+ Path.home() / ".claude-mpm/agents", # User agents
495
+ ]
496
+
497
+ for agent_dir in agent_dirs:
498
+ if not agent_dir.exists():
499
+ continue
500
+
501
+ # Look for deployed agent files
502
+ for agent_file in agent_dir.glob("*.md"):
503
+ agent_name = agent_file.stem
504
+
505
+ # Try to load memory routing from template
506
+ memory_routing = (
507
+ framework_loader._load_memory_routing_from_template(agent_name)
508
+ )
509
+
510
+ if memory_routing:
511
+ # Convert agent name to pattern key format
512
+ # e.g., "research-agent" -> "research"
513
+ pattern_key = (
514
+ agent_name.replace("-agent", "")
515
+ .replace("_agent", "")
516
+ .replace("-", "_")
517
+ )
518
+
519
+ # Build pattern structure from memory routing
520
+ pattern_data = {
521
+ "keywords": memory_routing.get("keywords", []),
522
+ "sections": memory_routing.get("categories", []),
523
+ }
524
+
525
+ # Merge with existing patterns or add new
526
+ if pattern_key in self.AGENT_PATTERNS:
527
+ # Merge keywords, keeping unique values
528
+ existing_keywords = set(
529
+ self.AGENT_PATTERNS[pattern_key]["keywords"]
530
+ )
531
+ new_keywords = set(memory_routing.get("keywords", []))
532
+ pattern_data["keywords"] = list(
533
+ existing_keywords | new_keywords
534
+ )
535
+
536
+ self._dynamic_patterns[pattern_key] = pattern_data
537
+ self.logger.debug(
538
+ f"Loaded dynamic memory routing for {pattern_key}"
539
+ )
540
+
541
+ self._dynamic_patterns_loaded = True
542
+ self.logger.info(
543
+ f"Loaded memory routing patterns for {len(self._dynamic_patterns)} agents"
544
+ )
545
+
546
+ except Exception as e:
547
+ self.logger.warning(f"Could not load dynamic memory routing patterns: {e}")
548
+ self._dynamic_patterns_loaded = True # Don't retry
470
549
 
471
550
  def get_supported_agents(self) -> List[str]:
472
551
  """Get list of supported agent types.
@@ -477,7 +556,12 @@ class MemoryRouter(LoggerMixin):
477
556
  Returns:
478
557
  List of supported agent type names
479
558
  """
480
- return list(self.AGENT_PATTERNS.keys())
559
+ self._load_dynamic_patterns()
560
+
561
+ # Combine static and dynamic patterns
562
+ all_agents = set(self.AGENT_PATTERNS.keys())
563
+ all_agents.update(self._dynamic_patterns.keys())
564
+ return list(all_agents)
481
565
 
482
566
  def is_agent_supported(self, agent_type: str) -> bool:
483
567
  """Check if an agent type is supported by the memory router.
@@ -491,7 +575,8 @@ class MemoryRouter(LoggerMixin):
491
575
  Returns:
492
576
  True if agent type is supported, False otherwise
493
577
  """
494
- return agent_type in self.AGENT_PATTERNS
578
+ self._load_dynamic_patterns()
579
+ return agent_type in self.AGENT_PATTERNS or agent_type in self._dynamic_patterns
495
580
 
496
581
  def analyze_and_route(
497
582
  self, content: str, context: Optional[Dict] = None
@@ -605,21 +690,30 @@ class MemoryRouter(LoggerMixin):
605
690
  Returns:
606
691
  Dict containing routing patterns and statistics
607
692
  """
693
+ self._load_dynamic_patterns()
694
+
695
+ # Combine static and dynamic patterns
696
+ all_patterns = dict(self.AGENT_PATTERNS)
697
+ all_patterns.update(self._dynamic_patterns)
698
+
608
699
  return {
609
- "agents": list(self.AGENT_PATTERNS.keys()),
700
+ "agents": list(all_patterns.keys()),
610
701
  "default_agent": self.DEFAULT_AGENT,
702
+ "static_agents": list(self.AGENT_PATTERNS.keys()),
703
+ "dynamic_agents": list(self._dynamic_patterns.keys()),
611
704
  "patterns": {
612
705
  agent: {
613
706
  "keyword_count": len(patterns["keywords"]),
614
707
  "section_count": len(patterns["sections"]),
615
708
  "keywords": patterns["keywords"][:10], # Show first 10
616
709
  "sections": patterns["sections"],
710
+ "source": (
711
+ "dynamic" if agent in self._dynamic_patterns else "static"
712
+ ),
617
713
  }
618
- for agent, patterns in self.AGENT_PATTERNS.items()
714
+ for agent, patterns in all_patterns.items()
619
715
  },
620
- "total_keywords": sum(
621
- len(p["keywords"]) for p in self.AGENT_PATTERNS.values()
622
- ),
716
+ "total_keywords": sum(len(p["keywords"]) for p in all_patterns.values()),
623
717
  }
624
718
 
625
719
  def _normalize_content(self, content: str) -> str:
@@ -663,9 +757,14 @@ class MemoryRouter(LoggerMixin):
663
757
  Returns:
664
758
  Dict mapping agent names to relevance scores
665
759
  """
760
+ self._load_dynamic_patterns()
666
761
  scores = {}
667
762
 
668
- for agent, patterns in self.AGENT_PATTERNS.items():
763
+ # Combine static and dynamic patterns
764
+ all_patterns = dict(self.AGENT_PATTERNS)
765
+ all_patterns.update(self._dynamic_patterns)
766
+
767
+ for agent, patterns in all_patterns.items():
669
768
  score = 0.0
670
769
  matched_keywords = []
671
770
 
@@ -773,10 +872,17 @@ class MemoryRouter(LoggerMixin):
773
872
  Returns:
774
873
  Section name for memory storage
775
874
  """
776
- if agent not in self.AGENT_PATTERNS:
875
+ self._load_dynamic_patterns()
876
+
877
+ # Check both static and dynamic patterns
878
+ if agent in self.AGENT_PATTERNS:
879
+ sections = self.AGENT_PATTERNS[agent]["sections"]
880
+ elif agent in self._dynamic_patterns:
881
+ sections = self._dynamic_patterns[agent]["sections"]
882
+ else:
777
883
  return "Recent Learnings"
778
884
 
779
- sections = self.AGENT_PATTERNS[agent]["sections"]
885
+ sections = sections if sections else []
780
886
 
781
887
  # Simple heuristics for section selection
782
888
  if "mistake" in content or "error" in content or "avoid" in content:
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env python3
2
2
  """
3
- Project Analyzer Service
4
- =======================
3
+ Project Analyzer Service (Refactored)
4
+ =====================================
5
5
 
6
6
  Analyzes project characteristics to enable project-specific memory creation.
7
7
 
@@ -9,8 +9,15 @@ WHY: Instead of creating generic memories, agents need to understand the specifi
9
9
  project they're working on - its tech stack, architecture patterns, coding conventions,
10
10
  and key components. This service extracts these characteristics automatically.
11
11
 
12
- DESIGN DECISION: Separates project analysis from memory creation to allow reuse
13
- across different memory-related services and enable caching of analysis results.
12
+ REFACTORING NOTE: This module has been refactored to follow SOLID principles.
13
+ The original god class has been split into focused services:
14
+ - LanguageAnalyzerService: Language and framework detection
15
+ - DependencyAnalyzerService: Dependency and package management
16
+ - ArchitectureAnalyzerService: Architecture and structure analysis
17
+ - MetricsCollectorService: Code metrics collection
18
+
19
+ The main ProjectAnalyzer class now orchestrates these services while maintaining
20
+ full backward compatibility with the original interface.
14
21
 
15
22
  This service analyzes:
16
23
  - Technology stack from config files (package.json, requirements.txt, etc.)
@@ -34,6 +41,8 @@ from claude_mpm.core.config import Config
34
41
  from claude_mpm.core.interfaces import ProjectAnalyzerInterface
35
42
  from claude_mpm.core.unified_paths import get_path_manager
36
43
 
44
+ # Import refactored services
45
+
37
46
 
38
47
  @dataclass
39
48
  class ProjectCharacteristics:
@@ -0,0 +1,450 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Refactored Project Analyzer Service
4
+ ===================================
5
+
6
+ WHY: Refactored from the original god class to follow SOLID principles.
7
+ This version orchestrates specialized services instead of handling all
8
+ analysis tasks directly.
9
+
10
+ DECISION: Use dependency injection and service composition to maintain
11
+ single responsibility while preserving the original interface.
12
+ """
13
+
14
+ import logging
15
+ import time
16
+ from pathlib import Path
17
+ from typing import Any, Dict, List, Optional
18
+
19
+ from claude_mpm.core.config import Config
20
+ from claude_mpm.core.interfaces import ProjectAnalyzerInterface
21
+ from claude_mpm.core.unified_paths import get_path_manager
22
+
23
+ from .analyzer import ProjectCharacteristics # Reuse data class
24
+ from .architecture_analyzer import ArchitectureAnalyzerService
25
+ from .dependency_analyzer import DependencyAnalyzerService
26
+ from .language_analyzer import LanguageAnalyzerService
27
+ from .metrics_collector import MetricsCollectorService
28
+
29
+
30
+ class RefactoredProjectAnalyzer(ProjectAnalyzerInterface):
31
+ """Refactored project analyzer using service composition.
32
+
33
+ WHY: This refactored version maintains the same interface but delegates
34
+ work to specialized services, following single responsibility principle.
35
+ Each service handles one aspect of project analysis.
36
+
37
+ DESIGN DECISION: Use dependency injection for services to enable easy
38
+ testing and future extensibility. Cache results to avoid redundant analysis.
39
+ """
40
+
41
+ def __init__(
42
+ self,
43
+ config: Optional[Config] = None,
44
+ working_directory: Optional[Path] = None,
45
+ # Dependency injection for services
46
+ language_analyzer: Optional[LanguageAnalyzerService] = None,
47
+ dependency_analyzer: Optional[DependencyAnalyzerService] = None,
48
+ architecture_analyzer: Optional[ArchitectureAnalyzerService] = None,
49
+ metrics_collector: Optional[MetricsCollectorService] = None,
50
+ ):
51
+ """Initialize the refactored project analyzer.
52
+
53
+ Args:
54
+ config: Optional Config object
55
+ working_directory: Optional working directory path
56
+ language_analyzer: Optional language analyzer service
57
+ dependency_analyzer: Optional dependency analyzer service
58
+ architecture_analyzer: Optional architecture analyzer service
59
+ metrics_collector: Optional metrics collector service
60
+ """
61
+ self.config = config or Config()
62
+ self.working_directory = (
63
+ working_directory or get_path_manager().get_project_root()
64
+ )
65
+ self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
66
+
67
+ # Initialize services (use injected or create new)
68
+ self.language_analyzer = language_analyzer or LanguageAnalyzerService(
69
+ self.working_directory
70
+ )
71
+ self.dependency_analyzer = dependency_analyzer or DependencyAnalyzerService(
72
+ self.working_directory
73
+ )
74
+ self.architecture_analyzer = (
75
+ architecture_analyzer or ArchitectureAnalyzerService(self.working_directory)
76
+ )
77
+ self.metrics_collector = metrics_collector or MetricsCollectorService(
78
+ self.working_directory
79
+ )
80
+
81
+ # Cache for analysis results
82
+ self._analysis_cache: Optional[ProjectCharacteristics] = None
83
+ self._cache_timestamp: Optional[float] = None
84
+ self._cache_ttl = 300 # 5 minutes
85
+
86
+ def analyze_project(self, force_refresh: bool = False) -> ProjectCharacteristics:
87
+ """Analyze the current project and return characteristics.
88
+
89
+ WHY: Orchestrates multiple specialized services to provide comprehensive
90
+ project analysis while maintaining the original interface.
91
+
92
+ Args:
93
+ force_refresh: If True, ignores cache and performs fresh analysis
94
+
95
+ Returns:
96
+ ProjectCharacteristics: Structured project analysis results
97
+ """
98
+ try:
99
+ # Check cache first
100
+ if not force_refresh and self._is_cache_valid():
101
+ self.logger.debug("Using cached project analysis")
102
+ return self._analysis_cache
103
+
104
+ self.logger.info(f"Analyzing project at: {self.working_directory}")
105
+
106
+ # Initialize characteristics
107
+ characteristics = self._create_empty_characteristics()
108
+
109
+ # Perform analysis using specialized services
110
+ self._analyze_with_language_service(characteristics)
111
+ self._analyze_with_dependency_service(characteristics)
112
+ self._analyze_with_architecture_service(characteristics)
113
+ self._enrich_with_metrics(characteristics)
114
+
115
+ # Cache the results
116
+ self._update_cache(characteristics)
117
+
118
+ self.logger.info(
119
+ f"Project analysis complete: {characteristics.primary_language} project "
120
+ f"with {len(characteristics.frameworks)} frameworks"
121
+ )
122
+ return characteristics
123
+
124
+ except Exception as e:
125
+ self.logger.error(f"Error analyzing project: {e}")
126
+ return self._create_empty_characteristics()
127
+
128
+ def _analyze_with_language_service(
129
+ self, characteristics: ProjectCharacteristics
130
+ ) -> None:
131
+ """Use language analyzer service to populate language-related fields.
132
+
133
+ WHY: Delegates language analysis to specialized service while
134
+ maintaining data structure compatibility.
135
+ """
136
+ try:
137
+ # Detect languages
138
+ characteristics.languages = self.language_analyzer.detect_languages()
139
+
140
+ # Detect primary language
141
+ characteristics.primary_language = (
142
+ self.language_analyzer.detect_primary_language()
143
+ )
144
+
145
+ # Detect frameworks
146
+ characteristics.frameworks = self.language_analyzer.detect_frameworks()
147
+
148
+ # Analyze code style
149
+ characteristics.code_conventions = (
150
+ self.language_analyzer.analyze_code_style()
151
+ )
152
+
153
+ except Exception as e:
154
+ self.logger.warning(f"Error in language analysis: {e}")
155
+
156
+ def _analyze_with_dependency_service(
157
+ self, characteristics: ProjectCharacteristics
158
+ ) -> None:
159
+ """Use dependency analyzer service to populate dependency-related fields.
160
+
161
+ WHY: Delegates dependency analysis to specialized service for better
162
+ separation of concerns.
163
+ """
164
+ try:
165
+ # Detect package manager
166
+ characteristics.package_manager = (
167
+ self.dependency_analyzer.detect_package_manager()
168
+ )
169
+
170
+ # Analyze all dependencies
171
+ deps = self.dependency_analyzer.analyze_dependencies()
172
+
173
+ # Populate characteristics
174
+ characteristics.key_dependencies = deps["production"][:20] # Top 20
175
+ characteristics.databases = self.dependency_analyzer.detect_databases(
176
+ deps["production"] + deps["development"]
177
+ )
178
+ characteristics.web_frameworks = (
179
+ self.dependency_analyzer.detect_web_frameworks(deps["production"])
180
+ )
181
+
182
+ # Detect testing framework
183
+ testing_frameworks = self.dependency_analyzer.detect_testing_frameworks(
184
+ deps["development"] + deps["testing"]
185
+ )
186
+ if testing_frameworks:
187
+ characteristics.testing_framework = testing_frameworks[0]
188
+
189
+ # Get build tools
190
+ characteristics.build_tools = self.dependency_analyzer.get_build_tools()
191
+
192
+ except Exception as e:
193
+ self.logger.warning(f"Error in dependency analysis: {e}")
194
+
195
+ def _analyze_with_architecture_service(
196
+ self, characteristics: ProjectCharacteristics
197
+ ) -> None:
198
+ """Use architecture analyzer service to populate structure-related fields.
199
+
200
+ WHY: Delegates architectural analysis to specialized service for
201
+ better modularity and testability.
202
+ """
203
+ try:
204
+ # Analyze architecture
205
+ arch_info = self.architecture_analyzer.analyze_architecture()
206
+
207
+ # Populate characteristics
208
+ characteristics.architecture_type = arch_info.architecture_type
209
+ characteristics.main_modules = arch_info.main_modules
210
+ characteristics.key_directories = arch_info.key_directories
211
+ characteristics.entry_points = arch_info.entry_points
212
+ characteristics.api_patterns = arch_info.api_patterns
213
+ characteristics.configuration_patterns = arch_info.configuration_patterns
214
+ characteristics.project_terminology = arch_info.project_terminology
215
+
216
+ # Detect design patterns
217
+ design_patterns = self.architecture_analyzer.detect_design_patterns()
218
+ if design_patterns:
219
+ # Add to code conventions
220
+ for pattern in design_patterns[:3]: # Top 3 patterns
221
+ pattern_name = pattern.replace("_", " ").title() + " Pattern"
222
+ if pattern_name not in characteristics.code_conventions:
223
+ characteristics.code_conventions.append(pattern_name)
224
+
225
+ except Exception as e:
226
+ self.logger.warning(f"Error in architecture analysis: {e}")
227
+
228
+ def _enrich_with_metrics(self, characteristics: ProjectCharacteristics) -> None:
229
+ """Enrich characteristics with metrics data.
230
+
231
+ WHY: Metrics provide quantitative insights that complement
232
+ the qualitative analysis from other services.
233
+ """
234
+ try:
235
+ # Collect metrics
236
+ metrics = self.metrics_collector.collect_metrics()
237
+
238
+ # Add testing patterns based on metrics
239
+ if metrics.test_files > 0:
240
+ characteristics.test_patterns.append(f"{metrics.test_files} test files")
241
+
242
+ if metrics.test_to_code_ratio > 0:
243
+ ratio_pct = int(metrics.test_to_code_ratio * 100)
244
+ characteristics.test_patterns.append(
245
+ f"{ratio_pct}% test coverage ratio"
246
+ )
247
+
248
+ if metrics.test_coverage_files > 0:
249
+ characteristics.test_patterns.append("Test coverage tracking")
250
+
251
+ # Add file organization insights
252
+ if metrics.files_over_1000_lines > 0:
253
+ characteristics.code_conventions.append("Large file refactoring needed")
254
+
255
+ # Find documentation files
256
+ doc_patterns = ["README*", "CONTRIBUTING*", "CHANGELOG*", "docs/*"]
257
+ doc_files = []
258
+ for pattern in doc_patterns:
259
+ matches = list(self.working_directory.glob(pattern))
260
+ doc_files.extend(
261
+ [
262
+ str(f.relative_to(self.working_directory))
263
+ for f in matches
264
+ if f.is_file()
265
+ ]
266
+ )
267
+ characteristics.documentation_files = doc_files[:10]
268
+
269
+ # Find important configs
270
+ config_patterns = ["*.json", "*.yaml", "*.yml", "*.toml", "*.ini", ".env*"]
271
+ config_files = []
272
+ for pattern in config_patterns:
273
+ matches = list(self.working_directory.glob(pattern))
274
+ config_files.extend(
275
+ [
276
+ str(f.relative_to(self.working_directory))
277
+ for f in matches
278
+ if f.is_file()
279
+ ]
280
+ )
281
+ characteristics.important_configs = config_files[:10]
282
+
283
+ except Exception as e:
284
+ self.logger.warning(f"Error collecting metrics: {e}")
285
+
286
+ def _create_empty_characteristics(self) -> ProjectCharacteristics:
287
+ """Create empty ProjectCharacteristics with defaults."""
288
+ return ProjectCharacteristics(
289
+ project_name=self.working_directory.name,
290
+ primary_language=None,
291
+ languages=[],
292
+ frameworks=[],
293
+ architecture_type="unknown",
294
+ main_modules=[],
295
+ key_directories=[],
296
+ entry_points=[],
297
+ testing_framework=None,
298
+ test_patterns=[],
299
+ package_manager=None,
300
+ build_tools=[],
301
+ databases=[],
302
+ web_frameworks=[],
303
+ api_patterns=[],
304
+ key_dependencies=[],
305
+ code_conventions=[],
306
+ configuration_patterns=[],
307
+ project_terminology=[],
308
+ documentation_files=[],
309
+ important_configs=[],
310
+ )
311
+
312
+ def _is_cache_valid(self) -> bool:
313
+ """Check if cache is still valid."""
314
+ if self._analysis_cache is None or self._cache_timestamp is None:
315
+ return False
316
+ return time.time() - self._cache_timestamp < self._cache_ttl
317
+
318
+ def _update_cache(self, characteristics: ProjectCharacteristics) -> None:
319
+ """Update the cache with new results."""
320
+ self._analysis_cache = characteristics
321
+ self._cache_timestamp = time.time()
322
+
323
+ # ================================================================================
324
+ # Interface Adapter Methods
325
+ # ================================================================================
326
+ # These methods maintain backward compatibility with ProjectAnalyzerInterface
327
+
328
+ def detect_technology_stack(self) -> List[str]:
329
+ """Detect technologies used in the project."""
330
+ characteristics = self.analyze_project()
331
+
332
+ technologies = []
333
+ technologies.extend(characteristics.languages)
334
+ technologies.extend(characteristics.frameworks)
335
+ technologies.extend(characteristics.web_frameworks)
336
+ technologies.extend(characteristics.databases)
337
+
338
+ if characteristics.package_manager:
339
+ technologies.append(characteristics.package_manager)
340
+
341
+ technologies.extend(characteristics.build_tools)
342
+
343
+ return list(set(technologies))
344
+
345
+ def analyze_code_patterns(self) -> Dict[str, Any]:
346
+ """Analyze code patterns and conventions."""
347
+ characteristics = self.analyze_project()
348
+
349
+ return {
350
+ "code_conventions": characteristics.code_conventions,
351
+ "test_patterns": characteristics.test_patterns,
352
+ "api_patterns": characteristics.api_patterns,
353
+ "configuration_patterns": characteristics.configuration_patterns,
354
+ "architecture_type": characteristics.architecture_type,
355
+ }
356
+
357
+ def get_project_structure(self) -> Dict[str, Any]:
358
+ """Get project directory structure analysis."""
359
+ characteristics = self.analyze_project()
360
+
361
+ return {
362
+ "project_name": characteristics.project_name,
363
+ "main_modules": characteristics.main_modules,
364
+ "key_directories": characteristics.key_directories,
365
+ "entry_points": characteristics.entry_points,
366
+ "documentation_files": characteristics.documentation_files,
367
+ "important_configs": characteristics.important_configs,
368
+ "architecture_type": characteristics.architecture_type,
369
+ }
370
+
371
+ def identify_entry_points(self) -> List[Path]:
372
+ """Identify project entry points."""
373
+ characteristics = self.analyze_project()
374
+
375
+ entry_paths = []
376
+ for entry_point in characteristics.entry_points:
377
+ entry_path = self.working_directory / entry_point
378
+ if entry_path.exists():
379
+ entry_paths.append(entry_path)
380
+
381
+ return entry_paths
382
+
383
+ def get_project_context_summary(self) -> str:
384
+ """Get a concise summary of project context for memory templates."""
385
+ characteristics = self.analyze_project()
386
+
387
+ summary_parts = []
388
+
389
+ # Basic project info
390
+ lang_info = characteristics.primary_language or "mixed"
391
+ if characteristics.languages and len(characteristics.languages) > 1:
392
+ lang_info = (
393
+ f"{lang_info} (with {', '.join(characteristics.languages[1:3])})"
394
+ )
395
+
396
+ summary_parts.append(
397
+ f"{characteristics.project_name}: {lang_info} {characteristics.architecture_type.lower()}"
398
+ )
399
+
400
+ # Key directories and modules
401
+ if characteristics.main_modules:
402
+ modules_str = ", ".join(characteristics.main_modules[:4])
403
+ summary_parts.append(f"- Main modules: {modules_str}")
404
+
405
+ # Frameworks and tools
406
+ if characteristics.frameworks or characteristics.web_frameworks:
407
+ all_frameworks = characteristics.frameworks + characteristics.web_frameworks
408
+ frameworks_str = ", ".join(all_frameworks[:3])
409
+ summary_parts.append(f"- Uses: {frameworks_str}")
410
+
411
+ # Testing
412
+ if characteristics.testing_framework:
413
+ summary_parts.append(f"- Testing: {characteristics.testing_framework}")
414
+ elif characteristics.test_patterns:
415
+ summary_parts.append(f"- Testing: {characteristics.test_patterns[0]}")
416
+
417
+ # Key patterns
418
+ if characteristics.code_conventions:
419
+ patterns_str = ", ".join(characteristics.code_conventions[:2])
420
+ summary_parts.append(f"- Key patterns: {patterns_str}")
421
+
422
+ return "\n".join(summary_parts)
423
+
424
+ def get_important_files_for_context(self) -> List[str]:
425
+ """Get list of important files that should be considered for memory context."""
426
+ characteristics = self.analyze_project()
427
+ important_files = []
428
+
429
+ # Always include standard documentation
430
+ standard_docs = ["README.md", "CONTRIBUTING.md", "CHANGELOG.md"]
431
+ for doc in standard_docs:
432
+ if (self.working_directory / doc).exists():
433
+ important_files.append(doc)
434
+
435
+ # Include configuration files
436
+ important_files.extend(characteristics.important_configs)
437
+
438
+ # Include project-specific documentation
439
+ important_files.extend(characteristics.documentation_files[:5])
440
+
441
+ # Include entry points
442
+ important_files.extend(characteristics.entry_points)
443
+
444
+ # Look for architecture documentation
445
+ arch_patterns = ["ARCHITECTURE.md", "docs/architecture.md", "docs/STRUCTURE.md"]
446
+ for pattern in arch_patterns:
447
+ if (self.working_directory / pattern).exists():
448
+ important_files.append(pattern)
449
+
450
+ return list(set(important_files))