claude-mpm 4.3.20__py3-none-any.whl → 4.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/agent_loader.py +2 -2
  3. claude_mpm/agents/agent_loader_integration.py +2 -2
  4. claude_mpm/agents/async_agent_loader.py +2 -2
  5. claude_mpm/agents/base_agent_loader.py +2 -2
  6. claude_mpm/agents/frontmatter_validator.py +2 -2
  7. claude_mpm/agents/system_agent_config.py +2 -2
  8. claude_mpm/agents/templates/data_engineer.json +1 -2
  9. claude_mpm/cli/commands/doctor.py +2 -2
  10. claude_mpm/cli/commands/mpm_init.py +560 -47
  11. claude_mpm/cli/commands/mpm_init_handler.py +6 -0
  12. claude_mpm/cli/parsers/mpm_init_parser.py +39 -1
  13. claude_mpm/cli/startup_logging.py +11 -9
  14. claude_mpm/commands/mpm-init.md +76 -12
  15. claude_mpm/config/agent_config.py +2 -2
  16. claude_mpm/config/paths.py +2 -2
  17. claude_mpm/core/agent_name_normalizer.py +2 -2
  18. claude_mpm/core/config.py +2 -1
  19. claude_mpm/core/config_aliases.py +2 -2
  20. claude_mpm/core/file_utils.py +1 -0
  21. claude_mpm/core/log_manager.py +2 -2
  22. claude_mpm/core/tool_access_control.py +2 -2
  23. claude_mpm/core/unified_agent_registry.py +2 -2
  24. claude_mpm/core/unified_paths.py +2 -2
  25. claude_mpm/experimental/cli_enhancements.py +3 -2
  26. claude_mpm/hooks/base_hook.py +2 -2
  27. claude_mpm/hooks/instruction_reinforcement.py +2 -2
  28. claude_mpm/hooks/memory_integration_hook.py +1 -1
  29. claude_mpm/hooks/validation_hooks.py +2 -2
  30. claude_mpm/scripts/mpm_doctor.py +2 -2
  31. claude_mpm/services/agents/loading/agent_profile_loader.py +2 -2
  32. claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
  33. claude_mpm/services/agents/loading/framework_agent_loader.py +2 -2
  34. claude_mpm/services/agents/management/agent_capabilities_generator.py +2 -2
  35. claude_mpm/services/agents/management/agent_management_service.py +2 -2
  36. claude_mpm/services/agents/memory/content_manager.py +5 -2
  37. claude_mpm/services/agents/memory/memory_categorization_service.py +5 -2
  38. claude_mpm/services/agents/memory/memory_file_service.py +28 -6
  39. claude_mpm/services/agents/memory/memory_format_service.py +5 -2
  40. claude_mpm/services/agents/memory/memory_limits_service.py +4 -2
  41. claude_mpm/services/agents/registry/deployed_agent_discovery.py +2 -2
  42. claude_mpm/services/agents/registry/modification_tracker.py +4 -4
  43. claude_mpm/services/async_session_logger.py +2 -1
  44. claude_mpm/services/claude_session_logger.py +2 -2
  45. claude_mpm/services/core/path_resolver.py +3 -2
  46. claude_mpm/services/diagnostics/diagnostic_runner.py +4 -3
  47. claude_mpm/services/event_bus/direct_relay.py +2 -1
  48. claude_mpm/services/event_bus/event_bus.py +2 -1
  49. claude_mpm/services/event_bus/relay.py +2 -2
  50. claude_mpm/services/framework_claude_md_generator/content_assembler.py +2 -2
  51. claude_mpm/services/infrastructure/daemon_manager.py +2 -2
  52. claude_mpm/services/memory/cache/simple_cache.py +2 -2
  53. claude_mpm/services/project/archive_manager.py +981 -0
  54. claude_mpm/services/project/documentation_manager.py +536 -0
  55. claude_mpm/services/project/enhanced_analyzer.py +491 -0
  56. claude_mpm/services/project/project_organizer.py +904 -0
  57. claude_mpm/services/response_tracker.py +2 -2
  58. claude_mpm/services/socketio/handlers/connection.py +14 -33
  59. claude_mpm/services/socketio/server/eventbus_integration.py +2 -2
  60. claude_mpm/services/unified/__init__.py +65 -0
  61. claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
  62. claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
  63. claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
  64. claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
  65. claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
  66. claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
  67. claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
  68. claude_mpm/services/unified/deployment_strategies/base.py +557 -0
  69. claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
  70. claude_mpm/services/unified/deployment_strategies/local.py +594 -0
  71. claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
  72. claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
  73. claude_mpm/services/unified/interfaces.py +499 -0
  74. claude_mpm/services/unified/migration.py +532 -0
  75. claude_mpm/services/unified/strategies.py +551 -0
  76. claude_mpm/services/unified/unified_analyzer.py +534 -0
  77. claude_mpm/services/unified/unified_config.py +688 -0
  78. claude_mpm/services/unified/unified_deployment.py +470 -0
  79. claude_mpm/services/version_control/version_parser.py +5 -4
  80. claude_mpm/storage/state_storage.py +2 -2
  81. claude_mpm/utils/agent_dependency_loader.py +49 -0
  82. claude_mpm/utils/common.py +542 -0
  83. claude_mpm/utils/database_connector.py +298 -0
  84. claude_mpm/utils/error_handler.py +2 -1
  85. claude_mpm/utils/log_cleanup.py +2 -2
  86. claude_mpm/utils/path_operations.py +2 -2
  87. claude_mpm/utils/robust_installer.py +56 -0
  88. claude_mpm/utils/session_logging.py +2 -2
  89. claude_mpm/utils/subprocess_utils.py +2 -2
  90. claude_mpm/validation/agent_validator.py +2 -2
  91. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/METADATA +1 -1
  92. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/RECORD +96 -71
  93. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/WHEEL +0 -0
  94. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/entry_points.txt +0 -0
  95. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/licenses/LICENSE +0 -0
  96. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/top_level.txt +0 -0
@@ -18,14 +18,14 @@ DESIGN DECISIONS:
18
18
  - Handles agent name normalization for consistent tracking
19
19
  """
20
20
 
21
- import logging
22
21
  from datetime import datetime, timezone
23
22
  from typing import Any, Dict, Optional
24
23
 
25
24
  from claude_mpm.core.config import Config
26
25
  from claude_mpm.core.shared.config_loader import ConfigLoader
27
26
 
28
- logger = logging.getLogger(__name__)
27
+ from claude_mpm.core.logging_utils import get_logger
28
+ logger = get_logger(__name__)
29
29
 
30
30
 
31
31
  class ResponseTracker:
@@ -11,8 +11,12 @@ import time
11
11
  from datetime import datetime, timezone
12
12
  from typing import Any, Callable, Dict, List, Optional
13
13
 
14
+ from claude_mpm.core.logging_utils import get_logger
15
+
14
16
  from .base import BaseEventHandler
15
17
 
18
+ logger = get_logger(__name__)
19
+
16
20
 
17
21
  def timeout_handler(timeout_seconds: float = 5.0):
18
22
  """Decorator to add timeout protection to async handlers.
@@ -39,49 +43,26 @@ def timeout_handler(timeout_seconds: float = 5.0):
39
43
 
40
44
  elapsed = time.time() - start_time
41
45
  if elapsed > timeout_seconds * 0.8: # Warn if close to timeout
42
- # Try to get logger from closure scope or fallback to print
43
- try:
44
- import logging
45
-
46
- logger = logging.getLogger(__name__)
47
- logger.warning(
48
- f"⚠️ Handler {handler_name} took {elapsed:.2f}s "
49
- f"(close to {timeout_seconds}s timeout)"
50
- )
51
- except Exception:
52
- print(
53
- f"⚠️ Handler {handler_name} took {elapsed:.2f}s (close to {timeout_seconds}s timeout)"
54
- )
46
+ logger.warning(
47
+ f"⚠️ Handler {handler_name} took {elapsed:.2f}s "
48
+ f"(close to {timeout_seconds}s timeout)"
49
+ )
55
50
 
56
51
  return result
57
52
 
58
53
  except asyncio.TimeoutError:
59
54
  elapsed = time.time() - start_time
60
- # Try to get logger from closure scope or fallback to print
61
- try:
62
- import logging
63
-
64
- logger = logging.getLogger(__name__)
65
- logger.error(
66
- f"❌ Handler {handler_name} timed out after {elapsed:.2f}s"
67
- )
68
- except Exception:
69
- print(f"❌ Handler {handler_name} timed out after {elapsed:.2f}s")
55
+ logger.error(
56
+ f"❌ Handler {handler_name} timed out after {elapsed:.2f}s"
57
+ )
70
58
 
71
59
  return None
72
60
 
73
61
  except Exception as e:
74
62
  elapsed = time.time() - start_time
75
- # Try to get logger from closure scope or fallback to print
76
- try:
77
- import logging
78
-
79
- logger = logging.getLogger(__name__)
80
- logger.error(
81
- f"❌ Handler {handler_name} failed after {elapsed:.2f}s: {e}"
82
- )
83
- except Exception:
84
- print(f"❌ Handler {handler_name} failed after {elapsed:.2f}s: {e}")
63
+ logger.error(
64
+ f"❌ Handler {handler_name} failed after {elapsed:.2f}s: {e}"
65
+ )
85
66
  raise
86
67
 
87
68
  return wrapper
@@ -7,7 +7,6 @@ WHY this integration module:
7
7
  - Provides clean separation of concerns
8
8
  """
9
9
 
10
- import logging
11
10
  from datetime import datetime, timezone
12
11
  from typing import Optional
13
12
 
@@ -15,7 +14,8 @@ from claude_mpm.services.event_bus import EventBus
15
14
  from claude_mpm.services.event_bus.config import get_config
16
15
  from claude_mpm.services.event_bus.direct_relay import DirectSocketIORelay
17
16
 
18
- logger = logging.getLogger(__name__)
17
+ from claude_mpm.core.logging_utils import get_logger
18
+ logger = get_logger(__name__)
19
19
 
20
20
 
21
21
  class EventBusIntegration:
@@ -0,0 +1,65 @@
1
+ """
2
+ Unified Services Module for Phase 2 Service Consolidation
3
+ =========================================================
4
+
5
+ This module implements the strategy pattern framework for consolidating
6
+ Claude MPM's 314 service files into approximately 180 more maintainable services.
7
+
8
+ Architecture:
9
+ - Base service interfaces for common service patterns
10
+ - Strategy pattern for pluggable behavior
11
+ - Backward compatibility layer for existing services
12
+ - Feature flags for gradual migration
13
+
14
+ Components:
15
+ - interfaces.py: Core service interfaces (IDeploymentService, IAnalyzerService, etc.)
16
+ - strategies.py: Strategy pattern framework with plugin registry
17
+ - migration.py: Migration utilities and backward compatibility
18
+ - Unified service implementations for major service categories
19
+ """
20
+
21
+ from .interfaces import (
22
+ IAnalyzerService,
23
+ IConfigurationService,
24
+ IDeploymentService,
25
+ ServiceCapability,
26
+ ServiceMetadata,
27
+ )
28
+ from .migration import (
29
+ FeatureFlag,
30
+ MigrationStatus,
31
+ ServiceMapper,
32
+ create_compatibility_wrapper,
33
+ )
34
+ from .strategies import (
35
+ AnalyzerStrategy,
36
+ ConfigStrategy,
37
+ DeploymentStrategy,
38
+ StrategyRegistry,
39
+ )
40
+ from .unified_analyzer import UnifiedAnalyzer
41
+ from .unified_config import UnifiedConfigManager
42
+ from .unified_deployment import UnifiedDeploymentService
43
+
44
+ __all__ = [
45
+ # Interfaces
46
+ "IDeploymentService",
47
+ "IAnalyzerService",
48
+ "IConfigurationService",
49
+ "ServiceMetadata",
50
+ "ServiceCapability",
51
+ # Strategies
52
+ "DeploymentStrategy",
53
+ "AnalyzerStrategy",
54
+ "ConfigStrategy",
55
+ "StrategyRegistry",
56
+ # Migration
57
+ "ServiceMapper",
58
+ "MigrationStatus",
59
+ "FeatureFlag",
60
+ "create_compatibility_wrapper",
61
+ # Unified Services
62
+ "UnifiedDeploymentService",
63
+ "UnifiedAnalyzer",
64
+ "UnifiedConfigManager",
65
+ ]
@@ -0,0 +1,44 @@
1
+ """
2
+ Concrete Analyzer Strategy Implementations
3
+ ==========================================
4
+
5
+ This module provides concrete implementations of the AnalyzerStrategy base class,
6
+ consolidating functionality from multiple analyzer services with 70-80% code duplication.
7
+
8
+ Consolidates:
9
+ - enhanced_analyzer.py (1,118 LOC)
10
+ - project_analyzer.py (815 LOC)
11
+ - structure_analyzer.py (703 LOC)
12
+ - code_analyzer.py (385 LOC)
13
+ - dependency_analyzer.py (326 LOC)
14
+ - file_analyzer.py (247 LOC)
15
+ - project_report_generator.py (121 LOC)
16
+
17
+ Total: 3,715 LOC → ~1,200 LOC (68% reduction)
18
+
19
+ Author: Claude MPM Development Team
20
+ Created: 2025-01-26
21
+ """
22
+
23
+ from .code_analyzer import CodeAnalyzerStrategy
24
+ from .dependency_analyzer import DependencyAnalyzerStrategy
25
+ from .performance_analyzer import PerformanceAnalyzerStrategy
26
+ from .security_analyzer import SecurityAnalyzerStrategy
27
+ from .structure_analyzer import StructureAnalyzerStrategy
28
+
29
+ __all__ = [
30
+ "CodeAnalyzerStrategy",
31
+ "DependencyAnalyzerStrategy",
32
+ "StructureAnalyzerStrategy",
33
+ "SecurityAnalyzerStrategy",
34
+ "PerformanceAnalyzerStrategy",
35
+ ]
36
+
37
+ # Strategy registry for automatic discovery
38
+ ANALYZER_STRATEGIES = {
39
+ "code": CodeAnalyzerStrategy,
40
+ "dependency": DependencyAnalyzerStrategy,
41
+ "structure": StructureAnalyzerStrategy,
42
+ "security": SecurityAnalyzerStrategy,
43
+ "performance": PerformanceAnalyzerStrategy,
44
+ }
@@ -0,0 +1,473 @@
1
+ """
2
+ Code Analyzer Strategy Implementation
3
+ =====================================
4
+
5
+ Analyzes code structure, complexity, quality metrics, and patterns.
6
+ Consolidates functionality from multiple analyzer services.
7
+
8
+ Author: Claude MPM Development Team
9
+ Created: 2025-01-26
10
+ """
11
+
12
+ import ast
13
+ import re
14
+ from pathlib import Path
15
+ from typing import Any, Dict, List, Optional, Set, Tuple
16
+
17
+ from claude_mpm.core.logging_utils import get_logger
18
+
19
+ from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority
20
+
21
+ logger = get_logger(__name__)
22
+
23
+
24
class CodeAnalyzerStrategy(AnalyzerStrategy):
    """
    Strategy for analyzing code structure, complexity, and quality metrics.

    Consolidates:
    - Code complexity analysis (cyclomatic, cognitive)
    - Code quality metrics (maintainability index, technical debt)
    - Pattern detection (anti-patterns, code smells)
    - Function/class analysis (size, complexity, coupling)
    """

    def __init__(self):
        """Initialize code analyzer strategy with its metadata and thresholds."""
        metadata = StrategyMetadata(
            name="CodeAnalyzer",
            description="Analyzes code structure, complexity, and quality metrics",
            supported_types=["file", "directory", "module", "class", "function"],
            supported_operations=["analyze", "metrics", "complexity", "quality"],
            priority=StrategyPriority.HIGH,
            tags={"code", "complexity", "quality", "metrics", "ast"},
        )
        super().__init__(metadata)

        # Language-specific file extensions.
        # NOTE: ".h" is claimed by both C and C++; directory scans deduplicate
        # discovered paths so such files are analyzed only once.
        self.language_extensions = {
            "python": {".py", ".pyi"},
            "javascript": {".js", ".jsx", ".mjs"},
            "typescript": {".ts", ".tsx"},
            "java": {".java"},
            "go": {".go"},
            "rust": {".rs"},
            "c": {".c", ".h"},
            "cpp": {".cpp", ".cc", ".cxx", ".hpp", ".h"},
        }

        # Code smell detection thresholds (see _detect_code_smells).
        self.code_smell_patterns = {
            "long_method": {"threshold": 50, "metric": "lines"},
            "large_class": {"threshold": 500, "metric": "lines"},
            "long_parameter_list": {"threshold": 5, "metric": "count"},
            "duplicate_code": {"threshold": 0.7, "metric": "similarity"},
            "god_class": {"threshold": 10, "metric": "responsibilities"},
        }

    def can_handle(self, context: StrategyContext) -> bool:
        """Return True when both the target type and operation are supported."""
        return (
            context.target_type in self.metadata.supported_types
            and context.operation in self.metadata.supported_operations
        )

    def validate_input(self, input_data: Any) -> List[str]:
        """
        Validate input data for this strategy.

        Returns:
            A list of human-readable error strings; empty when input is valid.
        """
        errors: List[str] = []

        if not input_data:
            errors.append("Input data is required")
            return errors

        if isinstance(input_data, (str, Path)):
            path = Path(input_data)
            if not path.exists():
                errors.append(f"Path does not exist: {path}")
        elif not isinstance(input_data, dict):
            errors.append(f"Invalid input type: {type(input_data).__name__}")

        return errors

    def analyze(
        self, target: Any, options: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Execute code analysis on target.

        Args:
            target: Code file path, directory path, or AST node to analyze.
            options: Analysis options (language, metrics, depth, etc.).

        Returns:
            Analysis results with metrics and findings; a dict with
            ``status: "error"`` when the target cannot be handled.
        """
        options = options or {}

        if isinstance(target, (str, Path)):
            target_path = Path(target)
            if target_path.is_file():
                return self._analyze_file(target_path, options)
            if target_path.is_dir():
                return self._analyze_directory(target_path, options)
        elif isinstance(target, ast.AST):
            return self._analyze_ast(target, options)

        return {
            "status": "error",
            "message": f"Unsupported target type: {type(target).__name__}",
        }

    def _analyze_file(self, file_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze a single code file and return per-file metrics."""
        try:
            language = self._detect_language(file_path)

            content = file_path.read_text(encoding="utf-8")
            lines = content.splitlines()

            # Base line-count metrics shared by all languages.
            metrics: Dict[str, Any] = {
                "file": str(file_path),
                "language": language,
                "lines_of_code": len(lines),
                "blank_lines": sum(1 for line in lines if not line.strip()),
                "comment_lines": self._count_comment_lines(content, language),
            }

            # Language-specific structural analysis (Python only for now).
            if language == "python":
                metrics.update(self._analyze_python_code(content, file_path))

            metrics["complexity"] = self._calculate_complexity_metrics(content, language)
            metrics["code_smells"] = self._detect_code_smells(content, metrics)
            metrics["maintainability_index"] = self._calculate_maintainability(metrics)

            return {
                "status": "success",
                "type": "file",
                "path": str(file_path),
                "metrics": metrics,
            }

        except Exception as e:
            logger.error(f"Error analyzing file {file_path}: {e}")
            return {
                "status": "error",
                "path": str(file_path),
                "error": str(e),
            }

    def _analyze_directory(self, dir_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze all code files under a directory tree and summarize them."""
        results: Dict[str, Any] = {
            "status": "success",
            "type": "directory",
            "path": str(dir_path),
            "files": [],
            "summary": {},
        }

        # Collect code files, deduplicating paths: ".h" belongs to both the
        # C and C++ extension sets and would otherwise be analyzed twice.
        seen: Set[Path] = set()
        code_files: List[Path] = []
        for ext_set in self.language_extensions.values():
            for ext in ext_set:
                for candidate in dir_path.rglob(f"*{ext}"):
                    if candidate not in seen:
                        seen.add(candidate)
                        code_files.append(candidate)

        # Analyze each file; aggregate only the scalars we actually report.
        # (The "complexity" metric is a nested dict, so it must be aggregated
        # explicitly rather than via a generic numeric sum.)
        total_lines = 0
        total_cyclomatic = 0
        for file_path in sorted(code_files):  # sorted for deterministic output
            file_result = self._analyze_file(file_path, options)
            if file_result["status"] == "success":
                results["files"].append(file_result)
                file_metrics = file_result.get("metrics", {})
                total_lines += file_metrics.get("lines_of_code", 0)
                total_cyclomatic += file_metrics.get("complexity", {}).get("cyclomatic", 0)

        analyzed = len(results["files"])
        results["summary"] = {
            "total_files": analyzed,
            "total_lines": total_lines,
            "average_complexity": total_cyclomatic / max(analyzed, 1),
            "code_smells_count": sum(
                len(f.get("metrics", {}).get("code_smells", []))
                for f in results["files"]
            ),
        }

        return results

    def _analyze_python_code(self, content: str, file_path: Path) -> Dict[str, Any]:
        """
        Perform Python-specific structural analysis.

        A function is counted as a method only when it is defined directly in
        a class body; all other (module-level or nested) functions count as
        plain functions. Async definitions are included in both categories.
        """
        try:
            tree = ast.parse(content)
        except SyntaxError as e:
            logger.warning(f"Syntax error in {file_path}: {e}")
            return {}

        functions: List[str] = []
        classes: List[str] = []
        methods: List[str] = []

        # First pass: record class names and the defs that sit directly in
        # class bodies (these are the methods).
        method_node_ids: Set[int] = set()
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                classes.append(node.name)
                for child in node.body:
                    if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef)):
                        methods.append(child.name)
                        method_node_ids.add(id(child))

        # Second pass: every remaining def is a free function.
        for node in ast.walk(tree):
            if (
                isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef))
                and id(node) not in method_node_ids
            ):
                functions.append(node.name)

        return {
            "functions": functions,
            "classes": classes,
            "methods": methods,
            "function_count": len(functions),
            "class_count": len(classes),
            "method_count": len(methods),
        }

    def _analyze_ast(self, node: ast.AST, options: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze an AST node directly (function or class granularity)."""
        metrics: Dict[str, Any] = {
            "node_type": node.__class__.__name__,
            "complexity": self._calculate_ast_complexity(node),
        }

        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            # Count every kind of parameter, not just plain positional args.
            args = node.args
            param_count = (
                len(getattr(args, "posonlyargs", []))
                + len(args.args)
                + len(args.kwonlyargs)
                + (1 if args.vararg else 0)
                + (1 if args.kwarg else 0)
            )
            end_lineno = getattr(node, "end_lineno", None)
            metrics.update(
                {
                    "name": node.name,
                    "parameters": param_count,
                    "lines": end_lineno - node.lineno + 1 if end_lineno else 0,
                }
            )
        elif isinstance(node, ast.ClassDef):
            metrics.update(
                {
                    "name": node.name,
                    "methods": sum(
                        1
                        for n in node.body
                        if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))
                    ),
                    "bases": len(node.bases),
                }
            )

        return {
            "status": "success",
            "type": "ast",
            "metrics": metrics,
        }

    def _calculate_complexity_metrics(self, content: str, language: str) -> Dict[str, Any]:
        """Calculate cyclomatic and cognitive complexity for the given source."""
        complexity: Dict[str, Any] = {
            "cyclomatic": 1,  # base complexity of a straight-line program
            "cognitive": 0,
            "halstead": {},  # placeholder; Halstead metrics not yet implemented
        }

        if language == "python":
            try:
                tree = ast.parse(content)
                complexity["cyclomatic"] = self._calculate_cyclomatic_complexity(tree)
                complexity["cognitive"] = self._calculate_cognitive_complexity(tree)
            except SyntaxError:
                # Unparseable source keeps the neutral defaults above.
                pass

        return complexity

    def _calculate_cyclomatic_complexity(self, tree: ast.AST) -> int:
        """Calculate cyclomatic complexity for a Python AST (decision points + 1)."""
        complexity = 1

        for node in ast.walk(tree):
            if isinstance(node, (ast.If, ast.While, ast.For, ast.ExceptHandler)):
                complexity += 1
            elif isinstance(node, ast.BoolOp):
                # Each extra operand of and/or adds one decision path.
                complexity += len(node.values) - 1

        return complexity

    def _calculate_cognitive_complexity(self, tree: ast.AST) -> int:
        """
        Calculate a simplified, nesting-aware cognitive complexity.

        Each control structure costs 1 plus its nesting depth; boolean
        operators add one per extra operand. This approximates (but does not
        fully implement) the SonarSource cognitive-complexity definition.
        """

        def score(node: ast.AST, nesting: int) -> int:
            total = 0
            for child in ast.iter_child_nodes(node):
                if isinstance(child, (ast.If, ast.While, ast.For)):
                    total += 1 + nesting
                    total += score(child, nesting + 1)
                elif isinstance(child, ast.BoolOp):
                    total += len(child.values) - 1
                    total += score(child, nesting)
                else:
                    total += score(child, nesting)
            return total

        return score(tree, 0)

    def _calculate_ast_complexity(self, node: ast.AST) -> int:
        """Calculate cyclomatic complexity for a single AST node."""
        return self._calculate_cyclomatic_complexity(node)

    def _detect_code_smells(self, content: str, metrics: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Detect common code smells from previously computed metrics."""
        smells: List[Dict[str, Any]] = []

        # Long method/function (by total line count).
        long_threshold = self.code_smell_patterns["long_method"]["threshold"]
        if metrics.get("lines_of_code", 0) > long_threshold:
            smells.append(
                {
                    "type": "long_method",
                    "severity": "medium",
                    "message": f"Method/function has {metrics['lines_of_code']} lines (threshold: {long_threshold})",
                }
            )

        # High cyclomatic complexity (McCabe's conventional threshold of 10).
        complexity = metrics.get("complexity", {}).get("cyclomatic", 0)
        if complexity > 10:
            smells.append(
                {
                    "type": "high_complexity",
                    "severity": "high",
                    "message": f"High cyclomatic complexity: {complexity}",
                }
            )

        return smells

    def _calculate_maintainability(self, metrics: Dict[str, Any]) -> float:
        """
        Calculate a maintainability index on a 0-100 scale.

        Simplified variant of the standard MI formula: penalizes lines of
        code and cyclomatic complexity, then clamps to [0, 100].
        """
        loc = metrics.get("lines_of_code", 0)
        complexity = metrics.get("complexity", {}).get("cyclomatic", 1)

        if loc == 0:
            return 100.0

        mi = 171 - 5.2 * (loc / 100) - 0.23 * complexity
        return max(0, min(100, mi))

    def _detect_language(self, file_path: Path) -> str:
        """Detect programming language from the file extension ("unknown" if none matches)."""
        ext = file_path.suffix.lower()

        for language, extensions in self.language_extensions.items():
            if ext in extensions:
                return language

        return "unknown"

    def _count_comment_lines(self, content: str, language: str) -> int:
        """
        Count comment lines based on language.

        Only line-start comment markers are matched; trailing comments and
        Python docstrings are not counted.
        """
        comment_patterns = {
            "python": r"^\s*#",
            "javascript": r"^\s*(//|/\*|\*)",
            "typescript": r"^\s*(//|/\*|\*)",
            "java": r"^\s*(//|/\*|\*)",
            "go": r"^\s*(//|/\*|\*)",
            "rust": r"^\s*(//|/\*|\*)",
            "c": r"^\s*(//|/\*|\*)",
            "cpp": r"^\s*(//|/\*|\*)",
        }

        pattern = comment_patterns.get(language)
        if not pattern:
            return 0

        matcher = re.compile(pattern)
        return sum(1 for line in content.splitlines() if matcher.match(line))

    def extract_metrics(self, analysis_result: Dict[str, Any]) -> Dict[str, Any]:
        """Extract key scalar metrics from an analysis result dict."""
        metrics: Dict[str, Any] = {}

        if analysis_result.get("status") != "success":
            return metrics

        if "metrics" in analysis_result:
            raw_metrics = analysis_result["metrics"]
            metrics.update(
                {
                    "lines_of_code": raw_metrics.get("lines_of_code", 0),
                    "cyclomatic_complexity": raw_metrics.get("complexity", {}).get("cyclomatic", 0),
                    "cognitive_complexity": raw_metrics.get("complexity", {}).get("cognitive", 0),
                    "maintainability_index": raw_metrics.get("maintainability_index", 0),
                    "code_smells": len(raw_metrics.get("code_smells", [])),
                    "function_count": raw_metrics.get("function_count", 0),
                    "class_count": raw_metrics.get("class_count", 0),
                }
            )

        # Summary metrics are present only for directory analysis.
        if "summary" in analysis_result:
            summary = analysis_result["summary"]
            metrics.update(
                {
                    "total_files": summary.get("total_files", 0),
                    "total_lines": summary.get("total_lines", 0),
                    "average_complexity": summary.get("average_complexity", 0),
                    "total_code_smells": summary.get("code_smells_count", 0),
                }
            )

        return metrics

    def compare_results(
        self, baseline: Dict[str, Any], current: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Compare two analysis results metric by metric.

        Returns:
            Dict with "improved", "degraded", and "unchanged" lists; each
            entry records the metric name, both values, and the delta.
        """
        comparison: Dict[str, List[Dict[str, Any]]] = {
            "improved": [],
            "degraded": [],
            "unchanged": [],
        }

        baseline_metrics = self.extract_metrics(baseline)
        current_metrics = self.extract_metrics(current)

        # Metrics where a larger value is better; everything else
        # (complexity, smells, LOC) improves when it decreases.
        higher_is_better = {"maintainability_index"}

        for key, baseline_val in baseline_metrics.items():
            if key not in current_metrics:
                continue

            current_val = current_metrics[key]
            if not isinstance(baseline_val, (int, float)):
                continue

            diff = current_val - baseline_val
            pct_change = (diff / baseline_val * 100) if baseline_val else 0

            result = {
                "metric": key,
                "baseline": baseline_val,
                "current": current_val,
                "change": diff,
                "percent_change": pct_change,
            }

            if diff == 0:
                comparison["unchanged"].append(result)
            elif (diff > 0) == (key in higher_is_better):
                comparison["improved"].append(result)
            else:
                comparison["degraded"].append(result)

        return comparison