claude-mpm 4.3.22__py3-none-any.whl → 4.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/cli/commands/doctor.py +2 -2
- claude_mpm/hooks/memory_integration_hook.py +1 -1
- claude_mpm/services/agents/memory/content_manager.py +5 -2
- claude_mpm/services/agents/memory/memory_file_service.py +1 -0
- claude_mpm/services/agents/memory/memory_limits_service.py +1 -0
- claude_mpm/services/unified/__init__.py +65 -0
- claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
- claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
- claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
- claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
- claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
- claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
- claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
- claude_mpm/services/unified/deployment_strategies/base.py +557 -0
- claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
- claude_mpm/services/unified/deployment_strategies/local.py +594 -0
- claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
- claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
- claude_mpm/services/unified/interfaces.py +499 -0
- claude_mpm/services/unified/migration.py +532 -0
- claude_mpm/services/unified/strategies.py +551 -0
- claude_mpm/services/unified/unified_analyzer.py +534 -0
- claude_mpm/services/unified/unified_config.py +688 -0
- claude_mpm/services/unified/unified_deployment.py +470 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/METADATA +1 -1
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/RECORD +31 -12
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/WHEEL +0 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/top_level.txt +0 -0
claude_mpm/VERSION
CHANGED
@@ -1 +1 @@
|
|
1
|
-
4.3.22
|
1
|
+
4.4.0
|
@@ -107,8 +107,8 @@ def doctor_command(args):
|
|
107
107
|
Exit code (0 for success, 1 for warnings, 2 for errors)
|
108
108
|
"""
|
109
109
|
# Configure logging
|
110
|
-
from claude_mpm.core.logging_utils import get_logger
|
111
|
-
logger = get_logger(__name__)
|
110
|
+
from claude_mpm.core.logging_utils import get_logger
|
111
|
+
logger = get_logger(__name__)
|
112
112
|
|
113
113
|
# Determine output format
|
114
114
|
if args.json:
|
@@ -16,7 +16,7 @@ import re
|
|
16
16
|
from typing import Dict, List
|
17
17
|
|
18
18
|
from claude_mpm.core.config import Config
|
19
|
-
from claude_mpm.core.
|
19
|
+
from claude_mpm.core.logging_utils import get_logger
|
20
20
|
from claude_mpm.core.shared.config_loader import ConfigLoader
|
21
21
|
from claude_mpm.hooks.base_hook import (
|
22
22
|
HookContext,
|
@@ -12,12 +12,15 @@ This module provides:
|
|
12
12
|
- Content repair and structure validation
|
13
13
|
"""
|
14
14
|
|
15
|
-
import logging
|
16
15
|
import re
|
17
16
|
from datetime import datetime, timezone
|
18
17
|
from difflib import SequenceMatcher
|
19
18
|
from typing import Any, Dict, List, Optional, Tuple
|
20
19
|
|
20
|
+
from claude_mpm.core.logging_utils import get_logger
|
21
|
+
|
22
|
+
logger = get_logger(__name__)
|
23
|
+
|
21
24
|
|
22
25
|
class MemoryContentManager:
|
23
26
|
"""Manages memory content manipulation and validation.
|
@@ -34,7 +37,7 @@ class MemoryContentManager:
|
|
34
37
|
memory_limits: Dictionary containing memory limits configuration
|
35
38
|
"""
|
36
39
|
self.memory_limits = memory_limits
|
37
|
-
self.logger =
|
40
|
+
self.logger = logger # Use the module-level logger
|
38
41
|
|
39
42
|
def add_item_to_list(self, content: str, new_item: str) -> str:
|
40
43
|
"""Add item to memory list with deduplication.
|
@@ -18,6 +18,7 @@ class MemoryFileService:
|
|
18
18
|
memories_dir: Directory where memory files are stored
|
19
19
|
"""
|
20
20
|
self.memories_dir = memories_dir
|
21
|
+
self.logger = logger # Use the module-level logger
|
21
22
|
|
22
23
|
def get_memory_file_with_migration(self, directory: Path, agent_id: str) -> Path:
|
23
24
|
"""Get memory file path with migration support.
|
@@ -26,6 +26,7 @@ class MemoryLimitsService:
|
|
26
26
|
config: Optional Config object for reading configuration
|
27
27
|
"""
|
28
28
|
self.config = config or Config()
|
29
|
+
self.logger = logger # Use the module-level logger
|
29
30
|
self.memory_limits = self._init_memory_limits()
|
30
31
|
|
31
32
|
def _init_memory_limits(self) -> Dict[str, Any]:
|
@@ -0,0 +1,65 @@
|
|
1
|
+
"""
Unified Services Module for Phase 2 Service Consolidation
=========================================================

This module implements the strategy pattern framework for consolidating
Claude MPM's 314 service files into approximately 180 more maintainable services.

Architecture:
- Base service interfaces for common service patterns
- Strategy pattern for pluggable behavior
- Backward compatibility layer for existing services
- Feature flags for gradual migration

Components:
- interfaces.py: Core service interfaces (IDeploymentService, IAnalyzerService, etc.)
- strategies.py: Strategy pattern framework with plugin registry
- migration.py: Migration utilities and backward compatibility
- Unified service implementations for major service categories
"""

from .interfaces import (
    IAnalyzerService,
    IConfigurationService,
    IDeploymentService,
    ServiceCapability,
    ServiceMetadata,
)
from .migration import (
    FeatureFlag,
    MigrationStatus,
    ServiceMapper,
    create_compatibility_wrapper,
)
from .strategies import (
    AnalyzerStrategy,
    ConfigStrategy,
    DeploymentStrategy,
    StrategyRegistry,
)
from .unified_analyzer import UnifiedAnalyzer
from .unified_config import UnifiedConfigManager
from .unified_deployment import UnifiedDeploymentService

# Public API, grouped by the component that provides each name.
__all__ = [
    # Interfaces
    "IDeploymentService",
    "IAnalyzerService",
    "IConfigurationService",
    "ServiceMetadata",
    "ServiceCapability",
    # Strategies
    "DeploymentStrategy",
    "AnalyzerStrategy",
    "ConfigStrategy",
    "StrategyRegistry",
    # Migration
    "ServiceMapper",
    "MigrationStatus",
    "FeatureFlag",
    "create_compatibility_wrapper",
    # Unified Services
    "UnifiedDeploymentService",
    "UnifiedAnalyzer",
    "UnifiedConfigManager",
]
|
@@ -0,0 +1,44 @@
|
|
1
|
+
"""
Concrete Analyzer Strategy Implementations
==========================================

This module provides concrete implementations of the AnalyzerStrategy base class,
consolidating functionality from multiple analyzer services with 70-80% code duplication.

Consolidates:
- enhanced_analyzer.py (1,118 LOC)
- project_analyzer.py (815 LOC)
- structure_analyzer.py (703 LOC)
- code_analyzer.py (385 LOC)
- dependency_analyzer.py (326 LOC)
- file_analyzer.py (247 LOC)
- project_report_generator.py (121 LOC)

Total: 3,715 LOC → ~1,200 LOC (68% reduction)

Author: Claude MPM Development Team
Created: 2025-01-26
"""

from .code_analyzer import CodeAnalyzerStrategy
from .dependency_analyzer import DependencyAnalyzerStrategy
from .performance_analyzer import PerformanceAnalyzerStrategy
from .security_analyzer import SecurityAnalyzerStrategy
from .structure_analyzer import StructureAnalyzerStrategy

__all__ = [
    "CodeAnalyzerStrategy",
    "DependencyAnalyzerStrategy",
    "StructureAnalyzerStrategy",
    "SecurityAnalyzerStrategy",
    "PerformanceAnalyzerStrategy",
]

# Strategy registry for automatic discovery: maps a short analyzer key to the
# strategy class that implements it.
ANALYZER_STRATEGIES = {
    "code": CodeAnalyzerStrategy,
    "dependency": DependencyAnalyzerStrategy,
    "structure": StructureAnalyzerStrategy,
    "security": SecurityAnalyzerStrategy,
    "performance": PerformanceAnalyzerStrategy,
}
|
@@ -0,0 +1,473 @@
|
|
1
|
+
"""
|
2
|
+
Code Analyzer Strategy Implementation
|
3
|
+
=====================================
|
4
|
+
|
5
|
+
Analyzes code structure, complexity, quality metrics, and patterns.
|
6
|
+
Consolidates functionality from multiple analyzer services.
|
7
|
+
|
8
|
+
Author: Claude MPM Development Team
|
9
|
+
Created: 2025-01-26
|
10
|
+
"""
|
11
|
+
|
12
|
+
import ast
|
13
|
+
import re
|
14
|
+
from pathlib import Path
|
15
|
+
from typing import Any, Dict, List, Optional, Set, Tuple
|
16
|
+
|
17
|
+
from claude_mpm.core.logging_utils import get_logger
|
18
|
+
|
19
|
+
from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority
|
20
|
+
|
21
|
+
logger = get_logger(__name__)
|
22
|
+
|
23
|
+
|
24
|
+
class CodeAnalyzerStrategy(AnalyzerStrategy):
|
25
|
+
"""
|
26
|
+
Strategy for analyzing code structure, complexity, and quality metrics.
|
27
|
+
|
28
|
+
Consolidates:
|
29
|
+
- Code complexity analysis (cyclomatic, cognitive)
|
30
|
+
- Code quality metrics (maintainability index, technical debt)
|
31
|
+
- Pattern detection (anti-patterns, code smells)
|
32
|
+
- Function/class analysis (size, complexity, coupling)
|
33
|
+
"""
|
34
|
+
|
35
|
+
def __init__(self):
|
36
|
+
"""Initialize code analyzer strategy."""
|
37
|
+
metadata = StrategyMetadata(
|
38
|
+
name="CodeAnalyzer",
|
39
|
+
description="Analyzes code structure, complexity, and quality metrics",
|
40
|
+
supported_types=["file", "directory", "module", "class", "function"],
|
41
|
+
supported_operations=["analyze", "metrics", "complexity", "quality"],
|
42
|
+
priority=StrategyPriority.HIGH,
|
43
|
+
tags={"code", "complexity", "quality", "metrics", "ast"},
|
44
|
+
)
|
45
|
+
super().__init__(metadata)
|
46
|
+
|
47
|
+
# Language-specific file extensions
|
48
|
+
self.language_extensions = {
|
49
|
+
"python": {".py", ".pyi"},
|
50
|
+
"javascript": {".js", ".jsx", ".mjs"},
|
51
|
+
"typescript": {".ts", ".tsx"},
|
52
|
+
"java": {".java"},
|
53
|
+
"go": {".go"},
|
54
|
+
"rust": {".rs"},
|
55
|
+
"c": {".c", ".h"},
|
56
|
+
"cpp": {".cpp", ".cc", ".cxx", ".hpp", ".h"},
|
57
|
+
}
|
58
|
+
|
59
|
+
# Code smell patterns
|
60
|
+
self.code_smell_patterns = {
|
61
|
+
"long_method": {"threshold": 50, "metric": "lines"},
|
62
|
+
"large_class": {"threshold": 500, "metric": "lines"},
|
63
|
+
"long_parameter_list": {"threshold": 5, "metric": "count"},
|
64
|
+
"duplicate_code": {"threshold": 0.7, "metric": "similarity"},
|
65
|
+
"god_class": {"threshold": 10, "metric": "responsibilities"},
|
66
|
+
}
|
67
|
+
|
68
|
+
def can_handle(self, context: StrategyContext) -> bool:
|
69
|
+
"""Check if strategy can handle the given context."""
|
70
|
+
return (
|
71
|
+
context.target_type in self.metadata.supported_types
|
72
|
+
and context.operation in self.metadata.supported_operations
|
73
|
+
)
|
74
|
+
|
75
|
+
def validate_input(self, input_data: Any) -> List[str]:
|
76
|
+
"""Validate input data for strategy."""
|
77
|
+
errors = []
|
78
|
+
|
79
|
+
if not input_data:
|
80
|
+
errors.append("Input data is required")
|
81
|
+
return errors
|
82
|
+
|
83
|
+
if isinstance(input_data, (str, Path)):
|
84
|
+
path = Path(input_data)
|
85
|
+
if not path.exists():
|
86
|
+
errors.append(f"Path does not exist: {path}")
|
87
|
+
elif not isinstance(input_data, dict):
|
88
|
+
errors.append(f"Invalid input type: {type(input_data).__name__}")
|
89
|
+
|
90
|
+
return errors
|
91
|
+
|
92
|
+
def analyze(
|
93
|
+
self, target: Any, options: Optional[Dict[str, Any]] = None
|
94
|
+
) -> Dict[str, Any]:
|
95
|
+
"""
|
96
|
+
Execute code analysis on target.
|
97
|
+
|
98
|
+
Args:
|
99
|
+
target: Code file, directory, or AST node to analyze
|
100
|
+
options: Analysis options (language, metrics, depth, etc.)
|
101
|
+
|
102
|
+
Returns:
|
103
|
+
Analysis results with metrics and findings
|
104
|
+
"""
|
105
|
+
options = options or {}
|
106
|
+
|
107
|
+
# Determine target type and language
|
108
|
+
if isinstance(target, (str, Path)):
|
109
|
+
target_path = Path(target)
|
110
|
+
if target_path.is_file():
|
111
|
+
return self._analyze_file(target_path, options)
|
112
|
+
elif target_path.is_dir():
|
113
|
+
return self._analyze_directory(target_path, options)
|
114
|
+
elif isinstance(target, ast.AST):
|
115
|
+
return self._analyze_ast(target, options)
|
116
|
+
|
117
|
+
return {
|
118
|
+
"status": "error",
|
119
|
+
"message": f"Unsupported target type: {type(target).__name__}",
|
120
|
+
}
|
121
|
+
|
122
|
+
def _analyze_file(self, file_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
|
123
|
+
"""Analyze a single code file."""
|
124
|
+
try:
|
125
|
+
# Detect language from extension
|
126
|
+
language = self._detect_language(file_path)
|
127
|
+
|
128
|
+
# Read file content
|
129
|
+
content = file_path.read_text(encoding="utf-8")
|
130
|
+
lines = content.splitlines()
|
131
|
+
|
132
|
+
# Base metrics
|
133
|
+
metrics = {
|
134
|
+
"file": str(file_path),
|
135
|
+
"language": language,
|
136
|
+
"lines_of_code": len(lines),
|
137
|
+
"blank_lines": sum(1 for line in lines if not line.strip()),
|
138
|
+
"comment_lines": self._count_comment_lines(content, language),
|
139
|
+
}
|
140
|
+
|
141
|
+
# Language-specific analysis
|
142
|
+
if language == "python":
|
143
|
+
metrics.update(self._analyze_python_code(content, file_path))
|
144
|
+
|
145
|
+
# Calculate complexity metrics
|
146
|
+
metrics["complexity"] = self._calculate_complexity_metrics(content, language)
|
147
|
+
|
148
|
+
# Detect code smells
|
149
|
+
metrics["code_smells"] = self._detect_code_smells(content, metrics)
|
150
|
+
|
151
|
+
# Calculate maintainability index
|
152
|
+
metrics["maintainability_index"] = self._calculate_maintainability(metrics)
|
153
|
+
|
154
|
+
return {
|
155
|
+
"status": "success",
|
156
|
+
"type": "file",
|
157
|
+
"path": str(file_path),
|
158
|
+
"metrics": metrics,
|
159
|
+
}
|
160
|
+
|
161
|
+
except Exception as e:
|
162
|
+
logger.error(f"Error analyzing file {file_path}: {e}")
|
163
|
+
return {
|
164
|
+
"status": "error",
|
165
|
+
"path": str(file_path),
|
166
|
+
"error": str(e),
|
167
|
+
}
|
168
|
+
|
169
|
+
def _analyze_directory(self, dir_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
|
170
|
+
"""Analyze all code files in a directory."""
|
171
|
+
results = {
|
172
|
+
"status": "success",
|
173
|
+
"type": "directory",
|
174
|
+
"path": str(dir_path),
|
175
|
+
"files": [],
|
176
|
+
"summary": {},
|
177
|
+
}
|
178
|
+
|
179
|
+
# Collect all code files
|
180
|
+
code_files = []
|
181
|
+
for ext_set in self.language_extensions.values():
|
182
|
+
for ext in ext_set:
|
183
|
+
code_files.extend(dir_path.rglob(f"*{ext}"))
|
184
|
+
|
185
|
+
# Analyze each file
|
186
|
+
total_metrics = {}
|
187
|
+
for file_path in code_files:
|
188
|
+
file_result = self._analyze_file(file_path, options)
|
189
|
+
if file_result["status"] == "success":
|
190
|
+
results["files"].append(file_result)
|
191
|
+
|
192
|
+
# Aggregate metrics
|
193
|
+
for key, value in file_result.get("metrics", {}).items():
|
194
|
+
if isinstance(value, (int, float)):
|
195
|
+
total_metrics[key] = total_metrics.get(key, 0) + value
|
196
|
+
|
197
|
+
# Calculate summary statistics
|
198
|
+
results["summary"] = {
|
199
|
+
"total_files": len(results["files"]),
|
200
|
+
"total_lines": total_metrics.get("lines_of_code", 0),
|
201
|
+
"average_complexity": total_metrics.get("complexity", {}).get("cyclomatic", 0) / max(len(results["files"]), 1),
|
202
|
+
"code_smells_count": sum(
|
203
|
+
len(f.get("metrics", {}).get("code_smells", []))
|
204
|
+
for f in results["files"]
|
205
|
+
),
|
206
|
+
}
|
207
|
+
|
208
|
+
return results
|
209
|
+
|
210
|
+
def _analyze_python_code(self, content: str, file_path: Path) -> Dict[str, Any]:
|
211
|
+
"""Perform Python-specific code analysis."""
|
212
|
+
try:
|
213
|
+
tree = ast.parse(content)
|
214
|
+
|
215
|
+
# Count functions, classes, methods
|
216
|
+
functions = []
|
217
|
+
classes = []
|
218
|
+
methods = []
|
219
|
+
|
220
|
+
for node in ast.walk(tree):
|
221
|
+
if isinstance(node, ast.FunctionDef):
|
222
|
+
if any(isinstance(parent, ast.ClassDef) for parent in ast.walk(tree)):
|
223
|
+
methods.append(node.name)
|
224
|
+
else:
|
225
|
+
functions.append(node.name)
|
226
|
+
elif isinstance(node, ast.ClassDef):
|
227
|
+
classes.append(node.name)
|
228
|
+
|
229
|
+
return {
|
230
|
+
"functions": functions,
|
231
|
+
"classes": classes,
|
232
|
+
"methods": methods,
|
233
|
+
"function_count": len(functions),
|
234
|
+
"class_count": len(classes),
|
235
|
+
"method_count": len(methods),
|
236
|
+
}
|
237
|
+
|
238
|
+
except SyntaxError as e:
|
239
|
+
logger.warning(f"Syntax error in {file_path}: {e}")
|
240
|
+
return {}
|
241
|
+
|
242
|
+
def _analyze_ast(self, node: ast.AST, options: Dict[str, Any]) -> Dict[str, Any]:
|
243
|
+
"""Analyze an AST node directly."""
|
244
|
+
metrics = {
|
245
|
+
"node_type": node.__class__.__name__,
|
246
|
+
"complexity": self._calculate_ast_complexity(node),
|
247
|
+
}
|
248
|
+
|
249
|
+
# Analyze specific node types
|
250
|
+
if isinstance(node, ast.FunctionDef):
|
251
|
+
metrics.update({
|
252
|
+
"name": node.name,
|
253
|
+
"parameters": len(node.args.args),
|
254
|
+
"lines": node.end_lineno - node.lineno + 1 if hasattr(node, "end_lineno") else 0,
|
255
|
+
})
|
256
|
+
elif isinstance(node, ast.ClassDef):
|
257
|
+
metrics.update({
|
258
|
+
"name": node.name,
|
259
|
+
"methods": sum(1 for n in node.body if isinstance(n, ast.FunctionDef)),
|
260
|
+
"bases": len(node.bases),
|
261
|
+
})
|
262
|
+
|
263
|
+
return {
|
264
|
+
"status": "success",
|
265
|
+
"type": "ast",
|
266
|
+
"metrics": metrics,
|
267
|
+
}
|
268
|
+
|
269
|
+
def _calculate_complexity_metrics(self, content: str, language: str) -> Dict[str, Any]:
|
270
|
+
"""Calculate various complexity metrics."""
|
271
|
+
complexity = {
|
272
|
+
"cyclomatic": 1, # Base complexity
|
273
|
+
"cognitive": 0,
|
274
|
+
"halstead": {},
|
275
|
+
}
|
276
|
+
|
277
|
+
if language == "python":
|
278
|
+
try:
|
279
|
+
tree = ast.parse(content)
|
280
|
+
complexity["cyclomatic"] = self._calculate_cyclomatic_complexity(tree)
|
281
|
+
complexity["cognitive"] = self._calculate_cognitive_complexity(tree)
|
282
|
+
except:
|
283
|
+
pass
|
284
|
+
|
285
|
+
return complexity
|
286
|
+
|
287
|
+
def _calculate_cyclomatic_complexity(self, tree: ast.AST) -> int:
|
288
|
+
"""Calculate cyclomatic complexity for Python AST."""
|
289
|
+
complexity = 1
|
290
|
+
|
291
|
+
for node in ast.walk(tree):
|
292
|
+
# Decision points increase complexity
|
293
|
+
if isinstance(node, (ast.If, ast.While, ast.For, ast.ExceptHandler)):
|
294
|
+
complexity += 1
|
295
|
+
elif isinstance(node, ast.BoolOp):
|
296
|
+
complexity += len(node.values) - 1
|
297
|
+
|
298
|
+
return complexity
|
299
|
+
|
300
|
+
def _calculate_cognitive_complexity(self, tree: ast.AST) -> int:
|
301
|
+
"""Calculate cognitive complexity (simplified version)."""
|
302
|
+
complexity = 0
|
303
|
+
nesting_level = 0
|
304
|
+
|
305
|
+
# Simplified cognitive complexity calculation
|
306
|
+
for node in ast.walk(tree):
|
307
|
+
if isinstance(node, (ast.If, ast.While, ast.For)):
|
308
|
+
complexity += 1 + nesting_level
|
309
|
+
elif isinstance(node, ast.BoolOp):
|
310
|
+
complexity += len(node.values) - 1
|
311
|
+
|
312
|
+
return complexity
|
313
|
+
|
314
|
+
def _calculate_ast_complexity(self, node: ast.AST) -> int:
|
315
|
+
"""Calculate complexity for a single AST node."""
|
316
|
+
return self._calculate_cyclomatic_complexity(node)
|
317
|
+
|
318
|
+
def _detect_code_smells(self, content: str, metrics: Dict[str, Any]) -> List[Dict[str, Any]]:
|
319
|
+
"""Detect common code smells."""
|
320
|
+
smells = []
|
321
|
+
|
322
|
+
# Long method/function
|
323
|
+
if metrics.get("lines_of_code", 0) > self.code_smell_patterns["long_method"]["threshold"]:
|
324
|
+
smells.append({
|
325
|
+
"type": "long_method",
|
326
|
+
"severity": "medium",
|
327
|
+
"message": f"Method/function has {metrics['lines_of_code']} lines (threshold: {self.code_smell_patterns['long_method']['threshold']})",
|
328
|
+
})
|
329
|
+
|
330
|
+
# High complexity
|
331
|
+
complexity = metrics.get("complexity", {}).get("cyclomatic", 0)
|
332
|
+
if complexity > 10:
|
333
|
+
smells.append({
|
334
|
+
"type": "high_complexity",
|
335
|
+
"severity": "high",
|
336
|
+
"message": f"High cyclomatic complexity: {complexity}",
|
337
|
+
})
|
338
|
+
|
339
|
+
return smells
|
340
|
+
|
341
|
+
def _calculate_maintainability(self, metrics: Dict[str, Any]) -> float:
|
342
|
+
"""
|
343
|
+
Calculate maintainability index (0-100 scale).
|
344
|
+
Simplified version of the standard formula.
|
345
|
+
"""
|
346
|
+
loc = metrics.get("lines_of_code", 0)
|
347
|
+
complexity = metrics.get("complexity", {}).get("cyclomatic", 1)
|
348
|
+
|
349
|
+
# Simplified maintainability index
|
350
|
+
if loc == 0:
|
351
|
+
return 100.0
|
352
|
+
|
353
|
+
# Basic formula (simplified)
|
354
|
+
mi = 171 - 5.2 * (loc / 100) - 0.23 * complexity
|
355
|
+
|
356
|
+
# Normalize to 0-100 scale
|
357
|
+
return max(0, min(100, mi))
|
358
|
+
|
359
|
+
def _detect_language(self, file_path: Path) -> str:
|
360
|
+
"""Detect programming language from file extension."""
|
361
|
+
ext = file_path.suffix.lower()
|
362
|
+
|
363
|
+
for language, extensions in self.language_extensions.items():
|
364
|
+
if ext in extensions:
|
365
|
+
return language
|
366
|
+
|
367
|
+
return "unknown"
|
368
|
+
|
369
|
+
def _count_comment_lines(self, content: str, language: str) -> int:
|
370
|
+
"""Count comment lines based on language."""
|
371
|
+
comment_patterns = {
|
372
|
+
"python": r"^\s*#",
|
373
|
+
"javascript": r"^\s*(//|/\*|\*)",
|
374
|
+
"java": r"^\s*(//|/\*|\*)",
|
375
|
+
"c": r"^\s*(//|/\*|\*)",
|
376
|
+
"cpp": r"^\s*(//|/\*|\*)",
|
377
|
+
}
|
378
|
+
|
379
|
+
pattern = comment_patterns.get(language)
|
380
|
+
if not pattern:
|
381
|
+
return 0
|
382
|
+
|
383
|
+
count = 0
|
384
|
+
for line in content.splitlines():
|
385
|
+
if re.match(pattern, line):
|
386
|
+
count += 1
|
387
|
+
|
388
|
+
return count
|
389
|
+
|
390
|
+
def extract_metrics(self, analysis_result: Dict[str, Any]) -> Dict[str, Any]:
|
391
|
+
"""Extract key metrics from analysis results."""
|
392
|
+
metrics = {}
|
393
|
+
|
394
|
+
if analysis_result.get("status") != "success":
|
395
|
+
return metrics
|
396
|
+
|
397
|
+
# Extract relevant metrics
|
398
|
+
if "metrics" in analysis_result:
|
399
|
+
raw_metrics = analysis_result["metrics"]
|
400
|
+
|
401
|
+
metrics.update({
|
402
|
+
"lines_of_code": raw_metrics.get("lines_of_code", 0),
|
403
|
+
"cyclomatic_complexity": raw_metrics.get("complexity", {}).get("cyclomatic", 0),
|
404
|
+
"cognitive_complexity": raw_metrics.get("complexity", {}).get("cognitive", 0),
|
405
|
+
"maintainability_index": raw_metrics.get("maintainability_index", 0),
|
406
|
+
"code_smells": len(raw_metrics.get("code_smells", [])),
|
407
|
+
"function_count": raw_metrics.get("function_count", 0),
|
408
|
+
"class_count": raw_metrics.get("class_count", 0),
|
409
|
+
})
|
410
|
+
|
411
|
+
# Extract summary metrics for directory analysis
|
412
|
+
if "summary" in analysis_result:
|
413
|
+
summary = analysis_result["summary"]
|
414
|
+
metrics.update({
|
415
|
+
"total_files": summary.get("total_files", 0),
|
416
|
+
"total_lines": summary.get("total_lines", 0),
|
417
|
+
"average_complexity": summary.get("average_complexity", 0),
|
418
|
+
"total_code_smells": summary.get("code_smells_count", 0),
|
419
|
+
})
|
420
|
+
|
421
|
+
return metrics
|
422
|
+
|
423
|
+
def compare_results(
|
424
|
+
self, baseline: Dict[str, Any], current: Dict[str, Any]
|
425
|
+
) -> Dict[str, Any]:
|
426
|
+
"""Compare two analysis results."""
|
427
|
+
comparison = {
|
428
|
+
"improved": [],
|
429
|
+
"degraded": [],
|
430
|
+
"unchanged": [],
|
431
|
+
}
|
432
|
+
|
433
|
+
baseline_metrics = self.extract_metrics(baseline)
|
434
|
+
current_metrics = self.extract_metrics(current)
|
435
|
+
|
436
|
+
for key in baseline_metrics:
|
437
|
+
if key not in current_metrics:
|
438
|
+
continue
|
439
|
+
|
440
|
+
baseline_val = baseline_metrics[key]
|
441
|
+
current_val = current_metrics[key]
|
442
|
+
|
443
|
+
if isinstance(baseline_val, (int, float)):
|
444
|
+
diff = current_val - baseline_val
|
445
|
+
pct_change = (diff / baseline_val * 100) if baseline_val else 0
|
446
|
+
|
447
|
+
result = {
|
448
|
+
"metric": key,
|
449
|
+
"baseline": baseline_val,
|
450
|
+
"current": current_val,
|
451
|
+
"change": diff,
|
452
|
+
"percent_change": pct_change,
|
453
|
+
}
|
454
|
+
|
455
|
+
# Determine if improvement or degradation
|
456
|
+
if key in ["maintainability_index"]:
|
457
|
+
# Higher is better
|
458
|
+
if diff > 0:
|
459
|
+
comparison["improved"].append(result)
|
460
|
+
elif diff < 0:
|
461
|
+
comparison["degraded"].append(result)
|
462
|
+
else:
|
463
|
+
comparison["unchanged"].append(result)
|
464
|
+
else:
|
465
|
+
# Lower is better (complexity, code smells, etc.)
|
466
|
+
if diff < 0:
|
467
|
+
comparison["improved"].append(result)
|
468
|
+
elif diff > 0:
|
469
|
+
comparison["degraded"].append(result)
|
470
|
+
else:
|
471
|
+
comparison["unchanged"].append(result)
|
472
|
+
|
473
|
+
return comparison
|