claude-mpm 4.3.22__py3-none-any.whl → 4.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/WORKFLOW.md +2 -14
  3. claude_mpm/cli/commands/configure.py +2 -29
  4. claude_mpm/cli/commands/doctor.py +2 -2
  5. claude_mpm/cli/commands/mpm_init.py +3 -3
  6. claude_mpm/cli/parsers/configure_parser.py +4 -15
  7. claude_mpm/core/framework/__init__.py +38 -0
  8. claude_mpm/core/framework/formatters/__init__.py +11 -0
  9. claude_mpm/core/framework/formatters/capability_generator.py +356 -0
  10. claude_mpm/core/framework/formatters/content_formatter.py +283 -0
  11. claude_mpm/core/framework/formatters/context_generator.py +180 -0
  12. claude_mpm/core/framework/loaders/__init__.py +13 -0
  13. claude_mpm/core/framework/loaders/agent_loader.py +202 -0
  14. claude_mpm/core/framework/loaders/file_loader.py +213 -0
  15. claude_mpm/core/framework/loaders/instruction_loader.py +151 -0
  16. claude_mpm/core/framework/loaders/packaged_loader.py +208 -0
  17. claude_mpm/core/framework/processors/__init__.py +11 -0
  18. claude_mpm/core/framework/processors/memory_processor.py +222 -0
  19. claude_mpm/core/framework/processors/metadata_processor.py +146 -0
  20. claude_mpm/core/framework/processors/template_processor.py +238 -0
  21. claude_mpm/core/framework_loader.py +277 -1798
  22. claude_mpm/hooks/__init__.py +9 -1
  23. claude_mpm/hooks/kuzu_memory_hook.py +352 -0
  24. claude_mpm/hooks/memory_integration_hook.py +1 -1
  25. claude_mpm/services/agents/memory/content_manager.py +5 -2
  26. claude_mpm/services/agents/memory/memory_file_service.py +1 -0
  27. claude_mpm/services/agents/memory/memory_limits_service.py +1 -0
  28. claude_mpm/services/core/path_resolver.py +1 -0
  29. claude_mpm/services/diagnostics/diagnostic_runner.py +1 -0
  30. claude_mpm/services/mcp_config_manager.py +67 -4
  31. claude_mpm/services/mcp_gateway/core/process_pool.py +281 -0
  32. claude_mpm/services/mcp_gateway/core/startup_verification.py +2 -2
  33. claude_mpm/services/mcp_gateway/main.py +3 -13
  34. claude_mpm/services/mcp_gateway/server/stdio_server.py +4 -10
  35. claude_mpm/services/mcp_gateway/tools/__init__.py +13 -2
  36. claude_mpm/services/mcp_gateway/tools/external_mcp_services.py +36 -6
  37. claude_mpm/services/mcp_gateway/tools/kuzu_memory_service.py +542 -0
  38. claude_mpm/services/shared/__init__.py +2 -1
  39. claude_mpm/services/shared/service_factory.py +8 -5
  40. claude_mpm/services/unified/__init__.py +65 -0
  41. claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
  42. claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
  43. claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
  44. claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
  45. claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
  46. claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
  47. claude_mpm/services/unified/config_strategies/__init__.py +190 -0
  48. claude_mpm/services/unified/config_strategies/config_schema.py +689 -0
  49. claude_mpm/services/unified/config_strategies/context_strategy.py +748 -0
  50. claude_mpm/services/unified/config_strategies/error_handling_strategy.py +999 -0
  51. claude_mpm/services/unified/config_strategies/file_loader_strategy.py +871 -0
  52. claude_mpm/services/unified/config_strategies/unified_config_service.py +802 -0
  53. claude_mpm/services/unified/config_strategies/validation_strategy.py +1105 -0
  54. claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
  55. claude_mpm/services/unified/deployment_strategies/base.py +557 -0
  56. claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
  57. claude_mpm/services/unified/deployment_strategies/local.py +594 -0
  58. claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
  59. claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
  60. claude_mpm/services/unified/interfaces.py +499 -0
  61. claude_mpm/services/unified/migration.py +532 -0
  62. claude_mpm/services/unified/strategies.py +551 -0
  63. claude_mpm/services/unified/unified_analyzer.py +534 -0
  64. claude_mpm/services/unified/unified_config.py +688 -0
  65. claude_mpm/services/unified/unified_deployment.py +470 -0
  66. {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.3.dist-info}/METADATA +15 -15
  67. {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.3.dist-info}/RECORD +71 -32
  68. claude_mpm/cli/commands/configure_tui.py +0 -1927
  69. claude_mpm/services/mcp_gateway/tools/ticket_tools.py +0 -645
  70. claude_mpm/services/mcp_gateway/tools/unified_ticket_tool.py +0 -602
  71. {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.3.dist-info}/WHEEL +0 -0
  72. {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.3.dist-info}/entry_points.txt +0 -0
  73. {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.3.dist-info}/licenses/LICENSE +0 -0
  74. {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.3.dist-info}/top_level.txt +0 -0
claude_mpm/services/unified/analyzer_strategies/code_analyzer.py (added)
@@ -0,0 +1,473 @@
+"""
+Code Analyzer Strategy Implementation
+=====================================
+
+Analyzes code structure, complexity, quality metrics, and patterns.
+Consolidates functionality from multiple analyzer services.
+
+Author: Claude MPM Development Team
+Created: 2025-01-26
+"""
+
+import ast
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set, Tuple
+
+from claude_mpm.core.logging_utils import get_logger
+
+from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority
+
+logger = get_logger(__name__)
+
+
+class CodeAnalyzerStrategy(AnalyzerStrategy):
+    """
+    Strategy for analyzing code structure, complexity, and quality metrics.
+
+    Consolidates:
+    - Code complexity analysis (cyclomatic, cognitive)
+    - Code quality metrics (maintainability index, technical debt)
+    - Pattern detection (anti-patterns, code smells)
+    - Function/class analysis (size, complexity, coupling)
+    """
+
+    def __init__(self):
+        """Initialize code analyzer strategy."""
+        metadata = StrategyMetadata(
+            name="CodeAnalyzer",
+            description="Analyzes code structure, complexity, and quality metrics",
+            supported_types=["file", "directory", "module", "class", "function"],
+            supported_operations=["analyze", "metrics", "complexity", "quality"],
+            priority=StrategyPriority.HIGH,
+            tags={"code", "complexity", "quality", "metrics", "ast"},
+        )
+        super().__init__(metadata)
+
+        # Language-specific file extensions
+        self.language_extensions = {
+            "python": {".py", ".pyi"},
+            "javascript": {".js", ".jsx", ".mjs"},
+            "typescript": {".ts", ".tsx"},
+            "java": {".java"},
+            "go": {".go"},
+            "rust": {".rs"},
+            "c": {".c", ".h"},
+            "cpp": {".cpp", ".cc", ".cxx", ".hpp", ".h"},
+        }
+
+        # Code smell patterns
+        self.code_smell_patterns = {
+            "long_method": {"threshold": 50, "metric": "lines"},
+            "large_class": {"threshold": 500, "metric": "lines"},
+            "long_parameter_list": {"threshold": 5, "metric": "count"},
+            "duplicate_code": {"threshold": 0.7, "metric": "similarity"},
+            "god_class": {"threshold": 10, "metric": "responsibilities"},
+        }
+
+    def can_handle(self, context: StrategyContext) -> bool:
+        """Check if strategy can handle the given context."""
+        return (
+            context.target_type in self.metadata.supported_types
+            and context.operation in self.metadata.supported_operations
+        )
+
+    def validate_input(self, input_data: Any) -> List[str]:
+        """Validate input data for strategy."""
+        errors = []
+
+        if not input_data:
+            errors.append("Input data is required")
+            return errors
+
+        if isinstance(input_data, (str, Path)):
+            path = Path(input_data)
+            if not path.exists():
+                errors.append(f"Path does not exist: {path}")
+        elif not isinstance(input_data, dict):
+            errors.append(f"Invalid input type: {type(input_data).__name__}")
+
+        return errors
+
+    def analyze(
+        self, target: Any, options: Optional[Dict[str, Any]] = None
+    ) -> Dict[str, Any]:
+        """
+        Execute code analysis on target.
+
+        Args:
+            target: Code file, directory, or AST node to analyze
+            options: Analysis options (language, metrics, depth, etc.)
+
+        Returns:
+            Analysis results with metrics and findings
+        """
+        options = options or {}
+
+        # Determine target type and language
+        if isinstance(target, (str, Path)):
+            target_path = Path(target)
+            if target_path.is_file():
+                return self._analyze_file(target_path, options)
+            elif target_path.is_dir():
+                return self._analyze_directory(target_path, options)
+        elif isinstance(target, ast.AST):
+            return self._analyze_ast(target, options)
+
+        return {
+            "status": "error",
+            "message": f"Unsupported target type: {type(target).__name__}",
+        }
+
+    def _analyze_file(self, file_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
+        """Analyze a single code file."""
+        try:
+            # Detect language from extension
+            language = self._detect_language(file_path)
+
+            # Read file content
+            content = file_path.read_text(encoding="utf-8")
+            lines = content.splitlines()
+
+            # Base metrics
+            metrics = {
+                "file": str(file_path),
+                "language": language,
+                "lines_of_code": len(lines),
+                "blank_lines": sum(1 for line in lines if not line.strip()),
+                "comment_lines": self._count_comment_lines(content, language),
+            }
+
+            # Language-specific analysis
+            if language == "python":
+                metrics.update(self._analyze_python_code(content, file_path))
+
+            # Calculate complexity metrics
+            metrics["complexity"] = self._calculate_complexity_metrics(content, language)
+
+            # Detect code smells
+            metrics["code_smells"] = self._detect_code_smells(content, metrics)
+
+            # Calculate maintainability index
+            metrics["maintainability_index"] = self._calculate_maintainability(metrics)
+
+            return {
+                "status": "success",
+                "type": "file",
+                "path": str(file_path),
+                "metrics": metrics,
+            }
+
+        except Exception as e:
+            logger.error(f"Error analyzing file {file_path}: {e}")
+            return {
+                "status": "error",
+                "path": str(file_path),
+                "error": str(e),
+            }
+
+    def _analyze_directory(self, dir_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
+        """Analyze all code files in a directory."""
+        results = {
+            "status": "success",
+            "type": "directory",
+            "path": str(dir_path),
+            "files": [],
+            "summary": {},
+        }
+
+        # Collect all code files
+        code_files = []
+        for ext_set in self.language_extensions.values():
+            for ext in ext_set:
+                code_files.extend(dir_path.rglob(f"*{ext}"))
+
+        # Analyze each file
+        total_metrics = {}
+        for file_path in code_files:
+            file_result = self._analyze_file(file_path, options)
+            if file_result["status"] == "success":
+                results["files"].append(file_result)
+
+                # Aggregate metrics
+                for key, value in file_result.get("metrics", {}).items():
+                    if isinstance(value, (int, float)):
+                        total_metrics[key] = total_metrics.get(key, 0) + value
+
+        # Calculate summary statistics
+        results["summary"] = {
+            "total_files": len(results["files"]),
+            "total_lines": total_metrics.get("lines_of_code", 0),
+            "average_complexity": total_metrics.get("complexity", {}).get("cyclomatic", 0) / max(len(results["files"]), 1),
+            "code_smells_count": sum(
+                len(f.get("metrics", {}).get("code_smells", []))
+                for f in results["files"]
+            ),
+        }
+
+        return results
+
+    def _analyze_python_code(self, content: str, file_path: Path) -> Dict[str, Any]:
+        """Perform Python-specific code analysis."""
+        try:
+            tree = ast.parse(content)
+
+            # Count functions, classes, methods
+            functions = []
+            classes = []
+            methods = []
+
+            for node in ast.walk(tree):
+                if isinstance(node, ast.FunctionDef):
+                    if any(isinstance(parent, ast.ClassDef) for parent in ast.walk(tree)):
+                        methods.append(node.name)
+                    else:
+                        functions.append(node.name)
+                elif isinstance(node, ast.ClassDef):
+                    classes.append(node.name)
+
+            return {
+                "functions": functions,
+                "classes": classes,
+                "methods": methods,
+                "function_count": len(functions),
+                "class_count": len(classes),
+                "method_count": len(methods),
+            }
+
+        except SyntaxError as e:
+            logger.warning(f"Syntax error in {file_path}: {e}")
+            return {}
+
+    def _analyze_ast(self, node: ast.AST, options: Dict[str, Any]) -> Dict[str, Any]:
+        """Analyze an AST node directly."""
+        metrics = {
+            "node_type": node.__class__.__name__,
+            "complexity": self._calculate_ast_complexity(node),
+        }
+
+        # Analyze specific node types
+        if isinstance(node, ast.FunctionDef):
+            metrics.update({
+                "name": node.name,
+                "parameters": len(node.args.args),
+                "lines": node.end_lineno - node.lineno + 1 if hasattr(node, "end_lineno") else 0,
+            })
+        elif isinstance(node, ast.ClassDef):
+            metrics.update({
+                "name": node.name,
+                "methods": sum(1 for n in node.body if isinstance(n, ast.FunctionDef)),
+                "bases": len(node.bases),
+            })
+
+        return {
+            "status": "success",
+            "type": "ast",
+            "metrics": metrics,
+        }
+
+    def _calculate_complexity_metrics(self, content: str, language: str) -> Dict[str, Any]:
+        """Calculate various complexity metrics."""
+        complexity = {
+            "cyclomatic": 1,  # Base complexity
+            "cognitive": 0,
+            "halstead": {},
+        }
+
+        if language == "python":
+            try:
+                tree = ast.parse(content)
+                complexity["cyclomatic"] = self._calculate_cyclomatic_complexity(tree)
+                complexity["cognitive"] = self._calculate_cognitive_complexity(tree)
+            except:
+                pass
+
+        return complexity
+
+    def _calculate_cyclomatic_complexity(self, tree: ast.AST) -> int:
+        """Calculate cyclomatic complexity for Python AST."""
+        complexity = 1
+
+        for node in ast.walk(tree):
+            # Decision points increase complexity
+            if isinstance(node, (ast.If, ast.While, ast.For, ast.ExceptHandler)):
+                complexity += 1
+            elif isinstance(node, ast.BoolOp):
+                complexity += len(node.values) - 1
+
+        return complexity
+
+    def _calculate_cognitive_complexity(self, tree: ast.AST) -> int:
+        """Calculate cognitive complexity (simplified version)."""
+        complexity = 0
+        nesting_level = 0
+
+        # Simplified cognitive complexity calculation
+        for node in ast.walk(tree):
+            if isinstance(node, (ast.If, ast.While, ast.For)):
+                complexity += 1 + nesting_level
+            elif isinstance(node, ast.BoolOp):
+                complexity += len(node.values) - 1
+
+        return complexity
+
+    def _calculate_ast_complexity(self, node: ast.AST) -> int:
+        """Calculate complexity for a single AST node."""
+        return self._calculate_cyclomatic_complexity(node)
+
+    def _detect_code_smells(self, content: str, metrics: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """Detect common code smells."""
+        smells = []
+
+        # Long method/function
+        if metrics.get("lines_of_code", 0) > self.code_smell_patterns["long_method"]["threshold"]:
+            smells.append({
+                "type": "long_method",
+                "severity": "medium",
+                "message": f"Method/function has {metrics['lines_of_code']} lines (threshold: {self.code_smell_patterns['long_method']['threshold']})",
+            })

+        # High complexity
+        complexity = metrics.get("complexity", {}).get("cyclomatic", 0)
+        if complexity > 10:
+            smells.append({
+                "type": "high_complexity",
+                "severity": "high",
+                "message": f"High cyclomatic complexity: {complexity}",
+            })
+
+        return smells
+
+    def _calculate_maintainability(self, metrics: Dict[str, Any]) -> float:
+        """
+        Calculate maintainability index (0-100 scale).
+        Simplified version of the standard formula.
+        """
+        loc = metrics.get("lines_of_code", 0)
+        complexity = metrics.get("complexity", {}).get("cyclomatic", 1)
+
+        # Simplified maintainability index
+        if loc == 0:
+            return 100.0
+
+        # Basic formula (simplified)
+        mi = 171 - 5.2 * (loc / 100) - 0.23 * complexity
+
+        # Normalize to 0-100 scale
+        return max(0, min(100, mi))
+
+    def _detect_language(self, file_path: Path) -> str:
+        """Detect programming language from file extension."""
+        ext = file_path.suffix.lower()
+
+        for language, extensions in self.language_extensions.items():
+            if ext in extensions:
+                return language
+
+        return "unknown"
+
+    def _count_comment_lines(self, content: str, language: str) -> int:
+        """Count comment lines based on language."""
+        comment_patterns = {
+            "python": r"^\s*#",
+            "javascript": r"^\s*(//|/\*|\*)",
+            "java": r"^\s*(//|/\*|\*)",
+            "c": r"^\s*(//|/\*|\*)",
+            "cpp": r"^\s*(//|/\*|\*)",
+        }
+
+        pattern = comment_patterns.get(language)
+        if not pattern:
+            return 0
+
+        count = 0
+        for line in content.splitlines():
+            if re.match(pattern, line):
+                count += 1
+
+        return count
+
+    def extract_metrics(self, analysis_result: Dict[str, Any]) -> Dict[str, Any]:
+        """Extract key metrics from analysis results."""
+        metrics = {}
+
+        if analysis_result.get("status") != "success":
+            return metrics
+
+        # Extract relevant metrics
+        if "metrics" in analysis_result:
+            raw_metrics = analysis_result["metrics"]
+
+            metrics.update({
+                "lines_of_code": raw_metrics.get("lines_of_code", 0),
+                "cyclomatic_complexity": raw_metrics.get("complexity", {}).get("cyclomatic", 0),
+                "cognitive_complexity": raw_metrics.get("complexity", {}).get("cognitive", 0),
+                "maintainability_index": raw_metrics.get("maintainability_index", 0),
+                "code_smells": len(raw_metrics.get("code_smells", [])),
+                "function_count": raw_metrics.get("function_count", 0),
+                "class_count": raw_metrics.get("class_count", 0),
+            })
+
+        # Extract summary metrics for directory analysis
+        if "summary" in analysis_result:
+            summary = analysis_result["summary"]
+            metrics.update({
+                "total_files": summary.get("total_files", 0),
+                "total_lines": summary.get("total_lines", 0),
+                "average_complexity": summary.get("average_complexity", 0),
+                "total_code_smells": summary.get("code_smells_count", 0),
+            })
+
+        return metrics
+
+    def compare_results(
+        self, baseline: Dict[str, Any], current: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Compare two analysis results."""
+        comparison = {
+            "improved": [],
+            "degraded": [],
+            "unchanged": [],
+        }
+
+        baseline_metrics = self.extract_metrics(baseline)
+        current_metrics = self.extract_metrics(current)
+
+        for key in baseline_metrics:
+            if key not in current_metrics:
+                continue
+
+            baseline_val = baseline_metrics[key]
+            current_val = current_metrics[key]
+
+            if isinstance(baseline_val, (int, float)):
+                diff = current_val - baseline_val
+                pct_change = (diff / baseline_val * 100) if baseline_val else 0
+
+                result = {
+                    "metric": key,
+                    "baseline": baseline_val,
+                    "current": current_val,
+                    "change": diff,
+                    "percent_change": pct_change,
+                }
+
+                # Determine if improvement or degradation
+                if key in ["maintainability_index"]:
+                    # Higher is better
+                    if diff > 0:
+                        comparison["improved"].append(result)
+                    elif diff < 0:
+                        comparison["degraded"].append(result)
+                    else:
+                        comparison["unchanged"].append(result)
+                else:
+                    # Lower is better (complexity, code smells, etc.)
+                    if diff < 0:
+                        comparison["improved"].append(result)
+                    elif diff > 0:
+                        comparison["degraded"].append(result)
+                    else:
+                        comparison["unchanged"].append(result)
+
+        return comparison
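
For orientation, a minimal usage sketch of the new analyzer strategy follows. It is based only on the code shown in this diff: the file paths are placeholders, and it assumes CodeAnalyzerStrategy can be instantiated directly (its AnalyzerStrategy base class comes from claude_mpm/services/unified/strategies.py, also added in this release but not shown here), so treat it as illustrative rather than documented API.

    from pathlib import Path

    from claude_mpm.services.unified.analyzer_strategies.code_analyzer import (
        CodeAnalyzerStrategy,
    )

    # Assumes the base class imposes no abstract methods beyond those defined above.
    analyzer = CodeAnalyzerStrategy()

    # Analyze a single file (placeholder path); a directory path is also accepted
    # and produces per-file results plus a "summary" block.
    report = analyzer.analyze(Path("src/example_module.py"))
    if report["status"] == "success":
        print(report["metrics"]["complexity"]["cyclomatic"])
        print(report["metrics"]["maintainability_index"])

    # Flatten a result into comparable key metrics, then diff two runs
    # (e.g., before and after a refactor) into improved/degraded/unchanged buckets.
    baseline = analyzer.analyze(Path("src/"))
    current = analyzer.analyze(Path("src/"))
    print(analyzer.extract_metrics(report))
    print(analyzer.compare_results(baseline, current))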